/******************************************************************************
*
* Copyright (c) 2020 Intel.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*******************************************************************************/
/**
 * @brief xRAN application fragmentation for U-plane packets
*
* @file xran_app_frag.c
* @ingroup group_source_xran
#include <stdio.h>
#include <stddef.h>
#include <errno.h>
-
+#include <immintrin.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include "xran_printf.h"
#include "xran_common.h"
-/* Fragment alignment */
-#define XRAN_PAYLOAD_RB_ALIGN (N_SC_PER_PRB*(IQ_BITS/8)*2) /**< at least 12*4=48 bytes per one RB */
-
static inline void __fill_xranhdr_frag(struct xran_up_pkt_hdr *dst,
const struct xran_up_pkt_hdr *src, uint16_t rblen_bytes,
- uint16_t rboff_bytes, struct xran_section_info *sectinfo, uint32_t mf, uint8_t *seqid)
+ uint16_t rboff_bytes, uint16_t startPrbc, uint16_t numPrbc, uint32_t mf, uint8_t *seqid, uint8_t iqWidth)
{
struct data_section_hdr loc_data_sec_hdr;
struct xran_ecpri_hdr loc_ecpri_hdr;
rte_memcpy(dst, src, sizeof(*dst));
- dst->ecpri_hdr.ecpri_seq_id.seq_id = (*seqid)++;
+ dst->ecpri_hdr.ecpri_seq_id.bits.seq_id = (*seqid)++;
- print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n",sectinfo->startPrbc, sectinfo->numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
+ print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n",startPrbc, numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);
/* update RBs */
- loc_data_sec_hdr.fields.start_prbu = sectinfo->startPrbc + rboff_bytes/(N_SC_PER_PRB*(IQ_BITS/8*2));
- loc_data_sec_hdr.fields.num_prbu = rblen_bytes/(N_SC_PER_PRB*(IQ_BITS/8*2));
+ loc_data_sec_hdr.fields.start_prbu = startPrbc + rboff_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
+ loc_data_sec_hdr.fields.num_prbu = rblen_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
- print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",sectinfo->startPrbc, sectinfo->numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
+ print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",startPrbc, numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
rboff_bytes, rblen_bytes);
dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);
- /* update length */
- dst->ecpri_hdr.cmnhdr.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
+ dst->ecpri_hdr.cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
sizeof(struct data_section_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
}
+static inline void __fill_xranhdr_frag_comp(struct xran_up_pkt_hdr_comp *dst,
+ const struct xran_up_pkt_hdr_comp *src, uint16_t rblen_bytes,
+ uint16_t rboff_bytes, uint16_t startPrbc, uint16_t numPrbc, uint32_t mf, uint8_t *seqid, uint8_t iqWidth)
+{
+ struct data_section_hdr loc_data_sec_hdr;
+ struct xran_ecpri_hdr loc_ecpri_hdr;
+
+ rte_memcpy(dst, src, sizeof(*dst));
+
+ dst->ecpri_hdr.ecpri_seq_id.bits.seq_id = (*seqid)++;
+
+ print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n", startPrbc, numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
+
+ loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);
+
+ /* update RBs */
+ loc_data_sec_hdr.fields.start_prbu = startPrbc + rboff_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
+ loc_data_sec_hdr.fields.num_prbu = rblen_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
+
+ print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",startPrbc, numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
+ rboff_bytes, rblen_bytes);
+
+ dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);
+
+ dst->ecpri_hdr.cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
+ sizeof(struct data_section_hdr) + sizeof(struct data_section_compression_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
+}
+
+
static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
uint16_t mtu_size,
struct rte_mempool *pool_direct,
struct rte_mempool *pool_indirect,
- struct xran_section_info *sectinfo,
- uint8_t *seqid)
+ int16_t nRBStart, /**< start RB of RB allocation */
+ int16_t nRBSize, /**< number of RBs used */
+ uint8_t *seqid,
+ uint8_t iqWidth,
+ uint8_t isUdCompHdr)
{
struct rte_mbuf *in_seg = NULL;
uint32_t out_pkt_pos = 0, in_seg_data_pos = 0;
struct eth_xran_up_pkt_hdr *in_hdr;
struct xran_up_pkt_hdr *in_hdr_xran;
+ struct eth_xran_up_pkt_hdr_comp *in_hdr_comp = NULL;
+ struct xran_up_pkt_hdr_comp *in_hdr_xran_comp = NULL;
+
+ int32_t eth_xran_up_headers_sz = 0;
+ eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+
+ if(isUdCompHdr)
+ eth_xran_up_headers_sz += sizeof(struct data_section_compression_hdr);
+
/*
* Ensure the XRAN payload length of all fragments is aligned to a
* multiple of 48 bytes (1 RB with IQ of 16 bits each)
*/
- frag_size = ((mtu_size - sizeof(struct eth_xran_up_pkt_hdr) - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_RB_ALIGN)*XRAN_PAYLOAD_RB_ALIGN;
-
+ frag_size = ((mtu_size - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqWidth))*XRAN_PAYLOAD_1_RB_SZ(iqWidth);
print_dbg("frag_size %d\n",frag_size);
+ if(isUdCompHdr){
+ in_hdr_comp = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr_comp*);
+ in_hdr_xran_comp = &in_hdr_comp->xran_hdr;
+ if (unlikely(frag_size * nb_pkts_out <
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct xran_up_pkt_hdr_comp)))){
+ print_err("-EINVAL\n");
+ return -EINVAL;
+ }
+ }else {
in_hdr = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr *);
-
in_hdr_xran = &in_hdr->xran_hdr;
-
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely(frag_size * nb_pkts_out <
(uint16_t)(pkt_in->pkt_len - sizeof (struct xran_up_pkt_hdr)))){
print_err("-EINVAL\n");
return -EINVAL;
}
+ }
in_seg = pkt_in;
+ if(isUdCompHdr){
+ in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr_comp);
+ }else{
in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr);
+ }
out_pkt_pos = 0;
fragment_offset = 0;
struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
uint32_t more_out_segs;
struct xran_up_pkt_hdr *out_hdr;
+ struct xran_up_pkt_hdr_comp *out_hdr_comp;
/* Allocate direct buffer */
out_pkt = rte_pktmbuf_alloc(pool_direct);
/* Reserve space for the XRAN header that will be built later */
//out_pkt->data_len = sizeof(struct xran_up_pkt_hdr);
//out_pkt->pkt_len = sizeof(struct xran_up_pkt_hdr);
+ if(isUdCompHdr){
+ if(rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr_comp)) ==NULL){
+ rte_panic("sizeof(struct xran_up_pkt_hdr)");
+ }
+ }else{
if(rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr)) ==NULL){
rte_panic("sizeof(struct xran_up_pkt_hdr)");
}
+ }
+
frag_bytes_remaining = frag_size;
out_seg_prev = out_pkt;
/* Build the XRAN header */
print_dbg("Build the XRAN header\n");
- out_hdr = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr *);
+
+ if(isUdCompHdr){
+ out_hdr_comp = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr_comp*);
+ __fill_xranhdr_frag_comp(out_hdr_comp, in_hdr_xran_comp,
+ (uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr_comp),
+ fragment_offset, nRBStart, nRBSize, more_in_segs, seqid, iqWidth);
+
+ fragment_offset = (uint16_t)(fragment_offset +
+ out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr_comp));
+ } else {
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr *);
__fill_xranhdr_frag(out_hdr, in_hdr_xran,
(uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr),
- fragment_offset, sectinfo, more_in_segs, seqid);
+ fragment_offset, nRBStart, nRBSize, more_in_segs, seqid, iqWidth);
fragment_offset = (uint16_t)(fragment_offset +
out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr));
+ }
//out_pkt->l3_len = sizeof(struct xran_up_pkt_hdr);