X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?p=o-du%2Fphy.git;a=blobdiff_plain;f=fhi_lib%2Flib%2Fsrc%2Fxran_app_frag.c;fp=fhi_lib%2Flib%2Fsrc%2Fxran_app_frag.c;h=cb526eee39b48858ad7c0b94a53e0b3da6384757;hp=96eb3789b9a41d77c770ccbfa5d0c236c32eb528;hb=2de97529a4c5a1922214ba0e6f0fb84cacbd0bc7;hpb=81a09690b36b3a4e89b4dae34f30933de13f7f90

diff --git a/fhi_lib/lib/src/xran_app_frag.c b/fhi_lib/lib/src/xran_app_frag.c
index 96eb378..cb526ee 100644
--- a/fhi_lib/lib/src/xran_app_frag.c
+++ b/fhi_lib/lib/src/xran_app_frag.c
@@ -1,6 +1,6 @@
 /******************************************************************************
 *
-*   Copyright (c) 2019 Intel.
+*   Copyright (c) 2020 Intel.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 *******************************************************************************/
 
 /**
- * @brief xRAN application frgamentation for U-plane packets
+ * @brief xRAN application fragmentation for U-plane packets
  *
  * @file xran_app_frag.c
  * @ingroup group_source_xran
@@ -27,7 +27,7 @@
 #include 
 #include 
 #include 
-
+#include 
 #include 
 #include 
 #include 
@@ -39,38 +39,63 @@
 #include "xran_printf.h"
 #include "xran_common.h"
 
-/* Fragment alignment */
-#define XRAN_PAYLOAD_RB_ALIGN (N_SC_PER_PRB*(IQ_BITS/8)*2) /**< at least 12*4=48 bytes per one RB */
-
 static inline void __fill_xranhdr_frag(struct xran_up_pkt_hdr *dst,
                 const struct xran_up_pkt_hdr *src, uint16_t rblen_bytes,
-                uint16_t rboff_bytes, struct xran_section_info *sectinfo, uint32_t mf, uint8_t *seqid)
+                uint16_t rboff_bytes, uint16_t startPrbc, uint16_t numPrbc, uint32_t mf, uint8_t *seqid, uint8_t iqWidth)
 {
     struct data_section_hdr loc_data_sec_hdr;
     struct xran_ecpri_hdr loc_ecpri_hdr;
 
     rte_memcpy(dst, src, sizeof(*dst));
 
-    dst->ecpri_hdr.ecpri_seq_id.seq_id = (*seqid)++;
+    dst->ecpri_hdr.ecpri_seq_id.bits.seq_id = (*seqid)++;
 
-    print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n",sectinfo->startPrbc, sectinfo->numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
+    print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n",startPrbc, numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
 
     loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);
 
     /* update RBs */
-    loc_data_sec_hdr.fields.start_prbu = sectinfo->startPrbc + rboff_bytes/(N_SC_PER_PRB*(IQ_BITS/8*2));
-    loc_data_sec_hdr.fields.num_prbu = rblen_bytes/(N_SC_PER_PRB*(IQ_BITS/8*2));
+    loc_data_sec_hdr.fields.start_prbu = startPrbc + rboff_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
+    loc_data_sec_hdr.fields.num_prbu = rblen_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
 
-    print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",sectinfo->startPrbc, sectinfo->numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
+    print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",startPrbc, numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
         rboff_bytes, rblen_bytes);
 
     dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);
 
-    /* update length */
-    dst->ecpri_hdr.cmnhdr.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
+    dst->ecpri_hdr.cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
                 sizeof(struct data_section_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
 }
 
+static inline void __fill_xranhdr_frag_comp(struct xran_up_pkt_hdr_comp *dst,
+                const struct xran_up_pkt_hdr_comp *src, uint16_t rblen_bytes,
+                uint16_t rboff_bytes, uint16_t startPrbc, uint16_t numPrbc, uint32_t mf, uint8_t *seqid, uint8_t iqWidth)
+{
+    struct data_section_hdr loc_data_sec_hdr;
+    struct xran_ecpri_hdr loc_ecpri_hdr;
+
+    rte_memcpy(dst, src, sizeof(*dst));
+
+    dst->ecpri_hdr.ecpri_seq_id.bits.seq_id = (*seqid)++;
+
+    print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n", startPrbc, numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
+
+    loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);
+
+    /* update RBs */
+    loc_data_sec_hdr.fields.start_prbu = startPrbc + rboff_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
+    loc_data_sec_hdr.fields.num_prbu = rblen_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
+
+    print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",startPrbc, numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
+        rboff_bytes, rblen_bytes);
+
+    dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);
+
+    dst->ecpri_hdr.cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
+                sizeof(struct data_section_hdr) + sizeof(struct data_section_compression_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
+}
+
+
 static inline void
 __free_fragments(struct rte_mbuf *mb[], uint32_t num)
 {
@@ -107,8 +132,11 @@ xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
     uint16_t mtu_size,
     struct rte_mempool *pool_direct,
     struct rte_mempool *pool_indirect,
-    struct xran_section_info *sectinfo,
-    uint8_t *seqid)
+    int16_t nRBStart,  /**< start RB of RB allocation */
+    int16_t nRBSize,   /**< number of RBs used */
+    uint8_t *seqid,
+    uint8_t iqWidth,
+    uint8_t isUdCompHdr)
 {
     struct rte_mbuf *in_seg = NULL;
     uint32_t out_pkt_pos = 0, in_seg_data_pos = 0;
@@ -118,28 +146,48 @@ xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
     struct eth_xran_up_pkt_hdr *in_hdr;
     struct xran_up_pkt_hdr *in_hdr_xran;
 
+    struct eth_xran_up_pkt_hdr_comp *in_hdr_comp = NULL;
+    struct xran_up_pkt_hdr_comp *in_hdr_xran_comp = NULL;
+
+    int32_t eth_xran_up_headers_sz = 0;
+    eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+
+    if(isUdCompHdr)
+        eth_xran_up_headers_sz += sizeof(struct data_section_compression_hdr);
+
     /*
      * Ensure the XRAN payload length of all fragments is aligned to a
      * multiple of 48 bytes (1 RB with IQ of 16 bits each)
      */
-    frag_size = ((mtu_size - sizeof(struct eth_xran_up_pkt_hdr) - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_RB_ALIGN)*XRAN_PAYLOAD_RB_ALIGN;
-
+    frag_size = ((mtu_size - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqWidth))*XRAN_PAYLOAD_1_RB_SZ(iqWidth);
     print_dbg("frag_size %d\n",frag_size);
 
+    if(isUdCompHdr){
+        in_hdr_comp = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr_comp*);
+        in_hdr_xran_comp = &in_hdr_comp->xran_hdr;
+        if (unlikely(frag_size * nb_pkts_out <
+            (uint16_t)(pkt_in->pkt_len - sizeof (struct xran_up_pkt_hdr_comp)))){
+            print_err("-EINVAL\n");
+            return -EINVAL;
+        }
+    }else {
     in_hdr = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr *);
-
     in_hdr_xran = &in_hdr->xran_hdr;
-
     /* Check that pkts_out is big enough to hold all fragments */
     if (unlikely(frag_size * nb_pkts_out <
         (uint16_t)(pkt_in->pkt_len - sizeof (struct xran_up_pkt_hdr)))){
         print_err("-EINVAL\n");
         return -EINVAL;
     }
+    }
 
     in_seg = pkt_in;
+    if(isUdCompHdr){
+        in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr_comp);
+    }else{
     in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr);
+    }
     out_pkt_pos = 0;
     fragment_offset = 0;
@@ -148,6 +196,7 @@ xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
         struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
         uint32_t more_out_segs;
         struct xran_up_pkt_hdr *out_hdr;
+        struct xran_up_pkt_hdr_comp *out_hdr_comp;
 
         /* Allocate direct buffer */
         out_pkt = rte_pktmbuf_alloc(pool_direct);
@@ -162,9 +211,16 @@ xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
 
         /* Reserve space for the XRAN header that will be built later */
         //out_pkt->data_len = sizeof(struct xran_up_pkt_hdr);
         //out_pkt->pkt_len = sizeof(struct xran_up_pkt_hdr);
+        if(isUdCompHdr){
+            if(rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr_comp)) ==NULL){
+                rte_panic("sizeof(struct xran_up_pkt_hdr)");
+            }
+        }else{
         if(rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr)) ==NULL){
             rte_panic("sizeof(struct xran_up_pkt_hdr)");
         }
+        }
+
         frag_bytes_remaining = frag_size;
 
         out_seg_prev = out_pkt;
@@ -233,14 +289,25 @@ xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
 
         /* Build the XRAN header */
         print_dbg("Build the XRAN header\n");
-        out_hdr = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr *);
+
+        if(isUdCompHdr){
+            out_hdr_comp = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr_comp*);
+            __fill_xranhdr_frag_comp(out_hdr_comp, in_hdr_xran_comp,
+                (uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr_comp),
+                fragment_offset, nRBStart, nRBSize, more_in_segs, seqid, iqWidth);
+
+            fragment_offset = (uint16_t)(fragment_offset +
+                out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr_comp));
+        } else {
+        out_hdr = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr *);
 
         __fill_xranhdr_frag(out_hdr, in_hdr_xran,
             (uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr),
-            fragment_offset, sectinfo, more_in_segs, seqid);
+            fragment_offset, nRBStart, nRBSize, more_in_segs, seqid, iqWidth);
 
         fragment_offset = (uint16_t)(fragment_offset +
             out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr));
+        }
 
         //out_pkt->l3_len = sizeof(struct xran_up_pkt_hdr);
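
Note on the arithmetic this patch changes: the fixed XRAN_PAYLOAD_RB_ALIGN constant (48 bytes, valid only for 16-bit I/Q) is replaced by the iqWidth-aware XRAN_PAYLOAD_1_RB_SZ() macro from xran_common.h, so frag_size is rounded down to a whole number of PRBs for any supported bit width, and the header budget grows by sizeof(struct data_section_compression_hdr) when isUdCompHdr is set. The self-contained C sketch below mirrors that rounding; payload_1_rb_sz() and frag_payload_size() are illustrative stand-ins written for this note (the real macro may also account for per-PRB compression parameters), not the library's definitions.

#include <stdint.h>
#include <stdio.h>

#define N_SC_PER_PRB 12   /* subcarriers per PRB */

/* Illustrative stand-in for XRAN_PAYLOAD_1_RB_SZ(iqWidth):
 * 12 subcarriers x 2 (I and Q) x iqWidth bits, expressed in bytes.
 * Assumption: no per-PRB udCompParam byte is counted here. */
static inline uint16_t payload_1_rb_sz(uint8_t iqWidth)
{
    return (uint16_t)((N_SC_PER_PRB * 2u * iqWidth) / 8u);
}

/* Mirrors the frag_size computation in xran_app_fragment_packet():
 * round the usable MTU payload down so that every fragment carries
 * only complete PRBs. hdr_sz plays the role of eth_xran_up_headers_sz. */
static uint16_t frag_payload_size(uint16_t mtu_size, uint16_t hdr_sz,
                                  uint16_t headroom, uint8_t iqWidth)
{
    uint16_t rb = payload_1_rb_sz(iqWidth);
    return (uint16_t)(((mtu_size - hdr_sz - headroom) / rb) * rb);
}

int main(void)
{
    /* 16-bit I/Q: 12*2*2 = 48 bytes per PRB, matching the removed
     * "at least 12*4=48 bytes per one RB" comment. */
    printf("1 PRB @ 16-bit I/Q: %u bytes\n", payload_1_rb_sz(16));
    /* Example numbers only: 9600-byte MTU, 30 bytes of headers,
     * 128-byte RTE_PKTMBUF_HEADROOM. */
    printf("fragment payload: %u bytes\n",
           frag_payload_size(9600, 30, 128, 16));
    return 0;
}

With these example inputs the payload rounds from 9442 down to 9408 bytes (196 PRBs of 48 bytes each), which is the alignment guarantee the patch preserves while generalizing it beyond 16-bit I/Q.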