+/**
+ * Build and transmit SRS U-Plane packets driven by the C-Plane section
+ * database (CP-on path).
+ *
+ * For each (component carrier, SRS antenna) pair in
+ * [start_cc, start_cc+num_cc) x [start_ant, start_ant+num_ant) the C-Plane
+ * sections recorded for ctx_id are replayed: a zero-copy external mbuf is
+ * attached directly to the SRS IQ buffer, the Ethernet/eCPRI/radio-app and
+ * per-section headers are written into the headroom reserved in front of the
+ * IQ data, and the finished packet is queued on the U-Plane ring when the
+ * last PRB element of a group (prbElemEnd) is reached.
+ *
+ * @param pHandle      xran device context (struct xran_device_ctx *)
+ * @param ctx_id       C-Plane context index selecting the section DB slice
+ * @param tti          slot counter; selects IQ buffers (mod XRAN_N_FE_BUF_LEN)
+ * @param start_cc     first component carrier to process
+ * @param num_cc       number of component carriers
+ * @param start_ant    first SRS antenna (before the SRS eAxC offset is applied)
+ * @param num_ant      number of SRS antennas
+ * @param frame_id     ORAN frame number placed in the radio-app header
+ * @param subframe_id  ORAN subframe number
+ * @param slot_id      ORAN slot number
+ * @param sym_id       symbol to transmit; sections not covering it are skipped
+ * @param compType     static vs dynamic compression header (udCompHdr presence)
+ * @param direction    eCPRI packet direction (UL for SRS)
+ * @param xran_port_id xran port id (currently unused; kept for API symmetry)
+ * @param p_sec_db     per-port section database filled by C-Plane processing
+ *
+ * @return 1 on success; 0 if a packet would exceed the configured MTU (no
+ *         transport-layer fragmentation is supported) or on a NULL pDst.
+ */
+int32_t
+xran_process_tx_srs_cp_on(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id,
+    uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
+    uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
+{
+    struct xran_up_pkt_gen_params *pxp;
+    struct data_section_hdr *pDataSec;
+    int32_t antElm_eAxC_id = 0;         /* ant_id + SRS eAxC offset, set per antenna below */
+
+    struct xran_srs_config *p_srs_cfg;
+
+    char* ext_buff;
+    void *mb_base;
+    char* pStart;
+    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
+    struct xran_section_info* sectinfo;
+    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
+    p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
+    struct rte_mbuf_ext_shared_info* p_share_data;
+    struct xran_sectioninfo_db* ptr_sect_elm = NULL;
+    struct rte_mbuf* mb_oran_hdr_ext = NULL;
+    struct xran_ecpri_hdr* ecpri_hdr = NULL;
+    uint16_t* __restrict pDst = NULL;
+
+    uint16_t next;
+    uint16_t ext_buff_len = 0;
+    uint16_t iq_sample_size_bytes = 0;
+    uint16_t num_sections = 0, total_sections = 0;
+    uint16_t n_bytes;
+    uint16_t elm_bytes = 0;
+    uint16_t section_id;
+    uint16_t nPktSize = 0;
+    uint16_t cid;
+    uint16_t vf_id;
+    const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
+    uint8_t seq_id = 0;
+    uint8_t cc_id, ant_id;
+    uint8_t compMeth;
+    uint8_t iqWidth;
+    uint8_t parm_size;
+
+    const uint8_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
+    uint8_t comp_head_upd = 0;
+
+    /* Headroom reserved in front of every symbol's IQ buffer so that all
+     * protocol headers can be written in place (zero-copy transmit). */
+    const uint8_t total_header_size = (RTE_PKTMBUF_HEADROOM +
+        sizeof(struct xran_ecpri_hdr) +
+        sizeof(struct radio_app_common_hdr) +
+        sizeof(struct data_section_hdr));
+
+    for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++)
+    {
+        for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++)
+        {
+            /* SRS streams live at an eAxC-id offset above the regular UL ports */
+            antElm_eAxC_id = ant_id + p_srs_cfg->eAxC_offset;
+            ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][antElm_eAxC_id];
+
+            if (unlikely(ptr_sect_elm == NULL)) {
+                printf("ant_id = %d ctx_id = %d,start_ant = %d, num_ant = %d, antElm_eAxC_id = %d\n", ant_id, ctx_id, start_ant, num_ant, antElm_eAxC_id);
+                rte_panic("ptr_sect_elm == NULL\n");
+                return (0); /* not reached; rte_panic() aborts */
+            }
+            if (0 != ptr_sect_elm->cur_index)
+            {
+                num_sections = ptr_sect_elm->cur_index;
+                /* iterate C-Plane configuration to generate corresponding U-Plane */
+                vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, antElm_eAxC_id);
+                mb_base = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
+                if (unlikely(mb_base == NULL))
+                {
+                    rte_panic("mb == NULL\n");
+                }
+                /* eCPRI PC_ID: CC and RU-port fields packed per the configured
+                 * eAxC bit layout, stored big-endian once for all sections. */
+                cid = ((cc_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ccId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ccId) | ((antElm_eAxC_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ruPortId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ruPortId);
+                cid = rte_cpu_to_be_16(cid);
+#pragma loop_count min=1, max=16
+                for (next = 0; next < num_sections; next++)
+                {
+                    sectinfo = &ptr_sect_elm->list[next];
+
+                    if (unlikely(sectinfo == NULL)) {
+                        print_err("sectinfo == NULL\n");
+                        break;
+                    }
+                    if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
+                    {   /* only supports type 1 */
+                        print_err("Invalid section type in section DB - %d", sectinfo->type);
+                        continue;
+                    }
+                    /* skip, if this section is not scheduled on sym_id */
+                    if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
+                        continue;
+                    compMeth = sectinfo->compMeth;
+                    iqWidth = sectinfo->iqWidth;
+                    section_id = sectinfo->id;
+
+                    /* a udCompHdr is emitted only for dynamic compression headers
+                     * with an actual compression method */
+                    comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
+
+                    if (sectinfo->prbElemBegin)
+                    {
+                        /* first section of a PRB-element group: start a new packet */
+                        seq_id = xran_get_upul_seqid(pHandle, cc_id, antElm_eAxC_id);
+                        /* 18 = Ethernet-level overhead allowance (presumably
+                         * 14-byte header + 4-byte VLAN tag) — TODO confirm */
+                        iq_sample_size_bytes = 18 + sizeof(struct xran_ecpri_hdr) +
+                            sizeof(struct radio_app_common_hdr);
+                    }
+
+                    /* running estimate of the packet size, used for the MTU
+                     * check at transmit time (compressed sections only) */
+                    if (compMeth)
+                    {
+                        iq_sample_size_bytes += sizeof(struct data_section_hdr);
+
+                        if (comp_head_upd)
+                        {
+                            iq_sample_size_bytes += sizeof(struct data_section_compression_hdr);
+                        }
+
+                        /* per PRB: 3*iqWidth bytes of IQ plus one param byte */
+                        iq_sample_size_bytes += sectinfo->numPrbc * (iqWidth * 3 + 1);
+                    }
+
+                    print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
+                        sectinfo->type, sectinfo->id, sectinfo->startPrbc,
+                        sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
+
+                    if (sectinfo->prbElemBegin)
+                    {
+                        /* Attach an external (zero-copy) mbuf pointing into the
+                         * SRS IQ buffer, with headers placed in the headroom. */
+                        p_share_data = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
+                        p_share_data->free_cb = extbuf_free_callback;
+                        p_share_data->fcb_opaque = NULL;
+                        rte_mbuf_ext_refcnt_set(p_share_data, 1);
+
+                        /* Create ethernet + eCPRI + radio app header */
+                        ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
+
+                        ext_buff = ((char*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData + sectinfo->sec_desc[sym_id].iq_buffer_offset) - total_header_size;
+                        ext_buff_len += (total_header_size + 18);
+
+                        if (comp_head_upd)
+                        {
+                            ext_buff -= sizeof(struct data_section_compression_hdr);
+                            ext_buff_len += sizeof(struct data_section_compression_hdr);
+                        }
+
+                        mb_oran_hdr_ext = xran_ethdi_mbuf_indir_alloc();
+                        if (unlikely((mb_oran_hdr_ext) == NULL))
+                        {
+                            rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
+                        }
+
+#ifdef ENABLE_DEBUG_COREDUMP
+                        /* FIX: the cast must be parenthesized before '->' is
+                         * applied; previously '->iova' bound to the void*
+                         * result of RTE_PTR_SUB and failed to compile when
+                         * this debug path was enabled. */
+                        if (unlikely(((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova == 0))
+                        {
+                            rte_panic("Failed rte_mem_virt2iova\n");
+                        }
+                        if (unlikely(((rte_iova_t)((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova) == RTE_BAD_IOVA))
+                        {
+                            rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
+                        }
+#endif
+                        /* Point the indirect mbuf at the external buffer; the
+                         * IOVA is derived from the owning mempool object header
+                         * plus the offset of ext_buff inside that object. */
+                        mb_oran_hdr_ext->buf_addr = ext_buff;
+                        mb_oran_hdr_ext->buf_iova = ((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova + RTE_PTR_DIFF(ext_buff, mb_base);
+                        mb_oran_hdr_ext->buf_len = ext_buff_len;
+                        mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
+                        mb_oran_hdr_ext->shinfo = p_share_data;
+                        mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
+                        mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
+                        mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
+                        mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
+                        pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
+
+                        /* Fill in the ethernet header. */
+                        rte_eth_macaddr_get(mb_oran_hdr_ext->port, &((struct rte_ether_hdr*)pStart)->s_addr);   /* set source addr */
+                        ((struct rte_ether_hdr*)pStart)->d_addr = eth_ctx->entities[vf_id][ID_O_RU];            /* set dst addr */
+                        ((struct rte_ether_hdr*)pStart)->ether_type = ETHER_TYPE_ECPRI_BE;                      /* ethertype */
+
+                        nPktSize = sizeof(struct rte_ether_hdr)
+                            + sizeof(struct xran_ecpri_hdr)
+                            + sizeof(struct radio_app_common_hdr);
+
+                        ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
+
+                        ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
+                        ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
+                        ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
+
+                        /* one to one lls-CU to RU only and band sector is the same */
+                        ecpri_hdr->ecpri_xtc_id = cid;
+
+                        /* no transport layer fragmentation supported:
+                         * 0x8000 = E-bit set, subsequence number 0 */
+                        ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
+                        ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
+                        /* payload size grows as sections are appended below;
+                         * converted to network order at prbElemEnd */
+                        ecpri_hdr->cmnhdr.bits.ecpri_payl_size = sizeof(struct radio_app_common_hdr) + XRAN_ECPRI_HDR_SZ;
+
+                    }   /* if(sectinfo->prbElemBegin) */
+
+                    /* Prepare U-Plane section hdr */
+                    iqWidth = (iqWidth == 0) ? 16 : iqWidth;    /* 0 encodes full 16-bit width */
+                    switch (compMeth)
+                    {
+                        case XRAN_COMPMETHOD_BLKFLOAT:   parm_size = 1; break;  /* 1 exponent byte per PRB */
+                        case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
+                        default:
+                            parm_size = 0;
+                    }
+
+                    n_bytes = (3 * iqWidth + parm_size) * sectinfo->numPrbc;    /* 12 IQ pairs per PRB */
+                    n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
+
+                    /* Ethernet & eCPRI added already */
+                    nPktSize += sizeof(struct data_section_hdr) + n_bytes;
+
+                    if (comp_head_upd)
+                        nPktSize += sizeof(struct data_section_compression_hdr);
+
+                    if (likely((ecpri_hdr != NULL)))
+                    {
+                        ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_hdr) + n_bytes;
+
+                        if (comp_head_upd)
+                            ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_compression_hdr);
+                    }
+                    else
+                    {
+                        print_err("ecpri_hdr should not be NULL\n");
+                    }
+
+                    if (sectinfo->prbElemBegin)
+                    {
+                        pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
+                        pxp = (struct xran_up_pkt_gen_params *)pDst;
+                        /* radio app header */
+                        pxp->app_params.data_feature.value = 0x10;
+                        pxp->app_params.data_feature.data_direction = direction;
+                        pxp->app_params.frame_id = frame_id;
+                        pxp->app_params.sf_slot_sym.subframe_id = subframe_id;
+                        pxp->app_params.sf_slot_sym.slot_id = slot_id;
+                        pxp->app_params.sf_slot_sym.symb_id = sym_id;
+                        /* convert to network byte order */
+                        pxp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(pxp->app_params.sf_slot_sym.value);
+                        pDst += 2;  /* advance past the 4-byte radio app header */
+                    }
+
+                    pDataSec = (struct data_section_hdr *)pDst;
+                    if (pDataSec) {
+                        pDataSec->fields.sect_id = section_id;
+                        pDataSec->fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(sectinfo->numPrbc);
+                        pDataSec->fields.start_prbu = (sectinfo->startPrbc & 0x03ff);
+                        pDataSec->fields.sym_inc = 0;
+                        pDataSec->fields.rb = 0;
+                        /* network byte order */
+                        pDataSec->fields.all_bits = rte_cpu_to_be_32(pDataSec->fields.all_bits);
+                        pDst += 2;  /* advance past the 4-byte section header */
+                    }
+                    else
+                    {
+                        print_err("pDataSec is NULL idx = %u num_sections = %u\n", next, num_sections);
+                    }
+
+                    if (comp_head_upd)
+                    {
+                        if (pDst == NULL) {
+                            print_err("pDst == NULL\n");
+                            return 0;
+                        }
+                        ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_comp_meth = compMeth;
+                        ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
+                        ((struct data_section_compression_hdr *)pDst)->rsrvd = 0;
+                        pDst++; /* 2-byte udCompHdr + reserved byte pair */
+                    }
+
+                    /* Increment by IQ data len (IQ already resides in the
+                     * external buffer; nothing is copied) */
+                    pDst = (uint16_t *)((uint8_t *)pDst + n_bytes);
+                    if (mb_oran_hdr_ext) {
+                        rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
+                        rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
+                    }
+
+                    if (sectinfo->prbElemEnd)   /* Transmit the packet */
+                    {
+                        if (likely((ecpri_hdr != NULL)))
+                            ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_hdr->cmnhdr.bits.ecpri_payl_size);
+                        else
+                            print_err("ecpri_hdr should not be NULL\n");
+                        /* if we don't need to do any fragmentation */
+                        if (likely(p_xran_dev_ctx->fh_init.mtu >= (iq_sample_size_bytes)))
+                        {
+                            p_xran_dev_ctx->send_upmbuf2ring(mb_oran_hdr_ext, ETHER_TYPE_ECPRI, vf_id);
+                        }
+                        else
+                        {
+                            /* no transport-layer fragmentation supported */
+                            return 0;
+                        }
+                        elm_bytes += nPktSize;
+                    }   /* if(prbElemEnd) */
+                }   /* section loop */
+
+                /* FIX: accumulate inside the cur_index check; previously a
+                 * stale num_sections from an earlier (cc, ant) iteration was
+                 * added for antennas whose section DB was empty, inflating
+                 * the tx_counter statistic. */
+                total_sections += num_sections;
+            }   /* if ptr_sect_elm->cur_index */
+        }   /* for(ant_id = start_ant; ...) */
+    }   /* for(cc_id = start_cc; ...) */
+
+    struct xran_common_counters* pCnt = &p_xran_dev_ctx->fh_counters;
+    pCnt->tx_counter += total_sections;
+    pCnt->tx_bytes_counter += elm_bytes;
+    return 1;
+}
+
+