1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN TX functionality
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
49 #include <rte_ethdev.h>
51 #include "xran_fh_o_du.h"
55 #include "xran_up_api.h"
56 #include "xran_cp_api.h"
57 #include "xran_sync_api.h"
58 #include "xran_lib_mlog_tasks_id.h"
59 #include "xran_timer.h"
60 #include "xran_main.h"
61 #include "xran_common.h"
63 #include "xran_frame_struct.h"
64 #include "xran_printf.h"
65 #include "xran_tx_proc.h"
66 #include "xran_cp_proc.h"
68 #include "xran_mlog_lnx.h"
72 XRAN_IN_PREV_PERIOD = 0,
77 extern int32_t first_call;
80 xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
81 struct rte_mbuf_ext_shared_info * p_share_data,
82 enum xran_compression_method compMeth, enum xran_comp_hdr_type staticEn);
/* No-op free callback for mbufs carrying externally attached IQ buffers.
 * Registered as p_share_data->free_cb throughout this file; the external
 * buffer belongs to the application's BBU I/O pools, so nothing is freed
 * here.  NOTE(review): the return-type line and braces are missing from
 * this listing. */
86 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
88 /*long t1 = MLogTick();
89 MLogTask(77777, t1, t1+100);*/
/* Translate a TX symbol offset into an absolute symbol index.
 *
 * Subtracts offSym from the over-the-air symbol index otaSym, wrapping
 * modulo numSymTotal, and reports through pInPeriod whether the result
 * landed in the previous, current, or next period (enum xran_in_period).
 * NOTE(review): braces, else-branches, and the final return statement are
 * missing from this listing; the notes below follow the visible lines. */
92 static inline int32_t XranOffsetSym(int32_t offSym, int32_t otaSym, int32_t numSymTotal, enum xran_in_period* pInPeriod)
96 // Suppose the offset is usually small
97 if (unlikely(offSym > otaSym))
/* Wrapped past the start of the period: result borrows from the previous one. */
99 sym = numSymTotal - offSym + otaSym;
100 *pInPeriod = XRAN_IN_PREV_PERIOD;
104 sym = otaSym - offSym;
/* sym can only exceed numSymTotal via the wrap branch above; flag next period. */
106 if (unlikely(sym >= numSymTotal))
109 *pInPeriod = XRAN_IN_NEXT_PERIOD;
113 *pInPeriod = XRAN_IN_CURR_PERIOD;
/* Accessor for the global xran_SFN_at_Sec_Start.
 * @return the SFN latched at the start of the current second; per the
 *         original comment it is a 10-bit value in [0, 1023]. */
120 // Return SFN at current second start, 10 bits, [0, 1023]
121 uint16_t xran_getSfnSecStart(void)
123 return xran_SFN_at_Sec_Start;
126 /* Send burst of packets on an output interface */
/* Enqueue every mbuf in p_m_table onto the device's U-plane TX path via
 * dev->send_upmbuf2ring (EtherType ECPRI) for the given VF port, adding
 * each packet's length to fh_counters.tx_bytes_counter.  Logs an error
 * when fewer packets were enqueued than requested (ret < n).
 * NOTE(review): declarations of i/n/ret, the dev NULL-check branch around
 * the rte_panic, braces, and the return are missing from this listing. */
128 xran_send_burst(struct xran_device_ctx *dev, struct mbuf_table* p_m_table, uint16_t port)
130 struct xran_common_counters * pCnt = NULL;
131 struct rte_mbuf **m_table;
137 pCnt = &dev->fh_counters;
139 rte_panic("incorrect dev\n");
141 m_table = p_m_table->m_table;
144 for(i = 0; i < n; i++) {
145 /*rte_mbuf_sanity_check(m_table[i], 0);*/
146 /*rte_pktmbuf_dump(stdout, m_table[i], 256);*/
148 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);
/* send_upmbuf2ring presumably returns the number of mbufs enqueued (0/1)
 * so ret accumulates successes — TODO confirm against its definition. */
149 ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI, port);
152 if (unlikely(ret < n)) {
153 print_err("core %d [p: %d-> vf %d] ret [%d] < n[%d] enq %ld\n",
154 rte_lcore_id(), dev->xran_port_id, port, ret, n, pCnt->tx_counter);
160 /* Send a single 5G symbol over multiple packets */
/* Build the ORAN U-plane headers for one symbol into *xp and size the mbuf.
 *
 * Fills the radio application header (frame/subframe/slot/symbol), the
 * data section header (section id, start PRB, PRB count), and — when a
 * compression method is in use with dynamic compression headers — the
 * ud_comp_hdr fields; multi-byte header words are converted to network
 * byte order before transmission.  The total packet size nPktSize is the
 * sum of the Ethernet, eCPRI, radio-app and section headers plus payload.
 * NOTE(review): this listing omits most of the parameter list, the switch
 * statement around the COMPMETHOD cases, local declarations, and the
 * return; the notes below follow the visible lines only. */
161 static inline int32_t prepare_symbol_opt(enum xran_pkt_dir direction,
167 const enum xran_input_byte_order iq_buf_byte_order,
174 struct xran_up_pkt_gen_params *xp,
175 enum xran_comp_hdr_type staticEn)
/* iqWidth 0 is shorthand for uncompressed 16-bit samples. */
182 iqWidth = (iqWidth==0) ? 16 : iqWidth;
/* parm_size: per-PRB compression parameter bytes (1 for block floating
 * point exponent, 0 for modulation compression). */
184 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
185 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
/* 3 * iqWidth bytes per PRB = 12 complex samples * 2 * iqWidth bits / 8. */
189 n_bytes = (3 * iqWidth + parm_size) * prb_num;
190 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
192 nPktSize = sizeof(struct rte_ether_hdr)
193 + sizeof(struct xran_ecpri_hdr)
194 + sizeof(struct radio_app_common_hdr)
195 + sizeof(struct data_section_hdr)
/* The compression header is only present on the wire for dynamic
 * compression-header configurations. */
197 if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn ==XRAN_COMP_HDR_TYPE_DYNAMIC))
198 nPktSize += sizeof(struct data_section_compression_hdr);
202 /* radio app header */
203 xp->app_params.data_direction = direction;
204 xp->app_params.payl_ver = 1;
205 xp->app_params.filter_id = 0;
206 xp->app_params.frame_id = frame_id;
207 xp->app_params.sf_slot_sym.subframe_id = subframe_id;
208 xp->app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
209 xp->app_params.sf_slot_sym.symb_id = symbol_no;
211 /* convert to network byte order */
212 xp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp->app_params.sf_slot_sym.value);
/* data section header */
215 xp->sec_hdr.fields.sect_id = section_id;
216 xp->sec_hdr.fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(prb_num);
217 xp->sec_hdr.fields.start_prbu = (uint8_t)prb_start;
218 xp->sec_hdr.fields.sym_inc = 0;
219 xp->sec_hdr.fields.rb = 0;
/* compression header parameters (used when compression is enabled). */
223 xp->compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
224 xp->compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
225 xp->compr_hdr_param.rsrvd = 0;
227 /* network byte order */
228 xp->sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp->sec_hdr.fields.all_bits);
232 errx(1, "out of mbufs after %d packets", 1);
235 prep_bytes = xran_prepare_iq_symbol_portion(mb,
249 errx(1, "failed preparing symbol");
/* Commit the final packet length to the mbuf metadata. */
251 rte_pktmbuf_pkt_len(mb) = nPktSize;
252 rte_pktmbuf_data_len(mb) = nPktSize;
255 printf("Symbol %2d prep_bytes (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/* Transmit one U-plane symbol when C-Plane is disabled (cp_off mode).
 *
 * For a slot/symbol whose type matches the device role (DL for O-DU, UL
 * for O-RU, plus SP/FDD), walks the PRB map for (tti, cc_id, ant_id) and
 * for every PRB element scheduled on sym_id:
 *   - locates the section's IQ data inside the BBU TX buffer (pos +
 *     iq_buffer_offset),
 *   - computes an external-buffer window that extends backwards over the
 *     IQ data by RTE_PKTMBUF_HEADROOM + eCPRI + radio-app + section
 *     headers (plus the compression header when compMethod != NONE and
 *     dynamic compression headers are configured), so the protocol
 *     headers can be written in front of the IQ payload in place,
 *   - allocates a small mbuf for the Ethernet header, attaches the IQ
 *     window as an external buffer with the no-op extbuf_free_callback,
 *     prepends the Ethernet header, builds the packet with
 *     prepare_symbol_ex, and enqueues it via send_upmbuf2ring.
 * NOTE(review): this listing omits many lines (returns, else branches,
 * local declarations, parts of the prepare_symbol_ex argument list);
 * comments below are limited to what the visible lines establish. */
261 int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id,
266 char *p_sec_iq = NULL;
268 void *send_mb = NULL;
270 uint16_t vf_id = 0 , num_sections = 0, curr_sect_id = 0 ;
272 struct xran_prb_map *prb_map = NULL;
273 //uint8_t num_ant_elm = 0;
275 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
276 if (p_xran_dev_ctx == NULL)
278 struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
279 //struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
280 //struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
282 //num_ant_elm = xran_get_num_ant_elm(pHandle);
283 enum xran_pkt_dir direction;
284 enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;
286 struct rte_mbuf *eth_oran_hdr = NULL;
287 char *ext_buff = NULL;
288 uint16_t ext_buff_len = 0;
289 struct rte_mbuf *tmp = NULL;
290 rte_iova_t ext_buff_iova = 0;
291 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
293 staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
296 if(PortId >= XRAN_PORTS_NUM)
297 rte_panic("incorrect PORT ID\n");
299 struct rte_mbuf_ext_shared_info * p_share_data = NULL;
/* TX direction follows the device role: O-DU transmits DL, O-RU UL. */
300 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
301 direction = XRAN_DIR_DL; /* O-DU */
302 //prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
304 direction = XRAN_DIR_UL; /* RU */
305 //prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
/* Proceed only when both the slot and the symbol types match the TX
 * direction (or are SP/FDD). */
308 if(xran_fs_get_slot_type(PortId, cc_id, tti, ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SLOT_TYPE_DL : XRAN_SLOT_TYPE_UL)) == 1
309 || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1
310 || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_FDD) == 1){
312 if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SYMBOL_TYPE_DL : XRAN_SYMBOL_TYPE_UL)
313 || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){
315 vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
316 pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
317 mb = (void*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
318 prb_map = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
323 for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++){
324 //print_err("tti is %d, cc_id is %d, ant_id is %d, prb_map->nPrbElm id - %d", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, prb_map->nPrbElm);
325 struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
326 struct xran_section_desc * p_sec_desc = NULL;
327 uint16_t sec_id = prb_map_elm->nSectId;
328 p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sec_id];
/* Skip elements not scheduled on this symbol. */
330 if(unlikely(sym_id < prb_map_elm->nStartSymb || sym_id >= (prb_map_elm->nStartSymb + prb_map_elm->numSymb)))
/* NOTE(review): prb_map_elm is &prb_map->prbMap[elmIdx] and can never be
 * NULL here — this check (and its message) looks vestigial. */
333 if(prb_map_elm == NULL){
334 rte_panic("p_sec_desc == NULL\n");
337 p_sec_desc = &prb_map_elm->sec_desc[sym_id][0];
339 p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);
341 /* calculate offset for external buffer */
342 ext_buff_len = p_sec_desc->iq_buffer_len;
343 ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
344 sizeof (struct xran_ecpri_hdr) +
345 sizeof (struct radio_app_common_hdr) +
346 sizeof(struct data_section_hdr));
/* The extra 18 bytes presumably cover Ethernet header + trailer slack —
 * TODO confirm intended meaning of this constant. */
348 ext_buff_len += RTE_PKTMBUF_HEADROOM +
349 sizeof (struct xran_ecpri_hdr) +
350 sizeof (struct radio_app_common_hdr) +
351 sizeof(struct data_section_hdr) + 18;
353 if ((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)){
354 ext_buff -= sizeof (struct data_section_compression_hdr);
355 ext_buff_len += sizeof (struct data_section_compression_hdr);
358 eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
359 if (unlikely (( eth_oran_hdr) == NULL)) {
360 rte_panic("Failed rte_pktmbuf_alloc\n");
363 p_share_data->free_cb = extbuf_free_callback;
364 p_share_data->fcb_opaque = NULL;
365 rte_mbuf_ext_refcnt_set(p_share_data, 1);
/* mb is the mempool element backing the IQ buffer; its IOVA anchors the
 * external-buffer physical address computed below. */
367 ext_buff_iova = rte_mempool_virt2iova(mb);
368 if (unlikely (( ext_buff_iova) == 0)) {
369 rte_panic("Failed rte_mem_virt2iova \n");
372 if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
373 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
376 rte_pktmbuf_attach_extbuf(eth_oran_hdr,
378 ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
382 rte_pktmbuf_reset_headroom(eth_oran_hdr);
384 tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
385 if (unlikely (( tmp) == NULL)) {
386 rte_panic("Failed rte_pktmbuf_prepend \n");
388 send_mb = eth_oran_hdr;
391 uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
392 xran_get_updl_seqid(pHandle, cc_id, ant_id) :
393 xran_get_upul_seqid(pHandle, cc_id, ant_id);
/* Category B O-RU with extType 1 BFWs emits one section per BFW set. */
396 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
397 && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B)
399 num_sections = (prb_map_elm->bf_weight.extType == 1) ? prb_map_elm->bf_weight.numSetBFWs : 1 ;
400 if (prb_map_elm->bf_weight.extType != 1)
401 curr_sect_id = sec_id;
407 prepare_symbol_ex(direction, curr_sect_id,
410 prb_map_elm->compMethod,
411 prb_map_elm->iqWidth,
412 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
413 frame_id, subframe_id, slot_id, sym_id,
414 prb_map_elm->UP_nRBStart, prb_map_elm->UP_nRBSize,
420 p_sec_desc->iq_buffer_offset);
422 curr_sect_id += num_sections;
424 rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
426 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
427 p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
428 } /* for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) */
430 printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, sym_id);
433 } /* RU mode or C-Plane is not used */
/* Transmit PRACH U-plane I/Q for one symbol (O-RU only, cp_off mode).
 *
 * Selects the PRACH C-Plane config (DSS-aware: per-tti technology picks
 * NR PrachCPConfig vs LTE PrachCPConfigLTE), then — when PRACH is enabled,
 * the symbol type is UL/FDD, the frame/slot matches the PRACH occasion
 * (x/y pattern + is_prach_slot) and sym_id lies within the configured
 * PRACH symbol window — sends the symbol either as multiple sections
 * (send_symbol_mult_section_ex, for MTU 1500 with filterIdx PRACH_012)
 * or as a single section (send_symbol_ex).
 * NOTE(review): the inner block at original line 502 re-declares and
 * re-derives pPrachCPConfig, shadowing the identical selection done at
 * the top of the function — looks like duplicated logic worth
 * consolidating.  Many lines (braces, else keywords, returns, parts of
 * the send_* argument lists) are missing from this listing. */
437 int32_t xran_process_tx_prach_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id)
443 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
444 if (p_xran_dev_ctx == NULL)
447 struct xran_prach_cp_config *pPrachCPConfig;
/* DSS: the active technology for this tti (1 == NR per the branch below)
 * chooses between the NR and LTE PRACH configs. */
448 if(p_xran_dev_ctx->dssEnable){
449 int i = tti % p_xran_dev_ctx->dssPeriod;
450 if(p_xran_dev_ctx->technology[i]==1) {
451 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
454 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
458 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
461 enum xran_pkt_dir direction = XRAN_DIR_UL;
462 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
465 if(PortId >= XRAN_PORTS_NUM)
466 rte_panic("incorrect PORT ID\n");
469 if(p_xran_dev_ctx->enablePrach
470 && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) && (ant_id < XRAN_MAX_PRACH_ANT_NUM)){
472 if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
473 || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD) { /* Only RU needs to send PRACH I/Q */
475 uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
/* PRACH occasion gate: frame pattern (x, y[0]), PRACH slot, and the
 * configured start..last symbol window for this CC. */
477 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
478 && (is_prach_slot == 1)
479 && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])
480 && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) {
481 int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;
484 uint8_t symb_id_offset = sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id];
486 compMethod = p_xran_dev_ctx->fh_cfg.ru_conf.compMeth_PRACH;
489 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
490 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
495 pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[symb_id_offset].pData;
496 //pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;
497 /*pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id])
498 * (3*p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth + parm_size)
499 * pPrachCPConfig->numPrbc;*/
500 mb = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
/* NOTE(review): duplicated config selection — shadows the outer
 * pPrachCPConfig declared above with identical logic. */
502 struct xran_prach_cp_config *pPrachCPConfig;
503 if(p_xran_dev_ctx->dssEnable){
504 int i = tti % p_xran_dev_ctx->dssPeriod;
505 if(p_xran_dev_ctx->technology[i]==1) {
506 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
509 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
513 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
/* Small MTU + long PRACH formats (filter index 012) cannot fit in one
 * packet: send as multiple sections. */
517 if (1500 == p_xran_dev_ctx->fh_init.mtu && pPrachCPConfig->filterIdx == XRAN_FILTERINDEX_PRACH_012)
519 pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pData;
520 mb = (void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
521 /*one prach for more then one pkg*/
522 send_symbol_mult_section_ex(pHandle,
524 xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, subframe_id, slot_id),
525 (struct rte_mbuf *)mb,
528 p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth,
529 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
530 frame_id, subframe_id, slot_id, sym_id,
531 pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
532 cc_id, prach_port_id,
536 send_symbol_ex(pHandle,
538 xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, subframe_id, slot_id),
539 (struct rte_mbuf *)mb,
/* NOTE(review): this path uses iqWidth_PRACH while the multi-section
 * path above uses iqWidth — confirm the asymmetry is intentional. */
542 p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth_PRACH,
543 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
544 frame_id, subframe_id, slot_id, sym_id,
545 pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
546 cc_id, prach_port_id,
547 xran_get_upul_seqid(pHandle, cc_id, prach_port_id));
551 } /* if(p_xran_dev_ctx->enablePrach ..... */
/* Transmit SRS U-plane I/Q for one slot (O-RU only, cp_off mode).
 *
 * Walks the SRS PRB map for (tti, cc_id, ant_id); for each element it
 * locates the IQ data at the element's start symbol, builds an external
 * buffer window (headroom + eCPRI + radio-app + section headers, plus the
 * compression header for dynamic comp-hdr configs), attaches it to an
 * indirect mbuf, prepends the Ethernet header, builds the packet with
 * prepare_symbol_ex, and enqueues it on the VF mapped to the SRS
 * antenna-element eAxC id (ant_id + srs_cfg.eAxC_offset).
 * Structure closely mirrors xran_process_tx_sym_cp_off().
 * NOTE(review): return statements, braces, and parts of the
 * prepare_symbol_ex/attach argument lists are missing from this listing. */
556 xran_process_tx_srs_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id,
557 uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id)
561 char *p_sec_iq = NULL;
563 char *ext_buff = NULL;
564 uint16_t ext_buff_len = 0 , num_sections=0 , section_id=0;
565 int32_t antElm_eAxC_id;
569 enum xran_pkt_dir direction;
570 enum xran_comp_hdr_type staticEn;
572 rte_iova_t ext_buff_iova = 0;
573 struct rte_mbuf *tmp = NULL;
574 struct xran_prb_map *prb_map = NULL;
575 struct xran_device_ctx * p_xran_dev_ctx;
576 struct xran_common_counters *pCnt;
577 //struct xran_prach_cp_config *pPrachCPConfig;
578 struct xran_srs_config *p_srs_cfg;
579 struct rte_mbuf *eth_oran_hdr = NULL;
580 struct rte_mbuf_ext_shared_info *p_share_data = NULL;
583 p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
584 if(p_xran_dev_ctx == NULL)
586 print_err("dev_ctx is NULL. ctx_id=%d, tti=%d, cc_id=%d, ant_id=%d, frame_id=%d, subframe_id=%d, slot_id=%d\n",
587 ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id);
591 if(p_xran_dev_ctx->xran_port_id >= XRAN_PORTS_NUM)
592 rte_panic("incorrect PORT ID\n");
594 pCnt = &p_xran_dev_ctx->fh_counters;
595 //pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
596 p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
598 /* Only O-RU sends SRS U-Plane */
599 direction = XRAN_DIR_UL;
600 staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* SRS uses a dedicated eAxC range offset from the antenna index. */
601 antElm_eAxC_id = ant_id + p_srs_cfg->eAxC_offset;
603 prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
606 for(elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++)
608 struct xran_prb_elm *prb_map_elm = &prb_map->prbMap[elmIdx];
609 struct xran_section_desc * p_sec_desc = NULL;
/* NOTE(review): prb_map_elm is an address into prbMap[] and cannot be
 * NULL here — vestigial check (same pattern as the cp_off symbol path). */
611 if(prb_map_elm == NULL)
612 rte_panic("p_sec_desc == NULL\n");
614 sym_id = prb_map->prbMap[elmIdx].nStartSymb;
615 pos = (char*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
616 mb = (void*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
619 p_share_data = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
620 p_sec_desc = &prb_map_elm->sec_desc[sym_id][0];
621 p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);
623 /* calculate offset for external buffer */
624 ext_buff_len = p_sec_desc->iq_buffer_len;
626 ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
627 sizeof (struct xran_ecpri_hdr) +
628 sizeof (struct radio_app_common_hdr) +
629 sizeof(struct data_section_hdr));
/* +18: extra slack, presumably Ethernet header/trailer — TODO confirm. */
631 ext_buff_len += RTE_PKTMBUF_HEADROOM +
632 sizeof (struct xran_ecpri_hdr) +
633 sizeof (struct radio_app_common_hdr) +
634 sizeof(struct data_section_hdr) + 18;
636 if((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE)
637 && (staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
639 ext_buff -= sizeof (struct data_section_compression_hdr);
640 ext_buff_len += sizeof (struct data_section_compression_hdr);
643 eth_oran_hdr = xran_ethdi_mbuf_indir_alloc();
644 if(unlikely(eth_oran_hdr == NULL))
645 rte_panic("Failed rte_pktmbuf_alloc\n");
647 p_share_data->free_cb = extbuf_free_callback;
648 p_share_data->fcb_opaque = NULL;
649 rte_mbuf_ext_refcnt_set(p_share_data, 1);
651 ext_buff_iova = rte_mempool_virt2iova(mb);
652 if(unlikely(ext_buff_iova == 0 || ext_buff_iova == RTE_BAD_IOVA))
653 rte_panic("Failed rte_mem_virt2iova : %lu\n", ext_buff_iova);
655 rte_pktmbuf_attach_extbuf(eth_oran_hdr,
657 ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
661 rte_pktmbuf_reset_headroom(eth_oran_hdr);
663 tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
664 if(unlikely(tmp == NULL))
665 rte_panic("Failed rte_pktmbuf_prepend \n");
667 uint8_t seq_id = xran_get_upul_seqid(pHandle, cc_id, antElm_eAxC_id);
/* extType 1 BFWs: one section per BFW set, otherwise one section. */
669 num_sections = (prb_map_elm->bf_weight.extType == 1) ? prb_map_elm->bf_weight.numSetBFWs : 1 ;
671 prepare_symbol_ex(direction, prb_map_elm->nSectId,
672 (void *)eth_oran_hdr, (uint8_t *)p_sec_iq,
673 prb_map_elm->compMethod, prb_map_elm->iqWidth,
674 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
675 frame_id, subframe_id, slot_id, sym_id,
676 prb_map_elm->UP_nRBStart, prb_map_elm->UP_nRBSize,
677 cc_id, antElm_eAxC_id,
684 section_id += num_sections;
686 rte_mbuf_sanity_check(eth_oran_hdr, 0);
688 vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, antElm_eAxC_id);
690 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(eth_oran_hdr);
691 p_xran_dev_ctx->send_upmbuf2ring(eth_oran_hdr, ETHER_TYPE_ECPRI, vf_id);
692 } /* for(elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) */
696 printf("(%d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, antElm_eAxC_id);
/* Build a header mbuf with the U-plane IQ payload attached as an external
 * buffer (zero-copy TX path for the cp_on code below).
 *
 * The external window starts sizeof(eCPRI + radio-app + section header)
 * + RTE_PKTMBUF_HEADROOM bytes before p_ext_buff (one compression header
 * earlier for dynamic comp-hdr configs) so protocol headers can be
 * written in place ahead of the IQ samples; p_ext_buff_start is the
 * mempool element anchoring the IOVA translation.  A small mbuf is
 * allocated from the per-VF pool, the window is attached with the no-op
 * extbuf_free_callback, headroom is reset, and the Ethernet header is
 * prepended.  Returns the prepared mbuf (mb_oran_hdr_ext); panics on any
 * allocation/translation failure.
 * NOTE(review): the return type line, braces, and parts of the
 * attach-extbuf argument list are missing from this listing; the "+ 18"
 * slack presumably covers Ethernet overhead — TODO confirm. */
703 xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
704 struct rte_mbuf_ext_shared_info * p_share_data,
705 enum xran_compression_method compMeth, enum xran_comp_hdr_type staticEn)
707 struct rte_mbuf *mb_oran_hdr_ext = NULL;
708 struct rte_mbuf *tmp = NULL;
709 int8_t *ext_buff = NULL;
710 rte_iova_t ext_buff_iova = 0;
711 ext_buff = p_ext_buff - (RTE_PKTMBUF_HEADROOM +
712 sizeof(struct xran_ecpri_hdr) +
713 sizeof(struct radio_app_common_hdr) +
714 sizeof(struct data_section_hdr));
716 ext_buff_len += RTE_PKTMBUF_HEADROOM +
717 sizeof(struct xran_ecpri_hdr) +
718 sizeof(struct radio_app_common_hdr) +
719 sizeof(struct data_section_hdr) + 18;
720 if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)) {
721 ext_buff -= sizeof (struct data_section_compression_hdr);
722 ext_buff_len += sizeof (struct data_section_compression_hdr);
724 mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
726 if (unlikely (( mb_oran_hdr_ext) == NULL)) {
727 rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
730 p_share_data->free_cb = extbuf_free_callback;
731 p_share_data->fcb_opaque = NULL;
732 rte_mbuf_ext_refcnt_set(p_share_data, 1);
734 ext_buff_iova = rte_mempool_virt2iova(p_ext_buff_start);
735 if (unlikely (( ext_buff_iova) == 0)) {
736 rte_panic("Failed rte_mem_virt2iova \n");
739 if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
740 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
743 rte_pktmbuf_attach_extbuf(mb_oran_hdr_ext,
745 ext_buff_iova + RTE_PTR_DIFF(ext_buff , p_ext_buff_start),
749 rte_pktmbuf_reset_headroom(mb_oran_hdr_ext);
751 tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(mb_oran_hdr_ext, sizeof(struct rte_ether_hdr));
752 if (unlikely (( tmp) == NULL)) {
753 rte_panic("Failed rte_pktmbuf_prepend \n");
756 return mb_oran_hdr_ext;
/* Dispatch U-plane packet generation to the worker ring (optimized form).
 *
 * Allocates a cp_up_tx_desc, copies all call parameters into it
 * (including compType/direction/port/section-DB, which the legacy
 * dispatcher below does not carry), and enqueues the descriptor's mbuf
 * onto up_dl_pkt_gen_ring for the device's port.  Returns 1 on
 * successful enqueue; frees the descriptor if the enqueue fails.
 * NOTE(review): braces and the fall-through return paths are missing
 * from this listing. */
759 int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id,
760 uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
761 uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
764 struct cp_up_tx_desc* p_desc = NULL;
765 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
766 struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
768 p_desc = xran_pkt_gen_desc_alloc();
770 p_desc->pHandle = pHandle;
771 p_desc->ctx_id = ctx_id;
773 p_desc->start_cc = start_cc;
774 p_desc->cc_num = num_cc;
775 p_desc->start_ant = start_ant;
776 p_desc->ant_num = num_ant;
777 p_desc->frame_id = frame_id;
778 p_desc->subframe_id = subframe_id;
779 p_desc->slot_id = slot_id;
780 p_desc->sym_id = sym_id;
781 p_desc->compType = (uint32_t)compType;
782 p_desc->direction = (uint32_t)direction;
783 p_desc->xran_port_id = xran_port_id;
784 p_desc->p_sec_db = (void*)p_sec_db;
786 if(likely(p_xran_dev_ctx->xran_port_id < XRAN_PORTS_NUM)) {
/* The descriptor travels on the ring via its backing mbuf (p_desc->mb). */
787 if (rte_ring_enqueue(eth_ctx->up_dl_pkt_gen_ring[p_xran_dev_ctx->xran_port_id], p_desc->mb) == 0)
788 return 1; /* success */
790 xran_pkt_gen_desc_free(p_desc);
792 rte_panic("incorrect port %d", p_xran_dev_ctx->xran_port_id);
795 print_dbg("xran_pkt_gen_desc_alloc failure %d", p_xran_dev_ctx->xran_port_id);
/* Dispatch U-plane packet generation to the worker ring (legacy form).
 *
 * Same flow as xran_process_tx_sym_cp_on_dispatch_opt() above but without
 * the compType/direction/port/section-DB fields: allocates a
 * cp_up_tx_desc, fills the slot coordinates, and enqueues its mbuf onto
 * up_dl_pkt_gen_ring.  Returns 1 on successful enqueue; frees the
 * descriptor on enqueue failure.
 * NOTE(review): braces and the fall-through return paths are missing
 * from this listing. */
802 xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id, uint32_t subframe_id,
803 uint32_t slot_id, uint32_t sym_id)
806 struct cp_up_tx_desc* p_desc = NULL;
807 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
808 struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
810 p_desc = xran_pkt_gen_desc_alloc();
812 p_desc->pHandle = pHandle;
813 p_desc->ctx_id = ctx_id;
815 p_desc->start_cc = start_cc;
816 p_desc->cc_num = num_cc;
817 p_desc->start_ant = start_ant;
818 p_desc->ant_num = num_ant;
819 p_desc->frame_id = frame_id;
820 p_desc->subframe_id = subframe_id;
821 p_desc->slot_id = slot_id;
822 p_desc->sym_id = sym_id;
824 if(likely(p_xran_dev_ctx->xran_port_id < XRAN_PORTS_NUM)) {
825 if (rte_ring_enqueue(eth_ctx->up_dl_pkt_gen_ring[p_xran_dev_ctx->xran_port_id], p_desc->mb) == 0)
826 return 1; /* success */
828 xran_pkt_gen_desc_free(p_desc);
830 rte_panic("incorrect port %d", p_xran_dev_ctx->xran_port_id);
833 print_dbg("xran_pkt_gen_desc_alloc failure %d", p_xran_dev_ctx->xran_port_id);
/* Generate U-plane packets for one symbol driven by the C-Plane section
 * database (cp_on mode).
 *
 * Iterates the section info stored for (direction, cc_id, ant_id, ctx_id)
 * via xran_cp_iterate_section_info(); for every type-1 section scheduled
 * on sym_id it:
 *   - attaches the section's IQ region as an external buffer with
 *     xran_attach_up_ext_buf() (taking a refcount so the NIC TX free
 *     does not release the application buffer, and freeing the mbuf kept
 *     from the previous use of this slot),
 *   - builds the headers with prepare_symbol_opt(),
 *   - collects the packet in loc_tx_mbufs (no fragmentation expected:
 *     C-Plane section allocation is supposed to respect the MTU, and the
 *     fragmentation path only logs an error),
 * then transmits the batch with xran_send_burst() on the mapped VF.
 * NOTE(review): this listing omits many lines — braces, else branches,
 * parts of the prepare_symbol_opt argument list, fragmentation handling,
 * and the final return; comments reflect only the visible lines. */
840 xran_process_tx_sym_cp_on(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t cc_id, int32_t start_ant, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
841 uint32_t slot_id, uint32_t sym_id)
844 uint16_t ext_buff_len = 0;
846 char *p_sec_iq = NULL;
848 struct rte_mbuf *to_free_mbuf = NULL;
850 uint16_t iq_sample_size_bits = 16;
852 int32_t num_sections = 0;
856 struct mbuf_table loc_tx_mbufs;
857 struct xran_up_pkt_gen_params loc_xp;
859 struct xran_section_info *sectinfo = NULL;
860 struct xran_device_ctx *p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
861 enum xran_pkt_dir direction;
863 enum xran_comp_hdr_type compType = XRAN_COMP_HDR_TYPE_DYNAMIC;
865 struct rte_mbuf_ext_shared_info * p_share_data = NULL;
867 if (p_xran_dev_ctx != NULL)
869 compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
871 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
872 direction = XRAN_DIR_DL; /* O-DU */
873 //prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
875 direction = XRAN_DIR_UL; /* RU */
876 //prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
879 vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
881 num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);
882 /* iterate C-Plane configuration to generate corresponding U-Plane */
884 prepare_sf_slot_sym(direction, frame_id, subframe_id, slot_id, sym_id, &loc_xp);
886 loc_tx_mbufs.len = 0;
887 while(next < num_sections) {
888 sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);
893 if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) { /* only supports type 1 */
894 print_err("Invalid section type in section DB - %d", sectinfo->type);
898 /* skip, if not scheduled */
899 if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)
/* Compressed sections carry iqWidth-bit samples; default is 16. */
903 if(sectinfo->compMeth)
904 iq_sample_size_bits = sectinfo->iqWidth;
906 print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
907 sectinfo->type, sectinfo->id, sectinfo->startPrbc,
908 sectinfo->numPrbc,sectinfo->startSymId, sectinfo->numSymbol);
910 p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sectinfo->id];
912 len = loc_tx_mbufs.len;
916 //Added for Klocworks
917 if (len >= MBUF_TABLE_SIZE) {
918 len = MBUF_TABLE_SIZE - 1;
919 rte_panic("len >= MBUF_TABLE_SIZE\n");
922 to_free_mbuf = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id];
923 pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
924 mb = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
927 rte_panic("mb == NULL\n");
930 p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
931 ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
933 mb = xran_attach_up_ext_buf(vf_id, (int8_t *)mb, (int8_t *) p_sec_iq,
934 (uint16_t) ext_buff_len,
935 p_share_data, (enum xran_compression_method) sectinfo->compMeth, compType);
/* Remember the new mbuf so the NEXT use of this slot can free it... */
936 p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id] = mb;
937 rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
/* ...and release the one kept from the previous round. */
940 rte_pktmbuf_free(to_free_mbuf);
944 prepare_symbol_opt(direction, sectinfo->id,
946 (struct rb_map *)p_sec_iq,
949 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
954 xran_get_updl_seqid(pHandle, cc_id, ant_id),
959 /* if we don't need to do any fragmentation */
/* Per-PRB wire size is 3*iqWidth payload bytes + 1 parameter byte —
 * presumably a conservative upper bound; TODO confirm the "+ 1". */
960 if (likely (p_xran_dev_ctx->fh_init.mtu >=
961 sectinfo->numPrbc * (3*iq_sample_size_bits + 1))) {
962 /* no fragmentation */
963 loc_tx_mbufs.m_table[len] = mb;
966 /* current code should not go to fragmentation as it should be taken care of by section allocation already */
967 print_err("should not go to fragmentation mtu %d packet size %d\n", p_xran_dev_ctx->fh_init.mtu, sectinfo->numPrbc * (3*iq_sample_size_bits + 1));
971 for (i = len; i < len + len2; i ++) {
973 m = loc_tx_mbufs.m_table[i];
974 struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
975 rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
976 if (eth_hdr == NULL) {
977 rte_panic("No headroom in mbuf.\n");
983 if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {
984 rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
986 loc_tx_mbufs.len = len;
987 } /* while(section) */
989 /* Transmit packets */
990 xran_send_burst(p_xran_dev_ctx, &loc_tx_mbufs, vf_id);
991 loc_tx_mbufs.len = 0;
999 xran_prepare_up_dl_sym(uint16_t xran_port_id, uint32_t nSlotIdx, uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
1000 uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
1004 uint32_t numSlotMu1 = 5;
1006 uint32_t mlogVar[15];
1007 uint32_t mlogVarCnt = 0;
1009 unsigned long t1 = MLogXRANTick();
1011 void *pHandle = NULL;
1014 uint8_t num_eAxc = 0;
1015 uint8_t num_eAxc_prach = 0;
1016 uint8_t num_eAxAntElm = 0;
1017 uint8_t num_CCPorts = 0;
1018 uint32_t frame_id = 0;
1019 uint32_t subframe_id = 0;
1020 uint32_t slot_id = 0;
1021 uint32_t sym_id = 0;
1022 uint32_t sym_idx_to_send = 0;
1025 enum xran_in_period inPeriod;
1028 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1030 p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
1032 if(p_xran_dev_ctx == NULL)
1035 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
1038 interval = p_xran_dev_ctx->interval_us_local;
1039 PortId = p_xran_dev_ctx->xran_port_id;
1041 pHandle = p_xran_dev_ctx;
1043 for (idxSym = nSymStart; idxSym < (nSymStart + nSymNum) && idxSym < XRAN_NUM_OF_SYMBOL_PER_SLOT; idxSym++) {
1044 t1 = MLogXRANTick();
1045 if(((1 << idxSym) & nSymMask) ) {
1046 sym_idx_to_send = nSlotIdx*XRAN_NUM_OF_SYMBOL_PER_SLOT + idxSym;
1047 XranOffsetSym(p_xran_dev_ctx->sym_up, sym_idx_to_send, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME(interval)*1000, &inPeriod);
1048 tti = XranGetTtiNum(sym_idx_to_send, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1049 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1050 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
1052 uint16_t sfnSecStart = xran_getSfnSecStart();
1053 if(unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
1056 sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
1058 else if(unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
1061 if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
1063 sfnSecStart -= NUM_OF_FRAMES_PER_SECOND;
1067 sfnSecStart += NUM_OF_FRAMES_PER_SFN_PERIOD - NUM_OF_FRAMES_PER_SECOND;
1070 frame_id = XranGetFrameNum(tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1071 // ORAN frameId, 8 bits, [0, 255]
1072 frame_id = (frame_id & 0xff);
1074 sym_id = XranGetSymNum(sym_idx_to_send, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1075 ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;
1077 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
1081 mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
1082 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
1083 mlogVar[mlogVarCnt++] = idxSym;
1084 mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);
1085 mlogVar[mlogVarCnt++] = tti;
1086 mlogVar[mlogVarCnt++] = frame_id;
1087 mlogVar[mlogVarCnt++] = subframe_id;
1088 mlogVar[mlogVarCnt++] = slot_id;
1089 mlogVar[mlogVarCnt++] = sym_id;
1090 mlogVar[mlogVarCnt++] = PortId;
1091 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
1093 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
1094 && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B)
1096 num_eAxc = xran_get_num_eAxcUl(pHandle);
1100 num_eAxc = xran_get_num_eAxc(pHandle);
1103 num_eAxc_prach = ((num_eAxc > XRAN_MAX_PRACH_ANT_NUM)? XRAN_MAX_PRACH_ANT_NUM : num_eAxc);
1104 num_CCPorts = xran_get_num_cc(pHandle);
1107 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP) {
1108 enum xran_comp_hdr_type compType;
1109 enum xran_pkt_dir direction;
1111 uint32_t loc_ret = 1;
1112 uint16_t xran_port_id;
1113 PSECTION_DB_TYPE p_sec_db = NULL;
1115 compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
1117 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)
1119 direction = XRAN_DIR_DL; /* O-DU */
1120 //prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
1124 direction = XRAN_DIR_UL; /* RU */
1125 //prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
1128 if(unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM))
1130 print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
1134 if(unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX))
1136 print_err("Invalid Context id - %d", ctx_id);
1140 if(unlikely(direction > XRAN_DIR_MAX))
1142 print_err("Invalid direction - %d", direction);
1146 if(unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX))
1148 print_err("Invalid CC id - %d", num_CCPorts);
1152 if(unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR)))
1154 print_err("Invalid eAxC id - %d", num_eAxc);
1158 xran_port_id = p_xran_dev_ctx->xran_port_id;
1159 p_sec_db = p_sectiondb[p_xran_dev_ctx->xran_port_id];
1160 if(unlikely(p_sec_db == NULL))
1162 print_err("p_sec_db == NULL\n");
1167 retval = xran_process_tx_sym_cp_on_opt(pHandle, ctx_id, tti,
1168 nCcStart, nCcNum, nAntStart, nAntNum, frame_id, subframe_id, slot_id, idxSym,
1169 compType, direction, xran_port_id, p_sec_db);
1171 print_err("loc_ret %d\n", loc_ret);
1175 for (ant_id = 0; ant_id < num_eAxc; ant_id++) {
1176 for (cc_id = 0; cc_id < num_CCPorts; cc_id++) {
1177 //struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
1178 if(p_xran_dev_ctx->puschMaskEnable)
1180 if((tti % numSlotMu1) != p_xran_dev_ctx->puschMaskSlot)
1181 retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
1184 retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
1186 if(p_xran_dev_ctx->enablePrach && (ant_id < num_eAxc_prach) )
1188 retval = xran_process_tx_prach_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
1194 /* SRS U-Plane, only for O-RU emulation with Cat B */
1195 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
1196 && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B
1197 && p_xran_dev_ctx->enableSrs
1198 && ((p_xran_dev_ctx->srs_cfg.symbMask >> idxSym)&1))
1200 struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
1202 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1204 /* check special frame */
1205 if((xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
1206 || (xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1))
1208 if(((tti % p_xran_dev_ctx->fh_cfg.frame_conf.nTddPeriod) == pSrsCfg->slot)
1209 && (p_xran_dev_ctx->ndm_srs_scheduled == 0))
1212 struct xran_prb_map *prb_map;
1213 prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
1215 /* if PRB map is present in first antenna, assume SRS might be scheduled. */
1216 if(prb_map && prb_map->nPrbElm)
1218 /* NDM U-Plane is not enabled */
1219 if(pSrsCfg->ndm_offset == 0)
1222 if (prb_map->nPrbElm > 0)
1224 /* Check symbol range in PRB Map */
1225 if(sym_id >= prb_map->prbMap[0].nStartSymb
1226 && sym_id < (prb_map->prbMap[0].nStartSymb + prb_map->prbMap[0].numSymb))
1227 for(ant_id=0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
1228 xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id);
1232 /* NDM U-Plane is enabled, SRS U-Planes will be transmitted after ndm_offset (in slots) */
1235 p_xran_dev_ctx->ndm_srs_scheduled = 1;
1236 p_xran_dev_ctx->ndm_srs_tti = tti;
1237 p_xran_dev_ctx->ndm_srs_txtti = (tti + pSrsCfg->ndm_offset)%2000;
1238 p_xran_dev_ctx->ndm_srs_schedperiod = pSrsCfg->slot;
1243 /* check SRS NDM UP has been scheduled in non special slots */
1244 else if(p_xran_dev_ctx->ndm_srs_scheduled
1245 && p_xran_dev_ctx->ndm_srs_txtti == tti)
1248 uint32_t srs_tti, srsFrame, srsSubframe, srsSlot;
1251 srs_tti = p_xran_dev_ctx->ndm_srs_tti;
1252 num_eAxAntElm = xran_get_num_ant_elm(pHandle);
1253 ndm_step = num_eAxAntElm / pSrsCfg->ndm_txduration;
1255 srsSlot = XranGetSlotNum(srs_tti, SLOTNUM_PER_SUBFRAME(interval));
1256 srsSubframe = XranGetSubFrameNum(srs_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
1257 srsFrame = XranGetFrameNum(srs_tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1258 srsFrame = (srsFrame & 0xff);
1259 srsCtx = srs_tti % XRAN_MAX_SECTIONDB_CTX;
1261 if(sym_id < pSrsCfg->ndm_txduration)
1263 for(ant_id=sym_id*ndm_step; ant_id < (sym_id+1)*ndm_step; ant_id++)
1264 xran_process_tx_srs_cp_off(pHandle, srsCtx, srs_tti, cc_id, ant_id, srsFrame, srsSubframe, srsSlot);
1268 p_xran_dev_ctx->ndm_srs_scheduled = 0;
1269 p_xran_dev_ctx->ndm_srs_tti = 0;
1270 p_xran_dev_ctx->ndm_srs_txtti = 0;
1271 p_xran_dev_ctx->ndm_srs_schedperiod = 0;
1277 MLogXRANTask(PID_DISPATCH_TX_SYM, t1, MLogXRANTick());
/* Drain the U-Plane mbufs staged on a per-symbol ring and hand them to the
 * NIC TX queue of the given VF.
 *
 * @param p_xran_dev_ctx  device context (unused in the visible lines; the
 *                        commented-out send_upmbuf2ring path used it)
 * @param r               rte_ring holding prepared rte_mbuf packets for one symbol
 * @param vf_id           ethdev port id used as the DPDK TX port
 * @return 0 when the ring is empty; otherwise the count handed to the NIC
 *         (NOTE(review): the tail of the function — retry behavior when
 *         sent < dequeued and the final return — is elided in this listing;
 *         confirm against the full source.)
 */
1284 static inline uint16_t
1285 xran_tx_sym_from_ring(struct xran_device_ctx* p_xran_dev_ctx, struct rte_ring *r, uint16_t vf_id)
1287 struct rte_mbuf *mbufs[XRAN_MAX_MEM_IF_RING_SIZE];
1288 uint16_t dequeued, sent = 0;
1290 //long t1 = MLogXRANTick();
/* Pull up to XRAN_MAX_MEM_IF_RING_SIZE packets queued for this symbol. */
1292 dequeued = rte_ring_dequeue_burst(r, (void **)mbufs, XRAN_MAX_MEM_IF_RING_SIZE,
1295 return 0; /* Nothing to send. */
1298 //sent += p_xran_dev_ctx->send_upmbuf2ring(mbufs[sent], ETHER_TYPE_ECPRI, vf_id);
/* Transmit on queue 0 of the VF; 'sent' accumulates so &mbufs[sent] resumes
 * from the first untransmitted packet on a partial burst. */
1299 sent += rte_eth_tx_burst(vf_id, 0, &mbufs[sent], dequeued - sent);
1300 if (sent == dequeued){
1301 // MLogXRANTask(PID_REQUEUE_TX_SYM, t1, MLogXRANTick());
/* For every (cc, antenna) pair in the requested range, locate the per-symbol
 * TX ring of the fronthaul buffer for this TTI and flush it to the mapped VF
 * via xran_tx_sym_from_ring().
 *
 * Most parameters (ctx_id, frame/subframe/slot ids, compType, direction as a
 * map2vf index, p_sec_db) mirror the cp_on_opt variant; only tti, sym_id,
 * cc/ant ranges and direction are used in the lines visible here.
 * NOTE(review): the return-type line, loop-variable declarations and the
 * function's return statement are elided in this listing.
 */
1308 xran_process_tx_sym_cp_on_ring(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id,
1309 uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
1310 uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
1312 struct rte_ring *ring = NULL;
1313 struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
1318 for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++) {
1319 for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++) {
/* U-Plane VF for this direction/CC/antenna, then the per-symbol ring of the
 * fronthaul TX buffer selected by tti modulo the buffer depth. */
1320 vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
1321 ring = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pRing;
1322 xran_tx_sym_from_ring(p_xran_dev_ctx, ring, vf_id);
1328 //#define TRANSMIT_BURST
1329 //#define ENABLE_DEBUG_COREDUMP
1331 #define ETHER_TYPE_ECPRI_BE (0xFEAE)
/* Build and enqueue O-RAN U-Plane packets for one symbol, driven by the
 * C-Plane section database (CP "on" optimized path).
 *
 * For each (cc, antenna) in the requested range it walks the section entries
 * recorded for this context/direction, and for each scheduled section:
 *   - at prbElemBegin: takes a sequence number, allocates a small mbuf and
 *     attaches the IQ buffer region as external mbuf data (zero-copy), then
 *     writes Ethernet + eCPRI + radio-app headers in place;
 *   - per section: appends a data-section header (plus a compression header
 *     when comp_head_upd) and accounts the IQ payload into the packet and
 *     eCPRI payload sizes;
 *   - at prbElemEnd: finalizes the eCPRI length (byte-swap) and enqueues the
 *     packet either to the per-symbol ring (bbu_offload) or the VF TX ring.
 * Finally it adds section/byte totals to the fh_counters.
 *
 * NOTE(review): this listing is decimated — closing braces, else-arms, the
 * switch(compMeth) header and several statements are elided; comments below
 * only describe what the visible lines establish.
 */
1334 xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id,
1335 uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
1336 uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
1338 struct xran_up_pkt_gen_params *pxp;
1339 struct data_section_hdr *pDataSec;
1342 struct rte_ring *ring;
1344 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
1345 struct xran_section_info* sectinfo;
1346 struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
1347 struct rte_mbuf_ext_shared_info* p_share_data;
1348 struct xran_sectioninfo_db* ptr_sect_elm = NULL;
1349 struct rte_mbuf* mb_oran_hdr_ext = NULL;
1350 struct xran_ecpri_hdr* ecpri_hdr = NULL;
1351 //uint16_t* __restrict pSrc = NULL;
1352 uint16_t* __restrict pDst = NULL;
1355 uint16_t ext_buff_len = 0;
1356 uint16_t iq_sample_size_bytes=0;
1357 uint16_t num_sections = 0, total_sections = 0;
1359 uint16_t elm_bytes = 0;
1360 uint16_t section_id;
1361 uint16_t nPktSize=0;
1364 const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
1366 uint8_t cc_id, ant_id;
1368 #ifdef TRANSMIT_BURST
1371 //uint16_t len2 = 0, len_frag = 0;
1375 #ifdef TRANSMIT_BURST
1376 struct mbuf_table loc_tx_mbufs;
1377 struct mbuf_table loc_tx_mbufs_fragmented = {0};
1379 uint8_t fragNeeded=0;
1381 const uint8_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
1382 uint8_t comp_head_upd = 0;
/* Headroom reserved in front of the IQ data so all protocol headers can be
 * written in-place ahead of the payload. */
1384 const uint8_t total_header_size = (RTE_PKTMBUF_HEADROOM +
1385 sizeof(struct xran_ecpri_hdr) +
1386 sizeof(struct radio_app_common_hdr) +
1387 sizeof(struct data_section_hdr));
1390 for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++)
1392 for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++)
1394 ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][ant_id];
1395 if (unlikely(ptr_sect_elm == NULL)){
1396 rte_panic("ptr_sect_elm == NULL\n");
/* Only generate U-Plane when C-Plane recorded at least one section. */
1400 if(0!=ptr_sect_elm->cur_index)
1402 num_sections = ptr_sect_elm->cur_index;
1403 /* iterate C-Plane configuration to generate corresponding U-Plane */
1404 vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
1405 mb_base = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
1406 ring = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pRing;
1407 if (unlikely(mb_base == NULL))
1409 rte_panic("mb == NULL\n");
/* eAxC id (CC + RU port fields) in big-endian, reused for every eCPRI hdr. */
1411 cid = ((cc_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ccId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ccId) | ((ant_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ruPortId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ruPortId);
1412 cid = rte_cpu_to_be_16(cid);
1414 #ifdef TRANSMIT_BURST
1415 loc_tx_mbufs.len = 0;
1418 #pragma loop_count min=1, max=16
1419 for (next=0; next< num_sections; next++)
1421 sectinfo = &ptr_sect_elm->list[next];
1423 if (unlikely(sectinfo == NULL)) {
1424 print_err("sectinfo == NULL\n");
1427 if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
1428 { /* only supports type 1 */
1429 print_err("Invalid section type in section DB - %d", sectinfo->type);
1432 /* skip, if not scheduled */
1433 if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
1436 compMeth = sectinfo->compMeth;
1437 iqWidth = sectinfo->iqWidth;
1438 section_id = sectinfo->id;
/* A per-section compression header is emitted only for compressed data with
 * a dynamic (per-packet) compression-header configuration. */
1440 comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
/* Start of a new packet: bump the DL/UL sequence counter and start the
 * estimated packet size (18 appears to be an Ethernet overhead allowance —
 * TODO confirm against full source). */
1442 if(sectinfo->prbElemBegin || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable)
1444 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)
1445 seq_id = xran_updl_seq_id_num[xran_port_id][cc_id][ant_id]++;
1447 seq_id = xran_upul_seq_id_num[xran_port_id][cc_id][ant_id]++;
1448 iq_sample_size_bytes = 18 + sizeof(struct xran_ecpri_hdr) +
1449 sizeof(struct radio_app_common_hdr);
1455 iq_sample_size_bytes += sizeof(struct data_section_hdr) ;
1459 iq_sample_size_bytes += sizeof(struct data_section_compression_hdr);
1462 iq_sample_size_bytes += sectinfo->numPrbc*(iqWidth*3 + 1);
1465 print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
1466 sectinfo->type, sectinfo->id, sectinfo->startPrbc,
1467 sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
1470 #ifdef TRANSMIT_BURST
1471 len = loc_tx_mbufs.len;
1472 //Added for Klocworks
1473 if (unlikely(len >= MBUF_TABLE_SIZE))
1475 len = MBUF_TABLE_SIZE - 1;
1476 rte_panic("len >= MBUF_TABLE_SIZE\n");
/* Packet start: attach the symbol's IQ region as external mbuf data so no
 * copy of the IQ samples is needed. */
1479 if(sectinfo->prbElemBegin || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable)
1481 p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][section_id];
1482 p_share_data->free_cb = extbuf_free_callback;
1483 p_share_data->fcb_opaque = NULL;
1484 rte_mbuf_ext_refcnt_set(p_share_data, 1);
1486 /* Create ethernet + eCPRI + radio app header */
/* Point ext_buff total_header_size bytes *before* the IQ data so headers are
 * written into the reserved headroom of the same buffer. */
1487 ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
1489 ext_buff = ((char*)p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData + sectinfo->sec_desc[sym_id].iq_buffer_offset) - total_header_size;
1490 ext_buff_len += (total_header_size + 18);
1494 ext_buff -= sizeof(struct data_section_compression_hdr);
1495 ext_buff_len += sizeof(struct data_section_compression_hdr);
1498 mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
1499 if (unlikely((mb_oran_hdr_ext) == NULL))
1501 rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
1504 #ifdef ENABLE_DEBUG_COREDUMP
1505 if (unlikely((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size)->iova == 0))
1507 rte_panic("Failed rte_mem_virt2iova\n");
1509 if (unlikely(((rte_iova_t)(struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size)->iova) == RTE_BAD_IOVA))
1511 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
/* Manual external-buffer attach: IOVA is derived from the base mbuf's
 * mempool object header plus the offset of ext_buff within that buffer. */
1514 mb_oran_hdr_ext->buf_addr = ext_buff;
1515 mb_oran_hdr_ext->buf_iova = ((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova + RTE_PTR_DIFF(ext_buff, mb_base);
1516 mb_oran_hdr_ext->buf_len = ext_buff_len;
1517 mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
1518 mb_oran_hdr_ext->shinfo = p_share_data;
1519 mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
1520 mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
1521 mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
1522 mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
/* NOTE(review): the store at 1524 and the conditional free at 1526/1528 hit
 * the SAME to_free_mbuf slot; lines between them are elided in this listing —
 * verify in the full source that the free targets the *previous* occupant
 * rather than the mbuf just stored. */
1524 p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id] = (void*)mb_oran_hdr_ext;
1525 rte_pktmbuf_refcnt_update((void*)mb_oran_hdr_ext, 1); /* make sure eth won't free our mbuf */
1526 if (p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id])
1528 rte_pktmbuf_free(p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id]);
1531 pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
1533 /* Fill in the ethernet header. */
1534 #ifndef TRANSMIT_BURST
1535 rte_eth_macaddr_get(mb_oran_hdr_ext->port, &((struct rte_ether_hdr*)pStart)->s_addr); /* set source addr */
1536 ((struct rte_ether_hdr*)pStart)->d_addr = eth_ctx->entities[vf_id][ID_O_RU]; /* set dst addr */
1537 ((struct rte_ether_hdr*)pStart)->ether_type = ETHER_TYPE_ECPRI_BE; /* ethertype */
1539 nPktSize = sizeof(struct rte_ether_hdr)
1540 + sizeof(struct xran_ecpri_hdr)
1541 + sizeof(struct radio_app_common_hdr) ;
/* eCPRI common header; payload size accumulates per section below and is
 * byte-swapped once at prbElemEnd. */
1543 ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
1545 ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
1546 ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
1547 ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
1549 /* one to one lls-CU to RU only and band sector is the same */
1550 ecpri_hdr->ecpri_xtc_id = cid;
1552 /* no transport layer fragmentation supported */
1553 ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
1554 ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
1555 ecpri_hdr->cmnhdr.bits.ecpri_payl_size = sizeof(struct radio_app_common_hdr) + XRAN_ECPRI_HDR_SZ; //xran_get_ecpri_hdr_size();;;
1557 } /* if(sectinfo->prbElemBegin) */
1559 /* Prepare U-Plane section hdr */
/* iqWidth 0 encodes uncompressed 16-bit IQ. parm_size is the per-PRB
 * compression parameter byte count (switch header elided in this listing). */
1560 iqWidth = (iqWidth == 0) ? 16 : iqWidth;
1563 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
1564 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
1569 n_bytes = (3 * iqWidth + parm_size) * sectinfo->numPrbc; //Dont understand this
1570 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
1572 /* Ethernet & eCPRI added already */
1573 nPktSize += sizeof(struct data_section_hdr) + n_bytes;
1576 nPktSize += sizeof(struct data_section_compression_hdr);
1578 if(likely((ecpri_hdr!=NULL)))
1580 ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_hdr) + n_bytes ;
1583 ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_compression_hdr);
1587 print_err("ecpri_hdr should not be NULL\n");
1589 //ecpri_hdr->cmnhdr.bits.ecpri_payl_size += ecpri_payl_size;
/* First section of the packet also writes the radio application header. */
1593 if(sectinfo->prbElemBegin || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable)
1595 pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
1596 pxp = (struct xran_up_pkt_gen_params *)pDst;
1597 /* radio app header */
1598 pxp->app_params.data_feature.value = 0x10;
1599 pxp->app_params.data_feature.data_direction = direction;
1600 pxp->app_params.frame_id = frame_id;
1601 pxp->app_params.sf_slot_sym.subframe_id = subframe_id;
1602 pxp->app_params.sf_slot_sym.slot_id = slot_id;
1603 pxp->app_params.sf_slot_sym.symb_id = sym_id;
1604 /* convert to network byte order */
1605 pxp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(pxp->app_params.sf_slot_sym.value);
/* Per-section data header (always), written at the current pDst cursor. */
1609 pDataSec = (struct data_section_hdr *)pDst;
1611 pDataSec->fields.sect_id = section_id;
1612 pDataSec->fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(sectinfo->numPrbc);
1613 pDataSec->fields.start_prbu = (sectinfo->startPrbc & 0x03ff);
1614 pDataSec->fields.sym_inc = 0;
1615 pDataSec->fields.rb = 0;
1616 /* network byte order */
1617 pDataSec->fields.all_bits = rte_cpu_to_be_32(pDataSec->fields.all_bits);
1622 print_err("pDataSec is NULL idx = %u num_sections = %u\n", next, num_sections);
1629 print_err("pDst == NULL\n");
/* Optional compression header (dynamic comp-hdr mode only). */
1632 ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_comp_meth = compMeth;
1633 ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
1634 ((struct data_section_compression_hdr *)pDst)->rsrvd = 0;
1638 //Increment by IQ data len
1639 pDst = (uint16_t *)((uint8_t *)pDst + n_bytes) ;
1640 if(mb_oran_hdr_ext){
1641 rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
1642 rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
1645 if(sectinfo->prbElemEnd || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable) /* Transmit the packet */
1647 if(likely((ecpri_hdr!=NULL)))
1648 ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_hdr->cmnhdr.bits.ecpri_payl_size);
1650 print_err("ecpri_hdr should not be NULL\n");
1651 /* if we don't need to do any fragmentation */
1652 if (likely(p_xran_dev_ctx->fh_init.mtu >= (iq_sample_size_bytes)))
1654 /* no fragmentation */
1656 #ifdef TRANSMIT_BURST
1657 loc_tx_mbufs.m_table[len++] = (void*)mb_oran_hdr_ext;
1658 if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM))
1660 rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
1662 loc_tx_mbufs.len = len;
/* Default (non-burst) path: hand off to per-symbol ring (BBU offload) or
 * directly to the VF's TX ring. */
1665 if(p_xran_dev_ctx->fh_init.io_cfg.bbu_offload){
1666 rte_ring_enqueue(ring, mb_oran_hdr_ext);
1668 xran_enqueue_mbuf(mb_oran_hdr_ext, eth_ctx->tx_ring[vf_id]);
1674 /* current code should not go to fragmentation as it should be taken care of by section allocation already */
1675 // print_err("should not go into fragmentation mtu %d packet size %d\n", p_xran_dev_ctx->fh_init.mtu, sectinfo->numPrbc * (3*iq_sample_size_bits + 1));
1678 elm_bytes += nPktSize;
1679 } /* if(prbElemEnd) */
1681 } /* if ptr_sect_elm->cur_index */
1683 total_sections += num_sections;
1685 /* Transmit packets */
1686 #ifdef TRANSMIT_BURST
1687 if (loc_tx_mbufs.len)
1689 for (int32_t i = 0; i < loc_tx_mbufs.len; i++)
1691 if(p_xran_dev_ctx->fh_init.io_cfg.bbu_offload){
/* NOTE(review): loop bound is loc_tx_mbufs.len but the bbu_offload branch
 * enqueues from loc_tx_mbufs_fragmented (always empty per the note at 1703).
 * Looks like a copy/paste slip — expected loc_tx_mbufs.m_table[i]; confirm
 * against the full source (code is dead unless TRANSMIT_BURST is defined). */
1692 rte_ring_enqueue(ring, loc_tx_mbufs_fragmented.m_table[i]);
1694 p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs.m_table[i], ETHER_TYPE_ECPRI, vf_id);
1697 loc_tx_mbufs.len = 0;
1700 /* Transmit fragmented packets */
1701 if (unlikely(fragNeeded))
1703 #if 0 /* There is no logic populating loc_tx_mbufs_fragmented. hence disabling this code */
1704 for (int32_t i = 0; i < loc_tx_mbufs_fragmented.len; i++)
1706 if(p_xran_dev_ctx->fh_init.io_cfg.bbu_offload){
1707 rte_ring_enqueue(ring, loc_tx_mbufs_fragmented.m_table[i]);
1709 p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs_fragmented.m_table[i], ETHER_TYPE_ECPRI, vf_id);
1714 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
1715 } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
/* Update fronthaul TX statistics for this invocation. */
1717 struct xran_common_counters* pCnt = &p_xran_dev_ctx->fh_counters;
1718 pCnt->tx_counter += total_sections;
1719 pCnt->tx_bytes_counter += elm_bytes;
/* SRS counterpart of xran_process_tx_sym_cp_on_opt(): builds U-Plane packets
 * for SRS antenna elements from the C-Plane section DB and transmits them.
 *
 * Differences from the generic path visible in these lines:
 *   - the eAxC id is offset by srs_cfg.eAxC_offset (antElm_eAxC_id) for the
 *     section DB, eCPRI xtc_id, VF mapping and UL sequence number, while the
 *     IQ buffers and shared-info slots are indexed by the relative ant_id;
 *   - IQ data comes from sFHSrsRxBbuIoBufCtrl (SRS buffer pool);
 *   - the indirection mbuf comes from xran_ethdi_mbuf_indir_alloc();
 *   - transmit always goes through send_upmbuf2ring (no ring/bbu_offload or
 *     TRANSMIT_BURST variants).
 *
 * NOTE(review): listing is decimated — braces, else-arms and the
 * switch(compMeth) header are elided; comments describe visible lines only.
 */
1725 xran_process_tx_srs_cp_on(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id,
1726 uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
1727 uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
1729 struct xran_up_pkt_gen_params *pxp;
1730 struct data_section_hdr *pDataSec;
1731 int32_t antElm_eAxC_id = 0;// = ant_id + p_srs_cfg->eAxC_offset;
1733 struct xran_srs_config *p_srs_cfg;
1738 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
1739 struct xran_section_info* sectinfo;
1740 struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
1741 p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
1742 struct rte_mbuf_ext_shared_info* p_share_data;
1743 struct xran_sectioninfo_db* ptr_sect_elm = NULL;
1744 struct rte_mbuf* mb_oran_hdr_ext = NULL;
1745 struct xran_ecpri_hdr* ecpri_hdr = NULL;
1746 uint16_t* __restrict pDst = NULL;
1749 uint16_t ext_buff_len = 0;
1750 uint16_t iq_sample_size_bytes=0;
1751 uint16_t num_sections = 0, total_sections = 0;
1753 uint16_t elm_bytes = 0;
1754 uint16_t section_id;
1755 uint16_t nPktSize=0;
1758 const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
1760 uint8_t cc_id, ant_id;
1765 const uint8_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
1766 uint8_t comp_head_upd = 0;
/* Headroom in front of the IQ region reserved for all protocol headers. */
1768 const uint8_t total_header_size = (RTE_PKTMBUF_HEADROOM +
1769 sizeof(struct xran_ecpri_hdr) +
1770 sizeof(struct radio_app_common_hdr) +
1771 sizeof(struct data_section_hdr));
1773 for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++)
1775 for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++)
/* Section DB and on-the-wire ids use the SRS eAxC offset. */
1777 antElm_eAxC_id = ant_id + p_srs_cfg->eAxC_offset;
1778 ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][antElm_eAxC_id];
1780 if (unlikely(ptr_sect_elm == NULL)){
1781 printf("ant_id = %d ctx_id = %d,start_ant = %d, num_ant = %d, antElm_eAxC_id = %d\n",ant_id,ctx_id,start_ant,num_ant,antElm_eAxC_id);
1782 rte_panic("ptr_sect_elm == NULL\n");
/* Skip antennas with no C-Plane sections recorded. */
1785 if(0!=ptr_sect_elm->cur_index)
1787 num_sections = ptr_sect_elm->cur_index;
1788 /* iterate C-Plane configuration to generate corresponding U-Plane */
1789 vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, antElm_eAxC_id);//p_xran_dev_ctx->map2vf[direction][cc_id][antElm_eAxC_id][XRAN_UP_VF];
1790 mb_base = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
1791 if (unlikely(mb_base == NULL))
1793 rte_panic("mb == NULL\n");
/* eAxC id (CC + offset RU port) in big-endian for the eCPRI header. */
1795 cid = ((cc_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ccId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ccId) | ((antElm_eAxC_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ruPortId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ruPortId);
1796 cid = rte_cpu_to_be_16(cid);
1797 #pragma loop_count min=1, max=16
1798 for (next=0; next< num_sections; next++)
1800 sectinfo = &ptr_sect_elm->list[next];
1802 if (unlikely(sectinfo == NULL)) {
1803 print_err("sectinfo == NULL\n");
1806 if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
1807 { /* only supports type 1 */
1808 print_err("Invalid section type in section DB - %d", sectinfo->type);
1811 /* skip, if not scheduled */
1812 if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
1814 compMeth = sectinfo->compMeth;
1815 iqWidth = sectinfo->iqWidth;
1816 section_id = sectinfo->id;
/* Compression header emitted only for compressed data with dynamic comp-hdr. */
1818 comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
/* Start of a new packet: UL sequence number for the offset eAxC, and the
 * running size estimate (18 appears to be an Ethernet overhead allowance —
 * TODO confirm against full source). */
1820 if(sectinfo->prbElemBegin)
1822 seq_id = xran_get_upul_seqid(pHandle, cc_id, antElm_eAxC_id);
1823 iq_sample_size_bytes = 18 + sizeof(struct xran_ecpri_hdr) +
1824 sizeof(struct radio_app_common_hdr);
1829 iq_sample_size_bytes += sizeof(struct data_section_hdr) ;
1833 iq_sample_size_bytes += sizeof(struct data_section_compression_hdr);
1836 iq_sample_size_bytes += sectinfo->numPrbc*(iqWidth*3 + 1);
1839 print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
1840 sectinfo->type, sectinfo->id, sectinfo->startPrbc,
1841 sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
/* Packet start: attach the SRS IQ region as external mbuf data (zero-copy);
 * shared-info slot is per (tti, cc, relative ant). */
1843 if(sectinfo->prbElemBegin)
1845 p_share_data = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
1846 p_share_data->free_cb = extbuf_free_callback;
1847 p_share_data->fcb_opaque = NULL;
1848 rte_mbuf_ext_refcnt_set(p_share_data, 1);
1850 /* Create ethernet + eCPRI + radio app header */
1851 ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
1853 ext_buff = ((char*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData + sectinfo->sec_desc[sym_id].iq_buffer_offset) - total_header_size;
1854 ext_buff_len += (total_header_size + 18);
1858 ext_buff -= sizeof(struct data_section_compression_hdr);
1859 ext_buff_len += sizeof(struct data_section_compression_hdr);
1862 mb_oran_hdr_ext = xran_ethdi_mbuf_indir_alloc();
1863 if (unlikely((mb_oran_hdr_ext) == NULL))
1865 rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
1868 #ifdef ENABLE_DEBUG_COREDUMP
1869 if (unlikely((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size)->iova == 0))
1871 rte_panic("Failed rte_mem_virt2iova\n");
1873 if (unlikely(((rte_iova_t)(struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size)->iova) == RTE_BAD_IOVA))
1875 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
/* Manual external-buffer attach; IOVA computed from the base mbuf's mempool
 * object header plus ext_buff's offset inside that buffer. */
1878 mb_oran_hdr_ext->buf_addr = ext_buff;
1879 mb_oran_hdr_ext->buf_iova = ((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova + RTE_PTR_DIFF(ext_buff, mb_base);
1880 mb_oran_hdr_ext->buf_len = ext_buff_len;
1881 mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
1882 mb_oran_hdr_ext->shinfo = p_share_data;
1883 mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
1884 mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
1885 mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
1886 mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
1887 pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
1889 /* Fill in the ethernet header. */
1890 rte_eth_macaddr_get(mb_oran_hdr_ext->port, &((struct rte_ether_hdr*)pStart)->s_addr); /* set source addr */
1891 ((struct rte_ether_hdr*)pStart)->d_addr = eth_ctx->entities[vf_id][ID_O_RU]; /* set dst addr */
1892 ((struct rte_ether_hdr*)pStart)->ether_type = ETHER_TYPE_ECPRI_BE; /* ethertype */
1894 nPktSize = sizeof(struct rte_ether_hdr)
1895 + sizeof(struct xran_ecpri_hdr)
1896 + sizeof(struct radio_app_common_hdr) ;
/* eCPRI common header; payload size accumulates per section and is
 * byte-swapped once at prbElemEnd. */
1898 ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
1900 ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
1901 ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
1902 ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
1904 /* one to one lls-CU to RU only and band sector is the same */
1905 ecpri_hdr->ecpri_xtc_id = cid;
1907 /* no transport layer fragmentation supported */
1908 ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
1909 ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
1910 ecpri_hdr->cmnhdr.bits.ecpri_payl_size = sizeof(struct radio_app_common_hdr) + XRAN_ECPRI_HDR_SZ; //xran_get_ecpri_hdr_size();;;
1912 } /* if(sectinfo->prbElemBegin) */
1914 /* Prepare U-Plane section hdr */
/* iqWidth 0 encodes uncompressed 16-bit IQ; parm_size is the per-PRB
 * compression parameter byte count (switch header elided in this listing). */
1915 iqWidth = (iqWidth == 0) ? 16 : iqWidth;
1918 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
1919 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
1924 n_bytes = (3 * iqWidth + parm_size) * sectinfo->numPrbc;
1925 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
1927 /* Ethernet & eCPRI added already */
1928 nPktSize += sizeof(struct data_section_hdr) + n_bytes;
1931 nPktSize += sizeof(struct data_section_compression_hdr);
1933 if(likely((ecpri_hdr!=NULL)))
1935 ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_hdr) + n_bytes ;
1938 ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_compression_hdr);
1942 print_err("ecpri_hdr should not be NULL\n");
/* First section of the packet also writes the radio application header. */
1945 if(sectinfo->prbElemBegin)
1947 pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
1948 pxp = (struct xran_up_pkt_gen_params *)pDst;
1949 /* radio app header */
1950 pxp->app_params.data_feature.value = 0x10;
1951 pxp->app_params.data_feature.data_direction = direction;
1952 pxp->app_params.frame_id = frame_id;
1953 pxp->app_params.sf_slot_sym.subframe_id = subframe_id;
1954 pxp->app_params.sf_slot_sym.slot_id = slot_id;
1955 pxp->app_params.sf_slot_sym.symb_id = sym_id;
1956 /* convert to network byte order */
1957 pxp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(pxp->app_params.sf_slot_sym.value);
/* Per-section data header at the current pDst cursor. */
1961 pDataSec = (struct data_section_hdr *)pDst;
1963 pDataSec->fields.sect_id = section_id;
1964 pDataSec->fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(sectinfo->numPrbc);
1965 pDataSec->fields.start_prbu = (sectinfo->startPrbc & 0x03ff);
1966 pDataSec->fields.sym_inc = 0;
1967 pDataSec->fields.rb = 0;
1968 /* network byte order */
1969 pDataSec->fields.all_bits = rte_cpu_to_be_32(pDataSec->fields.all_bits);
1974 print_err("pDataSec is NULL idx = %u num_sections = %u\n", next, num_sections);
1981 print_err("pDst == NULL\n");
/* Optional compression header (dynamic comp-hdr mode only). */
1984 ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_comp_meth = compMeth;
1985 ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
1986 ((struct data_section_compression_hdr *)pDst)->rsrvd = 0;
1990 //Increment by IQ data len
1991 pDst = (uint16_t *)((uint8_t *)pDst + n_bytes) ;
1992 if(mb_oran_hdr_ext){
1993 rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
1994 rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
1997 if(sectinfo->prbElemEnd) /* Transmit the packet */
1999 if(likely((ecpri_hdr!=NULL)))
2000 ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_hdr->cmnhdr.bits.ecpri_payl_size);
2002 print_err("ecpri_hdr should not be NULL\n");
2003 /* if we don't need to do any fragmentation */
2004 if (likely(p_xran_dev_ctx->fh_init.mtu >= (iq_sample_size_bytes)))
2006 p_xran_dev_ctx->send_upmbuf2ring(mb_oran_hdr_ext, ETHER_TYPE_ECPRI, vf_id);
2012 elm_bytes += nPktSize;
2013 } /* if(prbElemEnd) */
2015 } /* if ptr_sect_elm->cur_index */
2016 total_sections += num_sections;
2017 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
2018 } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
/* Update fronthaul TX statistics for this invocation. */
2020 struct xran_common_counters* pCnt = &p_xran_dev_ctx->fh_counters;
2021 pCnt->tx_counter += total_sections;
2022 pCnt->tx_bytes_counter += elm_bytes;
/**
 * @brief Per-symbol TX dispatcher: converts the current OTA symbol index
 *        into ORAN timing (frame/subframe/slot/symbol) and triggers
 *        U-Plane transmission for this symbol -- DL generation on the
 *        O-DU side, UL (including PRACH and SRS with optional NDM
 *        spreading) on the O-RU side, for both C-Plane-enabled and
 *        C-Plane-disabled configurations.
 *
 * @param arg  Opaque pointer to the per-port device context
 *             (struct xran_device_ctx).
 * @return int32_t status; returns early when the xran<->PHY shared
 *         memory is not yet ready (exact return statements are in lines
 *         elided from this view).
 */
2028 int32_t xran_process_tx_sym(void *arg)
/* Slots per subframe for numerology 1; used by the PUSCH-mask check below. */
2032 uint32_t numSlotMu1 = 5;
/* Scratch buffer for the MLog trace variables emitted further down. */
2034 uint32_t mlogVar[15];
2035 uint32_t mlogVarCnt = 0;
2037 unsigned long t1 = MLogXRANTick();
2039 void *pHandle = NULL;
2042 uint8_t num_eAxc = 0;
2043 uint8_t num_eAxc_prach = 0;
2044 uint8_t num_eAxAntElm = 0;
2045 uint8_t num_CCPorts = 0;
2046 uint32_t frame_id = 0;
2047 uint32_t subframe_id = 0;
2048 uint32_t slot_id = 0;
2049 uint32_t sym_id = 0;
2050 uint32_t sym_idx = 0;
2052 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *) arg;
2053 enum xran_in_period inPeriod;
2054 uint32_t interval = p_xran_dev_ctx->interval_us_local;
2055 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
/* Nothing to do until the xran<->PHY shared buffers are set up. */
2057 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
2060 pHandle = p_xran_dev_ctx;
2062 /* O-RU: send symb after OTA time with delay (UL) */
2063 /* O-DU: send symb in advance of OTA time (DL) */
/* Offset the OTA symbol index by sym_up (may be negative -- abs() is
 * logged below); the third argument is the symbol count of one second
 * (symbols/slot * slots/subframe * 1000 subframes). */
2064 sym_idx = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME(interval)*1000, &inPeriod);
2066 tti = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
2067 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
2068 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
/* Adjust the SFN-at-second-start reference when the offset symbol index
 * crossed into the next or previous one-second period. */
2070 uint16_t sfnSecStart = xran_getSfnSecStart();
2071 if(unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
/* 0x3ff masks the SFN to its 1024-frame wrap period. */
2074 sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
2076 else if(unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
2079 if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
2081 sfnSecStart -= NUM_OF_FRAMES_PER_SECOND;
/* Underflow case: wrap backwards across the SFN period boundary. */
2085 sfnSecStart += NUM_OF_FRAMES_PER_SFN_PERIOD - NUM_OF_FRAMES_PER_SECOND;
2088 frame_id = XranGetFrameNum(tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
2089 // ORAN frameId, 8 bits, [0, 255]
2090 frame_id = (frame_id & 0xff);
2092 sym_id = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
/* Section DB context is selected by tti modulo the context-ring depth. */
2093 ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;
2095 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
/* Emit the derived timing state to MLog (0xAAAAAAAA tags this trace point). */
2098 mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
2099 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
2100 mlogVar[mlogVarCnt++] = sym_idx;
2101 mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);
2102 mlogVar[mlogVarCnt++] = tti;
2103 mlogVar[mlogVarCnt++] = frame_id;
2104 mlogVar[mlogVarCnt++] = subframe_id;
2105 mlogVar[mlogVarCnt++] = slot_id;
2106 mlogVar[mlogVarCnt++] = sym_id;
2107 mlogVar[mlogVarCnt++] = PortId;
2108 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
/* A Category B O-RU transmits UL, so size by the UL eAxC count;
 * otherwise use the common eAxC count. */
2111 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
2112 && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B)
2114 num_eAxc = xran_get_num_eAxcUl(pHandle);
2118 num_eAxc = xran_get_num_eAxc(pHandle);
/* PRACH is carried on at most XRAN_MAX_PRACH_ANT_NUM antennas. */
2121 num_eAxc_prach = ((num_eAxc > XRAN_MAX_PRACH_ANT_NUM)? XRAN_MAX_PRACH_ANT_NUM : num_eAxc);
2122 num_CCPorts = xran_get_num_cc(pHandle);
/* ---- O-DU with C-Plane enabled: DL U-Plane generation ---- */
2125 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP)
2127 if(p_xran_dev_ctx->tx_sym_gen_func)
2129 enum xran_comp_hdr_type compType;
2130 uint8_t loc_ret = 1;
2131 uint16_t xran_port_id;
2132 PSECTION_DB_TYPE p_sec_db = NULL;
2134 compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* Sanity-check indices before touching the section database.
 * NOTE(review): these comparisons use '>' rather than '>=' -- if the
 * limits are array dimensions the checks look off-by-one; confirm
 * against the p_sectiondb declaration (ctx_id itself comes from a
 * modulo so it cannot actually exceed the bound here). */
2136 if(unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM))
2138 print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
2142 if(unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX))
2144 print_err("Invalid Context id - %d", ctx_id);
2148 if(unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX))
2150 print_err("Invalid CC id - %d", num_CCPorts);
2154 if(unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR)))
2156 print_err("Invalid eAxC id - %d", num_eAxc);
2160 xran_port_id = p_xran_dev_ctx->xran_port_id;
2161 p_sec_db = p_sectiondb[xran_port_id];
/* Generate DL U-Plane for all CCs/eAxCs of this symbol through the
 * configured generation callback. */
2164 p_xran_dev_ctx->tx_sym_gen_func(pHandle, ctx_id, tti,
2165 0, num_CCPorts, 0, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
2166 compType, XRAN_DIR_DL, xran_port_id, p_sec_db);
/* A CP-enabled O-DU without a generation callback is a setup error. */
2175 rte_panic("p_xran_dev_ctx->tx_sym_gen_func== NULL\n");
/* ---- O-RU with C-Plane enabled: UL U-Plane generation ---- */
2178 else if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && p_xran_dev_ctx->enableCP)
2181 enum xran_comp_hdr_type compType;
2182 uint16_t xran_port_id;
2183 PSECTION_DB_TYPE p_sec_db = NULL;
/* NOTE(review): cc_id is read here before any assignment visible in
 * this view -- its value at this point comes from elided code; verify. */
2185 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1
2186 || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1
2187 || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_FDD) == 1){
2189 if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
2190 || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){
2192 uint8_t loc_ret = 1;
2193 compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* Same bounds validation as the O-DU path above. */
2194 if(unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM))
2196 print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
2200 if(unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX))
2202 print_err("Invalid Context id - %d", ctx_id);
2206 if(unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX))
2208 print_err("Invalid CC id - %d", num_CCPorts);
2212 if(unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR)))
2214 print_err("Invalid eAxC id - %d", num_eAxc);
2218 xran_port_id = p_xran_dev_ctx->xran_port_id;
2219 p_sec_db = p_sectiondb[xran_port_id];
/* UL U-Plane for this symbol across all CCs/eAxCs. */
2222 xran_process_tx_sym_cp_on_opt(pHandle, ctx_id, tti,
2223 0, num_CCPorts, 0, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
2224 compType, XRAN_DIR_UL, xran_port_id, p_sec_db);
/* SRS (Category B only): transmit when this symbol is set in symbMask. */
2233 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_B
2234 && p_xran_dev_ctx->enableSrs
2235 && ((p_xran_dev_ctx->srs_cfg.symbMask >> sym_id)&1))
2237 xran_port_id = p_xran_dev_ctx->xran_port_id;
2238 p_sec_db = p_sectiondb[xran_port_id];
2239 compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
2240 struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
2241 struct xran_prb_map *prb_map;
2242 /* check special frame */
2243 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
/* SRS slot of the TDD period reached and no NDM transfer pending. */
2245 if(((tti % p_xran_dev_ctx->fh_cfg.frame_conf.nTddPeriod) == pSrsCfg->slot)
2246 && (p_xran_dev_ctx->ndm_srs_scheduled == 0))
2249 prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
2250 /* NDM U-Plane is not enabled */
2251 if(pSrsCfg->ndm_offset == 0)
/* Immediate SRS transmit over every antenna element. */
2253 retval = xran_process_tx_srs_cp_on(pHandle, ctx_id, tti,
2254 0, num_CCPorts, 0, xran_get_num_ant_elm(pHandle), frame_id, subframe_id, slot_id, sym_id,
2255 compType, XRAN_DIR_UL, xran_port_id, p_sec_db);
2257 /* NDM U-Plane is enabled, SRS U-Planes will be transmitted after ndm_offset (in slots) */
/* Defer SRS: remember the scheduling tti and the target tx tti.
 * NOTE(review): the %2000 wrap presumably matches the TTI counter
 * period -- confirm against the tti generator. */
2260 p_xran_dev_ctx->ndm_srs_scheduled = 1;
2261 p_xran_dev_ctx->ndm_srs_tti = tti;
2262 p_xran_dev_ctx->ndm_srs_txtti = (tti + pSrsCfg->ndm_offset)%2000;
2263 p_xran_dev_ctx->ndm_srs_schedperiod = pSrsCfg->slot;
2267 /* check SRS NDM UP has been scheduled in non special slots */
2268 else if(p_xran_dev_ctx->ndm_srs_scheduled
2269 && p_xran_dev_ctx->ndm_srs_txtti == tti)
2271 prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
2272 xran_port_id = p_xran_dev_ctx->xran_port_id;
2273 p_sec_db = p_sectiondb[xran_port_id];
2275 uint32_t srs_tti, srsFrame, srsSubframe, srsSlot, srs_sym;
2277 if(prb_map && prb_map->nPrbElm)
2279 srs_sym = prb_map->prbMap[0].nStartSymb;
/* Recompute ORAN timing for the slot the SRS was scheduled in. */
2281 srs_tti = p_xran_dev_ctx->ndm_srs_tti;
2282 num_eAxAntElm = xran_get_num_ant_elm(pHandle);
/* Spread the antenna elements evenly over ndm_txduration symbols. */
2283 ndm_step = num_eAxAntElm / pSrsCfg->ndm_txduration;
2285 srsSlot = XranGetSlotNum(srs_tti, SLOTNUM_PER_SUBFRAME(interval));
2286 srsSubframe = XranGetSubFrameNum(srs_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
2287 srsFrame = XranGetFrameNum(srs_tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
2288 srsFrame = (srsFrame & 0xff);
2289 srsCtx = srs_tti % XRAN_MAX_SECTIONDB_CTX;
/* Inside the NDM window: send this symbol's slice of antenna elements. */
2291 if(sym_id < pSrsCfg->ndm_txduration)
2293 retval = xran_process_tx_srs_cp_on(pHandle, srsCtx, srs_tti,
2294 0, num_CCPorts, sym_id*ndm_step, ndm_step, srsFrame, srsSubframe, srsSlot, srs_sym,
2295 compType, XRAN_DIR_UL, xran_port_id, p_sec_db);
/* NDM window finished: clear the pending-SRS state. */
2299 p_xran_dev_ctx->ndm_srs_scheduled = 0;
2300 p_xran_dev_ctx->ndm_srs_tti = 0;
2301 p_xran_dev_ctx->ndm_srs_txtti = 0;
2302 p_xran_dev_ctx->ndm_srs_schedperiod = 0;
/* ---- CP-off transmit path (reached when C-Plane is not handled above):
 * iterate every antenna and component carrier explicitly. ---- */
2311 for (ant_id = 0; ant_id < num_eAxc; ant_id++)
2313 for (cc_id = 0; cc_id < num_CCPorts; cc_id++)
2315 //struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
/* With PUSCH masking, transmit only when this tti is not the masked slot. */
2316 if(p_xran_dev_ctx->puschMaskEnable)
2318 if((tti % numSlotMu1) != p_xran_dev_ctx->puschMaskSlot)
2319 retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
2322 retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
/* PRACH only on the (clamped) PRACH antenna subset. */
2324 if(p_xran_dev_ctx->enablePrach && (ant_id < num_eAxc_prach) )
2326 retval = xran_process_tx_prach_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
/* SRS CP-off path (Category B), mirroring the CP-on SRS/NDM logic above. */
2331 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_B
2332 && p_xran_dev_ctx->enableSrs
2333 && ((p_xran_dev_ctx->srs_cfg.symbMask >> sym_id)&1))
2335 struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
2337 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
2339 /* check special frame */
2340 if((xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
2341 ||(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1))
2343 if(((tti % p_xran_dev_ctx->fh_cfg.frame_conf.nTddPeriod) == pSrsCfg->slot)
2344 && (p_xran_dev_ctx->ndm_srs_scheduled == 0))
2347 struct xran_prb_map *prb_map;
2348 prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
2350 /* if PRB map is present in first antenna, assume SRS might be scheduled. */
2351 if(prb_map && prb_map->nPrbElm)
2353 /* NDM U-Plane is not enabled */
2354 if(pSrsCfg->ndm_offset == 0)
2357 if (prb_map->nPrbElm > 0)
/* Transmit only while sym_id lies inside the first PRB element's symbol range. */
2359 if(sym_id >= prb_map->prbMap[0].nStartSymb
2360 && sym_id < (prb_map->prbMap[0].nStartSymb + prb_map->prbMap[0].numSymb))
2361 for(ant_id=0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
2362 xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id);
2366 /* NDM U-Plane is enabled, SRS U-Planes will be transmitted after ndm_offset (in slots) */
2369 p_xran_dev_ctx->ndm_srs_scheduled = 1;
2370 p_xran_dev_ctx->ndm_srs_tti = tti;
2371 p_xran_dev_ctx->ndm_srs_txtti = (tti + pSrsCfg->ndm_offset)%2000;
2372 p_xran_dev_ctx->ndm_srs_schedperiod = pSrsCfg->slot;
2377 /* check SRS NDM UP has been scheduled in non special slots */
2378 /*NDM feature enables the spread of SRS packets
2379 Non delay measurement SRS PDSCH PUSCH delay measure it*/
2380 else if(p_xran_dev_ctx->ndm_srs_scheduled
2381 && p_xran_dev_ctx->ndm_srs_txtti == tti)
2384 uint32_t srs_tti, srsFrame, srsSubframe, srsSlot;
2387 srs_tti = p_xran_dev_ctx->ndm_srs_tti;
2388 num_eAxAntElm = xran_get_num_ant_elm(pHandle);
2389 ndm_step = num_eAxAntElm / pSrsCfg->ndm_txduration;
2391 srsSlot = XranGetSlotNum(srs_tti, SLOTNUM_PER_SUBFRAME(interval));
2392 srsSubframe = XranGetSubFrameNum(srs_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
2393 srsFrame = XranGetFrameNum(srs_tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
2394 srsFrame = (srsFrame & 0xff);
2395 srsCtx = srs_tti % XRAN_MAX_SECTIONDB_CTX;
/* Each symbol of the NDM window sends its own slice of antenna elements. */
2397 if(sym_id < pSrsCfg->ndm_txduration)
2399 for(ant_id=sym_id*ndm_step; ant_id < (sym_id+1)*ndm_step; ant_id++)
2400 xran_process_tx_srs_cp_off(pHandle, srsCtx, srs_tti, cc_id, ant_id, srsFrame, srsSubframe, srsSlot);
/* NDM window finished: clear the pending-SRS state. */
2404 p_xran_dev_ctx->ndm_srs_scheduled = 0;
2405 p_xran_dev_ctx->ndm_srs_tti = 0;
2406 p_xran_dev_ctx->ndm_srs_txtti = 0;
2407 p_xran_dev_ctx->ndm_srs_schedperiod = 0;
/* Record the duration of this symbol dispatch in MLog. */
2415 MLogXRANTask(PID_DISPATCH_TX_SYM, t1, MLogXRANTick());
/**
 * @brief Allocate a C-Plane/U-Plane TX work descriptor backed by an mbuf
 *        from the _eth_mbuf_pkt_gen pool.
 *
 * The descriptor lives inside the mbuf's own data area (reserved with
 * rte_pktmbuf_append, addressed with rte_pktmbuf_mtod), so freeing the
 * backing mbuf releases the descriptor too -- see xran_pkt_gen_desc_free().
 *
 * @return pointer to the descriptor, or NULL on allocation failure
 *         (the failure branches and return statement are in lines elided
 *         from this view).
 */
2419 struct cp_up_tx_desc *
2420 xran_pkt_gen_desc_alloc(void)
2422 struct rte_mbuf * mb = rte_pktmbuf_alloc(_eth_mbuf_pkt_gen);
2423 struct cp_up_tx_desc * p_desc = NULL;
2424 char * start = NULL;
/* Reserve descriptor-sized space in the mbuf data area.
 * NOTE(review): a NULL check on 'mb' is presumably in the elided lines
 * above -- rte_pktmbuf_append must not be called with a NULL mbuf;
 * confirm against the full source. */
2427 start = rte_pktmbuf_append(mb, sizeof(struct cp_up_tx_desc));
/* The descriptor occupies the start of the mbuf payload. */
2429 p_desc = rte_pktmbuf_mtod(mb, struct cp_up_tx_desc *);
/* Release a TX descriptor by freeing its backing mbuf (the descriptor is
 * carved out of the mbuf's data area -- see xran_pkt_gen_desc_alloc()).
 * A descriptor whose mb pointer is NULL indicates corruption and is fatal
 * (the guarding if/else lines are elided from this view). */
2440 xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc)
2444 rte_pktmbuf_free(p_desc->mb);
2447 rte_panic("p_desc->mb == NULL\n");