1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN TX functionality
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
50 #include "xran_fh_o_du.h"
54 #include "xran_up_api.h"
55 #include "xran_cp_api.h"
56 #include "xran_sync_api.h"
57 #include "xran_lib_mlog_tasks_id.h"
58 #include "xran_timer.h"
59 #include "xran_main.h"
60 #include "xran_common.h"
62 #include "xran_frame_struct.h"
63 #include "xran_printf.h"
64 #include "xran_app_frag.h"
65 #include "xran_tx_proc.h"
66 #include "xran_cp_proc.h"
68 #include "xran_mlog_lnx.h"
72 XRAN_IN_PREV_PERIOD = 0,
79 xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
80 struct rte_mbuf_ext_shared_info * p_share_data,
81 enum xran_compression_method compMeth, enum xran_comp_hdr_type staticEn);
/*
 * No-op free callback registered via rte_mbuf_ext_shared_info for mbufs that
 * attach external IQ buffers (see rte_pktmbuf_attach_extbuf usage below).
 * The attached buffers are owned by the BBU I/O buffer pools, so nothing is
 * released when the mbuf's last reference is dropped.
 * NOTE(review): the return-type line and braces are outside this view —
 * confirm signature matches rte_mbuf_extbuf_free_callback_t in the full file.
 */
85 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
87 /*long t1 = MLogTick();
88 MLogTask(77777, t1, t1+100);*/
/*
 * Convert a backward symbol offset (offSym) relative to the current
 * over-the-air symbol (otaSym) into an absolute symbol index within
 * [0, numSymTotal), reporting through *pInPeriod whether the resulting
 * symbol belongs to the previous, current, or next period.
 * NOTE(review): interior lines (braces, return statement, adjustment of
 * 'sym' when it overflows numSymTotal) are missing from this view; the
 * comments below are hedged accordingly.
 */
91 static inline int32_t XranOffsetSym(int32_t offSym, int32_t otaSym, int32_t numSymTotal, enum xran_in_period* pInPeriod)
95     // Suppose the offset is usually small
/* Offset reaches back past the period start: wrap into the previous period. */
96     if (unlikely(offSym > otaSym))
98         sym = numSymTotal - offSym + otaSym;
99         *pInPeriod = XRAN_IN_PREV_PERIOD;
/* Common case: offset stays within the current period. */
103         sym = otaSym - offSym;
/* Presumably 'sym' is reduced modulo numSymTotal in the hidden lines when it
 * spills into the next period — TODO confirm against the full file. */
105         if (unlikely(sym >= numSymTotal))
108             *pInPeriod = XRAN_IN_NEXT_PERIOD;
112             *pInPeriod = XRAN_IN_CURR_PERIOD;
/*
 * Accessor for the System Frame Number captured at the start of the current
 * second (global xran_SFN_at_Sec_Start). Per the comment below, the value is
 * a 10-bit SFN in the range [0, 1023].
 */
119 // Return SFN at current second start, 10 bits, [0, 1023]
120 uint16_t xran_getSfnSecStart(void)
122     return xran_SFN_at_Sec_Start;
/*
 * Enqueue every mbuf collected in p_m_table onto the device's U-plane TX
 * ring (send_upmbuf2ring) for the given port/VF, accumulating the byte count
 * into the device's fh_counters. Logs an error when fewer packets were
 * enqueued than requested (ret < n).
 * NOTE(review): declarations of i/n/ret, the NULL-dev check guarding the
 * rte_panic, and the handling of un-enqueued mbufs are on lines missing from
 * this view — confirm leftover mbufs are freed in the full file.
 */
125 /* Send burst of packets on an output interface */
127 xran_send_burst(struct xran_device_ctx *dev, struct mbuf_table* p_m_table, uint16_t port)
129     struct xran_common_counters * pCnt = NULL;
130     struct rte_mbuf **m_table;
136     pCnt = &dev->fh_counters;
/* Presumably reached only when 'dev' is invalid (guard on a hidden line). */
138         rte_panic("incorrect dev\n");
140     m_table = p_m_table->m_table;
143     for(i = 0; i < n; i++) {
144         /*rte_mbuf_sanity_check(m_table[i], 0);*/
145         /*rte_pktmbuf_dump(stdout, m_table[i], 256);*/
/* Count payload bytes before handing the mbuf to the ring. */
147         pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);
148         ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI, port);
/* Partial enqueue: report how many made it versus how many were requested. */
151     if (unlikely(ret < n)) {
152         print_err("core %d [p: %d-> vf %d] ret [%d] < n[%d] enq %ld\n",
153                     rte_lcore_id(), dev->xran_port_id, port, ret, n, pCnt->tx_counter);
/*
 * Build the eCPRI/O-RAN U-plane headers for one symbol into 'xp' and prepare
 * the mbuf payload via xran_prepare_iq_symbol_portion(). Computes the packet
 * size from the header stack plus the (optionally compressed) IQ payload.
 * NOTE(review): most parameters, the switch statement wrapping the 'case'
 * labels, and the loop structure around the mbuf handling are on lines
 * missing from this view — comments are hedged accordingly.
 */
159 /* Send a single 5G symbol over multiple packets */
160 static inline int32_t prepare_symbol_opt(enum xran_pkt_dir direction,
166                 const enum xran_input_byte_order iq_buf_byte_order,
173                 struct xran_up_pkt_gen_params *xp,
174                 enum xran_comp_hdr_type staticEn)
/* iqWidth == 0 conventionally means uncompressed 16-bit samples. */
183     iqWidth = (iqWidth==0) ? 16 : iqWidth;
/* Per-PRB parameter bytes added by the compression method (hidden switch):
 * block floating point carries a 1-byte exponent, modulation compression
 * carries none. */
185         case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
186         case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
/* Payload size: 3*iqWidth bytes of IQ per PRB (12 RE * 2 * iqWidth / 8)
 * plus the compression parameter, clamped to the mbuf capacity. */
190     n_bytes = (3 * iqWidth + parm_size) * prb_num;
191     n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
193     nPktSize = sizeof(struct rte_ether_hdr)
194                 + sizeof(struct xran_ecpri_hdr)
195                 + sizeof(struct radio_app_common_hdr)
196                 + sizeof(struct data_section_hdr)
/* The udCompHdr is only present on the wire for dynamic compression
 * header configuration with an actual compression method. */
198     if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn ==XRAN_COMP_HDR_TYPE_DYNAMIC))
199         nPktSize += sizeof(struct data_section_compression_hdr);
203     /* radio app header */
204     xp->app_params.data_direction = direction;
205     xp->app_params.payl_ver = 1;
206     xp->app_params.filter_id = 0;
207     xp->app_params.frame_id = frame_id;
208     xp->app_params.sf_slot_sym.subframe_id = subframe_id;
209     xp->app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
210     xp->app_params.sf_slot_sym.symb_id = symbol_no;
212     /* convert to network byte order */
213     xp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp->app_params.sf_slot_sym.value);
/* Section header: PRB range covered by this section. */
216     xp->sec_hdr.fields.sect_id = section_id;
217     xp->sec_hdr.fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(prb_num);
218     xp->sec_hdr.fields.start_prbu = (uint8_t)prb_start;
219     xp->sec_hdr.fields.sym_inc = 0;
220     xp->sec_hdr.fields.rb = 0;
/* Compression header parameters (consumed only when udCompHdr is sent). */
224     xp->compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
225     xp->compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
226     xp->compr_hdr_param.rsrvd = 0;
228     /* network byte order */
229     xp->sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp->sec_hdr.fields.all_bits);
/* Presumably reached when mbuf allocation fails (guard on a hidden line). */
233         errx(1, "out of mbufs after %d packets", 1);
236     prep_bytes = xran_prepare_iq_symbol_portion(mb,
247         errx(1, "failed preparing symbol");
/* Fix up mbuf lengths to the exact packet size just built. */
249     rte_pktmbuf_pkt_len(mb)  = nPktSize;
250     rte_pktmbuf_data_len(mb) = nPktSize;
253     printf("Symbol %2d prep_bytes (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/*
 * Transmit the U-plane data for one symbol when C-plane is disabled
 * ("cp_off" mode): for every PRB-map element of the symbol a zero-copy mbuf
 * is built by attaching the IQ buffer as an external buffer (headers are
 * written in front of the IQ data inside the same memory), then sent via
 * send_upmbuf2ring. For an O-RU with PRACH enabled, PRACH occasion symbols
 * are additionally sent through send_symbol_ex().
 * Direction/PRB count are chosen by role: O-DU transmits DL, O-RU transmits UL.
 * NOTE(review): many lines (returns, braces, some arguments) are missing
 * from this view; comments are hedged accordingly.
 */
259 int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id,
264     char        *p_sec_iq     = NULL;
266     void        *send_mb  = NULL;
268     uint16_t    iq_sample_size_bits = 16;
271     struct xran_prb_map *prb_map = NULL;
272     uint8_t  num_ant_elm = 0;
274     struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
275     if (p_xran_dev_ctx == NULL)
277     struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
278     struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
279     struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
281     num_ant_elm = xran_get_num_ant_elm(pHandle);
282     enum xran_pkt_dir direction;
283     enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;
285     struct rte_mbuf *eth_oran_hdr = NULL;
286     char        *ext_buff = NULL;
287     uint16_t    ext_buff_len = 0;
288     struct rte_mbuf *tmp = NULL;
289     rte_iova_t ext_buff_iova = 0;
290     uint8_t PortId = p_xran_dev_ctx->xran_port_id;
292     staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
295     if(PortId >= XRAN_PORTS_NUM)
296         rte_panic("incorrect PORT ID\n");
298     struct rte_mbuf_ext_shared_info * p_share_data = NULL;
/* Role determines the U-plane direction and PRB budget. */
299     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
300         direction = XRAN_DIR_DL; /* O-DU */
301         prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
303         direction = XRAN_DIR_UL; /* RU */
304         prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
/* Only transmit on slots/symbols whose type matches this role
 * (DL for O-DU, UL for O-RU), or special/FDD slots. */
307     if(xran_fs_get_slot_type(PortId, cc_id, tti, ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SLOT_TYPE_DL : XRAN_SLOT_TYPE_UL)) ==  1
308             || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1
309             || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_FDD) ==  1){
311         if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SYMBOL_TYPE_DL : XRAN_SYMBOL_TYPE_UL)
312            || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){
314             vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
/* Per-symbol IQ data / control mbuf / PRB map for this TTI slot in the
 * circular front-haul buffer. */
315             pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
316             mb  = (void*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
317             prb_map  = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
/* One U-plane packet per PRB-map element (section). */
322                 for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++){
323                     //print_err("tti is %d, cc_id is %d, ant_id is %d, prb_map->nPrbElm id - %d", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, prb_map->nPrbElm);
324                     uint16_t sec_id  = elmIdx;
325                     struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
326                     struct xran_section_desc * p_sec_desc = NULL;
327                     p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sec_id];
329                     if(prb_map_elm == NULL){
330                         rte_panic("p_sec_desc == NULL\n");
333                     p_sec_desc =  prb_map_elm->p_sec_desc[sym_id][0];
335                     p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);
337                     /* calculate offset for external buffer */
/* Back the buffer start up so the headroom plus eCPRI/app/section headers
 * land immediately in front of the IQ samples (zero-copy header write). */
338                     ext_buff_len = p_sec_desc->iq_buffer_len;
339                     ext_buff  = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
340                                     sizeof (struct xran_ecpri_hdr) +
341                                     sizeof (struct radio_app_common_hdr) +
342                                     sizeof(struct data_section_hdr));
/* +18: extra tail allowance — presumably for the Ethernet header/trailer
 * space; TODO confirm the exact rationale against the full file. */
344                     ext_buff_len += RTE_PKTMBUF_HEADROOM +
345                                     sizeof (struct xran_ecpri_hdr) +
346                                     sizeof (struct radio_app_common_hdr) +
347                                     sizeof(struct data_section_hdr) + 18;
349                     if ((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)){
350                         ext_buff     -= sizeof (struct data_section_compression_hdr);
351                         ext_buff_len += sizeof (struct data_section_compression_hdr);
/* Small mbuf from the per-VF pool carries only headers; payload stays in
 * the BBU buffer attached below as an external buffer. */
354                     eth_oran_hdr =  rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
355                     if (unlikely (( eth_oran_hdr) == NULL)) {
356                         rte_panic("Failed rte_pktmbuf_alloc\n");
359                     p_share_data->free_cb = extbuf_free_callback;
360                     p_share_data->fcb_opaque = NULL;
361                     rte_mbuf_ext_refcnt_set(p_share_data, 1);
363                     ext_buff_iova = rte_mempool_virt2iova(mb);
364                     if (unlikely (( ext_buff_iova) == 0)) {
365                         rte_panic("Failed rte_mem_virt2iova \n");
368                     if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
369                         rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
372                     rte_pktmbuf_attach_extbuf(eth_oran_hdr,
374                                               ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
378                     rte_pktmbuf_reset_headroom(eth_oran_hdr);
/* Make room for the Ethernet header in front of the ORAN headers. */
380                     tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
381                     if (unlikely (( tmp) == NULL)) {
382                         rte_panic("Failed rte_pktmbuf_prepend \n");
384                     send_mb = eth_oran_hdr;
/* U-plane sequence number source depends on role (DL vs UL counters). */
387                     uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
388                                          xran_get_updl_seqid(pHandle, cc_id, ant_id) :
389                                          xran_get_upul_seqid(pHandle, cc_id, ant_id);
394                     int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
397                                                       prb_map_elm->compMethod,
398                                                       prb_map_elm->iqWidth,
399                                                       p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
400                                                       frame_id, subframe_id, slot_id, sym_id,
401                                                       prb_map_elm->nRBStart, prb_map_elm->nRBSize,
407                         rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
409                         pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
410                         p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
413                 printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, sym_id);
/* PRACH path: only the O-RU sends PRACH IQ, and only on configured PRACH
 * occasions (frame periodicity x/y, PRACH slot, symbol window). */
416             if(p_xran_dev_ctx->enablePrach
417               && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)) {   /* Only RU needs to send PRACH I/Q */
418                 uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
420                 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
421                         && (is_prach_slot == 1)
422                         && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])
423                         && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) {
424                     int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;
425                     int compMethod, parm_size;
426                     uint8_t symb_id_offset = sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id];
428                     compMethod = p_xran_dev_ctx->fh_cfg.ru_conf.compMeth_PRACH;
/* Hidden switch on compMethod: per-PRB parameter bytes, as in
 * prepare_symbol_opt() above. */
430                         case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
431                         case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
435                     pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[symb_id_offset].pData;
436                     //pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;
437                     /*pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id])
438                         * (3*p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth + parm_size)
439                         * pPrachCPConfig->numPrbc;*/
/* mb == NULL makes send_symbol_ex allocate its own mbuf (copy path). */
440                     mb  = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
442                     send_symbol_ex(pHandle,
444                             xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, slot_id),
445                             (struct rte_mbuf *)mb,
448                             p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth_PRACH,
449                             p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
450                             frame_id, subframe_id, slot_id, sym_id,
451                             pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
452                             cc_id, prach_port_id,
453                             xran_get_upul_seqid(pHandle, cc_id, prach_port_id));
456             } /* if(p_xran_dev_ctx->enablePrach ..... */
457         } /* RU mode or C-Plane is not used */
/*
 * SRS counterpart of xran_process_tx_sym_cp_off(): on an O-RU, transmit the
 * SRS IQ data for one symbol without C-plane by attaching the SRS BBU buffer
 * as an external mbuf buffer (zero-copy) and enqueueing onto the U-plane TX
 * ring. SRS eAxC IDs are offset by srs_cfg->eAxC_offset. An O-DU reaching
 * this path is treated as a configuration error (rte_panic below).
 * NOTE(review): the xran_fs_get_slot_type()/xran_fs_get_symbol_type() calls
 * here omit the leading PortId argument used at the same call sites in
 * xran_process_tx_sym_cp_off() above — confirm against the full file whether
 * this chunk predates the PortId-aware API or an argument was lost.
 */
464 xran_process_tx_srs_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id)
468     char        *p_sec_iq     = NULL;
470     void        *send_mb  = NULL;
472     uint16_t    iq_sample_size_bits = 16;
474     struct xran_prb_map *prb_map = NULL;
475     uint8_t  num_ant_elm = 0;
477     struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
478     struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
479     struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
480     struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
482     num_ant_elm = xran_get_num_ant_elm(pHandle);
483     enum xran_pkt_dir direction;
485     struct rte_mbuf *eth_oran_hdr = NULL;
486     char        *ext_buff = NULL;
487     uint16_t    ext_buff_len = 0;
488     struct rte_mbuf *tmp = NULL;
489     rte_iova_t ext_buff_iova = 0;
/* SRS streams use antenna-element eAxC IDs offset from regular antennas. */
490     int32_t ant_elm_eAxC_id = ant_id + p_srs_cfg->eAxC_offset;
492     enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;
494     if (p_xran_dev_ctx != NULL)
497     if(p_xran_dev_ctx->xran_port_id >= XRAN_PORTS_NUM)
498         rte_panic("incorrect PORT ID\n");
500     struct rte_mbuf_ext_shared_info * p_share_data = NULL;
/* SRS is UL-only: an O-DU taking this path is a configuration error. */
502     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
503         direction = XRAN_DIR_DL; /* O-DU */
504         prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
505         rte_panic("incorrect O_DU\n");
507         direction = XRAN_DIR_UL; /* RU */
508         prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
512     staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* Only UL or FDD slots/symbols carry SRS. */
519     if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) == 1
520         ||  xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_FDD) == 1) {
521         if(xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
522             || xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD) {
/* SRS-specific circular BBU buffers for this TTI slot. */
524             pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
525             mb  = (void*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
526             prb_map  = (struct xran_prb_map *) p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
527             vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_elm_eAxC_id);
/* One U-plane packet per scheduled PRB-map element (section). */
531                 for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) {
532                     uint16_t sec_id  = elmIdx;
533                     struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
534                     struct xran_section_desc * p_sec_desc = NULL;
536                     if(prb_map_elm == NULL) {
537                         rte_panic("p_sec_desc == NULL\n");
540                     /* skip, if not scheduled */
541                     if(sym_id < prb_map_elm->nStartSymb || sym_id >= prb_map_elm->nStartSymb + prb_map_elm->numSymb)
544                     p_share_data = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
545                     p_sec_desc  = prb_map_elm->p_sec_desc[sym_id][0];
546                     p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);
548                     /* calculate offset for external buffer */
/* Back the buffer start up so headroom + eCPRI/app/section headers sit
 * directly in front of the IQ samples (zero-copy header write). */
549                     ext_buff_len = p_sec_desc->iq_buffer_len;
550                     ext_buff  = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
551                                     sizeof (struct xran_ecpri_hdr) +
552                                     sizeof (struct radio_app_common_hdr) +
553                                     sizeof(struct data_section_hdr));
/* +18: extra tail allowance, same convention as the non-SRS path —
 * TODO confirm the exact rationale against the full file. */
555                     ext_buff_len += RTE_PKTMBUF_HEADROOM +
556                                     sizeof (struct xran_ecpri_hdr) +
557                                     sizeof (struct radio_app_common_hdr) +
558                                     sizeof(struct data_section_hdr) + 18;
560                     if ((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)){
561                         ext_buff     -= sizeof (struct data_section_compression_hdr);
562                         ext_buff_len += sizeof (struct data_section_compression_hdr);
/* Header mbuf comes from the indirect-allocation helper here (unlike the
 * per-VF small pool used elsewhere in this file). */
565                     // eth_oran_hdr =  rte_pktmbuf_alloc(_eth_mbuf_pool_small);
566                     eth_oran_hdr = xran_ethdi_mbuf_indir_alloc();
568                     if (unlikely (( eth_oran_hdr) == NULL)) {
569                         rte_panic("Failed rte_pktmbuf_alloc\n");
572                     p_share_data->free_cb = extbuf_free_callback;
573                     p_share_data->fcb_opaque = NULL;
574                     rte_mbuf_ext_refcnt_set(p_share_data, 1);
576                     ext_buff_iova = rte_mempool_virt2iova(mb);
577                     if (unlikely (( ext_buff_iova) == 0)) {
578                         rte_panic("Failed rte_mem_virt2iova \n");
581                     if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
582                         rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
585                     rte_pktmbuf_attach_extbuf(eth_oran_hdr,
587                                               ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
591                     rte_pktmbuf_reset_headroom(eth_oran_hdr);
/* Make room for the Ethernet header in front of the ORAN headers. */
593                     tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
594                     if (unlikely (( tmp) == NULL)) {
595                         rte_panic("Failed rte_pktmbuf_prepend \n");
597                     send_mb = eth_oran_hdr;
599                     uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
600                                         xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :
601                                         xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id);
603                     int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
606                                                     prb_map_elm->compMethod,
607                                                     prb_map_elm->iqWidth,
608                                                     p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
609                                                     frame_id, subframe_id, slot_id, sym_id,
610                                                     prb_map_elm->nRBStart, prb_map_elm->nRBSize,
611                                                     cc_id, ant_elm_eAxC_id,
616                         rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
618                         pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
619                         p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
622                 printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_elm_eAxC_id, sym_id);
/*
 * Allocate a small header mbuf from the per-VF pool and attach the caller's
 * IQ buffer (p_ext_buff, inside the mempool object starting at
 * p_ext_buff_start) as an external zero-copy buffer. The attach point is
 * backed up by headroom + eCPRI/app/section headers (plus udCompHdr for
 * dynamic compression) so those headers can be written directly in front of
 * the IQ data; an Ethernet header slot is then prepended.
 * Returns the prepared mbuf (declared return type is on a line outside this
 * view). The shared-info free callback is the no-op extbuf_free_callback —
 * buffer ownership stays with the BBU pools.
 */
632 xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
633                 struct rte_mbuf_ext_shared_info * p_share_data,
634                 enum xran_compression_method compMeth, enum xran_comp_hdr_type staticEn)
636     struct rte_mbuf *mb_oran_hdr_ext = NULL;
637     struct rte_mbuf *tmp = NULL;
638     int8_t          *ext_buff = NULL;
639     rte_iova_t ext_buff_iova = 0;
640     ext_buff  = p_ext_buff - (RTE_PKTMBUF_HEADROOM +
641                 sizeof(struct xran_ecpri_hdr) +
642                 sizeof(struct radio_app_common_hdr) +
643                 sizeof(struct data_section_hdr));
/* +18: extra tail allowance, same convention used by the cp_off paths —
 * TODO confirm the exact rationale against the full file. */
645     ext_buff_len += RTE_PKTMBUF_HEADROOM +
646                 sizeof(struct xran_ecpri_hdr) +
647                 sizeof(struct radio_app_common_hdr) +
648                 sizeof(struct data_section_hdr) + 18;
649     if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)) {
650         ext_buff     -= sizeof (struct data_section_compression_hdr);
651         ext_buff_len += sizeof (struct data_section_compression_hdr);
653     mb_oran_hdr_ext =  rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
655     if (unlikely (( mb_oran_hdr_ext) == NULL)) {
656         rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
/* No-op free callback: the external buffer is pool-owned, not mbuf-owned. */
659     p_share_data->free_cb = extbuf_free_callback;
660     p_share_data->fcb_opaque = NULL;
661     rte_mbuf_ext_refcnt_set(p_share_data, 1);
/* IOVA of the mempool object; the attach IOVA below is offset by the same
 * distance as ext_buff from the object start. */
663     ext_buff_iova = rte_mempool_virt2iova(p_ext_buff_start);
664     if (unlikely (( ext_buff_iova) == 0)) {
665         rte_panic("Failed rte_mem_virt2iova \n");
668     if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
669         rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
672     rte_pktmbuf_attach_extbuf(mb_oran_hdr_ext,
674                               ext_buff_iova + RTE_PTR_DIFF(ext_buff , p_ext_buff_start),
678     rte_pktmbuf_reset_headroom(mb_oran_hdr_ext);
/* Reserve space for the Ethernet header in front of the ORAN headers. */
680     tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(mb_oran_hdr_ext, sizeof(struct rte_ether_hdr));
681     if (unlikely (( tmp) == NULL)) {
682         rte_panic("Failed rte_pktmbuf_prepend \n");
685     return mb_oran_hdr_ext;
/*
 * Defer U-plane packet generation (optimized cp_on path) to a worker core:
 * fill a cp_up_tx_desc with all call parameters and enqueue it on the
 * per-port up_dl_pkt_gen_ring. Returns 1 on successful enqueue; on ring
 * failure the descriptor is freed (fallback return is on a hidden line —
 * presumably 0 so the caller processes inline; TODO confirm).
 * NOTE(review): the descriptor's cc_id/ant_id fields receive the num_cc /
 * num_ant counts here — the _opt worker iterates over all CCs/antennas, so
 * the fields are reused as counts; confirm against the consumer.
 */
688 int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t num_cc, int32_t num_ant, uint32_t frame_id,
689     uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
690     uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
693     struct cp_up_tx_desc* p_desc = NULL;
694     struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
695     struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
697     p_desc = xran_pkt_gen_desc_alloc();
699         p_desc->pHandle = pHandle;
700         p_desc->ctx_id = ctx_id;
702         p_desc->cc_id = num_cc;
703         p_desc->ant_id = num_ant;
704         p_desc->frame_id = frame_id;
705         p_desc->subframe_id = subframe_id;
706         p_desc->slot_id = slot_id;
707         p_desc->sym_id = sym_id;
708         p_desc->compType = (uint32_t)compType;
709         p_desc->direction = (uint32_t)direction;
710         p_desc->xran_port_id = xran_port_id;
711         p_desc->p_sec_db = (void*)p_sec_db;
/* The descriptor travels through the ring as its backing mbuf (p_desc->mb). */
713         if(likely(p_xran_dev_ctx->xran_port_id < XRAN_PORTS_NUM)) {
714             if (rte_ring_enqueue(eth_ctx->up_dl_pkt_gen_ring[p_xran_dev_ctx->xran_port_id], p_desc->mb) == 0)
715                 return 1;   /* success */
717                 xran_pkt_gen_desc_free(p_desc);
719             rte_panic("incorrect port %d", p_xran_dev_ctx->xran_port_id);
722         print_dbg("xran_pkt_gen_desc_alloc failure %d", p_xran_dev_ctx->xran_port_id);
/*
 * Defer U-plane packet generation (per-CC/antenna cp_on path) to a worker
 * core: fill a cp_up_tx_desc with the call parameters and enqueue it on the
 * per-port up_dl_pkt_gen_ring. Returns 1 on successful enqueue; on ring
 * failure the descriptor is freed (fallback return is on a hidden line —
 * presumably 0 so the caller processes inline; TODO confirm).
 */
729 xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
730     uint32_t slot_id, uint32_t sym_id)
733     struct cp_up_tx_desc* p_desc = NULL;
734     struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
735     struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
737     p_desc = xran_pkt_gen_desc_alloc();
739         p_desc->pHandle = pHandle;
740         p_desc->ctx_id = ctx_id;
742         p_desc->cc_id = cc_id;
743         p_desc->ant_id = ant_id;
744         p_desc->frame_id = frame_id;
745         p_desc->subframe_id = subframe_id;
746         p_desc->slot_id = slot_id;
747         p_desc->sym_id = sym_id;
/* The descriptor travels through the ring as its backing mbuf (p_desc->mb). */
749         if(likely(p_xran_dev_ctx->xran_port_id < XRAN_PORTS_NUM)) {
750             if (rte_ring_enqueue(eth_ctx->up_dl_pkt_gen_ring[p_xran_dev_ctx->xran_port_id], p_desc->mb) == 0)
751                 return 1;   /* success */
753                 xran_pkt_gen_desc_free(p_desc);
755             rte_panic("incorrect port %d", p_xran_dev_ctx->xran_port_id);
758         print_dbg("xran_pkt_gen_desc_alloc failure %d", p_xran_dev_ctx->xran_port_id);
/*
 * Generate and transmit U-plane packets for one symbol driven by the
 * C-plane section database ("cp_on" mode): iterate the section info stored
 * for (direction, cc, ant, ctx), attach each section's IQ region as a
 * zero-copy external buffer via xran_attach_up_ext_buf(), build headers
 * with prepare_symbol_opt(), fragment when the payload exceeds the
 * configured MTU, and finally send the collected mbufs via
 * xran_send_burst(). Only section type 1 is supported.
 * NOTE(review): numerous lines (loop/brace structure, some declarations and
 * arguments, the continue/skip statements) are missing from this view;
 * comments are hedged accordingly.
 */
765 xran_process_tx_sym_cp_on(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
766     uint32_t slot_id, uint32_t sym_id)
770     struct rte_mbuf *eth_oran_hdr = NULL;
771     char        *ext_buff = NULL;
772     uint16_t    ext_buff_len = 0;
773     struct rte_mbuf *tmp = NULL;
774     rte_iova_t ext_buff_iova = 0;
776     char        *p_sec_iq     = NULL;
778     struct rte_mbuf *to_free_mbuf =  NULL;
780     uint16_t iq_sample_size_bits = 16;
782     int32_t num_sections = 0;
788     struct mbuf_table  loc_tx_mbufs;
789     struct xran_up_pkt_gen_params loc_xp;
791     struct xran_section_info *sectinfo = NULL;
792     struct xran_device_ctx   *p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
793     enum xran_pkt_dir  direction;
795     enum xran_comp_hdr_type compType = XRAN_COMP_HDR_TYPE_DYNAMIC;
797     struct rte_mbuf_ext_shared_info * p_share_data = NULL;
799     if (p_xran_dev_ctx != NULL)
801     compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* Role determines direction and PRB budget (as in the cp_off path). */
804     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
805         direction = XRAN_DIR_DL; /* O-DU */
806         prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
808         direction = XRAN_DIR_UL; /* RU */
809         prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
812     vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
814     num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);
815     /* iterate C-Plane configuration to generate corresponding U-Plane */
/* Fill the frame/subframe/slot/symbol fields of the header template once. */
817     prepare_sf_slot_sym(direction, frame_id, subframe_id, slot_id, sym_id, &loc_xp);
819     loc_tx_mbufs.len = 0;
820     while(next < num_sections) {
821         sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);
826         if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) {   /* only supports type 1 */
827             print_err("Invalid section type in section DB - %d", sectinfo->type);
831         /* skip, if not scheduled */
832         if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)
/* Compressed sections carry iqWidth-bit samples; default is 16-bit. */
836         if(sectinfo->compMeth)
837             iq_sample_size_bits = sectinfo->iqWidth;
839         print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
840                 sectinfo->type, sectinfo->id, sectinfo->startPrbc,
841                 sectinfo->numPrbc,sectinfo->startSymId, sectinfo->numSymbol);
843         p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sectinfo->id];
845         len = loc_tx_mbufs.len;
849         //Added for Klocworks
850         if (len >= MBUF_TABLE_SIZE) {
851             len = MBUF_TABLE_SIZE - 1;
852             rte_panic("len >= MBUF_TABLE_SIZE\n");
/* Previous cycle's mbuf for this section is released after the new one is
 * attached (see rte_pktmbuf_free below). */
855         to_free_mbuf =  p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id];
856         pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
857         mb = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
860             rte_panic("mb == NULL\n");
863         p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
864         ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
/* Zero-copy: the section's IQ region becomes the mbuf's external buffer,
 * with header space reserved in front (see xran_attach_up_ext_buf). */
866         mb = xran_attach_up_ext_buf(vf_id, (int8_t *)mb, (int8_t *) p_sec_iq,
867                 (uint16_t) ext_buff_len,
868                 p_share_data, (enum xran_compression_method) sectinfo->compMeth, compType);
869         p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id] = mb;
870         rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
873             rte_pktmbuf_free(to_free_mbuf);
877         prepare_symbol_opt(direction, sectinfo->id,
879                 (struct rb_map *)p_sec_iq,
882                 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
887                 xran_get_updl_seqid(pHandle, cc_id, ant_id),
892         /* if we don't need to do any fragmentation */
/* Fits in one frame if the payload (numPrbc PRBs at 3*iqWidth bytes each
 * plus a parameter byte) does not exceed the MTU. */
893         if (likely (p_xran_dev_ctx->fh_init.mtu >=
894                 sectinfo->numPrbc * (3*iq_sample_size_bits + 1))) {
895             /* no fragmentation */
896             loc_tx_mbufs.m_table[len] = mb;
/* Fragmentation path: application-level O-RAN fragmentation keeps the
 * per-fragment sequence numbering consistent (seq_num pointer). */
900             uint8_t * seq_num = xran_get_updl_seqid_addr(pHandle, cc_id, ant_id);
904                 rte_panic("pointer to seq number is NULL [CC %d Ant %d]\n", cc_id, ant_id);
906             len2 = xran_app_fragment_packet(mb,
907                     &loc_tx_mbufs.m_table[len],
908                     (uint16_t)(MBUF_TABLE_SIZE - len),
909                     p_xran_dev_ctx->fh_init.mtu,
910                     p_xran_dev_ctx->direct_pool,
911                     p_xran_dev_ctx->indirect_pool,
/* Last arg: 1 when a dynamic udCompHdr is present (compressed IQ), 0 for
 * 16-bit samples or a static compression header configuration. */
916                     ((sectinfo->iqWidth == 16)||(compType==XRAN_COMP_HDR_TYPE_STATIC)) ? 0 : 1);
918             /* Free input packet */
919             rte_pktmbuf_free(mb);
921             /* If we fail to fragment the packet */
922             if (unlikely (len2 < 0)){
923                 print_err("len2= %d\n", len2);
/* Each fragment still needs its Ethernet header prepended. */
928             for (i = len; i < len + len2; i ++) {
930                 m = loc_tx_mbufs.m_table[i];
931                 struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
932                     rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
933                 if (eth_hdr == NULL) {
934                     rte_panic("No headroom in mbuf.\n");
940         if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {
941             rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
943         loc_tx_mbufs.len = len;
944     } /* while(section) */
946     /* Transmit packets */
947     xran_send_burst(p_xran_dev_ctx, &loc_tx_mbufs, vf_id);
948     loc_tx_mbufs.len = 0;
955 //#define TRANSMIT_BURST
956 //#define ENABLE_DEBUG_COREDUMP
958 #define ETHER_TYPE_ECPRI_BE (0xFEAE)
960 int32_t xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t num_cc, int32_t num_ant, uint32_t frame_id,
961 uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
962 uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
965 int32_t cc_id = 0, ant_id = 0;
966 char* ext_buff = NULL;
967 uint16_t ext_buff_len = 0;
968 rte_iova_t ext_buff_iova = 0;
970 char* p_sec_iq = NULL;
971 void* mb = NULL, *mb_base = NULL;
972 struct rte_mbuf* to_free_mbuf = NULL;
973 uint16_t iq_sample_size_bits = 16;
975 int32_t num_sections = 0, total_sections = 0;
976 uint16_t len = 0, len2 = 0, len_frag = 0;
979 uint8_t compMeth = 0;
982 int32_t n_bytes = 0, elm_bytes = 0;
984 uint16_t prb_num = 0;
985 uint16_t prb_start = 0;
986 int16_t nPktSize = 0;
987 uint16_t ecpri_payl_size = 0;
988 #ifdef TRANSMIT_BURST
989 struct mbuf_table loc_tx_mbufs;
991 struct mbuf_table loc_tx_mbufs_fragmented;
992 struct xran_up_pkt_gen_params xp;
993 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
994 struct xran_section_info* sectinfo = NULL;
995 struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
997 struct rte_mbuf_ext_shared_info* p_share_data = NULL;
998 struct xran_sectioninfo_db* ptr_sect_elm = NULL;
999 struct rte_mbuf* mb_oran_hdr_ext = NULL;
1000 struct rte_mempool_objhdr* iova_hdr = NULL;
1001 struct xran_eaxcid_config* conf = &(p_xran_dev_ctx->eAxc_id_cfg);
1002 struct rte_ether_hdr* ether_hdr = NULL;
1003 struct xran_ecpri_hdr* ecpri_hdr = NULL;
1004 struct radio_app_common_hdr* app_hdr = NULL;
1005 struct data_section_hdr* section_hdr = NULL;
1006 struct data_section_compression_hdr* compression_hdr = NULL;
1007 const int16_t ccid_pos = conf->bit_ccId;
1008 const int16_t ccid_mask = conf->mask_ccId;
1009 const int16_t antid_pos = conf->bit_ruPortId;
1010 const int16_t antid_mask = conf->mask_ruPortId;
1012 const int16_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
1013 const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
1014 uint16_t comp_head_upd = 0;
1016 const int16_t total_header_size = (RTE_PKTMBUF_HEADROOM +
1017 sizeof(struct xran_ecpri_hdr) +
1018 sizeof(struct radio_app_common_hdr) +
1019 sizeof(struct data_section_hdr));
1021 uint16_t* __restrict pSrc = NULL;
1022 uint16_t* __restrict pDst = NULL;
1024 const enum xran_input_byte_order iq_buf_byte_order = p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder;
1026 /* radio app header */
1027 xp.app_params.data_feature.value = 0x10;
1028 xp.app_params.data_feature.data_direction = direction;
1029 xp.app_params.frame_id = frame_id;
1030 xp.app_params.sf_slot_sym.subframe_id = subframe_id;
1031 xp.app_params.sf_slot_sym.slot_id = slot_id;
1032 xp.app_params.sf_slot_sym.symb_id = sym_id;
1033 /* convert to network byte order */
1034 xp.app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp.app_params.sf_slot_sym.value);
1037 for (cc_id = 0; cc_id < num_cc; cc_id++)
1039 for (ant_id = 0; ant_id < num_ant; ant_id++)
1041 ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][ant_id];
1042 if (unlikely(ptr_sect_elm == NULL))
1044 num_sections = ptr_sect_elm->cur_index;
1046 /* iterate C-Plane configuration to generate corresponding U-Plane */
1047 vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
1048 pos = (char*)p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
1049 mb_base = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
1050 if (unlikely(mb_base == NULL))
1052 rte_panic("mb == NULL\n");
1055 cid = ((cc_id << ccid_pos) & ccid_mask) | ((ant_id << antid_pos) & antid_mask);
1056 cid = rte_cpu_to_be_16(cid);
1057 iq_sample_size_bits = 16;
1059 #ifdef TRANSMIT_BURST
1060 loc_tx_mbufs.len = 0;
1062 loc_tx_mbufs_fragmented.len = 0;
1064 #pragma loop_count min=1, max=16
1065 for (next=0; next< num_sections; next++)
1067 sectinfo = &ptr_sect_elm->list[next];
1069 if (unlikely(sectinfo == NULL))
1071 if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
1072 { /* only supports type 1 */
1073 print_err("Invalid section type in section DB - %d", sectinfo->type);
1076 /* skip, if not scheduled */
1077 if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
1080 compMeth = sectinfo->compMeth;
1081 iqWidth = sectinfo->iqWidth;
1082 section_id = sectinfo->id;
1083 prb_start = sectinfo->startPrbc;
1084 prb_num = sectinfo->numPrbc;
1085 seq_id = xran_updl_seq_id_num[xran_port_id][cc_id][ant_id]++;
1089 iq_sample_size_bits = iqWidth;
1091 comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
1093 print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
1094 sectinfo->type, sectinfo->id, sectinfo->startPrbc,
1095 sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
1097 p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][section_id];
1098 p_share_data->free_cb = extbuf_free_callback;
1099 p_share_data->fcb_opaque = NULL;
1100 rte_mbuf_ext_refcnt_set(p_share_data, 1);
1102 #ifdef TRANSMIT_BURST
1103 len = loc_tx_mbufs.len;
1104 //Added for Klocworks
1105 if (unlikely(len >= MBUF_TABLE_SIZE))
1107 len = MBUF_TABLE_SIZE - 1;
1108 rte_panic("len >= MBUF_TABLE_SIZE\n");
1111 p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
1112 ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
1114 ext_buff = p_sec_iq - total_header_size;
1115 ext_buff_len += (total_header_size + 18);
1119 ext_buff -= sizeof(struct data_section_compression_hdr);
1120 ext_buff_len += sizeof(struct data_section_compression_hdr);
1123 mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
1124 if (unlikely((mb_oran_hdr_ext) == NULL))
1126 rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
1129 iova_hdr = (struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size);
1130 ext_buff_iova = iova_hdr->iova;
1132 #ifdef ENABLE_DEBUG_COREDUMP
1133 if (unlikely(ext_buff_iova == 0))
1135 rte_panic("Failed rte_mem_virt2iova\n");
1137 if (unlikely(((rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA))
1139 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
1142 mb_oran_hdr_ext->buf_addr = ext_buff;
1143 mb_oran_hdr_ext->buf_iova = ext_buff_iova + RTE_PTR_DIFF(ext_buff, mb_base);
1144 mb_oran_hdr_ext->buf_len = ext_buff_len;
1145 mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
1146 mb_oran_hdr_ext->shinfo = p_share_data;
1147 mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
1148 mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
1149 mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
1150 mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
1152 mb = (void*)mb_oran_hdr_ext;
1154 to_free_mbuf = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id];
1155 p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id] = mb;
1156 rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
1159 rte_pktmbuf_free(to_free_mbuf);
1162 pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
1164 ether_hdr = (struct rte_ether_hdr*)pStart;
1166 /* Fill in the ethernet header. */
1167 #ifndef TRANSMIT_BURST
1168 rte_eth_macaddr_get(mb_oran_hdr_ext->port, ðer_hdr->s_addr); /* set source addr */
1169 ether_hdr->d_addr = eth_ctx->entities[vf_id][ID_O_RU]; /* set dst addr */
1170 ether_hdr->ether_type = ETHER_TYPE_ECPRI_BE; /* ethertype */
1172 iqWidth = (iqWidth == 0) ? 16 : iqWidth;
1175 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
1176 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
1180 n_bytes = (3 * iqWidth + parm_size) * prb_num;
1181 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
1183 nPktSize = sizeof(struct rte_ether_hdr)
1184 + sizeof(struct xran_ecpri_hdr)
1185 + sizeof(struct radio_app_common_hdr)
1186 + sizeof(struct data_section_hdr)
1190 nPktSize += sizeof(struct data_section_compression_hdr);
1192 xp.sec_hdr.fields.sect_id = section_id;
1193 xp.sec_hdr.fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(prb_num);
1194 xp.sec_hdr.fields.start_prbu = (uint8_t)prb_start;
1195 xp.sec_hdr.fields.sym_inc = 0;
1196 xp.sec_hdr.fields.rb = 0;
1197 /* network byte order */
1198 xp.sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp.sec_hdr.fields.all_bits);
1201 xp.compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
1202 xp.compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
1203 xp.compr_hdr_param.rsrvd = 0;
1205 ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
1207 ecpri_payl_size = n_bytes
1208 + sizeof(struct data_section_hdr)
1209 + sizeof(struct radio_app_common_hdr)
1210 + XRAN_ECPRI_HDR_SZ; //xran_get_ecpri_hdr_size();
1213 ecpri_payl_size += sizeof(struct data_section_compression_hdr);
1215 ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
1216 ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
1217 ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
1218 ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_payl_size);
1220 /* one to one lls-CU to RU only and band sector is the same */
1221 ecpri_hdr->ecpri_xtc_id = cid;
1223 /* no transport layer fragmentation supported */
1224 ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
1225 ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
1227 pSrc = (uint16_t*)&(xp.app_params);
1228 pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
1238 rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
1239 rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
1241 elm_bytes += nPktSize;
1243 /* Restore fragmentation support in this code version */
1244 /* if we don't need to do any fragmentation */
1245 if (likely(p_xran_dev_ctx->fh_init.mtu >= sectinfo->numPrbc * (3 * iq_sample_size_bits + 1)))
1247 /* no fragmentation */
1249 #ifdef TRANSMIT_BURST
1250 loc_tx_mbufs.m_table[len++] = mb;
1251 if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM))
1253 rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
1255 loc_tx_mbufs.len = len;
1257 xran_enqueue_mbuf(mb_oran_hdr_ext, eth_ctx->tx_ring[vf_id]);
1263 /* only burst transmission mode is supported for fragmented packets*/
1264 uint8_t* p_seq_num = &xran_updl_seq_id_num[xran_port_id][cc_id][ant_id];
1267 len2 = xran_app_fragment_packet(mb_oran_hdr_ext,
1268 &loc_tx_mbufs_fragmented.m_table[len_frag],
1269 (uint16_t)(MBUF_TABLE_SIZE - len_frag),
1270 p_xran_dev_ctx->fh_init.mtu,
1271 p_xran_dev_ctx->direct_pool,
1272 p_xran_dev_ctx->indirect_pool,
1277 ((iqWidth == 16) || (compType == XRAN_COMP_HDR_TYPE_STATIC)) ? 0 : 1);
1279 /* Free input packet */
1280 rte_pktmbuf_free(mb_oran_hdr_ext);
1282 /* If we fail to fragment the packet */
1283 if (unlikely(len2 < 0))
1285 print_err("len2= %d\n", len2);
1288 if (unlikely(len2 > 1))
1290 for (int32_t i = len_frag; i < len_frag + len2; i++)
1293 m = loc_tx_mbufs_fragmented.m_table[i];
1294 struct rte_ether_hdr* eth_hdr = (struct rte_ether_hdr*)
1295 rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
1296 if (eth_hdr == NULL)
1298 rte_panic("No headroom in mbuf.\n");
1304 if (unlikely(len_frag > XRAN_MAX_PKT_BURST_PER_SYM)) {
1305 rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
1307 loc_tx_mbufs_fragmented.len = len_frag;
1309 } /* section loop */
1310 total_sections += num_sections;
1312 /* Transmit packets */
1313 #ifdef TRANSMIT_BURST
1314 if (loc_tx_mbufs.len)
1316 for (int32_t i = 0; i < loc_tx_mbufs.len; i++)
1318 p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs.m_table[i], ETHER_TYPE_ECPRI, vf_id);
1320 loc_tx_mbufs.len = 0;
1323 /* Transmit fragmented packets */
1324 if (unlikely(loc_tx_mbufs_fragmented.len))
1326 for (int32_t i = 0; i < loc_tx_mbufs_fragmented.len; i++)
1328 p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs_fragmented.m_table[i], ETHER_TYPE_ECPRI, vf_id);
1330 loc_tx_mbufs_fragmented.len = 0;
1332 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
1333 } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
1335 struct xran_common_counters* pCnt = &p_xran_dev_ctx->fh_counters;
1336 pCnt->tx_counter += total_sections;
1337 pCnt->tx_bytes_counter += elm_bytes;
/*
 * Dispatch TX U-plane processing for the current OTA symbol on one device.
 *
 * @param arg  struct xran_device_ctx* for the port being serviced (cast below).
 *
 * Derives tti/frame/subframe/slot/symbol ids from the OTA symbol counter
 * (offset by sym_up so O-DU transmits ahead of OTA time and O-RU after),
 * then either:
 *   - O-DU with C-Plane enabled: invokes the registered tx_sym_gen_func
 *     callback to generate U-Plane packets from the C-Plane section DB, or
 *   - otherwise: transmits prepared symbol buffers directly via
 *     xran_process_tx_sym_cp_off(), plus SRS when enabled.
 *
 * NOTE(review): the function's return path is not fully visible in this
 * excerpt — presumably returns retval/0; confirm against the full source.
 */
1343 int32_t xran_process_tx_sym(void *arg)
1347 uint32_t numSlotMu1 = 5;
1349 uint32_t mlogVar[15];
1350 uint32_t mlogVarCnt = 0;
1352 unsigned long t1 = MLogTick();
1354 void *pHandle = NULL;
1357 uint8_t num_eAxc = 0;
1358 uint8_t num_eAxAntElm = 0;
1359 uint8_t num_CCPorts = 0;
1360 uint32_t frame_id = 0;
1361 uint32_t subframe_id = 0;
1362 uint32_t slot_id = 0;
1363 uint32_t sym_id = 0;
1364 uint32_t sym_idx = 0;
1367 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *) arg;
1368 enum xran_in_period inPeriod;
1369 uint32_t interval = p_xran_dev_ctx->interval_us_local;
1370 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
/* Nothing to do until shared xran<->PHY buffers are initialized. */
1372 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
1375 pHandle = p_xran_dev_ctx;
1377 /* O-RU: send symb after OTA time with delay (UL) */
1378 /* O-DU: send symb in advance of OTA time (DL) */
1379 sym_idx = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME(interval)*1000, &inPeriod);
1381 tti = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1382 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1383 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
/* Correct the second-start SFN when the offset symbol crossed a one-second
 * period boundary (XranOffsetSym reports which period sym_idx fell into). */
1385 uint16_t sfnSecStart = xran_getSfnSecStart();
1386 if (unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
1389 sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
1391 else if (unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
1394 if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
1396 sfnSecStart -= NUM_OF_FRAMES_PER_SECOND;
1400 sfnSecStart += NUM_OF_FRAMES_PER_SFN_PERIOD - NUM_OF_FRAMES_PER_SECOND;
1403 frame_id = XranGetFrameNum(tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1404 // ORAN frameId, 8 bits, [0, 255]
1405 frame_id = (frame_id & 0xff);
1407 sym_id = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1408 ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval)) % XRAN_MAX_SECTIONDB_CTX;
1410 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
/* Record timing/identity variables for MLog tracing of this dispatch. */
1413 mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
1414 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
1415 mlogVar[mlogVarCnt++] = sym_idx;
1416 mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);
1417 mlogVar[mlogVarCnt++] = tti;
1418 mlogVar[mlogVarCnt++] = frame_id;
1419 mlogVar[mlogVarCnt++] = subframe_id;
1420 mlogVar[mlogVarCnt++] = slot_id;
1421 mlogVar[mlogVarCnt++] = sym_id;
1422 mlogVar[mlogVarCnt++] = PortId;
1423 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
/* Category B O-RU uses the UL eAxC count; all other configurations use the
 * common eAxC count. */
1426 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
1427 num_eAxc = xran_get_num_eAxcUl(pHandle);
1429 num_eAxc = xran_get_num_eAxc(pHandle);
1432 num_CCPorts = xran_get_num_cc(pHandle);
/* O-DU with C-Plane enabled: generate U-Plane from the section DB through
 * the registered tx_sym_gen_func callback. */
1435 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP)
1437 if(p_xran_dev_ctx->tx_sym_gen_func) {
1438 enum xran_comp_hdr_type compType;
1439 enum xran_pkt_dir direction;
1440 uint32_t prb_num, loc_ret = 1;
1441 uint16_t xran_port_id;
1442 PSECTION_DB_TYPE p_sec_db = NULL;
1444 compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
1446 if (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
1447 direction = XRAN_DIR_DL; /* O-DU */
1448 prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
1451 direction = XRAN_DIR_UL; /* RU */
1452 prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
/* Validate every index used to address p_sectiondb before dereferencing. */
1455 if (unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM)) {
1456 print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
1460 if (unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX)) {
1461 print_err("Invalid Context id - %d", ctx_id);
1465 if (unlikely(direction > XRAN_DIR_MAX)) {
1466 print_err("Invalid direction - %d", direction);
1470 if (unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX)) {
1471 print_err("Invalid CC id - %d", num_CCPorts);
1475 if (unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR))) {
1476 print_err("Invalid eAxC id - %d", num_eAxc);
1480 xran_port_id = p_xran_dev_ctx->xran_port_id;
1481 p_sec_db = p_sectiondb[p_xran_dev_ctx->xran_port_id];
1485 retval = p_xran_dev_ctx->tx_sym_gen_func(pHandle, ctx_id, tti, num_CCPorts, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
1486 compType, direction, xran_port_id, p_sec_db);
/* A generation callback is mandatory when C-Plane is enabled. */
1495 rte_panic("p_xran_dev_ctx->tx_sym_gen_func== NULL\n");
/* C-Plane disabled (or O-RU side): transmit prepared symbol buffers directly. */
1500 for (ant_id = 0; ant_id < num_eAxc; ant_id++)
1502 for (cc_id = 0; cc_id < num_CCPorts; cc_id++)
1504 struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
/* puschMaskEnable: only the slot matching puschMaskSlot (mod numSlotMu1) is
 * gated; NOTE(review): both branches below issue the same call — presumably
 * the masked branch should differ, confirm intent in full source. */
1506 if(p_xran_dev_ctx->puschMaskEnable)
1508 if((tti % numSlotMu1 == p_xran_dev_ctx->puschMaskSlot))
1511 retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
1514 retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
1516 if(p_xran_dev_ctx->enableSrs && (p_srs_cfg->symbMask & (1 << sym_id)))
1518 retval = xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
/* Cat B O-RU SRS: iterate over antenna array elements.
 * NOTE(review): the loop initializes num_eAxc but tests/increments ant_id,
 * so ant_id continues from the loop above — this looks like a bug
 * (likely intended: for(ant_id = 0; ant_id < num_eAxAntElm; ant_id++));
 * confirm before changing. */
1524 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && p_xran_dev_ctx->enableSrs && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
1525 num_eAxAntElm = xran_get_num_ant_elm(pHandle);
1526 struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
1527 for(num_eAxc = 0; ant_id < num_eAxAntElm; ant_id++) {
1528 for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
1529 if( p_srs_cfg->symbMask & (1 << sym_id)) {
1530 retval = xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
1536 MLogTask(PID_DISPATCH_TX_SYM, t1, MLogTick());
/*
 * Allocate a CP/UP TX work descriptor backed by an mbuf from the
 * _eth_mbuf_pkt_gen pool.
 *
 * The descriptor lives inside the mbuf's data area (appended room), so it is
 * released later by freeing the mbuf (see xran_pkt_gen_desc_free()).
 *
 * @return pointer to the descriptor, or presumably NULL on allocation
 *         failure — NOTE(review): the NULL checks and return statement are
 *         not visible in this excerpt; confirm against the full source.
 */
1540 struct cp_up_tx_desc *
1541 xran_pkt_gen_desc_alloc(void)
1543 struct rte_mbuf * mb = rte_pktmbuf_alloc(_eth_mbuf_pkt_gen);
1544 struct cp_up_tx_desc * p_desc = NULL;
1545 char * start = NULL;
/* Reserve descriptor-sized room in the mbuf data area. */
1548 start = rte_pktmbuf_append(mb, sizeof(struct cp_up_tx_desc));
/* Descriptor occupies the start of the mbuf payload. */
1550 p_desc = rte_pktmbuf_mtod(mb, struct cp_up_tx_desc *);
1561 xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc)
1565 rte_pktmbuf_free(p_desc->mb);
1568 rte_panic("p_desc->mb == NULL\n");