O-RAN E Maintenance Release contribution for ODULOW
o-du/phy.git: fhi_lib/lib/src/xran_tx_proc.c
/******************************************************************************
*
*   Copyright (c) 2020 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/

/**
 * @brief XRAN TX functionality
 * @file xran_tx_proc.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/

#define _GNU_SOURCE
#include <sched.h>
#include <assert.h>
#include <err.h>
#include <libgen.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <time.h>
#include <unistd.h>
#include <stdio.h>
#include <pthread.h>
#include <malloc.h>
#include <immintrin.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>
#include <rte_ring.h>

#include "xran_fh_o_du.h"

#include "ethdi.h"
#include "xran_pkt.h"
#include "xran_up_api.h"
#include "xran_cp_api.h"
#include "xran_sync_api.h"
#include "xran_lib_mlog_tasks_id.h"
#include "xran_timer.h"
#include "xran_main.h"
#include "xran_common.h"
#include "xran_dev.h"
#include "xran_frame_struct.h"
#include "xran_printf.h"
#include "xran_app_frag.h"
#include "xran_tx_proc.h"
#include "xran_cp_proc.h"

#include "xran_mlog_lnx.h"

enum xran_in_period
{
     XRAN_IN_PREV_PERIOD  = 0,
     XRAN_IN_CURR_PERIOD,
     XRAN_IN_NEXT_PERIOD
};

struct rte_mbuf *
xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
                struct rte_mbuf_ext_shared_info * p_share_data,
                enum xran_compression_method compMeth, enum xran_comp_hdr_type staticEn);

static void
extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
{
    /*long t1 = MLogTick();
    MLogTask(77777, t1, t1+100);*/
}

static inline int32_t XranOffsetSym(int32_t offSym, int32_t otaSym, int32_t numSymTotal, enum xran_in_period* pInPeriod)
{
    int32_t sym;

    /* Assume the offset is usually smaller than the OTA symbol index */
    if (unlikely(offSym > otaSym))
    {
        sym = numSymTotal - offSym + otaSym;
        *pInPeriod = XRAN_IN_PREV_PERIOD;
    }
    else
    {
        sym = otaSym - offSym;

        if (unlikely(sym >= numSymTotal))
        {
            sym -= numSymTotal;
            *pInPeriod = XRAN_IN_NEXT_PERIOD;
        }
        else
        {
            *pInPeriod = XRAN_IN_CURR_PERIOD;
        }
    }

    return sym;
}

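/*
 * Illustrative usage sketch (not part of the upstream source): how
 * XranOffsetSym() wraps a symbol offset around the period boundary. The
 * constants are assumptions chosen for the example only, e.g. 280 symbols
 * per period (20 slots x 14 symbols).
 */
static inline void XranOffsetSymExample(void)
{
    enum xran_in_period period;
    int32_t sym;

    sym = XranOffsetSym(2, 5, 280, &period);   /* sym == 3,   XRAN_IN_CURR_PERIOD */
    sym = XranOffsetSym(3, 1, 280, &period);   /* sym == 278, XRAN_IN_PREV_PERIOD */
    sym = XranOffsetSym(0, 280, 280, &period); /* sym == 0,   XRAN_IN_NEXT_PERIOD */
    (void)sym;
    (void)period;
}
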
// Return SFN at current second start, 10 bits, [0, 1023]
uint16_t xran_getSfnSecStart(void)
{
    return xran_SFN_at_Sec_Start;
}

/* Send burst of packets on an output interface */
static inline int
xran_send_burst(struct xran_device_ctx *dev, struct mbuf_table* p_m_table, uint16_t port)
{
    struct xran_common_counters *  pCnt  = NULL;
    struct rte_mbuf **m_table;
    int32_t i   = 0;
    int32_t n   = 0;
    int32_t ret = 0;

    if(dev)
        pCnt = &dev->fh_counters;
    else
        rte_panic("incorrect dev\n");

    m_table = p_m_table->m_table;
    n       = p_m_table->len;

    for(i = 0; i < n; i++) {
        /*rte_mbuf_sanity_check(m_table[i], 0);*/
        /*rte_pktmbuf_dump(stdout, m_table[i], 256);*/
        pCnt->tx_counter++;
        pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);
        ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI, port);
    }

    if (unlikely(ret < n)) {
        print_err("core %d [p: %d-> vf %d] ret [%d] < n[%d] enq %ld\n",
             rte_lcore_id(), dev->xran_port_id, port, ret, n, pCnt->tx_counter);
    }

    return 0;
}

/* Send a single 5G symbol over multiple packets */
static inline int32_t prepare_symbol_opt(enum xran_pkt_dir direction,
                uint16_t section_id,
                struct rte_mbuf *mb,
                struct rb_map *data,
                uint8_t compMeth,
                uint8_t iqWidth,
                const enum xran_input_byte_order iq_buf_byte_order,
                int prb_start,
                int prb_num,
                uint8_t CC_ID,
                uint8_t RU_Port_ID,
                uint8_t seq_id,
                uint32_t do_copy,
                struct xran_up_pkt_gen_params *xp,
                enum xran_comp_hdr_type staticEn)
{
    int parm_size;
    int32_t n_bytes;
    int32_t prep_bytes;
    int16_t nPktSize;
    uint32_t off;

    iqWidth = (iqWidth == 0) ? 16 : iqWidth;
    switch(compMeth) {
        case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
        case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
        default:
            parm_size = 0;
    }
    n_bytes = (3 * iqWidth + parm_size) * prb_num;
    n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);

    nPktSize = sizeof(struct rte_ether_hdr)
                + sizeof(struct xran_ecpri_hdr)
                + sizeof(struct radio_app_common_hdr)
                + sizeof(struct data_section_hdr)
                + n_bytes;
    if ((compMeth != XRAN_COMPMETHOD_NONE) && (staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
        nPktSize += sizeof(struct data_section_compression_hdr);

#if 0
    /* radio app header */
    xp->app_params.data_direction = direction;
    xp->app_params.payl_ver       = 1;
    xp->app_params.filter_id      = 0;
    xp->app_params.frame_id       = frame_id;
    xp->app_params.sf_slot_sym.subframe_id    = subframe_id;
    xp->app_params.sf_slot_sym.slot_id        = xran_slotid_convert(slot_id, 0);
    xp->app_params.sf_slot_sym.symb_id        = symbol_no;

    /* convert to network byte order */
    xp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp->app_params.sf_slot_sym.value);
#endif

    xp->sec_hdr.fields.sect_id    = section_id;
    xp->sec_hdr.fields.num_prbu   = (uint8_t)XRAN_CONVERT_NUMPRBC(prb_num);
    xp->sec_hdr.fields.start_prbu = (uint8_t)prb_start;
    xp->sec_hdr.fields.sym_inc    = 0;
    xp->sec_hdr.fields.rb         = 0;

    /* compression */
    xp->compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
    xp->compr_hdr_param.ud_comp_hdr.ud_iq_width  = XRAN_CONVERT_IQWIDTH(iqWidth);
    xp->compr_hdr_param.rsrvd                    = 0;

    /* network byte order */
    xp->sec_hdr.fields.all_bits  = rte_cpu_to_be_32(xp->sec_hdr.fields.all_bits);

    if (mb == NULL){
        MLogPrint(NULL);
        errx(1, "out of mbufs after %d packets", 1);
    }

    prep_bytes = xran_prepare_iq_symbol_portion(mb,
                                                data,
                                                iq_buf_byte_order,
                                                n_bytes,
                                                xp,
                                                CC_ID,
                                                RU_Port_ID,
                                                seq_id,
                                                staticEn,
                                                do_copy);
    if (prep_bytes <= 0)
        errx(1, "failed preparing symbol");

    rte_pktmbuf_pkt_len(mb)  = nPktSize;
    rte_pktmbuf_data_len(mb) = nPktSize;

#ifdef DEBUG
    printf("Symbol prep_bytes %d (payload %d bytes)\n", prep_bytes, n_bytes);
#endif

    return prep_bytes;
}

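/*
 * Worked sizing example for prepare_symbol_opt() (illustrative; the numbers
 * are assumptions, not upstream defaults). For block floating point with
 * iqWidth = 9 over a full 273-PRB carrier, each PRB carries 3 * 9 = 27 bytes
 * of packed IQ plus one udCompParam byte:
 */
static inline int32_t prepare_symbol_opt_sizing_example(void)
{
    const int iqWidth   = 9;    /* XRAN_COMPMETHOD_BLKFLOAT */
    const int parm_size = 1;    /* one exponent byte per PRB */
    const int prb_num   = 273;

    return (3 * iqWidth + parm_size) * prb_num; /* (27 + 1) * 273 = 7644 bytes */
}
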
int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id,
    int32_t do_srs)
{
    int32_t     retval = 0;
    char        *pos = NULL;
    char        *p_sec_iq = NULL;
    void        *mb  = NULL;
    void        *send_mb  = NULL;
    int         prb_num = 0;
    uint16_t    iq_sample_size_bits = 16;
    uint16_t    vf_id = 0;

    struct xran_prb_map *prb_map = NULL;
    uint8_t  num_ant_elm  = 0;

    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
    if (p_xran_dev_ctx == NULL)
        return retval;
    struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
    struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);

    num_ant_elm = xran_get_num_ant_elm(pHandle);
    enum xran_pkt_dir direction;
    enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;

    struct rte_mbuf *eth_oran_hdr = NULL;
    char        *ext_buff = NULL;
    uint16_t    ext_buff_len = 0;
    struct rte_mbuf *tmp = NULL;
    rte_iova_t ext_buff_iova = 0;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;

    staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;

    if(PortId >= XRAN_PORTS_NUM)
        rte_panic("incorrect PORT ID\n");

    struct rte_mbuf_ext_shared_info * p_share_data = NULL;
    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
        direction = XRAN_DIR_DL; /* O-DU */
        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
    } else {
        direction = XRAN_DIR_UL; /* RU */
        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
    }

    if(xran_fs_get_slot_type(PortId, cc_id, tti, ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SLOT_TYPE_DL : XRAN_SLOT_TYPE_UL)) ==  1
            || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1
            || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_FDD) ==  1){

        if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SYMBOL_TYPE_DL : XRAN_SYMBOL_TYPE_UL)
           || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){

            vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
            pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
            mb  = (void*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
            prb_map  = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;

            if(prb_map){
                int32_t elmIdx = 0;
                for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++){
                    //print_err("tti is %d, cc_id is %d, ant_id is %d, prb_map->nPrbElm id - %d", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, prb_map->nPrbElm);
                    uint16_t sec_id  = elmIdx;
                    struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
                    struct xran_section_desc * p_sec_desc = NULL;
                    p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sec_id];

                    if(prb_map_elm == NULL){
                        rte_panic("p_sec_desc == NULL\n");
                    }

                    p_sec_desc = prb_map_elm->p_sec_desc[sym_id][0];

                    p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);

                    /* calculate offset for external buffer */
                    ext_buff_len = p_sec_desc->iq_buffer_len;
                    ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
                                    sizeof(struct xran_ecpri_hdr) +
                                    sizeof(struct radio_app_common_hdr) +
                                    sizeof(struct data_section_hdr));

                    ext_buff_len += RTE_PKTMBUF_HEADROOM +
                                    sizeof(struct xran_ecpri_hdr) +
                                    sizeof(struct radio_app_common_hdr) +
                                    sizeof(struct data_section_hdr) + 18;

                    if ((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE) && (staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)) {
                        ext_buff     -= sizeof(struct data_section_compression_hdr);
                        ext_buff_len += sizeof(struct data_section_compression_hdr);
                    }

                    eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
                    if (unlikely(eth_oran_hdr == NULL)) {
                        rte_panic("Failed rte_pktmbuf_alloc\n");
                    }

                    p_share_data->free_cb = extbuf_free_callback;
                    p_share_data->fcb_opaque = NULL;
                    rte_mbuf_ext_refcnt_set(p_share_data, 1);

                    ext_buff_iova = rte_mempool_virt2iova(mb);
                    if (unlikely(ext_buff_iova == 0)) {
                        rte_panic("Failed rte_mempool_virt2iova\n");
                    }

                    if (unlikely(((rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
                        rte_panic("Failed rte_mempool_virt2iova RTE_BAD_IOVA\n");
                    }

                    rte_pktmbuf_attach_extbuf(eth_oran_hdr,
                                              ext_buff,
                                              ext_buff_iova + RTE_PTR_DIFF(ext_buff, mb),
                                              ext_buff_len,
                                              p_share_data);

                    rte_pktmbuf_reset_headroom(eth_oran_hdr);

                    tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
                    if (unlikely(tmp == NULL)) {
                        rte_panic("Failed rte_pktmbuf_prepend\n");
                    }
                    send_mb = eth_oran_hdr;

                    uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
                                          xran_get_updl_seqid(pHandle, cc_id, ant_id) :
                                          xran_get_upul_seqid(pHandle, cc_id, ant_id);

                    /* first all PRBs */
                    int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
                                      send_mb,
                                      (uint8_t *)p_sec_iq,
                                      prb_map_elm->compMethod,
                                      prb_map_elm->iqWidth,
                                      p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
                                      frame_id, subframe_id, slot_id, sym_id,
                                      prb_map_elm->nRBStart, prb_map_elm->nRBSize,
                                      cc_id, ant_id,
                                      seq_id,
                                      0,
                                      staticEn);

                    rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
                    pCnt->tx_counter++;
                    pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
                    p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
                }
            } else {
                printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, sym_id);
            }

            if(p_xran_dev_ctx->enablePrach
                && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)) {   /* Only RU needs to send PRACH I/Q */
                uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);

                if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
                        && (is_prach_slot == 1)
                        && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])
                        && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) {
                    int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;
                    int compMethod, parm_size;
                    uint8_t symb_id_offset = sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id];

                    compMethod = p_xran_dev_ctx->fh_cfg.ru_conf.compMeth_PRACH;
                    switch(compMethod) {
                        case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
                        case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
                        default:
                            parm_size = 0;
                    }
                    pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[symb_id_offset].pData;
                    //pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;
                    /*pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id])
                            * (3*p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth + parm_size)
                            * pPrachCPConfig->numPrbc;*/
                    mb  = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;

                    send_symbol_ex(pHandle,
                            direction,
                            xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, slot_id),
                            (struct rte_mbuf *)mb,
                            (uint8_t *)pos,
                            compMethod,
                            p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth_PRACH,
                            p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
                            frame_id, subframe_id, slot_id, sym_id,
                            pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
                            cc_id, prach_port_id,
                            xran_get_upul_seqid(pHandle, cc_id, prach_port_id));
                    retval = 1;
                }
            } /* if(p_xran_dev_ctx->enablePrach ..... */
        } /* RU mode or C-Plane is not used */
    }

    return retval;
}

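/*
 * Layout of the external buffer attached in the zero-copy path above
 * (illustrative; the extra 18 bytes added to ext_buff_len are an upstream
 * slack constant, which appears to cover the prepended Ethernet header plus
 * padding):
 *
 *   ext_buff                                                     p_sec_iq
 *   |<- RTE_PKTMBUF_HEADROOM ->|<- eCPRI/radio-app/section hdrs ->|<- IQ data ->|
 *
 * The mbuf is attached at ext_buff so the transport headers are written in
 * place directly in front of the IQ samples already sitting in the
 * front-haul buffer; the IQ payload itself is never copied.
 */
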
int32_t
xran_process_tx_srs_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id)
{
    int32_t     retval = 0;
    char        *pos = NULL;
    char        *p_sec_iq = NULL;
    void        *mb  = NULL;
    void        *send_mb  = NULL;
    int         prb_num = 0;
    uint16_t    iq_sample_size_bits = 16;

    struct xran_prb_map *prb_map = NULL;
    uint8_t  num_ant_elm  = 0;

    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
    struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
    struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);

    num_ant_elm = xran_get_num_ant_elm(pHandle);
    enum xran_pkt_dir direction;

    struct rte_mbuf *eth_oran_hdr = NULL;
    char        *ext_buff = NULL;
    uint16_t    ext_buff_len = 0;
    struct rte_mbuf *tmp = NULL;
    rte_iova_t ext_buff_iova = 0;
    int32_t ant_elm_eAxC_id = ant_id + p_srs_cfg->eAxC_offset;
    uint32_t vf_id = 0;
    enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;

    if (p_xran_dev_ctx != NULL)
    {

    if(p_xran_dev_ctx->xran_port_id >= XRAN_PORTS_NUM)
        rte_panic("incorrect PORT ID\n");

    struct rte_mbuf_ext_shared_info * p_share_data = NULL;

    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
        direction = XRAN_DIR_DL; /* O-DU */
        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
        rte_panic("incorrect O_DU\n");
    } else {
        direction = XRAN_DIR_UL; /* RU */
        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
    }

    staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;

#if 1
    if (tti % 5 == 3) {
        {
#else
    if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) ==  1
            || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_FDD) ==  1) {
        if(xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
           || xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD) {
#endif
            pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
            mb  = (void*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
            prb_map  = (struct xran_prb_map *) p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
            vf_id  = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_elm_eAxC_id);

            if(prb_map) {
                int32_t elmIdx = 0;
                for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) {
                    uint16_t sec_id  = elmIdx;
                    struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
                    struct xran_section_desc * p_sec_desc = NULL;

                    if(prb_map_elm == NULL) {
                        rte_panic("p_sec_desc == NULL\n");
                    }

                    /* skip, if not scheduled */
                    if(sym_id < prb_map_elm->nStartSymb || sym_id >= prb_map_elm->nStartSymb + prb_map_elm->numSymb)
                        return 0;

                    p_share_data = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
                    p_sec_desc = prb_map_elm->p_sec_desc[sym_id][0];
                    p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);

                    /* calculate offset for external buffer */
                    ext_buff_len = p_sec_desc->iq_buffer_len;
                    ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
                                    sizeof(struct xran_ecpri_hdr) +
                                    sizeof(struct radio_app_common_hdr) +
                                    sizeof(struct data_section_hdr));

                    ext_buff_len += RTE_PKTMBUF_HEADROOM +
                                    sizeof(struct xran_ecpri_hdr) +
                                    sizeof(struct radio_app_common_hdr) +
                                    sizeof(struct data_section_hdr) + 18;

                    if ((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE) && (staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)) {
                        ext_buff     -= sizeof(struct data_section_compression_hdr);
                        ext_buff_len += sizeof(struct data_section_compression_hdr);
                    }

//                  eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
                    eth_oran_hdr = xran_ethdi_mbuf_indir_alloc();

                    if (unlikely(eth_oran_hdr == NULL)) {
                        rte_panic("Failed rte_pktmbuf_alloc\n");
                    }

                    p_share_data->free_cb = extbuf_free_callback;
                    p_share_data->fcb_opaque = NULL;
                    rte_mbuf_ext_refcnt_set(p_share_data, 1);

                    ext_buff_iova = rte_mempool_virt2iova(mb);
                    if (unlikely(ext_buff_iova == 0)) {
                        rte_panic("Failed rte_mempool_virt2iova\n");
                    }

                    if (unlikely(((rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
                        rte_panic("Failed rte_mempool_virt2iova RTE_BAD_IOVA\n");
                    }

                    rte_pktmbuf_attach_extbuf(eth_oran_hdr,
                                              ext_buff,
                                              ext_buff_iova + RTE_PTR_DIFF(ext_buff, mb),
                                              ext_buff_len,
                                              p_share_data);

                    rte_pktmbuf_reset_headroom(eth_oran_hdr);

                    tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
                    if (unlikely(tmp == NULL)) {
                        rte_panic("Failed rte_pktmbuf_prepend\n");
                    }
                    send_mb = eth_oran_hdr;

                    uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
                                          xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :
                                          xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id);
                    /* first all PRBs */
                    int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
                                      send_mb,
                                      (uint8_t *)p_sec_iq,
                                      prb_map_elm->compMethod,
                                      prb_map_elm->iqWidth,
                                      p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
                                      frame_id, subframe_id, slot_id, sym_id,
                                      prb_map_elm->nRBStart, prb_map_elm->nRBSize,
                                      cc_id, ant_elm_eAxC_id,
                                      seq_id,
                                      0,
                                      staticEn);

                    rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
                    pCnt->tx_counter++;
                    pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
                    p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
                }
            } else {
                printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_elm_eAxC_id, sym_id);
            }
        }
    }
    }

    return retval;
}

struct rte_mbuf *
xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
                struct rte_mbuf_ext_shared_info * p_share_data,
                enum xran_compression_method compMeth, enum xran_comp_hdr_type staticEn)
{
    struct rte_mbuf *mb_oran_hdr_ext = NULL;
    struct rte_mbuf *tmp             = NULL;
    int8_t          *ext_buff        = NULL;
    rte_iova_t ext_buff_iova         = 0;

    ext_buff = p_ext_buff - (RTE_PKTMBUF_HEADROOM +
                    sizeof(struct xran_ecpri_hdr) +
                    sizeof(struct radio_app_common_hdr) +
                    sizeof(struct data_section_hdr));

    ext_buff_len += RTE_PKTMBUF_HEADROOM +
                    sizeof(struct xran_ecpri_hdr) +
                    sizeof(struct radio_app_common_hdr) +
                    sizeof(struct data_section_hdr) + 18;

    if ((compMeth != XRAN_COMPMETHOD_NONE) && (staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)) {
        ext_buff     -= sizeof(struct data_section_compression_hdr);
        ext_buff_len += sizeof(struct data_section_compression_hdr);
    }

    mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
    if (unlikely(mb_oran_hdr_ext == NULL)) {
        rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
    }

    p_share_data->free_cb = extbuf_free_callback;
    p_share_data->fcb_opaque = NULL;
    rte_mbuf_ext_refcnt_set(p_share_data, 1);

    ext_buff_iova = rte_mempool_virt2iova(p_ext_buff_start);
    if (unlikely(ext_buff_iova == 0)) {
        rte_panic("Failed rte_mempool_virt2iova\n");
    }

    if (unlikely(((rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
        rte_panic("Failed rte_mempool_virt2iova RTE_BAD_IOVA\n");
    }

    rte_pktmbuf_attach_extbuf(mb_oran_hdr_ext,
                              ext_buff,
                              ext_buff_iova + RTE_PTR_DIFF(ext_buff, p_ext_buff_start),
                              ext_buff_len,
                              p_share_data);

    rte_pktmbuf_reset_headroom(mb_oran_hdr_ext);

    tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(mb_oran_hdr_ext, sizeof(struct rte_ether_hdr));
    if (unlikely(tmp == NULL)) {
        rte_panic("Failed rte_pktmbuf_prepend\n");
    }

    return mb_oran_hdr_ext;
}

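/*
 * Usage sketch for xran_attach_up_ext_buf() (illustrative; the wrapper name
 * and variables are assumptions, not upstream code). The caller passes the
 * mempool object backing the IQ buffer (pCtrl) so its IOVA can be derived,
 * the IQ start address of the section, and a per-section shared-info struct
 * that must stay valid until the NIC releases the mbuf:
 */
static inline struct rte_mbuf *
xran_attach_up_ext_buf_example(uint16_t vf_id,
                               int8_t *iq_pool_obj,  /* pCtrl of the symbol buffer */
                               int8_t *iq_samples,   /* section IQ start inside it */
                               uint16_t iq_len,
                               struct rte_mbuf_ext_shared_info *sh)
{
    /* BFP compression with a dynamic compression header, as in the
     * C-Plane-driven DL path above */
    return xran_attach_up_ext_buf(vf_id, iq_pool_obj, iq_samples, iq_len, sh,
                                  XRAN_COMPMETHOD_BLKFLOAT,
                                  XRAN_COMP_HDR_TYPE_DYNAMIC);
}
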
int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t num_cc, int32_t num_ant, uint32_t frame_id,
    uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
    uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
{
    int32_t     retval = 0;
    struct cp_up_tx_desc*   p_desc = NULL;
    struct xran_ethdi_ctx*  eth_ctx = xran_ethdi_get_ctx();
    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;

    p_desc = xran_pkt_gen_desc_alloc();
    if(p_desc) {
        p_desc->pHandle      = pHandle;
        p_desc->ctx_id       = ctx_id;
        p_desc->tti          = tti;
        p_desc->cc_id        = num_cc;
        p_desc->ant_id       = num_ant;
        p_desc->frame_id     = frame_id;
        p_desc->subframe_id  = subframe_id;
        p_desc->slot_id      = slot_id;
        p_desc->sym_id       = sym_id;
        p_desc->compType     = (uint32_t)compType;
        p_desc->direction    = (uint32_t)direction;
        p_desc->xran_port_id = xran_port_id;
        p_desc->p_sec_db     = (void*)p_sec_db;

        if(likely(p_xran_dev_ctx->xran_port_id < XRAN_PORTS_NUM)) {
            if (rte_ring_enqueue(eth_ctx->up_dl_pkt_gen_ring[p_xran_dev_ctx->xran_port_id], p_desc->mb) == 0)
                return 1;   /* success */
            else
                xran_pkt_gen_desc_free(p_desc);
        } else {
            rte_panic("incorrect port %d", p_xran_dev_ctx->xran_port_id);
        }
    } else {
        print_dbg("xran_pkt_gen_desc_alloc failure %d", p_xran_dev_ctx->xran_port_id);
    }

    return retval;
}

int32_t
xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
                                   uint32_t slot_id, uint32_t sym_id)
{
    int32_t     retval = 0;
    struct cp_up_tx_desc*   p_desc = NULL;
    struct xran_ethdi_ctx*  eth_ctx = xran_ethdi_get_ctx();
    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;

    p_desc = xran_pkt_gen_desc_alloc();
    if(p_desc) {
        p_desc->pHandle     = pHandle;
        p_desc->ctx_id      = ctx_id;
        p_desc->tti         = tti;
        p_desc->cc_id       = cc_id;
        p_desc->ant_id      = ant_id;
        p_desc->frame_id    = frame_id;
        p_desc->subframe_id = subframe_id;
        p_desc->slot_id     = slot_id;
        p_desc->sym_id      = sym_id;

        if(likely(p_xran_dev_ctx->xran_port_id < XRAN_PORTS_NUM)) {
            if (rte_ring_enqueue(eth_ctx->up_dl_pkt_gen_ring[p_xran_dev_ctx->xran_port_id], p_desc->mb) == 0)
                return 1;   /* success */
            else
                xran_pkt_gen_desc_free(p_desc);
        } else {
            rte_panic("incorrect port %d", p_xran_dev_ctx->xran_port_id);
        }
    } else {
        print_dbg("xran_pkt_gen_desc_alloc failure %d", p_xran_dev_ctx->xran_port_id);
    }

    return retval;
}

int32_t
xran_process_tx_sym_cp_on(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
    uint32_t slot_id, uint32_t sym_id)
{
    int32_t     retval = 0;

    struct rte_mbuf *eth_oran_hdr = NULL;
    char        *ext_buff = NULL;
    uint16_t    ext_buff_len = 0;
    struct rte_mbuf *tmp = NULL;
    rte_iova_t ext_buff_iova = 0;
    char        *pos      = NULL;
    char        *p_sec_iq = NULL;
    void        *mb  = NULL;
    struct rte_mbuf *to_free_mbuf = NULL;
    int         prb_num = 0;
    uint16_t    iq_sample_size_bits = 16;
    uint32_t    next = 0;
    int32_t     num_sections = 0;
    uint16_t    len  = 0;
    int16_t     len2 = 0;
    uint16_t    i    = 0;

    uint64_t    t1;
    struct mbuf_table  loc_tx_mbufs;
    struct xran_up_pkt_gen_params loc_xp;

    struct xran_section_info *sectinfo = NULL;
    struct xran_device_ctx   *p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
    enum xran_pkt_dir direction;
    uint16_t vf_id = 0;
    enum xran_comp_hdr_type compType = XRAN_COMP_HDR_TYPE_DYNAMIC;

    struct rte_mbuf_ext_shared_info * p_share_data = NULL;

    if (p_xran_dev_ctx != NULL)
    {
    compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;

    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
        direction = XRAN_DIR_DL; /* O-DU */
        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
    } else {
        direction = XRAN_DIR_UL; /* RU */
        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
    }

    vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
    next = 0;
    num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);
    /* iterate C-Plane configuration to generate corresponding U-Plane */
    if(num_sections)
        prepare_sf_slot_sym(direction, frame_id, subframe_id, slot_id, sym_id, &loc_xp);

    loc_tx_mbufs.len = 0;
    while(next < num_sections) {
        sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);

        if(sectinfo == NULL)
            break;

        if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) {   /* only supports type 1 */
            print_err("Invalid section type in section DB - %d", sectinfo->type);
            continue;
        }

        /* skip, if not scheduled */
        if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)
            continue;

        if(sectinfo->compMeth)
            iq_sample_size_bits = sectinfo->iqWidth;

        print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
                    sectinfo->type, sectinfo->id, sectinfo->startPrbc,
                    sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);

        p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sectinfo->id];

        len  = loc_tx_mbufs.len;
        len2 = 0;
        i    = 0;

        /* Added for Klocwork */
        if (len >= MBUF_TABLE_SIZE) {
            len = MBUF_TABLE_SIZE - 1;
            rte_panic("len >= MBUF_TABLE_SIZE\n");
        }

        to_free_mbuf = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id];
        pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
        mb  = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;

        if(mb == NULL) {
            rte_panic("mb == NULL\n");
        }

        p_sec_iq     = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
        ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;

        mb = xran_attach_up_ext_buf(vf_id, (int8_t *)mb, (int8_t *)p_sec_iq,
                            (uint16_t)ext_buff_len,
                            p_share_data, (enum xran_compression_method)sectinfo->compMeth, compType);
        p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id] = mb;
        rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */

        if(to_free_mbuf) {
            rte_pktmbuf_free(to_free_mbuf);
        }

        /* first all PRBs */
        prepare_symbol_opt(direction, sectinfo->id,
                          mb,
                          (struct rb_map *)p_sec_iq,
                          sectinfo->compMeth,
                          sectinfo->iqWidth,
                          p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
                          sectinfo->startPrbc,
                          sectinfo->numPrbc,
                          cc_id,
                          ant_id,
                          xran_get_updl_seqid(pHandle, cc_id, ant_id),
                          0,
                          &loc_xp,
                          compType);

        /* if we don't need to do any fragmentation */
        if (likely (p_xran_dev_ctx->fh_init.mtu >=
                        sectinfo->numPrbc * (3 * iq_sample_size_bits + 1))) {
            /* no fragmentation */
            loc_tx_mbufs.m_table[len] = mb;
            len2 = 1;
        } else {
            /* fragmentation */
            uint8_t * seq_num = xran_get_updl_seqid_addr(pHandle, cc_id, ant_id);
            if(seq_num)
                (*seq_num)--;
            else
                rte_panic("pointer to seq number is NULL [CC %d Ant %d]\n", cc_id, ant_id);

            len2 = xran_app_fragment_packet(mb,
                            &loc_tx_mbufs.m_table[len],
                            (uint16_t)(MBUF_TABLE_SIZE - len),
                            p_xran_dev_ctx->fh_init.mtu,
                            p_xran_dev_ctx->direct_pool,
                            p_xran_dev_ctx->indirect_pool,
                            sectinfo->startPrbc,
                            sectinfo->numPrbc,
                            seq_num,
                            sectinfo->iqWidth,
                            ((sectinfo->iqWidth == 16) || (compType == XRAN_COMP_HDR_TYPE_STATIC)) ? 0 : 1);

            /* Free input packet */
            rte_pktmbuf_free(mb);

            /* If we fail to fragment the packet */
            if (unlikely (len2 < 0)){
                print_err("len2= %d\n", len2);
                return 0;
            }
        }

        if(len2 > 1){
            for (i = len; i < len + len2; i++) {
                struct rte_mbuf *m;
                m = loc_tx_mbufs.m_table[i];
                struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
                    rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
                if (eth_hdr == NULL) {
                    rte_panic("No headroom in mbuf.\n");
                }
            }
        }

        len += len2;
        if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {
            rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
        }
        loc_tx_mbufs.len = len;
    } /* while(section) */

    /* Transmit packets */
    xran_send_burst(p_xran_dev_ctx, &loc_tx_mbufs, vf_id);
    loc_tx_mbufs.len = 0;
    retval = 1;
    }

    return retval;
}

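/*
 * Worked example of the fragmentation threshold used above (illustrative
 * numbers, not upstream defaults): for uncompressed IQ
 * (iq_sample_size_bits = 16) and a section of numPrbc = 100, the estimate is
 * 100 * (3 * 16 + 1) = 4900 bytes. With a 1500-byte MTU the section is split
 * by xran_app_fragment_packet(); with a 9600-byte jumbo-frame MTU it goes
 * out as a single packet.
 */
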
//#define TRANSMIT_BURST
//#define ENABLE_DEBUG_COREDUMP

#define ETHER_TYPE_ECPRI_BE (0xFEAE)

960 int32_t xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t num_cc, int32_t num_ant, uint32_t frame_id,
961     uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
962     uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
963 {
964     uint8_t seq_id = 0;
965     int32_t cc_id = 0, ant_id = 0;
966     char* ext_buff = NULL;
967     uint16_t ext_buff_len = 0;
968     rte_iova_t ext_buff_iova = 0;
969     char* pos = NULL;
970     char* p_sec_iq = NULL;
971     void* mb = NULL, *mb_base = NULL;
972     struct rte_mbuf* to_free_mbuf = NULL;
973     uint16_t iq_sample_size_bits = 16;
974     uint32_t next = 0;
975     int32_t num_sections = 0, total_sections = 0;
976     uint16_t len = 0, len2 = 0, len_frag = 0;
977     char* pStart = 0;
978     uint16_t cid = 0;
979     uint8_t compMeth = 0;
980     uint8_t iqWidth = 0;
981     int parm_size = 0;
982     int32_t n_bytes = 0, elm_bytes = 0;
983     uint16_t section_id;
984     uint16_t prb_num = 0;
985     uint16_t prb_start = 0;
986     int16_t nPktSize = 0;
987     uint16_t ecpri_payl_size = 0;
988 #ifdef TRANSMIT_BURST
989     struct mbuf_table  loc_tx_mbufs;
990 #endif
991     struct mbuf_table  loc_tx_mbufs_fragmented;
992     struct xran_up_pkt_gen_params xp;
993     struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
994     struct xran_section_info* sectinfo = NULL;
995     struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
996     uint16_t vf_id = 0;
997     struct rte_mbuf_ext_shared_info* p_share_data = NULL;
998     struct xran_sectioninfo_db* ptr_sect_elm = NULL;
999     struct rte_mbuf* mb_oran_hdr_ext = NULL;
1000     struct rte_mempool_objhdr* iova_hdr = NULL;
1001     struct xran_eaxcid_config* conf = &(p_xran_dev_ctx->eAxc_id_cfg);
1002     struct rte_ether_hdr* ether_hdr = NULL;
1003     struct xran_ecpri_hdr* ecpri_hdr = NULL;
1004     struct radio_app_common_hdr* app_hdr = NULL;
1005     struct data_section_hdr* section_hdr = NULL;
1006     struct data_section_compression_hdr* compression_hdr = NULL;
1007     const int16_t ccid_pos = conf->bit_ccId;
1008     const int16_t ccid_mask = conf->mask_ccId;
1009     const int16_t antid_pos = conf->bit_ruPortId;
1010     const int16_t antid_mask = conf->mask_ruPortId;
1011
1012     const int16_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
1013     const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
1014     uint16_t comp_head_upd = 0;
1015
1016     const int16_t total_header_size = (RTE_PKTMBUF_HEADROOM +
1017         sizeof(struct xran_ecpri_hdr) +
1018         sizeof(struct radio_app_common_hdr) +
1019         sizeof(struct data_section_hdr));
1020
1021     uint16_t* __restrict pSrc = NULL;
1022     uint16_t* __restrict pDst = NULL;
1023
1024     const enum xran_input_byte_order iq_buf_byte_order = p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder;
1025
1026     /* radio app header */
1027     xp.app_params.data_feature.value = 0x10;
1028     xp.app_params.data_feature.data_direction = direction;
1029     xp.app_params.frame_id = frame_id;
1030     xp.app_params.sf_slot_sym.subframe_id = subframe_id;
1031     xp.app_params.sf_slot_sym.slot_id = slot_id;
1032     xp.app_params.sf_slot_sym.symb_id = sym_id;
1033     /* convert to network byte order */
1034     xp.app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp.app_params.sf_slot_sym.value);
1035
1036
1037     for (cc_id = 0; cc_id < num_cc; cc_id++)
1038     {
1039         for (ant_id = 0; ant_id < num_ant; ant_id++)
1040         {
1041             ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][ant_id];
1042             if (unlikely(ptr_sect_elm == NULL))
1043                 return (0);
1044             num_sections = ptr_sect_elm->cur_index;
1045
1046             /* iterate C-Plane configuration to generate corresponding U-Plane */
1047             vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
1048             pos = (char*)p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
1049             mb_base = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
1050             if (unlikely(mb_base == NULL))
1051             {
1052                 rte_panic("mb == NULL\n");
1053             }
1054
1055             cid = ((cc_id << ccid_pos) & ccid_mask) | ((ant_id << antid_pos) & antid_mask);
1056             cid = rte_cpu_to_be_16(cid);
1057             iq_sample_size_bits = 16;
1058
1059 #ifdef TRANSMIT_BURST
1060             loc_tx_mbufs.len = 0;
1061 #endif
1062             loc_tx_mbufs_fragmented.len = 0;
1063             len_frag = 0;
1064 #pragma loop_count min=1, max=16
1065             for (next=0; next< num_sections; next++)
1066             {
1067                 sectinfo = &ptr_sect_elm->list[next];
1068
1069                 if (unlikely(sectinfo == NULL))
1070                     break;
1071                 if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
1072                 {   /* only supports type 1 */
1073                     print_err("Invalid section type in section DB - %d", sectinfo->type);
1074                     continue;
1075                 }
1076                 /* skip, if not scheduled */
1077                 if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
1078                     continue;
1079
1080                 compMeth = sectinfo->compMeth;
1081                 iqWidth = sectinfo->iqWidth;
1082                 section_id = sectinfo->id;
1083                 prb_start = sectinfo->startPrbc;
1084                 prb_num = sectinfo->numPrbc;
1085                 seq_id = xran_updl_seq_id_num[xran_port_id][cc_id][ant_id]++;
1086                 len2 = 0;
1087
1088                 if (compMeth)
1089                     iq_sample_size_bits = iqWidth;
1090
1091                 comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
1092
1093                 print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
1094                     sectinfo->type, sectinfo->id, sectinfo->startPrbc,
1095                     sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
1096
1097                 p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][section_id];
1098                 p_share_data->free_cb = extbuf_free_callback;
1099                 p_share_data->fcb_opaque = NULL;
1100                 rte_mbuf_ext_refcnt_set(p_share_data, 1);
1101
1102 #ifdef TRANSMIT_BURST
1103                 len = loc_tx_mbufs.len;
1104                 //Added for Klocworks
1105                 if (unlikely(len >= MBUF_TABLE_SIZE))
1106                 {
1107                     len = MBUF_TABLE_SIZE - 1;
1108                     rte_panic("len >= MBUF_TABLE_SIZE\n");
1109                 }
1110 #endif
1111                 p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
1112                 ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
1113
1114                 ext_buff = p_sec_iq - total_header_size;
1115                 ext_buff_len += (total_header_size + 18);
1116
1117                 if (comp_head_upd)
1118                 {
1119                     ext_buff -= sizeof(struct data_section_compression_hdr);
1120                     ext_buff_len += sizeof(struct data_section_compression_hdr);
1121                 }
1122
1123                 mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
1124                 if (unlikely((mb_oran_hdr_ext) == NULL))
1125                 {
1126                     rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
1127                 }
1128
1129                 iova_hdr = (struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size);
1130                 ext_buff_iova = iova_hdr->iova;
1131
1132 #ifdef ENABLE_DEBUG_COREDUMP
1133                 if (unlikely(ext_buff_iova == 0))
1134                 {
1135                     rte_panic("Failed rte_mem_virt2iova\n");
1136                 }
1137                 if (unlikely(((rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA))
1138                 {
1139                     rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
1140                 }
1141 #endif
1142                 mb_oran_hdr_ext->buf_addr = ext_buff;
1143                 mb_oran_hdr_ext->buf_iova = ext_buff_iova + RTE_PTR_DIFF(ext_buff, mb_base);
1144                 mb_oran_hdr_ext->buf_len = ext_buff_len;
1145                 mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
1146                 mb_oran_hdr_ext->shinfo = p_share_data;
1147                 mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
1148                 mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
1149                 mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
1150                 mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
1151
1152                 mb = (void*)mb_oran_hdr_ext;
1153
1154                 to_free_mbuf = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id];
1155                 p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id] = mb;
1156                 rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
1157                 if (to_free_mbuf)
1158                 {
1159                     rte_pktmbuf_free(to_free_mbuf);
1160                 }
1161
1162                 pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
1163
1164                 ether_hdr = (struct rte_ether_hdr*)pStart;
1165
1166                 /* Fill in the ethernet header. */
1167 #ifndef TRANSMIT_BURST
1168                 rte_eth_macaddr_get(mb_oran_hdr_ext->port, &ether_hdr->s_addr);         /* set source addr */
1169                 ether_hdr->d_addr = eth_ctx->entities[vf_id][ID_O_RU];                  /* set dst addr */
1170                 ether_hdr->ether_type = ETHER_TYPE_ECPRI_BE;                            /* ethertype */
1171 #endif
1172                 iqWidth = (iqWidth == 0) ? 16 : iqWidth;
1173                 switch (compMeth)
1174                 {
1175                     case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
1176                     case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
1177                     default:
1178                         parm_size = 0;
1179                 }
1180                 n_bytes = (3 * iqWidth + parm_size) * prb_num;
1181                 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
1182
1183                 nPktSize = sizeof(struct rte_ether_hdr)
1184                         + sizeof(struct xran_ecpri_hdr)
1185                         + sizeof(struct radio_app_common_hdr)
1186                         + sizeof(struct data_section_hdr)
1187                         + n_bytes;
1188
1189                 if (comp_head_upd)
1190                     nPktSize += sizeof(struct data_section_compression_hdr);
1191
1192                 xp.sec_hdr.fields.sect_id = section_id;
1193                 xp.sec_hdr.fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(prb_num);
1194                 xp.sec_hdr.fields.start_prbu = (uint8_t)prb_start;
1195                 xp.sec_hdr.fields.sym_inc = 0;
1196                 xp.sec_hdr.fields.rb = 0;
1197                     /* network byte order */
1198                 xp.sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp.sec_hdr.fields.all_bits);
1199
1200                     /* compression */
1201                 xp.compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
1202                 xp.compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
1203                 xp.compr_hdr_param.rsrvd = 0;
1204
1205                 ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
1206
1207                 ecpri_payl_size = n_bytes
1208                     + sizeof(struct data_section_hdr)
1209                     + sizeof(struct radio_app_common_hdr)
1210                     + XRAN_ECPRI_HDR_SZ; //xran_get_ecpri_hdr_size();
1211
1212                 if (comp_head_upd)
1213                     ecpri_payl_size += sizeof(struct data_section_compression_hdr);
1214
1215                 ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
1216                 ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
1217                 ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
1218                 ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_payl_size);
1219
1220                     /* one to one lls-CU to RU only and band sector is the same */
1221                 ecpri_hdr->ecpri_xtc_id = cid;
1222
1223                     /* no transport layer fragmentation supported */
1224                 ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
1225                 ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
1226
1227                 pSrc = (uint16_t*)&(xp.app_params);
1228                 pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
1229                 *pDst++ = *pSrc++;
1230                 *pDst++ = *pSrc++;
1231                 *pDst++ = *pSrc++;
1232                 *pDst++ = *pSrc++;
1233                 if (comp_head_upd)
1234                 {
1235                     *pDst++ = *pSrc++;
1236                 }
1237
1238                 rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
1239                 rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
1240
1241                 elm_bytes += nPktSize;
1242
1243                 /* Restore fragmentation support in this code version */
1244                 /* if we don't need to do any fragmentation */
                if (likely(p_xran_dev_ctx->fh_init.mtu >= sectinfo->numPrbc * (3 * iq_sample_size_bits + 1)))
                {
                    /* no fragmentation */
                    len2 = 1;
#ifdef TRANSMIT_BURST
                    loc_tx_mbufs.m_table[len++] = mb;
                    if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM))
                    {
                        rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
                    }
                    loc_tx_mbufs.len = len;
#else
                    xran_enqueue_mbuf(mb_oran_hdr_ext, eth_ctx->tx_ring[vf_id]);
#endif
                }
                else
                {
                    /* fragmentation */
                    /* only burst transmission mode is supported for fragmented packets */
                    uint8_t* p_seq_num = &xran_updl_seq_id_num[xran_port_id][cc_id][ant_id];
                    (*p_seq_num)--;

                    len2 = xran_app_fragment_packet(mb_oran_hdr_ext,
                        &loc_tx_mbufs_fragmented.m_table[len_frag],
                        (uint16_t)(MBUF_TABLE_SIZE - len_frag),
                        p_xran_dev_ctx->fh_init.mtu,
                        p_xran_dev_ctx->direct_pool,
                        p_xran_dev_ctx->indirect_pool,
                        prb_start,
                        prb_num,
                        p_seq_num,
                        iqWidth,
                        ((iqWidth == 16) || (compType == XRAN_COMP_HDR_TYPE_STATIC)) ? 0 : 1);

                    /* Free input packet */
                    rte_pktmbuf_free(mb_oran_hdr_ext);

                    /* If we fail to fragment the packet */
                    if (unlikely(len2 < 0))
                    {
                        print_err("len2= %d\n", len2);
                        continue;
                    }
                    if (unlikely(len2 > 1))
                    {
                        /* prepend an Ethernet header to each produced fragment
                         * before it is handed to the transmit ring */
                        for (int32_t i = len_frag; i < len_frag + len2; i++)
                        {
                            struct rte_mbuf* m;
                            m = loc_tx_mbufs_fragmented.m_table[i];
                            struct rte_ether_hdr* eth_hdr = (struct rte_ether_hdr*)
                                rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
                            if (eth_hdr == NULL)
                            {
                                rte_panic("No headroom in mbuf.\n");
                            }
                        }
                    }

                    len_frag += len2;
                    if (unlikely(len_frag > XRAN_MAX_PKT_BURST_PER_SYM)) {
                        rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
                    }
                    loc_tx_mbufs_fragmented.len = len_frag;
                }
            } /* section loop */
            total_sections += num_sections;

            /* Transmit packets */
#ifdef TRANSMIT_BURST
            if (loc_tx_mbufs.len)
            {
                for (int32_t i = 0; i < loc_tx_mbufs.len; i++)
                {
                    p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs.m_table[i], ETHER_TYPE_ECPRI, vf_id);
                }
                loc_tx_mbufs.len = 0;
            }
#endif
            /* Transmit fragmented packets */
            if (unlikely(loc_tx_mbufs_fragmented.len))
            {
                for (int32_t i = 0; i < loc_tx_mbufs_fragmented.len; i++)
                {
                    p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs_fragmented.m_table[i], ETHER_TYPE_ECPRI, vf_id);
                }
                loc_tx_mbufs_fragmented.len = 0;
            }
        } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
    } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */

    struct xran_common_counters *pCnt = &p_xran_dev_ctx->fh_counters;
    pCnt->tx_counter += total_sections;
    pCnt->tx_bytes_counter += elm_bytes;

    return 1;
}


int32_t xran_process_tx_sym(void *arg)
{
    int32_t     retval = 0;
    uint32_t    tti = 0;
    uint32_t    numSlotMu1 = 5;
#if XRAN_MLOG_VAR
    uint32_t    mlogVar[15];
    uint32_t    mlogVarCnt = 0;
#endif
    unsigned long t1 = MLogTick();

    void        *pHandle = NULL;
    int32_t     ant_id   = 0;
    int32_t     cc_id    = 0;
    uint8_t     num_eAxc = 0;
    uint8_t     num_eAxAntElm = 0;
    uint8_t     num_CCPorts = 0;
    uint32_t    frame_id    = 0;
    uint32_t    subframe_id = 0;
    uint32_t    slot_id     = 0;
    uint32_t    sym_id      = 0;
    uint32_t    sym_idx     = 0;

    uint8_t     ctx_id;
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *) arg;
    enum xran_in_period inPeriod;
    uint32_t interval = p_xran_dev_ctx->interval_us_local;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;

    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return 0;

    pHandle = p_xran_dev_ctx;

    /* O-RU: send symb after OTA time with delay (UL) */
    /* O-DU: send symb in advance of OTA time (DL) */
    sym_idx     = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME(interval)*1000, &inPeriod);
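    /* the OTA symbol counter spans one second (1000 subframes of 1 ms), so the
     * offset symbol index wraps once per second; inPeriod reports whether the
     * offset crossed into the previous or next one-second period */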

    tti         = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
    subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);

    uint16_t sfnSecStart = xran_getSfnSecStart();
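    /* when the offset symbol falls outside the current one-second period,
     * shift the second-start SFN by NUM_OF_FRAMES_PER_SECOND (the 10 ms frames
     * contained in one second), modulo the 1024-frame SFN period */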
    if (unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
    {
        // For DU
        sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
    }
    else if (unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
    {
        // For RU
        if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
        {
            sfnSecStart -= NUM_OF_FRAMES_PER_SECOND;
        }
        else
        {
            sfnSecStart += NUM_OF_FRAMES_PER_SFN_PERIOD - NUM_OF_FRAMES_PER_SECOND;
        }
    }
    frame_id    = XranGetFrameNum(tti, sfnSecStart, SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
    // ORAN frameId, 8 bits, [0, 255]
    frame_id = (frame_id & 0xff);

    sym_id      = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval)) % XRAN_MAX_SECTIONDB_CTX;
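    /* ctx_id selects one of XRAN_MAX_SECTIONDB_CTX section-database contexts,
     * cycling per slot so that C-Plane section state written in advance is
     * still available when the matching U-Plane symbol is generated */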

    print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);

#if XRAN_MLOG_VAR
    mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
    mlogVar[mlogVarCnt++] = sym_idx;
    mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);
    mlogVar[mlogVarCnt++] = tti;
    mlogVar[mlogVarCnt++] = frame_id;
    mlogVar[mlogVarCnt++] = subframe_id;
    mlogVar[mlogVarCnt++] = slot_id;
    mlogVar[mlogVarCnt++] = sym_id;
    mlogVar[mlogVarCnt++] = PortId;
    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
#endif

    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
        num_eAxc    = xran_get_num_eAxcUl(pHandle);
    } else {
        num_eAxc    = xran_get_num_eAxc(pHandle);
    }

    num_CCPorts = xran_get_num_cc(pHandle);

    /* U-Plane */
    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP)
    {
        if(p_xran_dev_ctx->tx_sym_gen_func) {
            enum xran_comp_hdr_type compType;
            enum xran_pkt_dir direction;
            uint32_t prb_num, loc_ret = 1;
            uint16_t xran_port_id;
            PSECTION_DB_TYPE p_sec_db = NULL;

            compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;

            if (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
                direction = XRAN_DIR_DL; /* O-DU */
                prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
            }
            else {
                direction = XRAN_DIR_UL; /* RU */
                prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
            }
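
            /* note: this generation path is entered only for O_DU, so the DL
             * leg above is always taken; the UL leg is kept for symmetry with
             * the generic tx_sym_gen_func interface */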

            /* sanity checks: port, context and direction are used as table
             * indices, so they must be strictly below their table sizes */
            if (unlikely(p_xran_dev_ctx->xran_port_id >= XRAN_PORTS_NUM)) {
                print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
                loc_ret = 0;
            }

            if (unlikely(ctx_id >= XRAN_MAX_SECTIONDB_CTX)) {
                print_err("Invalid Context id - %d", ctx_id);
                loc_ret = 0;
            }

            if (unlikely(direction >= XRAN_DIR_MAX)) {
                print_err("Invalid direction - %d", direction);
                loc_ret = 0;
            }

            if (unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX)) {
                print_err("Invalid CC id - %d", num_CCPorts);
                loc_ret = 0;
            }

            if (unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR))) {
                print_err("Invalid eAxC id - %d", num_eAxc);
                loc_ret = 0;
            }

            xran_port_id = p_xran_dev_ctx->xran_port_id;
            p_sec_db = p_sectiondb[p_xran_dev_ctx->xran_port_id];

            if (loc_ret)
            {
                retval = p_xran_dev_ctx->tx_sym_gen_func(pHandle, ctx_id, tti, num_CCPorts, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
                    compType, direction, xran_port_id, p_sec_db);
            }
            else
            {
                retval = 0;
            }
        }
        else
        {
            rte_panic("p_xran_dev_ctx->tx_sym_gen_func == NULL\n");
        }
    }
    else
    {
        for (ant_id = 0; ant_id < num_eAxc; ant_id++)
        {
            for (cc_id = 0; cc_id < num_CCPorts; cc_id++)
            {
                struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);

                /* skip U-Plane generation on masked PUSCH slots */
                if (!p_xran_dev_ctx->puschMaskEnable
                        || (tti % numSlotMu1 != p_xran_dev_ctx->puschMaskSlot))
                    retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);

                if (p_xran_dev_ctx->enableSrs && (p_srs_cfg->symbMask & (1 << sym_id)))
                {
                    retval = xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
                }
            }
        }
    }

    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && p_xran_dev_ctx->enableSrs && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
        num_eAxAntElm = xran_get_num_ant_elm(pHandle);
        struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
        /* SRS antenna-array-element streams follow the regular eAxC streams,
         * so iterate ant_id from num_eAxc upward on the configured SRS symbols */
        for(ant_id = num_eAxc; ant_id < num_eAxAntElm; ant_id++) {
            for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
                if(p_srs_cfg->symbMask & (1 << sym_id)) {
                    retval = xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
                }
            }
        }
    }

    MLogTask(PID_DISPATCH_TX_SYM, t1, MLogTick());
    return retval;
}

struct cp_up_tx_desc *
xran_pkt_gen_desc_alloc(void)
{
    struct rte_mbuf *mb = rte_pktmbuf_alloc(_eth_mbuf_pkt_gen);
    struct cp_up_tx_desc *p_desc = NULL;
    char *start = NULL;

    if(mb) {
        start = rte_pktmbuf_append(mb, sizeof(struct cp_up_tx_desc));
        if(start) {
            p_desc = rte_pktmbuf_mtod(mb, struct cp_up_tx_desc *);
            if(p_desc) {
                p_desc->mb = mb;
                return p_desc;
            }
        }
    }
    return p_desc;
}
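
/* Usage sketch (illustrative only): descriptors are carved out of mbufs so
 * they can travel through an rte_ring without a separate allocator, e.g.
 *
 *     struct cp_up_tx_desc *p = xran_pkt_gen_desc_alloc();
 *     if (p) {
 *         // ... populate p and enqueue it for the packet-generation thread;
 *         // the consumer releases it with xran_pkt_gen_desc_free(p)
 *     }
 */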

int32_t
xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc)
{
    if (p_desc) {
        if (p_desc->mb) {
            rte_pktmbuf_free(p_desc->mb);
            return 0;
        } else {
            rte_panic("p_desc->mb == NULL\n");
        }
    }
    return -1;
}