O-RAN E Maintenance Release contribution for ODULOW
fhi_lib/lib/src/xran_main.c (o-du/phy.git)
/******************************************************************************
*
*   Copyright (c) 2020 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/

/**
 * @brief XRAN main functionality module
 * @file xran_main.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/

#define _GNU_SOURCE
#include <sched.h>
#include <assert.h>
#include <err.h>
#include <libgen.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <time.h>
#include <unistd.h>
#include <stdio.h>
#include <pthread.h>
#include <malloc.h>
#include <immintrin.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_version.h>
#include <rte_flow.h>
#if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */
#include <rte_ecpri.h>
#endif
#include "xran_fh_o_du.h"
#include "xran_main.h"

#include "ethdi.h"
#include "xran_mem_mgr.h"
#include "xran_tx_proc.h"
#include "xran_rx_proc.h"
#include "xran_pkt.h"
#include "xran_up_api.h"
#include "xran_cp_api.h"
#include "xran_sync_api.h"
#include "xran_lib_mlog_tasks_id.h"
#include "xran_timer.h"
#include "xran_common.h"
#include "xran_dev.h"
#include "xran_frame_struct.h"
#include "xran_printf.h"
#include "xran_app_frag.h"
#include "xran_cp_proc.h"
#include "xran_cb_proc.h"
#include "xran_ecpri_owd_measurements.h"

#include "xran_mlog_lnx.h"

static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {NULL};

uint64_t interval_us = 1000; /**< TTI interval in microseconds of the cell with the maximum numerology */

uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM]     = {0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */
uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM]     = {0,0,0,0}; /**< Symbol index in a slot [0:13] */
uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1]
                                                where TTI is the TTI interval in microseconds */

uint16_t xran_SFN_at_Sec_Start   = 0;    /**< SFN at current second start */
uint16_t xran_max_frame          = 1023; /**< Max frame number used: 99 (old compatibility mode) or 1023 per section 9.7.2, System Frame Number Calculation */

static uint64_t xran_total_tick = 0, xran_used_tick = 0;
static uint32_t xran_num_cores_used = 0;
static uint32_t xran_core_used[64] = {0};
static int32_t first_call = 0;

struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void);
int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc);

void tti_ota_cb(struct rte_timer *tim, void *arg);
void tti_to_phy_cb(struct rte_timer *tim, void *arg);

int32_t xran_pkt_gen_process_ring(struct rte_ring *r);

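/*
 * Derives the SFN aligned with the start of the current second: UTC seconds
 * are converted to GPS seconds, multiplied by 100 frames per second, and
 * taken modulo (xran_max_frame + 1). The same once-per-second pass also
 * snapshots the per-port TX/RX byte counters into per-second rates.
 */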
void
xran_updateSfnSecStart(void)
{
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
    struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
    int32_t xran_ports  = p_xran_dev_ctx->fh_init.xran_ports;
    int32_t o_xu_id = 0;
    uint64_t currentSecond = timing_get_current_second();
    // Assume always positive
    uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
    uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;
    uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
    xran_SFN_at_Sec_Start = sfn;

    for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){
        pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter;
        pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter;
        pCnt->tx_bytes_counter = 0;
        pCnt->rx_bytes_counter = 0;
        p_xran_dev_ctx++;
        pCnt = &p_xran_dev_ctx->fh_counters;
    }
}

static inline int32_t
xran_getSlotIdxSecond(uint32_t interval)
{
    int32_t frameIdxSecond = xran_getSfnSecStart();
    int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval);
    return slotIndxSecond;
}

enum xran_if_state
xran_get_if_state(void)
{
    return xran_if_current_state;
}

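/*
 * Checks whether (subframe_id, slot_id) carries a PRACH occasion on the given
 * port. For FR1 (numerology 0 or 1) isPRACHslot[] is indexed by subframe; for
 * numerology 3 it is indexed by 60 kHz slot, hence slotidx>>1 below.
 * Numerology 2 is rejected as unsupported.
 */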
int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id)
{
    int32_t is_prach_slot = 0;
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
    if (p_xran_dev_ctx == NULL)
    {
        print_err("PortId %d does not exist\n", PortId);
        return is_prach_slot;
    }
    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
    uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);

    if (nNumerology < 2){
        /* For FR1, in 3GPP TS 38.211 tables 6.3.3.2-2 and 6.3.3.2-3 it is the subframe index */
        if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){
            if (pPrachCPConfig->nrofPrachInSlot == 0){
                if(slot_id == 0)
                    is_prach_slot = 1;
            }
            else if (pPrachCPConfig->nrofPrachInSlot == 2)
                is_prach_slot = 1;
            else{
                if (nNumerology == 0)
                    is_prach_slot = 1;
                else if (slot_id == 1)
                    is_prach_slot = 1;
            }
        }
    } else if (nNumerology == 3){
        /* For FR2, in 3GPP TS 38.211 table 6.3.3.2-4 it is the slot index of the 60 kHz slot */
        uint32_t slotidx;
        slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id;
        if (pPrachCPConfig->nrofPrachInSlot == 2){
            if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)
                is_prach_slot = 1;
        } else {
            if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){
                is_prach_slot = 1;
            }
        }
    } else
        print_err("Numerology %d not supported", nNumerology);
    return is_prach_slot;
}

int32_t
xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
    struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);

    if(p_srs){
        p_srs->symbMask = pConf->srs_conf.symbMask;
        p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset;
        print_dbg("SRS sym         %d\n", p_srs->symbMask );
        print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);
    }
    return (XRAN_STATUS_SUCCESS);
}

int32_t
xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
    /* update RACH for LTE */
    return xran_init_prach(pConf, p_xran_dev_ctx);
}

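/*
 * Populates the PRACH C-Plane configuration from the PRACH configuration
 * tables: the table row is selected by nPrachConfIdx (mmWave vs. sub-6 TDD/FDD),
 * then the per-slot occasion flags, symbol range, PRB allocation, and the x/y
 * frame periodicity used for PRACH C-Plane scheduling are derived from it.
 * Selected values are copied back into pPRACHConfig for the application.
 */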
int32_t
xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
    int32_t i;
    uint8_t slotNr;
    struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);
    const xRANPrachConfigTableStruct *pxRANPrachConfigTable;
    uint8_t nNumerology = pConf->frame_conf.nNumerology;
    uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);

    if (nNumerology > 2)
        pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
    else if (pConf->frame_conf.nFrameDuplexType == 1)
        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];
    else
        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];

    uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
    const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];
    memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));
    if(pConf->log_level)
        printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);

    pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC;         /* 3, PRACH preamble format A1~3, B1~4, C0, C2 */
    pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
    pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
    pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
    pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
    pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
    pPrachCPConfig->x = pxRANPrachConfigTable->x;
    pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;
    pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
    pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
    if (preambleFmrt >= FORMAT_A1)
    {
        pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
        pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
    }
    else
    {
        pPrachCPConfig->numSymbol = 1;
        pPrachCPConfig->occassionsInPrachSlot = 1;
    }

    if(pConf->log_level)
        printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);
    pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
    for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
    {
        slotNr = pxRANPrachConfigTable->slotNr[i];
        if (slotNr > 0){
            pPrachCPConfig->isPRACHslot[slotNr] = 1;
            if(pConf->log_level)
                printf(" %u ..", slotNr);
        }
    }
    if(pConf->log_level)
        printf("\n");
    for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){
        p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;
        p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
    }
    if(pConf->log_level){
        printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);
    }

    pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx);
    print_dbg("PRACH eAxC_offset %d\n",  pPrachCPConfig->eAxC_offset);

    /* Save some configs for the app */
    pPRACHConfig->startSymId    = pPrachCPConfig->startSymId;
    pPRACHConfig->lastSymId     = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
    pPRACHConfig->startPrbc     = pPrachCPConfig->startPrbc;
    pPRACHConfig->numPrbc       = pPrachCPConfig->numPrbc;
    pPRACHConfig->timeOffset    = pPrachCPConfig->timeOffset;
    pPRACHConfig->freqOffset    = pPrachCPConfig->freqOffset;
    pPRACHConfig->eAxC_offset   = pPrachCPConfig->eAxC_offset;

    return (XRAN_STATUS_SUCCESS);
}

uint32_t
xran_slotid_convert(uint16_t slot_id, uint16_t dir) /* dir = 0: from PHY slot id to xran spec slot id as defined in 5.3.2; dir = 1: from xran slot id to PHY slot id */
{
    return slot_id;
#if 0
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
    uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
    uint8_t FR = 1;
    if (mu > 2)
        FR=2;
    if (dir == 0)
    {
        if (FR == 1)
        {
            return (slot_id << (2-mu));
        }
        else
        {
            return (slot_id << (3-mu));
        }
    }
    else
    {
        if (FR == 1)
        {
            return (slot_id >> (2-mu));
        }
        else
        {
            return (slot_id >> (3-mu));
        }
    }
#endif
}

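/*
 * Per-symbol OTA tick handler, called from the timing thread whenever the OTA
 * symbol index advances. At symbol 0 of a slot it runs the per-TTI handler
 * (tti_ota_cb), then drives U-Plane TX processing and any callbacks registered
 * for this symbol. used_tick accumulates busy time for the usage statistics.
 */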
void
sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)
{
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    long t1 = MLogTick(), t2;
    long t3;

    if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){
        t3 = xran_tick();
        tti_ota_cb(NULL, (void*)p_xran_dev_ctx);
        *used_tick += get_ticks_diff(xran_tick(), t3);
    }

    t3 = xran_tick();
    if (xran_process_tx_sym(p_xran_dev_ctx))
    {
        *used_tick += get_ticks_diff(xran_tick(), t3);
    }

    /* check if there is a callback to do something else on this symbol */
    struct cb_elem_entry *cb_elm;
    LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){
        if(cb_elm){
            cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx);
            p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx);
        }
    }

    t2 = MLogTick();
    MLogTask(PID_SYM_OTA_CB, t1, t2);
}

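/*
 * Maps a job type to the lcore that should run it, based on the number of
 * configured workers: with no workers everything stays on the timing core;
 * with one worker all non-OTA jobs move to it; with 2..6 workers the OTA,
 * deadline and symbol jobs run on worker 0 while DL/UL C-Plane jobs follow
 * the per-job assignment in job2wrk_id[].
 */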
uint32_t
xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx)
{
    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
    uint32_t tim_lcore = 0;

    if(eth_ctx) {
        tim_lcore = eth_ctx->io_cfg.timing_core; /* default to the timing core */
        if(eth_ctx->num_workers == 0) { /* no workers */
            tim_lcore = eth_ctx->io_cfg.timing_core;
        } else if (eth_ctx->num_workers == 1) { /* one worker */
            switch (job_type_id)
            {
                case XRAN_JOB_TYPE_OTA_CB:
                    tim_lcore = eth_ctx->io_cfg.timing_core;
                    break;
                case XRAN_JOB_TYPE_CP_DL:
                case XRAN_JOB_TYPE_CP_UL:
                case XRAN_JOB_TYPE_DEADLINE:
                case XRAN_JOB_TYPE_SYM_CB:
                    tim_lcore = eth_ctx->worker_core[0];
                    break;
                default:
                    print_err("incorrect job type id %d\n", job_type_id);
                    tim_lcore = eth_ctx->io_cfg.timing_core;
                    break;
            }
        } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) {
            switch (job_type_id)
            {
                case XRAN_JOB_TYPE_OTA_CB:
                    tim_lcore = eth_ctx->worker_core[0];
                    break;
                case XRAN_JOB_TYPE_CP_DL:
                    tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]];
                    break;
                case XRAN_JOB_TYPE_CP_UL:
                    tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]];
                    break;
                case XRAN_JOB_TYPE_DEADLINE:
                case XRAN_JOB_TYPE_SYM_CB:
                    tim_lcore = eth_ctx->worker_core[0];
                    break;
                default:
                    print_err("incorrect job type id %d\n", job_type_id);
                    tim_lcore = eth_ctx->io_cfg.timing_core;
                    break;
            }
        } else {
            print_err("incorrect eth_ctx->num_workers %d\n", eth_ctx->num_workers);
            tim_lcore = eth_ctx->io_cfg.timing_core;
        }
    }

    return tim_lcore;
}

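/*
 * Per-TTI OTA handler, run at symbol 0 of every slot. It registers the slot
 * with MLog (port 0 only), publishes the TTI to process through the double-
 * buffered timer_ctx entries (the O-DU works one TTI ahead of OTA time, the
 * O-RU on the current TTI), arms the TTI-to-PHY timer when a TTI callback is
 * registered, and advances xran_lib_ota_tti with wrap at the maximum slot.
 */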
void
tti_ota_cb(struct rte_timer *tim, void *arg)
{
    uint32_t    frame_id    = 0;
    uint32_t    subframe_id = 0;
    uint32_t    slot_id     = 0;
    uint32_t    next_tti    = 0;

    uint32_t mlogVar[10];
    uint32_t mlogVarCnt = 0;
    uint64_t t1 = MLogTick();
    uint64_t t3 = 0;
    uint32_t reg_tti  = 0;
    uint32_t reg_sfn  = 0;
    uint32_t i;

    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;

    unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx);

    MLogTask(PID_TTI_TIMER, t1, MLogTick());

    if(p_xran_dev_ctx->xran_port_id == 0){
        /* To match TTbox */
        if(xran_lib_ota_tti[0] == 0)
            reg_tti = xran_fs_get_max_slot(PortId) - 1;
        else
            reg_tti = xran_lib_ota_tti[0] - 1;

        MLogIncrementCounter();
        reg_sfn = XranGetFrameNum(reg_tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us)) * 10 + XranGetSubFrameNum(reg_tti, SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);
        /* subframe and slot */
        MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us)));
        MLogMark(1, t1);
    }

    slot_id     = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local));
    subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
    frame_id    = XranGetFrameNum(xran_lib_ota_tti[PortId], xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));

    pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId];

    mlogVar[mlogVarCnt++] = 0x11111111;
    mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId];
    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId] / 14;
    mlogVar[mlogVarCnt++] = frame_id;
    mlogVar[mlogVarCnt++] = subframe_id;
    mlogVar[mlogVarCnt++] = slot_id;
    mlogVar[mlogVarCnt++] = 0;
    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());

    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU)
        next_tti = xran_lib_ota_tti[PortId] + 1;
    else{
        next_tti = xran_lib_ota_tti[PortId];
    }

    if(next_tti >= xran_fs_get_max_slot(PortId)){
        print_dbg("[%d]SFN %d sf %d slot %d\n", next_tti, frame_id, subframe_id, slot_id);
        next_tti = 0;
    }

    slot_id     = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
    subframe_id = XranGetSubFrameNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
    frame_id    = XranGetFrameNum(next_tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));

    print_dbg("[%d]SFN %d sf %d slot %d\n", next_tti, frame_id, subframe_id, slot_id);

    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){
        pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti;
    } else {
        pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
    }

    if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) {
        p_xran_dev_ctx->phy_tti_cb_done = 0;
        xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore);
    }
    /* the slot index is advanced to the next slot at the beginning of the current OTA slot */
    xran_lib_ota_tti[PortId]++;
    if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) {
        print_dbg("[%d]SFN %d sf %d slot %d\n", xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id);
        xran_lib_ota_tti[PortId] = 0;
    }
    MLogTask(PID_TTI_CB, t1, MLogTick());
}

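/*
 * DL C-Plane transmit callback: for the TTI published by tti_ota_cb, walks
 * every eAxC/CC pair, resets the section database for the current context,
 * and for DL slots generates and sends C-Plane section messages from the PRB
 * map attached to the TX buffer list. Active only after the first full frame
 * (first_call) and when C-Plane is enabled.
 */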
void
tx_cp_dl_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogTick();
    int tti, buf_id;
    uint32_t slot_id, subframe_id, frame_id;
    int cc_id;
    uint8_t ctx_id;
    uint8_t ant_id, num_eAxc, num_CCPorts;
    void *pHandle;
    int num_list;
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    if(!p_xran_dev_ctx)
    {
        print_err("Null xRAN context!!\n");
        return;
    }
    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
    pHandle     = p_xran_dev_ctx;

    num_eAxc    = xran_get_num_eAxc(pHandle);
    num_CCPorts = xran_get_num_cc(pHandle);

    if(first_call && p_xran_dev_ctx->enableCP) {

        tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
        buf_id = tti % XRAN_N_FE_BUF_LEN;

        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
        subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
        frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
        if (tti == 0){
            /* Wrap around to the next second */
            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
        }

        ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval_us_local)) % XRAN_MAX_SECTIONDB_CTX;

        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
            for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {
                /* start a new section information list */
                xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
                        if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
                            num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
                                (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
                                p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                        } else {
                            print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
                        }
                    } else {
                        print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
                    }
                } /* if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
        MLogTask(PID_CP_DL_CB, t1, MLogTick());
    }
}

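/*
 * Half-slot UL deadline callback: fires when the first half of the slot must
 * have been received. If no U-Plane packet has already triggered the per-CC
 * callback for this TTI (rx_packet_callback_tracker), the callback is invoked
 * here with symbol 0 to mark the half-slot point.
 */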
void
rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status;
    /* half of RX for the current TTI as measured against current OTA time */
    int32_t rx_tti;
    int32_t cc_id;
    uint32_t nFrameIdx;
    uint32_t nSubframeIdx;
    uint32_t nSlotIdx;
    uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;
    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
           + nSlotIdx;*/
    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
            if(p_xran_dev_ctx->pCallback[cc_id]) {
                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
                if(pTag) {
                    //pTag->cellId = cc_id;
                    pTag->slotiId = rx_tti;
                    pTag->symbol  = 0; /* symbol 0 marks the half-slot point (first 7 symbols) */
                    status = XRAN_STATUS_SUCCESS;

                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
                }
            }
        } else {
            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
        }
    }

    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
        }else{
            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
        }
    }

    MLogTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogTick());
}

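/*
 * Full-slot UL deadline callback: fires when the complete previous slot
 * (relative to OTA time) must have been received. Invokes the per-CC U-Plane,
 * PRACH and SRS callbacks with symbol 7 (full slot), then the user's
 * full-slot TTI callback if one is registered.
 */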
void
rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status = 0;
    int32_t rx_tti = 0; // = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    int32_t cc_id = 0;
    uint32_t nFrameIdx;
    uint32_t nSubframeIdx;
    uint32_t nSlotIdx;
    uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;

    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
        + nSubframeIdx*SLOTNUM_PER_SUBFRAME
        + nSlotIdx;*/
    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];

    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;
#if 1
    if(rx_tti == 0)
        rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id) - 1);
    else
        rx_tti -= 1; /* end of RX for the previous TTI as measured against current OTA time */
#endif
    /* U-Plane */
    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
        if(p_xran_dev_ctx->pCallback[cc_id]){
            struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
            if(pTag) {
                //pTag->cellId = cc_id;
                pTag->slotiId = rx_tti;
                pTag->symbol  = 7; /* symbol 7 means a full slot of symbols */
                status = XRAN_STATUS_SUCCESS;
                p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
            }
        }

        if(p_xran_dev_ctx->pPrachCallback[cc_id]){
            struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
            if(pTag) {
                //pTag->cellId = cc_id;
                pTag->slotiId = rx_tti;
                pTag->symbol  = 7; /* symbol 7 means a full slot of symbols */
                p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
            }
        }

        if(p_xran_dev_ctx->pSrsCallback[cc_id]){
            struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
            if(pTag) {
                //pTag->cellId = cc_id;
                pTag->slotiId = rx_tti;
                pTag->symbol  = 7; /* symbol 7 means a full slot of symbols */
                p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
            }
        }
    }

    /* user callbacks if any */
    if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){
        if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){
            p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]);
        }else{
            p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--;
        }
    }

    MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick());
}

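/*
 * User-requested per-symbol callback. sym_diff shifts the reported time base
 * relative to OTA time: a positive value indicates an event in the future
 * (TX advance), a negative one an event in the past (RX delay), with
 * wraparound at the per-second symbol count. Optionally fills an
 * xran_sense_of_time snapshot before calling the user handler.
 */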
void
rx_ul_user_sym_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogTick();
    struct xran_device_ctx * p_dev_ctx = NULL;
    struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg;
    xran_status_t status = 0;
    int32_t rx_tti = 0; //(int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    int32_t cc_id = 0;
    uint32_t nFrameIdx;
    uint32_t nSubframeIdx;
    uint32_t nSlotIdx;
    uint64_t nSecond;
    uint32_t interval, ota_sym_idx = 0;
    uint8_t nNumerology = 0;
    struct xran_timer_ctx* p_timer_ctx = NULL;

    if(p_sym_cb_ctx->p_dev)
        p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev;
    else
        rte_panic("p_sym_cb_ctx->p_dev == NULL");

    if(p_dev_ctx->xran2phy_mem_ready == 0)
        return;
    nNumerology = xran_get_conf_numerology(p_dev_ctx);
    interval = p_dev_ctx->interval_us_local;

    p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX];
    if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX)
        p_sym_cb_ctx->user_timer_get = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    if( p_sym_cb_ctx->sym_diff > 0)
        /* + advance TX window: at OTA time we indicate an event in the future */
        ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology));
    else if (p_sym_cb_ctx->sym_diff < 0) {
        /* - delay RX window: at OTA time we indicate an event in the past */
        if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) {
            ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff;
        } else {
            ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology);
        }
    } else /* 0 - exact OTA time */
        ota_sym_idx = p_timer_ctx->ota_sym_idx;

    rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);

    if(p_sym_cb_ctx->symCbTimeInfo) {
        struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo;
        p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id;
        p_sense_time->nSymIdx       = p_sym_cb_ctx->symb_num_req;
        p_sense_time->tti_counter   = rx_tti;
        p_sense_time->nSlotIdx      = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval));
        p_sense_time->nSubframeIdx  = (uint32_t)XranGetSubFrameNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
        p_sense_time->nFrameIdx     = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start, SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
        p_sense_time->nSecond       = p_timer_ctx->current_second;
    }

    /* user callbacks if any */
    if(p_sym_cb_ctx->symCb){
        p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo);
    }

    MLogTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogTick());
}

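/*
 * UL C-Plane transmit callback: sends section messages for UL slots per
 * eAxC/CC and, when PRACH is enabled and the frame/slot matches the x/y
 * periodicity from the PRACH configuration, generates one C-Plane PRACH
 * message per occasion on the PRACH eAxC ports (ant_id + num_eAxc).
 */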
void
tx_cp_ul_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogTick();
    int tti, buf_id;
    int ret;
    uint32_t slot_id, subframe_id, frame_id;
    int32_t cc_id;
    int ant_id, prach_port_id;
    uint16_t occasionid;
    uint16_t beam_id;
    uint8_t num_eAxc, num_CCPorts;
    uint8_t ctx_id;

    void *pHandle;
    int num_list;

    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    if(!p_xran_dev_ctx)
    {
        print_err("Null xRAN context!!\n");
        return;
    }
    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
    struct xran_timer_ctx *pTCtx =  &p_xran_dev_ctx->timer_ctx[0];
    uint32_t interval = p_xran_dev_ctx->interval_us_local;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;

    tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
    buf_id = tti % XRAN_N_FE_BUF_LEN;
    slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
    subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
    frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
    if (tti == 0) {
        /* Wrap around to the next second */
        frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
    }
    ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval)) % XRAN_MAX_SECTIONDB_CTX;

    pHandle = p_xran_dev_ctx;
    if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
        num_eAxc    = xran_get_num_eAxc(pHandle);
    else
        num_eAxc    = xran_get_num_eAxcUl(pHandle);
    num_CCPorts = xran_get_num_cc(pHandle);

    if(first_call && p_xran_dev_ctx->enableCP) {

        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);

        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
            for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1
                /*  || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) == 1*/ ) {
                    /* start a new section information list */
                    xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
                    if(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers){
                        if(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
                            num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
                                (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
                                p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                        }
                    }
                }
            }
        }

        if(p_xran_dev_ctx->enablePrach) {
            uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
            if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0]) && (is_prach_slot == 1)) {   /* PRACH slot */
                for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
                    for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
                        for (occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++) {
                            struct xran_cp_gen_params params;
                            struct xran_section_gen_info sect_geninfo[8];
                            struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
                            prach_port_id = ant_id + num_eAxc;
                            /* start a new section information list */
                            xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, ctx_id);

                            beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id);
                            ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
                                        frame_id, subframe_id, slot_id,
                                        beam_id, cc_id, prach_port_id, occasionid,
                                        xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));
                            if (ret == XRAN_STATUS_SUCCESS)
                                send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
                                    cc_id, prach_port_id, xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));
                        }
                    }
                }
            }
        }
    } /* if(first_call && p_xran_dev_ctx->enableCP) */

    MLogTask(PID_CP_UL_CB, t1, MLogTick());
}

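/*
 * Timer callback that delivers the TTI indication to L1. Until first_call is
 * set, the indication is suppressed; first_call is raised when OTA time
 * reaches the last slot of frame xran_max_frame, presumably so that L1 starts
 * processing on a clean frame boundary.
 */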
void
tti_to_phy_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    uint32_t interval = p_xran_dev_ctx->interval_us_local;

    p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called the CB */
    if (first_call){
        if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
            if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){
                p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);
            }else{
                p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;
            }
        }
    } else {
        if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
            int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT);
            uint32_t slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
            uint32_t subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
            uint32_t frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
            if((frame_id == xran_max_frame) && (subframe_id == 9) && (slot_id == SLOTNUM_PER_SUBFRAME(interval) - 1)) {  /* (tti == xran_fs_get_max_slot() - 1) */
                first_call = 1;
            }
        }
    }

    MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
}

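/*
 * Timing source thread: pinned to the configured timing core with SCHED_FIFO
 * priority, it aligns its start to a second boundary (ToS) and then busy-polls
 * poll_next_tick() at one-symbol granularity, running sym_ota_cb for every
 * port whose OTA symbol index advanced. When one-way delay measurement is
 * enabled, the timing callbacks are created only after the measurement
 * completes and the timing windows have been adjusted.
 */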
int32_t
xran_timing_source_thread(void *args)
{
    int res = 0;
    cpu_set_t cpuset;
    int32_t   do_reset = 0;
    uint64_t  t1 = 0;
    uint64_t  delta;
    int32_t   result1,i,j;

    uint32_t xran_port_id = 0;
    static int owdm_init_done = 0;

    struct sched_param sched_param;
    struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args;
    uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;
    struct cb_elem_entry * cb_elm = NULL;

    struct xran_device_ctx * p_dev_ctx_run = NULL;
    /* ToS = Top of Second start +- 1.5us */
    struct timespec ts;
    char thread_name[32];
    char buff[100];

    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());
    memset(&sched_param, 0, sizeof(struct sched_param));
    /* pin the timing thread to the configured timing core */
    sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
    CPU_ZERO(&cpuset);
    CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);

    if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)))
    {
        printf("pthread_setaffinity_np failed: coreId = %d, result1 = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
    }
    if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
    {
        printf("priority is not changed: coreId = %d, result1 = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
    }

    snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id());
    if ((res = pthread_setname_np(pthread_self(), thread_name))) {
        printf("[core %d] pthread_setname_np = %d\n", rte_lcore_id(), res);
    }

    printf("TTI interval %ld [us]\n", interval_us);

    if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) {
        if ((res = xran_timing_create_cbs(args)) < 0){
            return res;
        }
    }

    do {
        timespec_get(&ts, TIME_UTC);
    } while (ts.tv_nsec > 1500);

    struct tm * ptm = gmtime(&ts.tv_sec);
    if(ptm){
        strftime(buff, sizeof buff, "%D %T", ptm);
        printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n",
            (p_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
    }

    do {
        timespec_get(&ts, TIME_UTC);
    } while (ts.tv_nsec == 0);

    p_dev_ctx->timing_source_thread_running = 1;
    while(1) {

        /* Check if owdm finished, to create the timing cbs based on measurement results */
        if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) && (!owdm_init_done) && unlikely(XRAN_RUNNING == xran_if_current_state)) {
            /* Adjust windows based on delay measurement results */
            xran_adjust_timing_parameters(p_dev_ctx);
            if ((res = xran_timing_create_cbs(args)) < 0){
                return res;
            }
            printf("TTI interval %ld [us]\n", interval_us);
            owdm_init_done = 1;
        }

        /* Update usage stats */
        tWake = xran_tick();
        xran_used_tick += tUsed;
        if (tWakePrev)
        {
            xran_total_tick += get_ticks_diff(tWake, tWakePrev);
        }
        tWakePrev = tWake;
        tUsed = 0;

        delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);
        if (XRAN_STOPPED == xran_if_current_state)
            break;

        if (likely(XRAN_RUNNING == xran_if_current_state)) {
            for(xran_port_id = 0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) {
                p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id);
                if(p_dev_ctx_run) {
                    if(p_dev_ctx_run->xran_port_id == xran_port_id) {
                        if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id])
                        {
                            sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed);
                            xran_lib_ota_sym[xran_port_id]++;
                            if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT)
                                xran_lib_ota_sym[xran_port_id] = 0;
                        }
                    }
                    else {
                        rte_panic("incorrect p_dev_ctx_run->xran_port_id for xran_port_id");
                    }
                }
            }
        }
    }

    xran_timing_destroy_cbs(args);
    printf("Closing timing source thread...\n");
    return res;
}

/* Handle the eCPRI ethertype. */
#define MBUFS_CNT 16

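/*
 * RX entry point for the eCPRI ethertype. Packets of a burst are sorted by
 * eCPRI message type (IQ data, real-time control, delay measurement) and
 * dispatched to the corresponding processors. IQ data takes a batched path
 * when a full burst of MBUFS_CNT packets is available (Category B only);
 * per-packet return codes decide whether each mbuf is freed here.
 */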
int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num)
{
    struct rte_mbuf* pkt, * pkt0;
    uint16_t i;
    struct rte_ether_hdr* eth_hdr;
    struct xran_ecpri_hdr* ecpri_hdr;
    union xran_ecpri_cmn_hdr* ecpri_cmn;
    unsigned long t1;
    int32_t ret = MBUF_FREE;
    uint32_t ret_data[MBUFS_CNT] = { MBUFS_CNT * MBUF_FREE };
    struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id);
    uint16_t num_data = 0, num_control = 0, num_meas = 0;
    struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT];
    static uint32_t owdm_rx_first_pass = 1;

    if (p_dev_ctx == NULL)
        return ret;

    for (i = 0; i < num; i++)
    {
        pkt = pkt_q[i];

//        rte_prefetch0(rte_pktmbuf_mtod(pkt, void*));

        rte_pktmbuf_adj(pkt, sizeof(*eth_hdr));
        ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);

        p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);

        pkt_adj[i] = pkt;
        switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type)
        {
            case ECPRI_IQ_DATA:
                pkt_data[num_data++] = pkt;
                break;
            /* For RU emulation */
            case ECPRI_RT_CONTROL_DATA:
                pkt_control[num_control++] = pkt;
                break;
            case ECPRI_DELAY_MEASUREMENT:
                if (owdm_rx_first_pass != 0)
                {
                    /* Initialize and verify that the payload length is in range */
                    xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx);
                    owdm_rx_first_pass = 0;
                }
                pkt_meas[num_meas++] = pkt;
                break;
            default:
                if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
                    print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
                }
                break;
        }
    }

    if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* w/a for Cat A issue */
    {
        for (i = 0; i < MBUFS_CNT; i++)
        {
            ret_data[i] = MBUF_FREE;
        }

        if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
        {
            if (p_dev_ctx->xran2phy_mem_ready != 0)
                ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data);
            for (i = 0; i < MBUFS_CNT; i++)
            {
                if (ret_data[i] == MBUF_FREE)
                    rte_pktmbuf_free(pkt_data[i]);
            }
        }
        else
        {
            for (i = 0; i < MBUFS_CNT; i++)
            {
                if (ret_data[i] == MBUF_FREE)
                    rte_pktmbuf_free(pkt_data[i]);
            }
            print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
        }
    }
    else
    {
        for (i = 0; i < num_data; i++)
        {
            ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid);
            if (ret == MBUF_FREE)
                rte_pktmbuf_free(pkt_data[i]);
        }

        for (i = 0; i < num_control; i++)
        {
            t1 = MLogTick();
            if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
            {
                ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
                p_dev_ctx->fh_counters.rx_counter++;
                if (ret == MBUF_FREE)
                    rte_pktmbuf_free(pkt_control[i]);
            }
            else
            {
                print_err("O-DU received a C-Plane message!");
            }
            MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick());
        }

        for (i = 0; i < num_meas; i++)
        {
            t1 = MLogTick();
            ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id);
//            printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64"\n", xport_id, (int64_t*)p_dev_ctx);
            if (ret == MBUF_FREE)
                rte_pktmbuf_free(pkt_meas[i]);
            MLogTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogTick());
        }
    }

    return MBUF_FREE;
}

int32_t
xran_packet_and_dpdk_timer_thread(void *args)
{
    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();

    uint64_t prev_tsc = 0;
    uint64_t cur_tsc = rte_rdtsc();
    uint64_t diff_tsc = cur_tsc - prev_tsc;
    cpu_set_t cpuset;
    struct sched_param sched_param;
    int res = 0;
    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());

    memset(&sched_param, 0, sizeof(struct sched_param));
    sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;

    if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
    {
        printf("priority is not changed: coreId = %d, res = %d\n", rte_lcore_id(), res);
    }

    while(1){

        cur_tsc  = rte_rdtsc();
        diff_tsc = cur_tsc - prev_tsc;
        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
            rte_timer_manage();
            prev_tsc = cur_tsc;
        }

        if (XRAN_STOPPED == xran_if_current_state)
            break;
    }

    printf("Closing pkts timer thread...\n");
    return 0;
}

void xran_initialize_ecpri_owd_meas_cmn(struct xran_io_cfg *ptr)
{
//    ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0
//    ptr->eowd_cmn.filterType = 0;  // 0 Simple average based on number of measurements
    /* Set default values if the timeout and numberOfSamples are not set */
    if ( ptr->eowd_cmn[ptr->id].responseTo == 0)
        ptr->eowd_cmn[ptr->id].responseTo = 10E6; /* 10 ms timeout expressed in ns */
    if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0)
        ptr->eowd_cmn[ptr->id].numberOfSamples = 8; /* number of samples to be averaged */
}

void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr )
{
   /* This function initializes one-way delay measurements on a per-port basis;
      most variables default to zero */
   ptr->eowd_port[ptr->id][i].portid = (uint8_t)i;
}

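/*
 * Library entry point: validates xran_ports, allocates and initializes the
 * per-port device contexts, applies I/O defaults (one Ethernet line, 25G,
 * one RX queue), initializes DPDK I/O with the O-DU/O-RU MAC addresses
 * ordered according to the local role, sets up the DPDK timers and
 * per-symbol callback lists, and prepares eCPRI one-way delay measurement
 * state for every VF.
 */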
1207 int32_t
1208 xran_init(int argc, char *argv[],
1209            struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
1210 {
1211     int32_t ret = XRAN_STATUS_SUCCESS;
1212     int32_t i;
1213     int32_t j;
1214     int32_t o_xu_id = 0;
1215
1216     struct xran_io_cfg      *p_io_cfg       = NULL;
1217     struct xran_device_ctx * p_xran_dev_ctx = NULL;
1218
1219     int32_t  lcore_id = 0;
1220     char filename[64];
1221
1222     const char *version = rte_version();
1223
1224     if (version == NULL)
1225         rte_panic("version == NULL");
1226
1227     printf("'%s'\n", version);
1228
1229     if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) {
1230         ret = XRAN_STATUS_INVALID_PARAM;
1231         print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret);
1232         return ret;
1233     }
1234
1235     p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
1236
1237     if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) {
1238         print_err("context allocation error [%d]\n", ret);
1239         return ret;
1240     }
1241
1242     for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1243         p_xran_dev_ctx  = xran_dev_get_ctx_by_id(o_xu_id);
1244     memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
1245         p_xran_dev_ctx->xran_port_id  = o_xu_id;
1246
1247     /* copy init */
1248     p_xran_dev_ctx->fh_init = *p_xran_fh_init;
1249     printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
1250
1251     memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
1252     /* To make sure to set default functions */
1253     p_xran_dev_ctx->send_upmbuf2ring    = NULL;
1254     p_xran_dev_ctx->send_cpmbuf2ring    = NULL;
1255         // Ecpri initialization for One Way delay measurements common variables to default values
1256         xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
1257     }
1258
1259     /* default values if not set */
1260     if(p_io_cfg->nEthLinePerPort == 0)
1261         p_io_cfg->nEthLinePerPort = 1;
1262
1263     if(p_io_cfg->nEthLineSpeed == 0)
1264         p_io_cfg->nEthLineSpeed = 25;
1265
1266     /** at least 1 RX Q */
1267     if(p_io_cfg->num_rxq == 0)
1268         p_io_cfg->num_rxq = 1;
1269
1270     if (p_io_cfg->id == 1) {
1271         /* 1 HW for O-RU */
1272         p_io_cfg->num_rxq =  1;
1273     }
1274
1275 #if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
1276     if (p_io_cfg->num_rxq > 1){
1277         p_io_cfg->num_rxq =  1;
1278         printf("%s does support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
1279     }
1280 #endif
1281     printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
1282     printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
1283     printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
1284
1285     if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane)  != p_io_cfg->num_vfs) {
1286         print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. [provided %d]\n",
1287             p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort,
1288             p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs);
1289     }
1290
1291     xran_if_current_state = XRAN_INIT;
1292     xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
1293     if (p_io_cfg->id == 0)
1294         xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1295                            p_io_cfg,
1296                            &lcore_id,
1297                            (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1298                            (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1299                            p_xran_dev_ctx->fh_init.mtu);
1300     else
1301         xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1302                            p_io_cfg,
1303                            &lcore_id,
1304                            (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1305                            (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1306                            p_xran_dev_ctx->fh_init.mtu);
1307
1308     for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1309         p_xran_dev_ctx  = xran_dev_get_ctx_by_id(o_xu_id);
1310
1311         for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ )
1312             rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]);
1313
1314         rte_timer_init(&p_xran_dev_ctx->sym_timer);
1315         for (i = 0; i < MAX_NUM_OF_DPDK_TIMERS; i++)
1316             rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]);
1317
1318         p_xran_dev_ctx->direct_pool   = socket_direct_pool;
1319         p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
1320
1321
1322         for (j = 0; j < XRAN_NUM_OF_SYMBOL_PER_SLOT; j++) {
1323             LIST_INIT(&p_xran_dev_ctx->sym_cb_list_head[j]);
1324         }
1325
1326     }
1327
1328     for (i = 0; i < XRAN_PORTS_NUM; i++) {
1329         for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++) {
1330             xran_fs_clear_slot_type(i, nCellIdx);
1331         }
1332     }
1333
1334     *pXranLayerHandle = xran_dev_get_ctx();
1335
1336
1337     /* The eCPRI initialization loop needs to be done per PF and VF (outer loop over PFs, inner loop over VFs) */
1338     for (i=0;  i< p_io_cfg->num_vfs; i++)
1339     {
1340         /* Initialize eCPRI one-way delay measurement info on a per-VF-port basis */
1341         xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg);
1342     }
1343
1344     return ret;
1345 }
1346
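/**
 * Allocate per-sector (CC) instance handles for the given xRAN port.
 * Each handle is a 64-byte-aligned XranSectorHandleInfo that records the CC
 * index and the owning port id; pointers are kept in the global
 * pLibInstanceHandles table and returned through pSectorInstanceHandles.
 */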
1347 int32_t
1348 xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances,
1349                xran_cc_handle_t * pSectorInstanceHandles)
1350 {
1351     xran_status_t nStatus = XRAN_STATUS_FAIL;
1352     struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
1353     XranSectorHandleInfo *pCcHandle = NULL;
1354     int32_t i = 0;
1355
1356     pDev += xran_port;
1357
1358     /* Check for the Valid Parameters */
1359     CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
1360
1361     if (!nNumInstances) {
1362         print_dbg("No instances requested (nNumInstances == 0)\n");
1363         return XRAN_STATUS_INVALID_PARAM;
1364     }
1365
1366     for (i = 0; i < nNumInstances; i++) {
1367
1368         /* Allocate Memory for CC handles */
1369         pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);
1370
1371         if(pCcHandle == NULL)
1372             return XRAN_STATUS_RESOURCE;
1373
1374         memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));
1375
1376         pCcHandle->nIndex    = i;
1377         pCcHandle->nXranPort = pDev->xran_port_id;
1378
1379         printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);
1380         pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;
1381
1382         printf("Handle: %p Instance: %p\n",
1383             &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);
1384     }
1385
1386     return XRAN_STATUS_SUCCESS;
1387 }
1388
1389
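/**
 * Register the PHY buffer lists for DL/UL U-plane and C-plane transfers on
 * one CC. For every TTI buffer index and antenna the corresponding
 * BbuIoBufCtrl entry is reset and, when the caller supplied a buffer list,
 * replaced with it (cleared otherwise). The transport callback and tag are
 * stored per CC and xran2phy_mem_ready is set once configuration completes.
 */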
1390 int32_t
1391 xran_5g_fronthault_config (void * pHandle,
1392                 struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1393                 struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1394                 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1395                 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1396                 xran_transport_callback_fn pCallback,
1397                 void *pCallbackTag)
1398 {
1399     int j, i = 0, z;
1400     XranSectorHandleInfo* pXranCc = NULL;
1401     struct xran_device_ctx * p_xran_dev_ctx = NULL;
1402
1403     if(NULL == pHandle) {
1404         printf("Handle is NULL!\n");
1405         return XRAN_STATUS_FAIL;
1406     }
1407
1408     pXranCc = (XranSectorHandleInfo*) pHandle;
1409     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1410     if (p_xran_dev_ctx == NULL) {
1411         printf ("p_xran_dev_ctx is NULL\n");
1412         return XRAN_STATUS_FAIL;
1413     }
1414
1415     i = pXranCc->nIndex;
1416
1417     for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
1418         for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
1419             /* U-plane TX */
1420
1421             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
1422             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1423             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1424             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1425             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1426             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
1427
1428             if(pSrcBuffer[z][j])
1429                 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcBuffer[z][j];
1430             else
1431                 memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j]));
1432
1433
1434             /* C-plane TX */
1435             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1436             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1437             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1438             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1439             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1440             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
1441
1442             if(pSrcCpBuffer[z][j])
1443                 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcCpBuffer[z][j];
1444             else
1445                 memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j]));
1446             /* U-plane RX */
1447
1448             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
1449             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1450             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1451             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1452             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1453             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
1454
1455             if(pDstBuffer[z][j])
1456                 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList =   *pDstBuffer[z][j];
1457             else
1458                 memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1459
1460
1461             /* C-plane RX */
1462             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1463             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1464             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1465             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1466             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1467             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
1468
1469             if(pDstCpBuffer[z][j])
1470                 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];
1471             else
1472                 memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
1473
1474         }
1475     }
1476
1477     p_xran_dev_ctx->pCallback[i]    = pCallback;
1478     p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;
1479     print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
1480         p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]);
1481
1482     p_xran_dev_ctx->xran2phy_mem_ready = 1;
1483
1484     return XRAN_STATUS_SUCCESS;
1485 }
1486
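/**
 * Register the PHY buffer lists and the callback used to deliver PRACH data
 * received from the O-RU, including the buffers that hold the decompressed
 * PRACH output (sFHPrachRxBbuIoBufCtrlDecomp).
 */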
1487 int32_t
1488 xran_5g_prach_req (void *  pHandle,
1489                 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1490                 struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],                
1491                 xran_transport_callback_fn pCallback,
1492                 void *pCallbackTag)
1493 {
1494     int j, i = 0, z;
1495     XranSectorHandleInfo* pXranCc = NULL;
1496     struct xran_device_ctx * p_xran_dev_ctx = NULL;
1497
1498     if(NULL == pHandle) {
1499         printf("Handle is NULL!\n");
1500         return XRAN_STATUS_FAIL;
1501     }
1502
1503     pXranCc = (XranSectorHandleInfo*) pHandle;
1504     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1505     if (p_xran_dev_ctx == NULL) {
1506         printf ("p_xran_dev_ctx is NULL\n");
1507         return XRAN_STATUS_FAIL;
1508     }
1509
1510     i = pXranCc->nIndex;
1511
1512     for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
1513         for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
1514             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
1515             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1516             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1517             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1518             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; /* number of antennas */
1519             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
1520             if(pDstBuffer[z][j])
1521                 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
1522             else
1523                 memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1524
1525             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0];
1526             if(pDstBufferDecomp[z][j])
1527                 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j];
1528
1529         }
1530     }
1531
1532     p_xran_dev_ctx->pPrachCallback[i]    = pCallback;
1533     p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
1534
1535     print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
1536         p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]);
1537
1538     return XRAN_STATUS_SUCCESS;
1539 }
1540
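/**
 * Register the PHY buffer lists and the callback for SRS reception, covering
 * both the U-plane data buffers and the C-plane PRB map buffers for each
 * antenna array element.
 */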
1541 int32_t
1542 xran_5g_srs_req (void *  pHandle,
1543                 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
1544                 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
1545                 xran_transport_callback_fn pCallback,
1546                 void *pCallbackTag)
1547 {
1548     int j, i = 0, z;
1549     XranSectorHandleInfo* pXranCc = NULL;
1550     struct xran_device_ctx * p_xran_dev_ctx = NULL;
1551
1552     if(NULL == pHandle) {
1553         printf("Handle is NULL!\n");
1554         return XRAN_STATUS_FAIL;
1555     }
1556
1557     pXranCc = (XranSectorHandleInfo*) pHandle;
1558     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1559     if (p_xran_dev_ctx == NULL) {
1560         printf ("p_xran_dev_ctx is NULL\n");
1561         return XRAN_STATUS_FAIL;
1562     }
1563
1564     i = pXranCc->nIndex;
1565
1566     for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
1567         for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){
1568             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
1569             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1570             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1571             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1572             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; /* number of antenna array elements */
1573             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
1574             if(pDstBuffer[z][j])
1575                 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
1576             else
1577                 memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1578
1579             /* C-plane SRS */
1580             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1581             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1582             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1583             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1584             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1585             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z];
1586
1587             if(pDstCpBuffer[z][j])
1588                 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];
1589             else
1590                 memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
1591
1592         }
1593     }
1594
1595     p_xran_dev_ctx->pSrsCallback[i]    = pCallback;
1596     p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
1597
1598     print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
1599         p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]);
1600
1601     return XRAN_STATUS_SUCCESS;
1602 }
1603
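/**
 * Report timing-core usage counters: the total and busy tick counts plus the
 * number and ids of the cores used by the library. When clear is non-zero the
 * tick counters are reset after being read.
 *
 * Illustrative usage (hypothetical application code, not part of the API):
 *     uint64_t total, used;
 *     uint32_t ncores, cores[64];
 *     xran_get_time_stats(&total, &used, &ncores, cores, 1);
 *     double load_pct = total ? 100.0 * used / total : 0.0;
 */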
1604 uint32_t
1605 xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear)
1606 {
1607     uint32_t i;
1608
1609     *num_core_used = xran_num_cores_used;
1610     for (i = 0; i < xran_num_cores_used; i++)
1611     {
1612         core_used[i] = xran_core_used[i];
1613     }
1614
1615     *total_time = xran_total_tick;
1616     *used_time = xran_used_tick;
1617
1618     if (clear)
1619     {
1620         xran_total_tick = 0;
1621         xran_used_tick = 0;
1622     }
1623
1624     return 0;
1625 }
1626
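/**
 * Advance dst past the mbuf headroom and the eCPRI plus C-plane section-1
 * radio application headers, then align the result up to a 64-byte boundary.
 */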
1627 uint8_t*
1628 xran_add_cp_hdr_offset(uint8_t  *dst)
1629 {
1630     dst += (RTE_PKTMBUF_HEADROOM +
1631             sizeof(struct xran_ecpri_hdr) +
1632             sizeof(struct xran_cp_radioapp_section1_header) +
1633             sizeof(struct xran_cp_radioapp_section1));
1634
1635     dst = RTE_PTR_ALIGN_CEIL(dst, 64);
1636
1637     return dst;
1638 }
1639
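/**
 * Advance dst past the mbuf headroom and the eCPRI, radio application and
 * data section headers of a U-plane packet; when compMethod indicates
 * compression the compression header is skipped as well. The result is
 * aligned up to a 64-byte boundary.
 */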
1640 uint8_t*
1641 xran_add_hdr_offset(uint8_t  *dst, int16_t compMethod)
1642 {
1643     dst+= (RTE_PKTMBUF_HEADROOM +
1644           sizeof (struct xran_ecpri_hdr) +
1645           sizeof (struct radio_app_common_hdr) +
1646           sizeof(struct data_section_hdr));
1647     if(compMethod != XRAN_COMPMETHOD_NONE)
1648           dst += sizeof (struct data_section_compression_hdr);
1649     dst = RTE_PTR_ALIGN_CEIL(dst, 64);
1650
1651     return dst;
1652 }
1653
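/**
 * Drain up to 16 CP/UP TX descriptors from the given ring and generate the
 * corresponding packets via xran_process_tx_sym_cp_on_opt(); each descriptor
 * is freed after processing. Returns the number of entries still left in the
 * ring, or -1 if the library was stopped mid-burst.
 */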
1654 int32_t
1655 xran_pkt_gen_process_ring(struct rte_ring *r)
1656 {
1657     assert(r);
1658     int32_t     retval = 0;
1659     struct rte_mbuf *mbufs[16];
1660     int i;
1661     uint32_t remaining;
1662     uint64_t t1;
1663     struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
1664     const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
1665         RTE_DIM(mbufs), &remaining);
1666
1667     if (!dequeued)
1668         return 0;
1669
1670     t1 = MLogTick();
1671     for (i = 0; i < dequeued; ++i) {
1672         struct cp_up_tx_desc * p_tx_desc =  (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i],  struct cp_up_tx_desc *);
1673         retval = xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
1674                                         p_tx_desc->ctx_id,
1675                                         p_tx_desc->tti,
1676                                         p_tx_desc->cc_id,
1677                                         p_tx_desc->ant_id,
1678                                         p_tx_desc->frame_id,
1679                                         p_tx_desc->subframe_id,
1680                                         p_tx_desc->slot_id,
1681                                         p_tx_desc->sym_id,
1682                                         (enum xran_comp_hdr_type)p_tx_desc->compType,
1683                                         (enum xran_pkt_dir) p_tx_desc->direction,
1684                                         p_tx_desc->xran_port_id,
1685                                         (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
1686
1687         xran_pkt_gen_desc_free(p_tx_desc);
1688         if (XRAN_STOPPED == xran_if_current_state){
1689             MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
1690             return -1;
1691         }
1692     }
1693
1694     if(p_io_cfg->io_sleep)
1695        nanosleep(&sleeptime,NULL);
1696
1697     MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
1698
1699     return remaining;
1700 }
1701
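/**
 * Worker loop body: service the DPDK timers, then drain the DL packet
 * generation ring of every port selected by the bitmask passed in args.
 */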
1702 int32_t
1703 xran_dl_pkt_ring_processing_func(void* args)
1704 {
1705     struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1706     uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
1707     uint16_t current_port;
1708
1709     rte_timer_manage();
1710
1711     for (current_port = 0; current_port < XRAN_PORTS_NUM;  current_port++) {
1712         if( xran_port_mask & (1<<current_port)) {
1713             xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
1714         }
1715     }
1716
1717     if (XRAN_STOPPED == xran_if_current_state)
1718         return -1;
1719
1720     return 0;
1721 }
1722
1723 /** Function that services DPDK timers */
1724 int32_t
1725 xran_processing_timer_only_func(void* args)
1726 {
1727     rte_timer_manage();
1728     if (XRAN_STOPPED == xran_if_current_state)
1729         return -1;
1730
1731     return 0;
1732 }
1733
1734 /** Function that parses RX packets on all ports and performs TX and RX on the ETH device */
1735 int32_t
1736 xran_all_tasks(void* arg)
1737 {
1739     ring_processing_func(arg);
1740     process_dpdk_io(arg);
1741     return 0;
1742 }
1743
1744 /** Function to perform TX and RX on the ETH device */
1745 int32_t
1746 xran_eth_trx_tasks(void* arg)
1747 {
1748     process_dpdk_io(arg);
1749     return 0;
1750 }
1751
1752 /** Function to perform RX on the ETH device */
1753 int32_t
1754 xran_eth_rx_tasks(void* arg)
1755 {
1756     process_dpdk_io_rx(arg);
1757     return 0;
1758 }
1759
1760 /** Function to process O-RAN FH packets per port */
1761 int32_t
1762 ring_processing_func_per_port(void* args)
1763 {
1764     struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1766     int32_t i;
1768     uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
1769     queueid_t qi;
1770
1771     for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
1772         if (ctx->vf2xran_port[i] == port_id) {
1773             for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
1774                 if (process_ring(ctx->rx_ring[i][qi], i, qi))
1775                     return 0;
1776             }
1777         }
1778     }
1779
1780     if (XRAN_STOPPED == xran_if_current_state)
1781         return -1;
1782
1783     return 0;
1784 }
1785
1786 /** Function that generates the worker-thread configuration and creates the threads based on the scenario and the platform in use */
1787 int32_t
1788 xran_spawn_workers(void)
1789 {
1790     uint64_t nWorkerCore = 1LL;
1791     uint32_t coreNum     = sysconf(_SC_NPROCESSORS_CONF);
1792     int32_t  i = 0;
1793     uint32_t total_num_cores  = 1; /*start with timing core */
1794     uint32_t worker_num_cores = 0;
1795     uint32_t icx_cpu = 0;
1796     int32_t core_map[2*sizeof(uint64_t)*8];
1797     uint32_t xran_port_mask = 0;
1798
1799     struct xran_ethdi_ctx  *eth_ctx   = xran_ethdi_get_ctx();
1800     struct xran_device_ctx *p_dev     = NULL;
1801     struct xran_fh_init    *fh_init   = NULL;
1802     struct xran_fh_config  *fh_cfg    = NULL;
1803     struct xran_worker_th_ctx* pThCtx = NULL;
1804
1805     p_dev =  xran_dev_get_ctx_by_id(0);
1806     if(p_dev == NULL) {
1807         print_err("p_dev\n");
1808         return XRAN_STATUS_FAIL;
1809     }
1810
1811     fh_init = &p_dev->fh_init;
1812     if(fh_init == NULL) {
1813         print_err("fh_init\n");
1814         return XRAN_STATUS_FAIL;
1815     }
1816
1817     fh_cfg = &p_dev->fh_cfg;
1818     if(fh_cfg == NULL) {
1819         print_err("fh_cfg\n");
1820         return XRAN_STATUS_FAIL;
1821     }
1822
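    /* Build core_map[] from the worker-core bitmasks: bits of pkt_proc_core
     * select cores 0..63 and bits of pkt_proc_core_64_127 select cores
     * 64..127; total_num_cores counts the timing core plus all workers. */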
1823     for (i = 0; i < coreNum && i < 64; i++) {
1824         if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
1825             core_map[worker_num_cores++] = i;
1826             total_num_cores++;
1827         }
1828         nWorkerCore = nWorkerCore << 1;
1829     }
1830
1831     nWorkerCore = 1LL;
1832     for (i = 64; i < coreNum && i < 128; i++) {
1833         if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
1834             core_map[worker_num_cores++] = i;
1835             total_num_cores++;
1836         }
1837         nWorkerCore = nWorkerCore << 1;
1838     }
1839
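    /* Detect AVX-512 IFMA support (ICC intrinsic); the result is used below
     * as the icx_cpu indicator that gates the larger worker configurations. */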
1840     extern int _may_i_use_cpu_feature(unsigned __int64);
1841     icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
1842
1843     printf("O-XU      %d\n", eth_ctx->io_cfg.id);
1844     printf("HW        %d\n", icx_cpu);
1845     printf("Num cores %d\n", total_num_cores);
1846     printf("Num ports %d\n", fh_init->xran_ports);
1847     printf("O-RU Cat  %d\n", fh_cfg->ru_conf.xranCat);
1848     printf("O-RU CC   %d\n", fh_cfg->nCC);
1849     printf("O-RU eAxC %d\n", fh_cfg->neAxc);
1850
1851     for (i = 0; i < fh_init->xran_ports; i++){
1852         xran_port_mask |= 1<<i;
1853     }
1854
1855     for (i = 0; i < fh_init->xran_ports; i++) {
1856         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
1857         if(p_dev_update == NULL){
1858             print_err("p_dev_update\n");
1859             return XRAN_STATUS_FAIL;
1860         }
1861         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
1862         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
1863         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
1864         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
1865     }
1866
1867     if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
1868         switch(total_num_cores) {
1869             case 1: /** only timing core */
1870                 eth_ctx->time_wrk_cfg.f = xran_all_tasks;
1871                 eth_ctx->time_wrk_cfg.arg   = NULL;
1872                 eth_ctx->time_wrk_cfg.state = 1;
1873             break;
1874             case 2:
1875                 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
1876                 eth_ctx->time_wrk_cfg.arg   = NULL;
1877                 eth_ctx->time_wrk_cfg.state = 1;
1878
1879                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1880                 if(pThCtx == NULL){
1881                     print_err("pThCtx allocation error\n");
1882                     return XRAN_STATUS_FAIL;
1883                 }
1884                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1885                 pThCtx->worker_id    = 0;
1886                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1887                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1888                 pThCtx->task_func = ring_processing_func;
1889                 pThCtx->task_arg  = NULL;
1890                 eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
1891                 eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
1892             break;
1893             case 3:
1894                 /* timing core */
1895                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
1896                 eth_ctx->time_wrk_cfg.arg   = NULL;
1897                 eth_ctx->time_wrk_cfg.state = 1;
1898
1899                 /* workers */
1900                 /** 0 **/
1901                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1902                 if(pThCtx == NULL){
1903                     print_err("pThCtx allocation error\n");
1904                     return XRAN_STATUS_FAIL;
1905                 }
1906                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1907                 pThCtx->worker_id      = 0;
1908                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1909                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1910                 pThCtx->task_func = ring_processing_func;
1911                 pThCtx->task_arg  = NULL;
1912                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
1913                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
1914
1915                 for (i = 0; i < fh_init->xran_ports; i++) {
1916                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
1917                     if(p_dev_update == NULL) {
1918                         print_err("p_dev_update\n");
1919                         return XRAN_STATUS_FAIL;
1920                     }
1921                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
1922                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
1923                     printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
1924                     printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
1925                 }
1926
1927                 /** 1 - CP GEN **/
1928                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1929                 if(pThCtx == NULL){
1930                     print_err("pThCtx allocation error\n");
1931                     return XRAN_STATUS_FAIL;
1932                 }
1933                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1934                 pThCtx->worker_id      = 1;
1935                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1936                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
1937                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
1938                 pThCtx->task_arg  = (void*)xran_port_mask;
1939                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
1940                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
1941             break;
1942             default:
1943                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
1944                 return XRAN_STATUS_FAIL;
1945         }
1946     } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) {
1947         switch(total_num_cores) {
1948             case 1: /** only timing core */
1949                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
1950                 return XRAN_STATUS_FAIL;
1951             break;
1952             case 2:
1953                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
1954                 eth_ctx->time_wrk_cfg.arg   = NULL;
1955                 eth_ctx->time_wrk_cfg.state = 1;
1956
1957                 p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
1958
1959                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1960                 if(pThCtx == NULL){
1961                     print_err("pThCtx allocation error\n");
1962                     return XRAN_STATUS_FAIL;
1963                 }
1964                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1965                 pThCtx->worker_id    = 0;
1966                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1967                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1968                 pThCtx->task_func = ring_processing_func;
1969                 pThCtx->task_arg  = NULL;
1970                 eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
1971                 eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
1972             break;
1973             case 3:
1974                 if(icx_cpu) {
1975                     /* timing core */
1976                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
1977                     eth_ctx->time_wrk_cfg.arg   = NULL;
1978                     eth_ctx->time_wrk_cfg.state = 1;
1979
1980                     /* workers */
1981                     /** 0 **/
1982                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1983                     if(pThCtx == NULL){
1984                         print_err("pThCtx allocation error\n");
1985                         return XRAN_STATUS_FAIL;
1986                     }
1987                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1988                     pThCtx->worker_id      = 0;
1989                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1990                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1991                     pThCtx->task_func = ring_processing_func;
1992                     pThCtx->task_arg  = NULL;
1993                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
1994                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
1995
1996                     for (i = 0; i < fh_init->xran_ports; i++) {
1997                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
1998                         if(p_dev_update == NULL) {
1999                             print_err("p_dev_update\n");
2000                             return XRAN_STATUS_FAIL;
2001                         }
2002                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2003                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2004                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2005                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2006                     }
2007
2008                     /** 1 - CP GEN **/
2009                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2010                     if(pThCtx == NULL){
2011                         print_err("pThCtx allocation error\n");
2012                         return XRAN_STATUS_FAIL;
2013                     }
2014                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2015                     pThCtx->worker_id      = 1;
2016                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2017                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2018                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2019                     pThCtx->task_arg  = (void*)xran_port_mask;
2020                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2021                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2022                 } else {
2023                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2024                     return XRAN_STATUS_FAIL;
2025                 }
2026             break;
2027             case 4:
2028                 if(icx_cpu) {
2029                     /* timing core */
2030                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2031                     eth_ctx->time_wrk_cfg.arg   = NULL;
2032                     eth_ctx->time_wrk_cfg.state = 1;
2033
2034                     /* workers */
2035                     /** 0 **/
2036                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2037                     if(pThCtx == NULL){
2038                         print_err("pThCtx allocation error\n");
2039                         return XRAN_STATUS_FAIL;
2040                     }
2041                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2042                     pThCtx->worker_id      = 0;
2043                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2044                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2045                     pThCtx->task_func = ring_processing_func;
2046                     pThCtx->task_arg  = NULL;
2047                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2048                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2049
2050                     /** 1 - CP GEN **/
2051                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2052                     if(pThCtx == NULL){
2053                         print_err("pThCtx allocation error\n");
2054                         return XRAN_STATUS_FAIL;
2055                     }
2056                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2057                     pThCtx->worker_id      = 1;
2058                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2059                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2060                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
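                    /* this CP generation worker serves ports 0..2, subject to
                     * the configured xran_port_mask */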
2061                     pThCtx->task_arg  = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
2062                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2063                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2064
2065                     /** 2 UP GEN **/
2066                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2067                     if(pThCtx == NULL){
2068                         print_err("pThCtx allocation error\n");
2069                         return XRAN_STATUS_FAIL;
2070                     }
2071                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2072                     pThCtx->worker_id    = 2;
2073                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2074                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2075                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2076                     pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
2077                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2078                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2079
2080                     for (i = 1; i < fh_init->xran_ports; i++) {
2081                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2082                         if(p_dev_update == NULL) {
2083                             print_err("p_dev_update\n");
2084                             return XRAN_STATUS_FAIL;
2085                         }
2086                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2087                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2088                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2089                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2090                     }
2091                 } else {
2092                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2093                     return XRAN_STATUS_FAIL;
2094                 }
2095                 break;
2096             case 5:
2097                 if(icx_cpu) {
2098                     /* timing core */
2099                     eth_ctx->time_wrk_cfg.f     = xran_eth_rx_tasks;
2100                     eth_ctx->time_wrk_cfg.arg   = NULL;
2101                     eth_ctx->time_wrk_cfg.state = 1;
2102
2103                     /* workers */
2104                     /** 0 **/
2105                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2106                     if(pThCtx == NULL){
2107                         print_err("pThCtx allocation error\n");
2108                         return XRAN_STATUS_FAIL;
2109                     }
2110                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2111                     pThCtx->worker_id      = 0;
2112                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2113                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2114                     pThCtx->task_func = ring_processing_func;
2115                     pThCtx->task_arg  = NULL;
2116                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2117                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2118
2119                     /** 1 - CP GEN **/
2120                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2121                     if(pThCtx == NULL){
2122                         print_err("pThCtx allocation error\n");
2123                         return XRAN_STATUS_FAIL;
2124                     }
2125                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2126                     pThCtx->worker_id      = 1;
2127                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2128                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2129                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2130                     pThCtx->task_arg  = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
2131                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2132                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2133
2134                     /** 2 UP GEN **/
2135                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2136                     if(pThCtx == NULL){
2137                         print_err("pThCtx allocation error\n");
2138                         return XRAN_STATUS_FAIL;
2139                     }
2140                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2141                     pThCtx->worker_id    = 2;
2142                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2143                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2144                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2145                     pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
2146                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2147                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2148
2149                     /** 3 UP GEN **/
2150                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2151                     if(pThCtx == NULL){
2152                         print_err("pThCtx allocation error\n");
2153                         return XRAN_STATUS_FAIL;
2154                     }
2155                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2156                     pThCtx->worker_id    = 3;
2157                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2158                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2159                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2160                     pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
2161                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2162                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2163
2164                     for (i = 1; i < fh_init->xran_ports; i++) {
2165                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2166                         if(p_dev_update == NULL) {
2167                             print_err("p_dev_update\n");
2168                             return XRAN_STATUS_FAIL;
2169                         }
2170                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2171                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2172                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2173                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2174                     }
2175                 } else {
2176                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2177                     return XRAN_STATUS_FAIL;
2178                 }
2179                 break;
2180             case 6:
2181                 if(eth_ctx->io_cfg.id == O_DU) {
2182                     /* timing core */
2183                     eth_ctx->time_wrk_cfg.f     = xran_eth_rx_tasks;
2184                     eth_ctx->time_wrk_cfg.arg   = NULL;
2185                     eth_ctx->time_wrk_cfg.state = 1;
2186
2187                     /* workers */
2188                     /** 0 **/
2189                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2190                     if(pThCtx == NULL){
2191                         print_err("pThCtx allocation error\n");
2192                         return XRAN_STATUS_FAIL;
2193                     }
2194                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2195                     pThCtx->worker_id      = 0;
2196                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2197                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2198                     pThCtx->task_func = ring_processing_func;
2199                     pThCtx->task_arg  = NULL;
2200                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2201                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2202
2203                     /** 1 Eth Tx **/
2204                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2205
2206                     if(pThCtx == NULL){
2207                         print_err("pThCtx allocation error\n");
2208                         return XRAN_STATUS_FAIL;
2209                     }
2210                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2211                     pThCtx->worker_id = 1;
2212                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2213                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2214                     pThCtx->task_func = process_dpdk_io_tx;
2215                     pThCtx->task_arg  = (void*)2;
2216                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2217                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2218
2219                     /** 2 - CP GEN **/
2220                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2221                     if(pThCtx == NULL){
2222                         print_err("pThCtx allocation error\n");
2223                         return XRAN_STATUS_FAIL;
2224                     }
2225                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2226                     pThCtx->worker_id      = 2;
2227                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2228                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2229                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2230                     pThCtx->task_arg  = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
2231                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2232                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2233
2234                     /** 3 UP GEN **/
2235                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2236                     if(pThCtx == NULL){
2237                         print_err("pThCtx allocation error\n");
2238                         return XRAN_STATUS_FAIL;
2239                     }
2240                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2241                     pThCtx->worker_id    = 3;
2242                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2243                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2244                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2245                     pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
2246                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2247                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2248
2249                     /** 4 UP GEN **/
2250                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2251                     if(pThCtx == NULL){
2252                         print_err("pThCtx allocation error\n");
2253                         return XRAN_STATUS_FAIL;
2254                     }
2255                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2256                     pThCtx->worker_id    = 4;
2257                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2258                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2259                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2260                     pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
2261                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2262                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2263
2264                     for (i = 0; i < fh_init->xran_ports; i++) {
2265                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2266                         if(p_dev_update == NULL) {
2267                             print_err("p_dev_update\n");
2268                             return XRAN_STATUS_FAIL;
2269                         }
2270                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id;
2271                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id;
2272                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2273                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2274                     }
2275                 } else if(eth_ctx->io_cfg.id == O_RU) {
2276                     /*** O_RU specific config */
2277                     /* timing core */
2278                     eth_ctx->time_wrk_cfg.f     = NULL;
2279                     eth_ctx->time_wrk_cfg.arg   = NULL;
2280                     eth_ctx->time_wrk_cfg.state = 1;
2281
2282                     /* workers */
2283                     /** 0  Eth RX */
2284                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2285                     if(pThCtx == NULL){
2286                         print_err("pThCtx allocation error\n");
2287                         return XRAN_STATUS_FAIL;
2288                     }
2289                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2290                     pThCtx->worker_id = 0;
2291                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2292                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
2293                     pThCtx->task_func = process_dpdk_io_rx;
2294                     pThCtx->task_arg  = NULL;
2295                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2296                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2297
2298                     /** 1  FH RX and BBDEV */
2299                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2300                     if(pThCtx == NULL){
2301                         print_err("pThCtx allocation error\n");
2302                         return XRAN_STATUS_FAIL;
2303                     }
2304                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2305                     pThCtx->worker_id = 1;
2306                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2307                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
2308                     pThCtx->task_func = ring_processing_func_per_port;
2309                     pThCtx->task_arg  = (void*)0;
2310                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2311                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2312
2313                     /** 2  FH RX and BBDEV */
2314                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2315                     if(pThCtx == NULL){
2316                         print_err("pThCtx allocation error\n");
2317                         return XRAN_STATUS_FAIL;
2318                     }
2319                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2320                     pThCtx->worker_id = 2;
2321                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2322                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
2323                     pThCtx->task_func = ring_processing_func_per_port;
2324                     pThCtx->task_arg  = (void*)1;
2325                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2326                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2327
2328                     /** 3  FH RX and BBDEV */
2329                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2330                     if(pThCtx == NULL){
2331                         print_err("pThCtx allocation error\n");
2332                         return XRAN_STATUS_FAIL;
2333                     }
2334                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2335                     pThCtx->worker_id = 3;
2336                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2337                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
2338                     pThCtx->task_func = ring_processing_func_per_port;
2339                     pThCtx->task_arg  = (void*)2;
2340                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2341                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2342
2343                     /**  FH TX and BBDEV */
2344                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2345                     if(pThCtx == NULL){
2346                         print_err("pThCtx allocation error\n");
2347                         return XRAN_STATUS_FAIL;
2348                     }
2349                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2350                     pThCtx->worker_id = 4;
2351                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2352                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2353                     pThCtx->task_func = process_dpdk_io_tx;
2354                     pThCtx->task_arg  = (void*)2;
2355                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2356                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2357                 } else {
2358                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2359                     return XRAN_STATUS_FAIL;
2360                 }
2361                 break;
2362             default:
2363                 print_err("unsupported configuration\n");
2364                 return XRAN_STATUS_FAIL;
2365         }
2366     } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
2367         switch(total_num_cores) {
2368             case 1:
2369             case 2:
2370                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2371                 return XRAN_STATUS_FAIL;
2372             break;
2373             case 3:
2374                 if(icx_cpu) {
2375                     /* timing core */
2376                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2377                     eth_ctx->time_wrk_cfg.arg   = NULL;
2378                     eth_ctx->time_wrk_cfg.state = 1;
2379
2380                     /* workers */
2381                     /** 0 **/
2382                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2383                     if(pThCtx == NULL){
2384                         print_err("pThCtx allocation error\n");
2385                         return XRAN_STATUS_FAIL;
2386                     }
2387                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2388                     pThCtx->worker_id      = 0;
2389                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2390                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2391                     pThCtx->task_func = ring_processing_func;
2392                     pThCtx->task_arg  = NULL;
2393                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2394                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2395
2396                     for (i = 1; i < fh_init->xran_ports; i++) {
2397                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2398                         if(p_dev_update == NULL) {
2399                             print_err("p_dev_update\n");
2400                             return XRAN_STATUS_FAIL;
2401                         }
2402                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2403                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2404                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2405                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2406                     }
2407
2408                     /** 1 - CP GEN **/
2409                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2410                     if(pThCtx == NULL){
2411                         print_err("pThCtx allocation error\n");
2412                         return XRAN_STATUS_FAIL;
2413                     }
2414                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2415                     pThCtx->worker_id      = 1;
2416                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2417                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2418                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2419                     pThCtx->task_arg  = (void*)xran_port_mask;
2420                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2421                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2422                 } else {
2423                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2424                     return XRAN_STATUS_FAIL;
2425                 }
2426             break;
2427             case 4:
2428                 if(icx_cpu) {
2429                     /* timing core */
2430                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2431                     eth_ctx->time_wrk_cfg.arg   = NULL;
2432                     eth_ctx->time_wrk_cfg.state = 1;
2433
2434                     /* workers */
2435                     /** 0 **/
2436                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2437                     if(pThCtx == NULL){
2438                         print_err("pThCtx allocation error\n");
2439                         return XRAN_STATUS_FAIL;
2440                     }
2441                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2442                     pThCtx->worker_id      = 0;
2443                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2444                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2445                     pThCtx->task_func = ring_processing_func;
2446                     pThCtx->task_arg  = NULL;
2447                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2448                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2449
2450                     /** 1 - CP GEN **/
2451                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2452                     if(pThCtx == NULL){
2453                         print_err("pThCtx allocation error\n");
2454                         return XRAN_STATUS_FAIL;
2455                     }
2456                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2457                     pThCtx->worker_id      = 1;
2458                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2459                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2460                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2461                     pThCtx->task_arg  = (void*)(((1<<1) | (1<<2)) & xran_port_mask);
2462                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2463                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2464
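                    /* Note on the task_arg convention used throughout this switch: for
                     * xran_dl_pkt_ring_processing_func the argument is an O-RAN port
                     * bitmask carried through the void* parameter, not a pointer. A
                     * minimal sketch of both sides (process_rings_for_port is a
                     * hypothetical helper; values are illustrative):
                     *
                     *   uintptr_t mask = ((1 << 1) | (1 << 2)) & xran_port_mask;  // serve ports 1 and 2
                     *   pThCtx->task_arg = (void *)mask;
                     *
                     *   // worker side:
                     *   uintptr_t ports = (uintptr_t)pThCtx->task_arg;
                     *   for (int p = 0; p < XRAN_PORTS_NUM; p++)
                     *       if (ports & (1U << p))
                     *           process_rings_for_port(p);
                     */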
2465                     /** 2 UP GEN **/
2466                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2467                     if(pThCtx == NULL){
2468                         print_err("pThCtx allocation error\n");
2469                         return XRAN_STATUS_FAIL;
2470                     }
2471                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2472                     pThCtx->worker_id    = 2;
2473                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2474                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2475                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2476                     pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
2477                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2478                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2479
2480                     for (i = 1; i < fh_init->xran_ports; i++) {
2481                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2482                         if(p_dev_update == NULL) {
2483                             print_err("p_dev_update\n");
2484                             return XRAN_STATUS_FAIL;
2485                         }
2486                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2487                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2488                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2489                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2490                     }
2491                 } else {
2492                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2493                     return XRAN_STATUS_FAIL;
2494                 }
2495             break;
2496             case 5:
2497                     /* timing core */
2498                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2499                     eth_ctx->time_wrk_cfg.arg   = NULL;
2500                     eth_ctx->time_wrk_cfg.state = 1;
2501
2502                     /* workers */
2503                     /** 0  FH RX and BBDEV */
2504                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2505                     if(pThCtx == NULL){
2506                         print_err("pThCtx allocation error\n");
2507                         return XRAN_STATUS_FAIL;
2508                     }
2509                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2510                     pThCtx->worker_id = 0;
2511                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2512                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2513                     pThCtx->task_func = ring_processing_func;
2514                     pThCtx->task_arg  = NULL;
2515                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2516                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2517
2518                     /** 1 - CP GEN **/
2519                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2520                     if(pThCtx == NULL){
2521                         print_err("pThCtx allocation error\n");
2522                         return XRAN_STATUS_FAIL;
2523                     }
2524                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2525                     pThCtx->worker_id = 1;
2526                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2527                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2528                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2529                     pThCtx->task_arg  = (void*)(1<<0);
2530                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2531                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2532
2533                     /** 2 UP GEN **/
2534                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2535                     if(pThCtx == NULL){
2536                         print_err("pThCtx allocation error\n");
2537                         return XRAN_STATUS_FAIL;
2538                     }
2539                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2540                     pThCtx->worker_id = 2;
2541                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2542                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
2543                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2544                     pThCtx->task_arg  = (void*)(1<<1);
2545                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2546                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2547
2548                     /** 3 UP GEN **/
2549                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2550                     if(pThCtx == NULL){
2551                         print_err("pThCtx allocation error\n");
2552                         return XRAN_STATUS_FAIL;
2553                     }
2554                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2555                     pThCtx->worker_id = 3;
2556                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2557                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
2558                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2559                     pThCtx->task_arg  = (void*)(1<<2);
2560                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2561                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2562             break;
2563             case 6:
2564                 if(eth_ctx->io_cfg.id == O_DU){
2565                     /* timing core */
2566                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2567                     eth_ctx->time_wrk_cfg.arg   = NULL;
2568                     eth_ctx->time_wrk_cfg.state = 1;
2569
2570                     /* workers */
2571                     /** 0 **/
2572                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2573                     if(pThCtx == NULL){
2574                         print_err("pThCtx allocation error\n");
2575                         return XRAN_STATUS_FAIL;
2576                     }
2577                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2578                     pThCtx->worker_id      = 0;
2579                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2580                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2581                     pThCtx->task_func = ring_processing_func;
2582                     pThCtx->task_arg  = NULL;
2583                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2584                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2585
2586                     /** 1 - CP GEN **/
2587                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2588                     if(pThCtx == NULL){
2589                         print_err("pThCtx allocation error\n");
2590                         return XRAN_STATUS_FAIL;
2591                     }
2592                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2593                     pThCtx->worker_id      = 1;
2594                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2595                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2596                     pThCtx->task_func = xran_processing_timer_only_func;
2597                     pThCtx->task_arg  = NULL;
2598                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2599                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2600
2601                     /** 2 UP GEN **/
2602                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2603                     if(pThCtx == NULL){
2604                         print_err("pThCtx allocation error\n");
2605                         return XRAN_STATUS_FAIL;
2606                     }
2607                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2608                     pThCtx->worker_id    = 2;
2609                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2610                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2611                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2612                     pThCtx->task_arg  = (void*)(1<<0);
2613                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2614                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2615
2616                     /** 3 UP GEN **/
2617                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2618                     if(pThCtx == NULL){
2619                         print_err("pThCtx allocation error\n");
2620                         return XRAN_STATUS_FAIL;
2621                     }
2622                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2623                     pThCtx->worker_id    = 3;
2624                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2625                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2626                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2627                     pThCtx->task_arg  = (void*)(1<<1);
2628                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2629                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2630
2631                     /** 4 UP GEN **/
2632                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2633                     if(pThCtx == NULL){
2634                         print_err("pThCtx allocation error\n");
2635                         return XRAN_STATUS_FAIL;
2636                     }
2637                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2638                     pThCtx->worker_id    = 4;
2639                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2640                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2641                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2642                     pThCtx->task_arg  = (void*)(1<<2);
2643                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2644                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2645                 } else {
2646                     /*** O_RU specific config */
2647                     /* timing core */
2648                     eth_ctx->time_wrk_cfg.f     = NULL;
2649                     eth_ctx->time_wrk_cfg.arg   = NULL;
2650                     eth_ctx->time_wrk_cfg.state = 1;
2651
2652                     /* workers */
2653                     /** 0  Eth RX */
2654                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2655                     if(pThCtx == NULL){
2656                         print_err("pThCtx allocation error\n");
2657                         return XRAN_STATUS_FAIL;
2658                     }
2659                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2660                     pThCtx->worker_id = 0;
2661                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2662                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
2663                     pThCtx->task_func = process_dpdk_io_rx;
2664                     pThCtx->task_arg  = NULL;
2665                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2666                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2667
2668                     /** 1  FH RX and BBDEV */
2669                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2670                     if(pThCtx == NULL){
2671                         print_err("pThCtx allocation error\n");
2672                         return XRAN_STATUS_FAIL;
2673                     }
2674                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2675                     pThCtx->worker_id = 1;
2676                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2677                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
2678                     pThCtx->task_func = ring_processing_func_per_port;
2679                     pThCtx->task_arg  = (void*)0;
2680                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2681                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2682
2683                     /** 2  FH RX and BBDEV */
2684                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2685                     if(pThCtx == NULL){
2686                         print_err("pThCtx allocation error\n");
2687                         return XRAN_STATUS_FAIL;
2688                     }
2689                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2690                     pThCtx->worker_id = 2;
2691                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2692                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
2693                     pThCtx->task_func = ring_processing_func_per_port;
2694                     pThCtx->task_arg  = (void*)1;
2695                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2696                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2697
2698                     /** 3  FH RX and BBDEV */
2699                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2700                     if(pThCtx == NULL){
2701                         print_err("pThCtx allocation error\n");
2702                         return XRAN_STATUS_FAIL;
2703                     }
2704                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2705                     pThCtx->worker_id = 3;
2706                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2707                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
2708                     pThCtx->task_func = ring_processing_func_per_port;
2709                     pThCtx->task_arg  = (void*)2;
2710                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2711                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2712
2713                     /**  FH TX and BBDEV */
2714                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2715                     if(pThCtx == NULL){
2716                         print_err("pThCtx allocation error\n");
2717                         return XRAN_STATUS_FAIL;
2718                     }
2719                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2720                     pThCtx->worker_id = 4;
2721                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2722                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2723                     pThCtx->task_func = process_dpdk_io_tx;
2724                     pThCtx->task_arg  = (void*)2;
2725                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2726                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2727                 }
2728             break;
2729             default:
2730                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2731                 return XRAN_STATUS_FAIL;
2732         }
2733     } else {
2734         print_err("unsupported configuration\n");
2735         return XRAN_STATUS_FAIL;
2736     }
2737
2738     nWorkerCore = 1ULL;
2739     if(eth_ctx->io_cfg.pkt_proc_core) {
2740         for (i = 0; i < coreNum && i < 64; i++) {
2741             if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
2742                 xran_core_used[xran_num_cores_used++] = i;
2743                 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
2744                     rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
2745                 eth_ctx->pkt_wrk_cfg[i].state = 1;
2746                 if(eth_ctx->pkt_proc_core_id == 0)
2747                     eth_ctx->pkt_proc_core_id = i;
2748                 printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
2749                 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
2750             }
2751             nWorkerCore = nWorkerCore << 1;
2752         }
2753     }
2754
2755     nWorkerCore = 1ULL;
2756     if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
2757         for (i = 64; i < coreNum && i < 128; i++) {
2758             if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
2759                 xran_core_used[xran_num_cores_used++] = i;
2760                 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
2761                     rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
2762                 eth_ctx->pkt_wrk_cfg[i].state = 1;
2763                 if(eth_ctx->pkt_proc_core_id == 0)
2764                     eth_ctx->pkt_proc_core_id = i;
2765                 printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
2766                 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
2767             }
2768             nWorkerCore = nWorkerCore << 1;
2769         }
2770     }
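    /* Worked example of the bitmask scan above (illustrative values): with
     * io_cfg.pkt_proc_core = 0x3C (binary 0011 1100), workers are launched on
     * cores 2, 3, 4 and 5; pkt_proc_core_64_127 repeats the same walk for
     * cores 64..127.
     *
     *   uint64_t bit = 1ULL;
     *   for (int core = 0; core < 64; core++, bit <<= 1)
     *       if (bit & io_cfg.pkt_proc_core)
     *           launch_worker_on(core);   // hypothetical stand-in for rte_eal_remote_launch()
     */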
2771
2772     return XRAN_STATUS_SUCCESS;
2773 }
2774 int32_t
2775 xran_open(void *pHandle, struct xran_fh_config* pConf)
2776 {
2777     int32_t ret = XRAN_STATUS_SUCCESS;
2778     int32_t i;
2779     uint8_t nNumerology = 0;
2780     int32_t  lcore_id = 0;
2781     struct xran_device_ctx  *p_xran_dev_ctx = NULL;
2782     struct xran_fh_config   *pFhCfg  = NULL;
2783     struct xran_fh_init     *fh_init = NULL;
2784     struct xran_ethdi_ctx   *eth_ctx = xran_ethdi_get_ctx();
2785     int32_t wait_time = 10;
2786     int64_t offset_sec, offset_nsec;
2787
2788     if(pConf->dpdk_port < XRAN_PORTS_NUM) {
2789         p_xran_dev_ctx  = xran_dev_get_ctx_by_id(pConf->dpdk_port);
2790     } else {
2791         print_err("@%p [ru %d] pConf->dpdk_port >= XRAN_PORTS_NUM\n", pConf, pConf->dpdk_port);
2792         return XRAN_STATUS_FAIL;
2793     }
2794
2795     if(p_xran_dev_ctx == NULL) {
2796         print_err("[ru %d] p_xran_dev_ctx == NULL ", pConf->dpdk_port);
2797         return XRAN_STATUS_FAIL;
2798     }
2799
2800     pFhCfg = &p_xran_dev_ctx->fh_cfg;
2801     memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
2802
2803     fh_init = &p_xran_dev_ctx->fh_init;
2804     if(fh_init == NULL)
2805         return XRAN_STATUS_FAIL;
2806
2807     if(pConf->log_level) {
2808         printf(" %s: %s Category %s\n", __FUNCTION__,
2809                (pFhCfg->ru_conf.xranTech == XRAN_RAN_5GNR) ? "5G NR" : "LTE",
2810                (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
2811     }
2812
2813     p_xran_dev_ctx->enableCP    = pConf->enableCP;
2814     p_xran_dev_ctx->enablePrach = pConf->prachEnable;
2815     p_xran_dev_ctx->enableSrs   = pConf->srsEnable;
2816     p_xran_dev_ctx->puschMaskEnable = pConf->puschMaskEnable;
2817     p_xran_dev_ctx->puschMaskSlot = pConf->puschMaskSlot;
2818     p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna;
2819
2820     if(pConf->GPS_Alpha || pConf->GPS_Beta ){
2821         offset_sec = pConf->GPS_Beta / 100;    /* resolution of beta is 10ms */
2822         offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 1e7 + pConf->GPS_Alpha;
2823         p_xran_dev_ctx->offset_sec = offset_sec;
2824         p_xran_dev_ctx->offset_nsec = offset_nsec;
2825     }else {
2826         p_xran_dev_ctx->offset_sec  = 0;
2827         p_xran_dev_ctx->offset_nsec = 0;
2828     }
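    /* Worked example of the alpha/beta arithmetic above (illustrative values):
     * with GPS_Beta = 123 (units of 10 ms) and GPS_Alpha = 500 (ns),
     *
     *   offset_sec  = 123 / 100                   = 1 s
     *   offset_nsec = (123 - 1 * 100) * 1e7 + 500 = 230000500 ns
     *
     * i.e. beta carries the offset in whole 10 ms steps and alpha the residual
     * nanoseconds. */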
2829
2830
2831     nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
2832
2833     if (pConf->nCC > XRAN_MAX_SECTOR_NR) {
2834         if(pConf->log_level)
2835             printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);
2836         pConf->nCC = XRAN_MAX_SECTOR_NR;
2837     }
2838
2839     if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER  || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) {
2840         print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);
2841         return XRAN_STATUS_FAIL;
2842     }
2843
2844     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) {
2845         if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) {
2846             return ret;
2847         }
2848     }
2849
2850     /* setup PRACH configuration for C-Plane */
2851     if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) {
2852         if((ret  = xran_init_prach(pConf, p_xran_dev_ctx))< 0){
2853             return ret;
2854         }
2855     } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) {
2856         if((ret  =  xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0){
2857             return ret;
2858         }
2859     }
2860
2861     if((ret  = xran_init_srs(pConf, p_xran_dev_ctx))< 0){
2862         return ret;
2863     }
2864
2865     if((ret  = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){
2866         return ret;
2867     }
2868
2869     if((ret  = xran_init_sectionid(p_xran_dev_ctx)) < 0){
2870         return ret;
2871     }
2872
2873     if((ret  = xran_init_seqid(p_xran_dev_ctx)) < 0){
2874         return ret;
2875     }
2876
2877     if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
2878         if((ret  = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) {
2879             return ret;
2880         }
2881
2882         if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) {
2883             if((ret  = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) {
2884                 return ret;
2885             }
2886         }
2887     }
2888
2889     if(pConf->ru_conf.xran_max_frame) {
2890        xran_max_frame = pConf->ru_conf.xran_max_frame;
2891        printf("xran_max_frame %d\n", xran_max_frame);
2892     }
2893
2894     p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology);
2895     if (interval_us > p_xran_dev_ctx->interval_us_local)
2896     {
2897         interval_us = p_xran_dev_ctx->interval_us_local; /* track the shortest TTI across ports, i.e. the maximum numerology */
2898     }
2899
2901     printf("%s: interval_us=%lu, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local);
2903     if (nNumerology >= timing_get_numerology())
2904     {
2905         timing_set_numerology(nNumerology);
2906     }
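    /* For reference, assuming xran_fs_get_tti_interval() follows the usual
     * 3GPP 38.211 numerology scaling:
     *
     *   mu = 0 -> 1000 us, mu = 1 -> 500 us, mu = 2 -> 250 us, mu = 3 -> 125 us
     *
     * so after all ports are opened, interval_us holds the shortest TTI, e.g.
     * ports running mu = 0 and mu = 1 leave interval_us = 500. */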
2907
2908     for(i = 0; i < pConf->nCC; i++) {
2909         xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
2910             pConf->frame_conf.sSlotConfig);
2911     }
2912
2913     xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology));
2914
2915     /* if send_cpmbuf2ring/send_upmbuf2ring need to be changed from the default
2916      * functions, they should be set between xran_init and xran_open */
2917     if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
2918         p_xran_dev_ctx->send_cpmbuf2ring    = xran_ethdi_mbuf_send_cp;
2919     if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
2920         p_xran_dev_ctx->send_upmbuf2ring    = xran_ethdi_mbuf_send;
2921
2922     if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
2923         if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
2924             p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
2925     } else {
2926         if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
2927             p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
2928     }
2929
2930     if(pConf->dpdk_port == 0) {
2931         /* create all thread on open of port 0 */
2932         xran_num_cores_used = 0;
2933         if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
2934             eth_ctx->bbdev_dec = pConf->bbdev_dec;
2935             eth_ctx->bbdev_enc = pConf->bbdev_enc;
2936         }
2937
2938         if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
2939             printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
2940             p_xran_dev_ctx->timing_source_thread_running = 0;
2941             xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
2942             if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
2943                 rte_panic("xran_timing_source_thread() failed to start\n");
2944         } else if(pConf->log_level) {
2945             printf("Eth port was not opened. Timing source thread was not started\n");
2946         }
2947     } else {
2948         if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
2949             if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
2950                 return ret;
2951             }
2952         }
2953     }
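    /* Design note: the timing source thread is launched once, on the open of
     * port 0; subsequent ports only attach their timing callbacks via
     * xran_timing_create_cbs(), and the packet workers are spawned below once
     * the last port is opened. */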
2954
2955     if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
2956         if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
2957             if((ret = xran_spawn_workers()) < 0) {
2958                 return ret;
2959             }
2960         }
2961         printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  sched_getcpu(), getpid());
2962         printf("Waiting on Timing thread...\n");
2963         while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
2964             usleep(100);
2965         }
2966     }
2967
2968     print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
2969     return ret;
2970 }
2971
2972 int32_t
2973 xran_start(void *pHandle)
2974 {
2975     struct tm * ptm;
2976     /* ToS = Top of Second start +- 1.5us */
2977     struct timespec ts;
2978     char buff[100];
2979
2980     struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
2981     if(xran_get_if_state() == XRAN_RUNNING) {
2982         print_err("Already STARTED!!");
2983         return (-1);
2984     }
2985     timespec_get(&ts, TIME_UTC);
2986     ptm = gmtime(&ts.tv_sec);
2987     if(ptm){
2988         strftime(buff, sizeof(buff), "%D %T", ptm);
2989         printf("%s: XRAN start time: %s.%09ld UTC [interval %lu us]\n",
2990             (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
2991     }
2992
2993     if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable)
2994     {
2995         xran_if_current_state = XRAN_OWDM;
2996     }
2997     else
2998     {
2999         xran_if_current_state = XRAN_RUNNING;
3000     }
3001     return 0;
3002 }
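/* Typical lifecycle from the application side, a minimal sketch (error
 * handling elided; handle and config setup are application-specific and the
 * cfg contents here are placeholders):
 *
 *   struct xran_fh_config cfg;          // filled in by the application
 *   void *handle;                       // obtained from the xran init APIs
 *
 *   if (xran_open(handle, &cfg) == XRAN_STATUS_SUCCESS) {
 *       xran_start(handle);             // state -> XRAN_OWDM or XRAN_RUNNING
 *       // ... run traffic ...
 *       xran_stop(handle);
 *       xran_close(handle);
 *   }
 */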
3003
3004 int32_t
3005 xran_stop(void *pHandle)
3006 {
3007     if(xran_get_if_state() == XRAN_STOPPED) {
3008         print_err("Already STOPPED!!");
3009         return (-1);
3010     }
3011
3012     xran_if_current_state = XRAN_STOPPED;
3013     return 0;
3014 }
3015
3016 int32_t
3017 xran_close(void *pHandle)
3018 {
3019     int32_t ret = XRAN_STATUS_SUCCESS;
3020     struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
3021
3022     xran_if_current_state = XRAN_STOPPED;
3023     ret = xran_cp_free_sectiondb(p_xran_dev_ctx);
3024
3025     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)
3026         xran_ruemul_release(p_xran_dev_ctx);
3027
3028 #ifdef RTE_LIBRTE_PDUMP
3029     /* uninitialize packet capture framework */
3030     rte_pdump_uninit();
3031 #endif
3032     return ret;
3033 }
3034
3035 /* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open;
3036  * each cb falls back to its default during open if it is left as NULL */
3037 int32_t
3038 xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
3039 {
3040     struct xran_device_ctx *p_xran_dev_ctx;
3041
3042     if(xran_get_if_state() == XRAN_RUNNING) {
3043         print_err("Cannot register callback while running!!\n");
3044         return (-1);
3045     }
3046
3047     p_xran_dev_ctx = xran_dev_get_ctx();
3048
3049     p_xran_dev_ctx->send_cpmbuf2ring    = mbuf_send_cp;
3050     p_xran_dev_ctx->send_upmbuf2ring    = mbuf_send_up;
3051
3052     p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
3053
3054     return (0);
3055 }
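/* Usage sketch: register custom ring-send callbacks in the window between
 * xran_init() and xran_open() (my_send_cp/my_send_up are hypothetical
 * functions matching xran_ethdi_mbuf_send_fn):
 *
 *   xran_register_cb_mbuf2ring(my_send_cp, my_send_up);
 *   ...
 *   xran_open(handle, &cfg);
 */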
3056
3057 int32_t
3058 xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx,  uint32_t *nSlotIdx, uint64_t *nSecond)
3059 {
3060     int32_t tti = 0;
3061     struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
3062     if (!p_xran_dev_ctx)
3063     {
3064         print_err("Null xRAN context on port id %u!!\n", PortId);
3065         return 0;
3066     }
3067
3068     tti           = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT);
3069     *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
3070     *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
3071     *nFrameIdx    = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
3072     *nSecond      = timing_get_current_second();
3073
3074     return tti;
3075 }
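/* Worked example of the index derivation above (illustrative, assuming the
 * XranGet* helpers implement the usual integer arithmetic): with numerology 1
 * (interval_us_local = 500, so 2 slots per subframe) and
 * xran_lib_ota_sym_idx[PortId] = 571,
 *
 *   tti          = 571 / 14      = 40
 *   nSlotIdx     = 40 % 2        = 0
 *   nSubframeIdx = (40 / 2) % 10 = 0
 *   nFrameIdx    = SFN at second start + 40 / (2 * 10) = SFN + 2
 */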
3076
3077 int32_t
3078 xran_set_debug_stop(int32_t value, int32_t count)
3079 {
3080     return timing_set_debug_stop(value, count);
3081 }