INTC Contribution to the O-RAN F Release for O-DU Low
/******************************************************************************
*
*   Copyright (c) 2020 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/

/**
 * @brief XRAN main functionality module
 * @file xran_main.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/

#define _GNU_SOURCE
#include <sched.h>
#include <assert.h>
#include <err.h>
#include <libgen.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <time.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>     /* abs() */
#include <pthread.h>
#include <malloc.h>
#include <immintrin.h>
#include <numa.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_version.h>
#include <rte_flow.h>
#if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */
#include <rte_ecpri.h>
#endif
#include "xran_fh_o_du.h"
#include "xran_fh_o_ru.h"
#include "xran_main.h"

#include "ethdi.h"
#include "xran_mem_mgr.h"
#include "xran_tx_proc.h"
#include "xran_rx_proc.h"
#include "xran_pkt.h"
#include "xran_up_api.h"
#include "xran_cp_api.h"
#include "xran_sync_api.h"
#include "xran_lib_mlog_tasks_id.h"
#include "xran_timer.h"
#include "xran_common.h"
#include "xran_dev.h"
#include "xran_frame_struct.h"
#include "xran_printf.h"
#include "xran_cp_proc.h"
#include "xran_cb_proc.h"
#include "xran_ecpri_owd_measurements.h"

#include "xran_mlog_lnx.h"

static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {{NULL}};

uint64_t interval_us = 1000; /**< TTI interval (us) of the cell with the maximum numerology */

uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM]     = {0,0,0,0,0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */
uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM]     = {0,0,0,0,0,0,0,0}; /**< Symbol index in a slot [0:13] */
uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1],
                                                                        where TTI is the TTI interval in microseconds */

uint16_t xran_SFN_at_Sec_Start   = 0;    /**< SFN at current second start */
uint16_t xran_max_frame          = 1023; /**< Max frame value in use: expected to be 99 (old compatibility mode) or 1023 as per section 9.7.2 System Frame Number Calculation */

static uint64_t xran_total_tick = 0, xran_used_tick = 0;
static uint32_t xran_num_cores_used = 0;
static uint32_t xran_core_used[64] = {0};
int32_t first_call = 0;
int32_t mlogxranenable = 0;
struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void);
int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc);

void tti_ota_cb(struct rte_timer *tim, void *arg);
void tti_to_phy_cb(struct rte_timer *tim, void *arg);

int32_t xran_pkt_gen_process_ring(struct rte_ring *r);

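/**
 * @brief Called at every second boundary: derives the SFN at the start of the
 *        new second from GPS time (the current second minus
 *        UNIX_TO_GPS_SECONDS_OFFSET, times NUM_OF_FRAMES_PER_SECOND, modulo
 *        xran_max_frame + 1) and latches the per-port Tx/Rx byte counters
 *        into the per-second rate counters.
 **/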
void
xran_updateSfnSecStart(void)
{
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
    struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
    int32_t xran_ports  = p_xran_dev_ctx->fh_init.xran_ports;
    int32_t o_xu_id = 0;
    uint64_t currentSecond = timing_get_current_second();
    // Assume always positive
    uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
    uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;
    uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
    xran_SFN_at_Sec_Start = sfn;

    for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){
        pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter;
        pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter;
        pCnt->tx_bytes_counter = 0;
        pCnt->rx_bytes_counter = 0;
        p_xran_dev_ctx++;
        pCnt = &p_xran_dev_ctx->fh_counters;
    }
}

#if 0
static inline int32_t
xran_getSlotIdxSecond(uint32_t interval)
{
    int32_t frameIdxSecond = xran_getSfnSecStart();
    int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval);
    return slotIndxSecond;
}
#endif

enum xran_if_state
xran_get_if_state(void)
{
    return xran_if_current_state;
}

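/**
 * @brief Check whether the given subframe/slot carries PRACH occasions for
 *        the given port, per the tables of 3GPP TS 38.211 section 6.3.3.
 *        For example, with numerology 1 and nrofPrachInSlot == 1, only
 *        slot 1 of a PRACH subframe is treated as a PRACH slot.
 * @return 1 if this is a PRACH slot, 0 otherwise (also 0 for an unknown
 *         port or an unsupported numerology)
 **/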
int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id)
{
    int32_t is_prach_slot = 0;
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
    if (p_xran_dev_ctx == NULL)
    {
        print_err("PortId %d does not exist\n", PortId);
        return is_prach_slot;
    }
    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
    uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);

    if (nNumerology < 2){
        /* for FR1, in 38.211 tab 6.3.3.2-2&3 it is the subframe index */
        if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){
            if (pPrachCPConfig->nrofPrachInSlot == 0){
                if(slot_id == 0)
                    is_prach_slot = 1;
            }
            else if (pPrachCPConfig->nrofPrachInSlot == 2)
                is_prach_slot = 1;
            else{
                if (nNumerology == 0)
                    is_prach_slot = 1;
                else if (slot_id == 1)
                    is_prach_slot = 1;
            }
        }
    } else if (nNumerology == 3){
        /* for FR2, in 38.211 tab 6.3.3.4 it is the slot index of the 60kHz slot */
        uint32_t slotidx;
        slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id;
        if (pPrachCPConfig->nrofPrachInSlot == 2){
            if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)
                is_prach_slot = 1;
        } else {
            if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){
                is_prach_slot = 1;
            }
        }
    } else
        print_err("Numerology %d not supported", nNumerology);
    return is_prach_slot;
}

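/**
 * @brief Copy the SRS-related fields (slot, NDM offset/duration, eAxC offset)
 *        from the FH configuration into the device context.
 **/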
int32_t
xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
    struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);

    if(p_srs){
        p_srs->symbMask         = pConf->srs_conf.symbMask;     /* deprecated */
        p_srs->slot             = pConf->srs_conf.slot;
        p_srs->ndm_offset       = pConf->srs_conf.ndm_offset;
        p_srs->ndm_txduration   = pConf->srs_conf.ndm_txduration;
        p_srs->eAxC_offset      = pConf->srs_conf.eAxC_offset;

        print_dbg("SRS slot        %d\n", p_srs->slot);
        print_dbg("SRS NDM offset  %d\n", p_srs->ndm_offset);
        print_dbg("SRS NDM Tx      %d\n", p_srs->ndm_txduration);
        print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);
    }
    return (XRAN_STATUS_SUCCESS);
}

int32_t
xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
    /* update RACH for LTE */
    return xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_LTE);
}

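/**
 * @brief Populate the PRACH C-Plane configuration of the device context from
 *        the PRACH configuration index: looks up the mmWave / sub-6 TDD /
 *        sub-6 FDD configuration table, derives the filter index from the
 *        preamble format, marks the candidate PRACH slots, and saves the
 *        resulting symbol/PRB/eAxC parameters back into pConf->prach_conf
 *        for the application. When DSS is enabled, the NR or LTE context is
 *        selected via xran_tech.
 **/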
int32_t
xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx, enum xran_ran_tech xran_tech)
{
    int32_t i;
    uint8_t slotNr;
    struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);
    const xRANPrachConfigTableStruct *pxRANPrachConfigTable;
    uint8_t nNumerology = pConf->frame_conf.nNumerology;
    uint8_t nPrachConfIdx = -1; // = pPRACHConfig->nPrachConfIdx;
    struct xran_prach_cp_config *pPrachCPConfig = NULL;
    if(pConf->dssEnable){
        /* Check the RAN technology and select the NR or LTE PRACH configuration */
        if(xran_tech == XRAN_RAN_5GNR){
            pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
            nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
        }
        else{
            pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
            nPrachConfIdx = pPRACHConfig->nPrachConfIdxLTE;
        }
    }
    else{
        pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
        nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
    }
    if (nNumerology > 2)
        pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
    else if (pConf->frame_conf.nFrameDuplexType == 1)
        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];
    else
        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];

    uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
    const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];
    memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));
    if(pConf->log_level)
        printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);

    if (preambleFmrt <= 2)
    {
        pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_012;   /* PRACH preamble formats 0, 1, 2 */
    }
    else if (preambleFmrt == 3)
    {
        pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_3;     /* PRACH preamble format 3 */
    }
    else
    {
        pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC;   /* PRACH preamble formats A1~A3, B1~B4, C0, C2 */
    }
    pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
    pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
    pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
    pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
    pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
    pPrachCPConfig->x = pxRANPrachConfigTable->x;
    pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;
    pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
    pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
    if (preambleFmrt >= FORMAT_A1)
    {
        pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
        pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
    }
    else
    {
        pPrachCPConfig->numSymbol = 1;
        pPrachCPConfig->occassionsInPrachSlot = 1;
    }

    if(pConf->log_level)
        printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);
    pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
    for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
    {
        slotNr = pxRANPrachConfigTable->slotNr[i];
        if (slotNr > 0){
            pPrachCPConfig->isPRACHslot[slotNr] = 1;
            if(pConf->log_level)
                printf(" %u ..", slotNr);
        }
    }
    if(pConf->log_level)
        printf("\n");
    for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){
        p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;
        p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
    }
    if(pConf->log_level){
        printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);
    }

    pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx);
    print_dbg("PRACH eAxC_offset %d\n",  pPrachCPConfig->eAxC_offset);

    /* Save some configs for app */
    pPRACHConfig->startSymId    = pPrachCPConfig->startSymId;
    pPRACHConfig->lastSymId     = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
    pPRACHConfig->startPrbc     = pPrachCPConfig->startPrbc;
    pPRACHConfig->numPrbc       = pPrachCPConfig->numPrbc;
    pPRACHConfig->timeOffset    = pPrachCPConfig->timeOffset;
    pPRACHConfig->freqOffset    = pPrachCPConfig->freqOffset;
    pPRACHConfig->eAxC_offset   = pPrachCPConfig->eAxC_offset;

    return (XRAN_STATUS_SUCCESS);
}

/* dir = 0: convert a PHY slot id to an xran spec slot id as defined in section 5.3.2;
 * dir = 1: convert an xran slot id to a PHY slot id */
uint32_t
xran_slotid_convert(uint16_t slot_id, uint16_t dir)
{
    return slot_id;
#if 0
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
    uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
    uint8_t FR = 1;
    if (mu > 2)
        FR = 2;
    if (dir == 0)
    {
        if (FR == 1)
        {
            return (slot_id << (2-mu));
        }
        else
        {
            return (slot_id << (3-mu));
        }
    }
    else
    {
        if (FR == 1)
        {
            return (slot_id >> (2-mu));
        }
        else
        {
            return (slot_id >> (3-mu));
        }
    }
#endif
}

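/**
 * @brief Per-symbol OTA handler: runs tti_ota_cb() on the first symbol of a
 *        slot, drives U-Plane TX processing for the current symbol, then
 *        invokes any per-symbol callbacks registered for this symbol index.
 *        CPU usage is accumulated into *used_tick.
 **/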
void
sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)
{
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    long t1 = MLogXRANTick(), t2;
    long t3;

    if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){
        t3 = xran_tick();
        tti_ota_cb(NULL, (void*)p_xran_dev_ctx);
        *used_tick += get_ticks_diff(xran_tick(), t3);
    }

    t3 = xran_tick();
    if (xran_process_tx_sym(p_xran_dev_ctx))
    {
        *used_tick += get_ticks_diff(xran_tick(), t3);
    }

    /* check if there is a callback to do something else on this symbol */
    struct cb_elem_entry *cb_elm;
    LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){
        if(cb_elm){
            cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx);
            p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx);
        }
    }

    t2 = MLogXRANTick();
    MLogXRANTask(PID_SYM_OTA_CB, t1, t2);
}

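/**
 * @brief Map a job type to the lcore that should execute it, based on the
 *        number of configured workers: with no workers everything stays on
 *        the timing core; with one worker only XRAN_JOB_TYPE_OTA_CB stays on
 *        the timing core; with 2..6 workers the CP DL/UL jobs go to the
 *        worker selected by p_xran_dev_ctx->job2wrk_id[].
 * @return lcore id on which to run the job
 **/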
uint32_t
xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx)
{
    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
    uint32_t tim_lcore = 0;

    if(eth_ctx) {
        tim_lcore = eth_ctx->io_cfg.timing_core; /* default to timing core */
        if(eth_ctx->num_workers == 0) { /* no workers */
            tim_lcore = eth_ctx->io_cfg.timing_core;
        } else if (eth_ctx->num_workers == 1) { /* one worker */
            switch (job_type_id)
            {
                case XRAN_JOB_TYPE_OTA_CB:
                    tim_lcore = eth_ctx->io_cfg.timing_core;
                    break;
                case XRAN_JOB_TYPE_CP_DL:
                case XRAN_JOB_TYPE_CP_UL:
                case XRAN_JOB_TYPE_DEADLINE:
                case XRAN_JOB_TYPE_SYM_CB:
                    tim_lcore = eth_ctx->worker_core[0];
                    break;
                default:
                    print_err("incorrect job type id %d\n", job_type_id);
                    tim_lcore = eth_ctx->io_cfg.timing_core;
                    break;
            }
        } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) {
            switch (job_type_id)
            {
                case XRAN_JOB_TYPE_OTA_CB:
                    tim_lcore = eth_ctx->worker_core[0];
                    break;
                case XRAN_JOB_TYPE_CP_DL:
                    tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]];
                    break;
                case XRAN_JOB_TYPE_CP_UL:
                    tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]];
                    break;
                case XRAN_JOB_TYPE_DEADLINE:
                case XRAN_JOB_TYPE_SYM_CB:
                    tim_lcore = eth_ctx->worker_core[0];
                    break;
                default:
                    print_err("incorrect job type id %d\n", job_type_id);
                    tim_lcore = eth_ctx->io_cfg.timing_core;
                    break;
            }
        } else {
            print_err("incorrect eth_ctx->num_workers %d\n", eth_ctx->num_workers);
            tim_lcore = eth_ctx->io_cfg.timing_core;
        }
    }

    return tim_lcore;
}

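/**
 * @brief TTI (slot boundary) OTA handler: registers frame/subframe with MLog
 *        (port 0 only), publishes the TTI to process in the double-buffered
 *        timer context (next TTI for O-DU, current TTI for O-RU), arms the
 *        tti_to_phy timer when a PHY TTI callback is registered, and advances
 *        xran_lib_ota_tti with wrap-around at the maximum slot number.
 **/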
void
tti_ota_cb(struct rte_timer *tim, void *arg)
{
    uint32_t    frame_id    = 0;
    uint32_t    subframe_id = 0;
    uint32_t    slot_id     = 0;
    uint32_t    next_tti    = 0;

    uint32_t mlogVar[10];
    uint32_t mlogVarCnt = 0;
    uint64_t t1 = MLogTick();
    uint32_t reg_tti  = 0;
    uint32_t reg_sfn  = 0;

    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;

    unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx);

    MLogTask(PID_TTI_TIMER, t1, MLogTick());

    if(p_xran_dev_ctx->xran_port_id == 0){
        /* To match TTbox */
        if(xran_lib_ota_tti[0] == 0)
            reg_tti = xran_fs_get_max_slot(PortId) - 1;
        else
            reg_tti = xran_lib_ota_tti[0] - 1;

        MLogIncrementCounter();
        reg_sfn = XranGetFrameNum(reg_tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us))*10 + XranGetSubFrameNum(reg_tti, SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);
        /* subframe and slot */
        MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us)));
        MLogMark(1, t1);
    }

    slot_id     = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local));
    subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
    frame_id    = XranGetFrameNum(xran_lib_ota_tti[PortId], xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));

    pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId];

    /* tti as seen from PHY */
    int32_t nSfIdx = -1;
    uint32_t nFrameIdx;
    uint32_t nSubframeIdx;
    uint32_t nSlotIdx;
    uint64_t nSecond;
    uint8_t Numerology = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
    uint8_t nNrOfSlotInSf = 1 << Numerology;

    xran_get_slot_idx(0, &nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    nSfIdx = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*nNrOfSlotInSf
             + nSubframeIdx*nNrOfSlotInSf
             + nSlotIdx;

    mlogVar[mlogVarCnt++] = 0x11111111;
    mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId];
    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId] / 14;
    mlogVar[mlogVarCnt++] = frame_id;
    mlogVar[mlogVarCnt++] = subframe_id;
    mlogVar[mlogVarCnt++] = slot_id;
    mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId] % XRAN_N_FE_BUF_LEN;
    mlogVar[mlogVarCnt++] = nSfIdx;
    mlogVar[mlogVarCnt++] = nSfIdx % XRAN_N_FE_BUF_LEN;
    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());

    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU)
        next_tti = xran_lib_ota_tti[PortId] + 1;
    else{
        next_tti = xran_lib_ota_tti[PortId];
    }

    if(next_tti >= xran_fs_get_max_slot(PortId)){
        print_dbg("[%d]SFN %d sf %d slot %d\n", next_tti, frame_id, subframe_id, slot_id);
        next_tti = 0;
    }

    slot_id     = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
    subframe_id = XranGetSubFrameNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
    frame_id    = XranGetFrameNum(next_tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));

    print_dbg("[%d]SFN %d sf %d slot %d\n", next_tti, frame_id, subframe_id, slot_id);

    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){
        pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti;
    } else {
        pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1)^1].tti_to_process;
    }

    if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) {
        p_xran_dev_ctx->phy_tti_cb_done = 0;
        xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore);
    }
    /* the slot index is advanced to the next slot at the beginning of the current OTA slot */
    xran_lib_ota_tti[PortId]++;
    if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) {
        print_dbg("[%d]SFN %d sf %d slot %d\n", xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id);
        xran_lib_ota_tti[PortId] = 0;
    }
    MLogXRANTask(PID_TTI_CB, t1, MLogTick());
}

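/**
 * @brief Generate and send DL C-Plane sections for one slot (BBU-offload
 *        path): for the requested antenna/CC ranges, resets the section
 *        information list and, for DL-type slots, builds sections from the
 *        PRB map found in sFrontHaulTxPrbMapBbuIoBufCtrl. The nSymMask,
 *        nSymStart and nSymNum arguments are accepted but not evaluated here.
 **/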
int32_t
xran_prepare_cp_dl_slot(uint16_t xran_port_id, uint32_t nSlotIdx,  uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
                            uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
{
    long t1 = MLogXRANTick();
    int32_t ret = XRAN_STATUS_SUCCESS;
    int tti, buf_id;
    uint32_t slot_id, subframe_id, frame_id;
    int cc_id;
    uint8_t ctx_id;
    uint8_t ant_id, num_eAxc, num_CCPorts;
    void *pHandle;
    //int num_list;
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
    if(unlikely(!p_xran_dev_ctx))
    {
        print_err("Null xRAN context!!\n");
        return ret;
    }
    //struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
    pHandle     = p_xran_dev_ctx;

    num_eAxc    = xran_get_num_eAxc(pHandle);
    num_CCPorts = xran_get_num_cc(pHandle);

    if(first_call && p_xran_dev_ctx->enableCP)
    {
        tti    = nSlotIdx; //pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
        buf_id = tti % XRAN_N_FE_BUF_LEN;

        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
        subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
        frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
        if (tti == 0)
        {
            /* Wrap around to next second */
            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
        }

        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;

        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
#if defined(__INTEL_COMPILER)
#pragma vector always
#endif
        for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum)  && ant_id < num_eAxc); ++ant_id) {
            for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) {
                /* start new section information list */
                xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
                        if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData) {
                            /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
                                (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
                                &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]),
                                p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                        } else {
                            print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
                        }
                    } else {
                        print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
                    }
                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
        MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick());
    }
    return ret;
}

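/**
 * @brief Timer callback for DL C-Plane transmission (non-BBU-offload path):
 *        same section generation as xran_prepare_cp_dl_slot(), but for all
 *        antennas/CCs of the port with the TTI taken from the timer context;
 *        the section information list is reset only when no symbols remain
 *        from the previous slot (prbElmProcInfo.numSymsRemaining == 0).
 **/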
void
tx_cp_dl_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    int tti, buf_id;
    uint32_t slot_id, subframe_id, frame_id;
    int cc_id;
    uint8_t ctx_id;
    uint8_t ant_id, num_eAxc, num_CCPorts;
    void *pHandle;
    //int num_list;
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;

    if(unlikely(!p_xran_dev_ctx))
    {
        print_err("Null xRAN context!!\n");
        return;
    }

    if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
        return;

    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
    pHandle     = p_xran_dev_ctx;

    num_eAxc    = xran_get_num_eAxc(pHandle);
    num_CCPorts = xran_get_num_cc(pHandle);

    if(first_call && p_xran_dev_ctx->enableCP)
    {
        tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
        buf_id = tti % XRAN_N_FE_BUF_LEN;

        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
        subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
        frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
        if (tti == 0)
        {
            /* Wrap around to next second */
            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
        }

        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;

        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
            for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {
                if(0 == p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id].numSymsRemaining)
                { /* Start of new slot - reset the section info */
                    xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
                }
                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
                        if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
                            /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
                                (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
                                &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]),
                                p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                        }
                        else
                            print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
                    }
                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
        MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick());
    }
}

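/**
 * @brief Deadline callback for SRS reception when SRS C-Plane is disabled
 *        (static SRS): indicates the full-slot SRS data of the previous TTI
 *        to the application via pSrsCallback per CC.
 **/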
void
rx_ul_static_srs_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status = 0;
    int32_t rx_tti = 0; // = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    int32_t cc_id = 0;
    //uint32_t nFrameIdx;
    //uint32_t nSubframeIdx;
    //uint32_t nSlotIdx;
    //uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;

    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];

    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    if(rx_tti == 0)
        rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id) - 1);
    else
        rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */

    /* U-Plane */
    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {

        if(0 == p_xran_dev_ctx->enableSrsCp)
        {
            if(p_xran_dev_ctx->pSrsCallback[cc_id]){
                struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
                if(pTag) {
                    //pTag->cellId = cc_id;
                    pTag->slotiId = rx_tti;
                    pTag->symbol  = XRAN_FULL_CB_SYM; /* the last (7th) sym denotes a full slot of symbols */
                    p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
                }
            }
        }
    }
    MLogXRANTask(PID_UP_STATIC_SRS_DEAD_LINE_CB, t1, MLogXRANTick());
}

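/**
 * @brief Deadline callback at one fourth of the UL slot: indicates received
 *        U-Plane data for the TTI from the timer context via pCallback per
 *        CC, unless the per-packet callback already fired for that buffer,
 *        then runs the XRAN_CB_HALF_SLOT_RX TTI callback. The half and
 *        three-fourths variants below differ only in the symbol tag and
 *        MLog task id they report.
 **/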
void
rx_ul_deadline_one_fourths_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status;
    /* one fourth of RX for the current TTI as measured against current OTA time */
    int32_t rx_tti;
    int32_t cc_id;
    //uint32_t nFrameIdx;
    //uint32_t nSubframeIdx;
    //uint32_t nSlotIdx;
    //uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;
    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
           + nSlotIdx;*/
    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
            if(p_xran_dev_ctx->pCallback[cc_id]) {
                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
                if(pTag) {
                    //pTag->cellId = cc_id;
                    pTag->slotiId = rx_tti;
                    pTag->symbol  = XRAN_ONE_FOURTHS_CB_SYM;
                    status = XRAN_STATUS_SUCCESS;

                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
                }
            }
        } else {
            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
        }
    }

    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
        }else{
            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
        }
    }

    MLogXRANTask(PID_UP_UL_ONE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick());
}

void
rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status;
    /* half of RX for the current TTI as measured against current OTA time */
    int32_t rx_tti;
    int32_t cc_id;
    //uint32_t nFrameIdx;
    //uint32_t nSubframeIdx;
    //uint32_t nSlotIdx;
    //uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;
    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
           + nSlotIdx;*/
    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
            if(p_xran_dev_ctx->pCallback[cc_id]) {
                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
                if(pTag) {
                    //pTag->cellId = cc_id;
                    pTag->slotiId = rx_tti;
                    pTag->symbol  = XRAN_HALF_CB_SYM;
                    status = XRAN_STATUS_SUCCESS;

                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
                }
            }
        } else {
            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
        }
    }

    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
        }else{
            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
        }
    }

    MLogXRANTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogXRANTick());
}

void
rx_ul_deadline_three_fourths_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status;
    /* three fourths of RX for the current TTI as measured against current OTA time */
    int32_t rx_tti;
    int32_t cc_id;
    //uint32_t nFrameIdx;
    //uint32_t nSubframeIdx;
    //uint32_t nSlotIdx;
    //uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;
    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
           + nSlotIdx;*/
    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
            if(p_xran_dev_ctx->pCallback[cc_id]) {
                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
                if(pTag) {
                    //pTag->cellId = cc_id;
                    pTag->slotiId = rx_tti;
                    pTag->symbol  = XRAN_THREE_FOURTHS_CB_SYM;
                    status = XRAN_STATUS_SUCCESS;

                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
                }
            }
        } else {
            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
        }
    }

    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
        }else{
            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
        }
    }

    MLogXRANTask(PID_UP_UL_THREE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick());
}

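/**
 * @brief Deadline callback at the end of the UL slot: indicates full-slot
 *        U-Plane, PRACH and (when SRS C-Plane is enabled) SRS data of the
 *        previous TTI to the application, then runs the
 *        XRAN_CB_FULL_SLOT_RX TTI callback.
 **/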
void
rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
    xran_status_t status = 0;
    int32_t rx_tti = 0; // = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    int32_t cc_id = 0;
    //uint32_t nFrameIdx;
    //uint32_t nSubframeIdx;
    //uint32_t nSlotIdx;
    //uint64_t nSecond;
    struct xran_timer_ctx* p_timer_ctx = NULL;

    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
        return;

    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
        + nSubframeIdx*SLOTNUM_PER_SUBFRAME
        + nSlotIdx;*/
    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];

    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
        p_xran_dev_ctx->timer_put = 0;

    rx_tti = p_timer_ctx->tti_to_process;
#if 1
    if(rx_tti == 0)
        rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id) - 1);
    else
        rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
#endif
    /* U-Plane */
    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
        if(p_xran_dev_ctx->pCallback[cc_id]){
            struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
            if(pTag) {
                //pTag->cellId = cc_id;
                pTag->slotiId = rx_tti;
                pTag->symbol  = XRAN_FULL_CB_SYM; /* the last (7th) sym denotes a full slot of symbols */
                status = XRAN_STATUS_SUCCESS;
                p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
            }
        }

        if(p_xran_dev_ctx->pPrachCallback[cc_id]){
            struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
            if(pTag) {
                //pTag->cellId = cc_id;
                pTag->slotiId = rx_tti;
                pTag->symbol  = XRAN_FULL_CB_SYM; /* the last (7th) sym denotes a full slot of symbols */
                p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
            }
        }

        if(p_xran_dev_ctx->enableSrsCp)
        {
            if(p_xran_dev_ctx->pSrsCallback[cc_id]){
                struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
                if(pTag) {
                    //pTag->cellId = cc_id;
                    pTag->slotiId = rx_tti;
                    pTag->symbol  = XRAN_FULL_CB_SYM; /* the last (7th) sym denotes a full slot of symbols */
                    p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
                }
            }
        }
    }

    /* user callbacks if any */
    if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){
        if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){
            p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]);
        }else{
            p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--;
        }
    }

    MLogXRANTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogXRANTick());
}

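/**
 * @brief User-registered per-symbol callback dispatcher: translates the
 *        captured OTA symbol index by sym_diff (positive = advance into the
 *        TX window, negative = delay into the RX window, with wrap-around at
 *        xran_max_ota_sym_idx), optionally fills the xran_sense_of_time
 *        structure, and invokes the user symbol callback.
 **/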
void
rx_ul_user_sym_cb(struct rte_timer *tim, void *arg)
{
    long t1 = MLogXRANTick();
    struct xran_device_ctx * p_dev_ctx = NULL;
    struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg;
    int32_t rx_tti = 0; // (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
    uint32_t interval, ota_sym_idx = 0;
    uint8_t nNumerology = 0;
    struct xran_timer_ctx* p_timer_ctx =  NULL;

    if(p_sym_cb_ctx->p_dev)
        p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev;
    else
        rte_panic("p_sym_cb_ctx->p_dev == NULL");

    if(p_dev_ctx->xran2phy_mem_ready == 0)
        return;
    nNumerology = xran_get_conf_numerology(p_dev_ctx);
    interval = p_dev_ctx->interval_us_local;

    p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX];
    if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX)
        p_sym_cb_ctx->user_timer_get = 0;

    rx_tti = p_timer_ctx->tti_to_process;

    if( p_sym_cb_ctx->sym_diff > 0)
        /* + advance TX window: at OTA time we indicate an event in the future */
        ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology));
    else if (p_sym_cb_ctx->sym_diff < 0) {
        /* - delay RX window: at OTA time we indicate an event in the past */
        if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) {
            ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff;
        } else {
            ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology);
        }
    } else /* 0 - exact OTA time */
        ota_sym_idx = p_timer_ctx->ota_sym_idx;

    rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);

    if(p_sym_cb_ctx->symCbTimeInfo) {
        struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo;
        p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id;
        p_sense_time->nSymIdx       = p_sym_cb_ctx->symb_num_req;
        p_sense_time->tti_counter   = rx_tti;
        p_sense_time->nSlotIdx      = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval));
        p_sense_time->nSubframeIdx  = (uint32_t)XranGetSubFrameNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
        p_sense_time->nFrameIdx     = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start, SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
        p_sense_time->nSecond       = p_timer_ctx->current_second;
    }

    /* user callbacks if any */
    if(p_sym_cb_ctx->symCb){
        p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo);
    }

    MLogXRANTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogXRANTick());
}

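/**
 * @brief Generate and send UL C-Plane sections for one slot (BBU-offload
 *        path): general uplink sections from the RX PRB map for the requested
 *        antenna/CC ranges, PRACH sections (one C-Plane message per occasion)
 *        when the frame/slot matches the PRACH pattern, and SRS sections per
 *        antenna element when SRS C-Plane is enabled.
 **/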
int32_t
xran_prepare_cp_ul_slot(uint16_t xran_port_id, uint32_t nSlotIdx,  uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
                            uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
{
    int32_t ret = XRAN_STATUS_SUCCESS;
    long t1 = MLogXRANTick();
    int tti, buf_id;
    uint32_t slot_id, subframe_id, frame_id;
    int32_t cc_id;
    int ant_id, port_id;
    uint16_t occasionid;
    uint16_t beam_id;
    uint8_t num_eAxc, num_CCPorts;
    uint8_t ctx_id;

    void *pHandle;
    uint32_t interval;
    uint8_t PortId;

    //struct xran_timer_ctx *pTCtx;
    struct xran_buffer_list *pBufList;
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
    if(unlikely(!p_xran_dev_ctx))
    {
        print_err("Null xRAN context!!\n");
        return ret;
    }

    if(first_call && p_xran_dev_ctx->enableCP)
    {
        pHandle     = p_xran_dev_ctx;
        //pTCtx     = &p_xran_dev_ctx->timer_ctx[0];
        interval    = p_xran_dev_ctx->interval_us_local;
        PortId      = p_xran_dev_ctx->xran_port_id;
        tti         = nSlotIdx; //pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;

        buf_id      = tti % XRAN_N_FE_BUF_LEN;
        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
        subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
        frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));

        /* Wrap around to next second */
        if(tti == 0)
            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
        if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
            num_eAxc = xran_get_num_eAxc(pHandle);
        else
            num_eAxc = xran_get_num_eAxcUl(pHandle);
        num_CCPorts = xran_get_num_cc(pHandle);

        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);

        /* General Uplink */
#if defined(__INTEL_COMPILER)
#pragma vector always
#endif
        for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum)  && ant_id < num_eAxc); ++ant_id) {
            for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) {
                /* start new section information list */
                xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1)
                {
                    pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
                    if(pBufList->pBuffers && pBufList->pBuffers->pData)
                    {
                        ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
                                        (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
                                        p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                    }
                }
            }
        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */

        /* PRACH */
        if(p_xran_dev_ctx->enablePrach)
        {
            struct xran_prach_cp_config *pPrachCPConfig = NULL;
            /* when DSS is enabled, select the NR or LTE PRACH config based on the technology of this TTI */
            if(p_xran_dev_ctx->dssEnable){
                int i = tti % p_xran_dev_ctx->dssPeriod;
                if(p_xran_dev_ctx->technology[i] == 1) {
                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
                }
                else{
                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
                }
            }
            else{
                pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
            }
            uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);

            if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
                && (is_prach_slot == 1))
            {
                for(ant_id = 0; ant_id < num_eAxc; ant_id++)
                {
                    port_id = ant_id + pPrachCPConfig->eAxC_offset;
                    for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
                    {
                        /* start new section information list */
                        xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
                        for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
                        {
                            struct xran_cp_gen_params params;
                            struct xran_section_gen_info sect_geninfo[8];
                            struct xran_section_info sectInfo[8];
                            for(int secId = 0; secId < 8; secId++)
                                sect_geninfo[secId].info = &sectInfo[secId];
                            struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
                            uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);

                            beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
                            ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
                                        frame_id, subframe_id, slot_id, tti,
                                        beam_id, cc_id, port_id, occasionid, seqid);
                            if(ret == XRAN_STATUS_SUCCESS)
                                send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
                                        cc_id, port_id, seqid);
                        }
                    }
                }
            }
        } /* if(p_xran_dev_ctx->enablePrach) */

        /* SRS */
        if(p_xran_dev_ctx->enableSrsCp)
        {
            struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);

            for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
            {
                port_id = ant_id + pSrsCfg->eAxC_offset;
                for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
                {
                    /* start new section information list */
                    xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
                    if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
                    {
                        pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
                        if(pBufList->pBuffers && pBufList->pBuffers->pData)
                        {
                            ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
                                            (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
                                            p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                        }
                    }
                }
            }
        } /* if(p_xran_dev_ctx->enableSrsCp) */

        MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
    } /* if(first_call && p_xran_dev_ctx->enableCP) */

    return ret;
}

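/**
 * @brief Timer callback for UL C-Plane transmission (non-BBU-offload path):
 *        mirrors the uplink and PRACH section generation of
 *        xran_prepare_cp_ul_slot(), but for all antennas/CCs of the port
 *        with the TTI taken from the double-buffered timer context.
 **/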
1217 void
1218 tx_cp_ul_cb(struct rte_timer *tim, void *arg)
1219 {
1220     long t1 = MLogXRANTick();
1221     int tti, buf_id;
1222     int ret;
1223     uint32_t slot_id, subframe_id, frame_id;
1224     int32_t cc_id;
1225     int ant_id, port_id;
1226     uint16_t occasionid = 0;
1227     uint16_t beam_id;
1228     uint8_t num_eAxc, num_CCPorts;
1229     uint8_t ctx_id;
1230
1231     void *pHandle;
1232     uint32_t interval;
1233     uint8_t PortId;
1234
1235     struct xran_timer_ctx *pTCtx;
1236     struct xran_buffer_list *pBufList;
1237     struct xran_device_ctx *p_xran_dev_ctx;
1238
1239     if(unlikely(!arg))
1240     {
1241         print_err("Null xRAN context!!\n");
1242         return;
1243     }
1244
1245     p_xran_dev_ctx  = (struct xran_device_ctx *)arg;
1246
1247     if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
1248         return;
1249
1250     /* */
1251     if(first_call && p_xran_dev_ctx->enableCP)
1252     {
1253         pHandle     = p_xran_dev_ctx;
1254         pTCtx       = &p_xran_dev_ctx->timer_ctx[0];
1255         interval    = p_xran_dev_ctx->interval_us_local;
1256         PortId      = p_xran_dev_ctx->xran_port_id;
1257     tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
1258
1259     buf_id = tti % XRAN_N_FE_BUF_LEN;
1260         ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
1261     slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1262     subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
1263     frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1264
1265         /* Wrap around to next second */
1266         if(tti == 0)
1267             frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
1268     if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
1269         num_eAxc    = xran_get_num_eAxc(pHandle);
1270     else
1271         num_eAxc    = xran_get_num_eAxcUl(pHandle);
1272     num_CCPorts = xran_get_num_cc(pHandle);
1273
1274         print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
1275
1276         /* General Uplink */
1277         for(ant_id = 0; ant_id < num_eAxc; ant_id++)
1278         {
1279             for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1280             {
1281                 /* start new section information list */
1282                 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
1283                 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1)
1284                 {
1285                     pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
1286                     if(pBufList->pBuffers && pBufList->pBuffers->pData)
1287                     {
1288                         ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
1289                                         (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
1290                                         p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1291                     }
1292                 }
1293             }
1294         } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
1295
1296         /* PRACH */
1297         if(p_xran_dev_ctx->enablePrach)
1298         {
1299             struct xran_prach_cp_config *pPrachCPConfig = NULL;
1300             /* When DSS is enabled, select the NR or LTE PRACH C-Plane configuration based on the technology of this TTI */
1301             if(p_xran_dev_ctx->dssEnable) {
1302                 int i = tti % p_xran_dev_ctx->dssPeriod;
1303                 if(p_xran_dev_ctx->technology[i] == 1) {
1304                     pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1305                 }
1306                 else {
1307                     pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
1308                 }
1309             }
1310             else {
1311                 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1312             }
1313
1314             uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
1315
1316             if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
1317                 && (is_prach_slot==1))
1318             {
1319                 for(ant_id = 0; ant_id < num_eAxc; ant_id++)
1320                 {
1321                     port_id = ant_id + pPrachCPConfig->eAxC_offset;
1322                     for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1323                     {
1324                         /* start new section information list */
1325                         xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
1326 #ifndef FCN_ADAPT
1327                         /* For FCN builds, C-Plane is sent only for the first PRACH occasion */
1328                         for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
1329 #endif
1330                         {
1331                             struct xran_cp_gen_params params;
1332                             struct xran_section_gen_info sect_geninfo[8];
1333                             struct xran_section_info sectInfo[8];
1334                             for(int secId = 0; secId < 8; secId++)
1335                                 sect_geninfo[secId].info = &sectInfo[secId];
1336
1337                             struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
1338                             uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
1339
1340                             beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
1341                             ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
1342                                             frame_id, subframe_id, slot_id, tti,
1343                                             beam_id, cc_id, port_id, occasionid, seqid);
1344                             if (ret == XRAN_STATUS_SUCCESS)
1345                                 send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
1346                                             cc_id, port_id, seqid);
1347                         }
1348                     }
1349                 }
1350             }
1351         } /* if(p_xran_dev_ctx->enablePrach) */
1352
1353         /* SRS */
1354         if(p_xran_dev_ctx->enableSrsCp)
1355         {
1356             struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
1357
1358             for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
1359             {
1360                 port_id = ant_id + pSrsCfg->eAxC_offset;
1361                 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1362                 {
1363                     /* start new section information list */
1364                     xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
1365                     if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
1366                     {
1367                         pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
1368                         if(pBufList->pBuffers && pBufList->pBuffers->pData)
1369                         {
1370                             ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
1371                                             (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
1372                                             p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1373                         }
1374                     }
1375                 }
1376             }
1377         } /* if(p_xran_dev_ctx->enableSrsCp) */
1378
1379         MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
1380     } /* if(first_call && p_xran_dev_ctx->enableCP) */
1381 }
1382
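/**
 * @brief Per-TTI rte_timer callback towards L1. Until the end of the first
 *        full radio frame it only waits for the last slot of the maximum
 *        frame to raise first_call; afterwards it invokes the registered
 *        XRAN_CB_TTI callback, honoring any configured SkipTti count.
 */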
1383 void
1384 tti_to_phy_cb(struct rte_timer *tim, void *arg)
1385 {
1386     long t1 = MLogTick();
1387     struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
1388     uint32_t interval = p_xran_dev_ctx->interval_us_local;
1389
1390     p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */
1391     if (first_call){
1392         if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
1393             if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){
1394                 p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);
1395             }else{
1396                 p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;
1397             }
1398         }
1399     } else {
1400         if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
1401             int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT);
1402             uint32_t slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1403             uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
1404             uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1405             if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME(interval)-1)) {  //(tti == xran_fs_get_max_slot()-1)
1406                 first_call = 1;
1407             }
1408         }
1409     }
1410
1411     MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
1412 }
1413
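/**
 * @brief Main loop of the timing source thread. Pins itself to the configured
 *        timing core with SCHED_FIFO priority, aligns to the top of a second
 *        (ToS), then polls symbol ticks and drives sym_ota_cb() for every
 *        active xRAN port until the interface is stopped. Also accumulates
 *        the tick statistics reported by xran_get_time_stats().
 */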
1414 int32_t
1415 xran_timing_source_thread(void *args)
1416 {
1417     int res = 0;
1418     cpu_set_t cpuset;
1419     int32_t   result1;
1420     uint32_t xran_port_id = 0;
1421     static int owdm_init_done = 0;
1422     struct sched_param sched_param;
1423     struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args ;
1424     uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;
1425     struct xran_device_ctx * p_dev_ctx_run = NULL;
1426     /* ToS = Top of Second start +- 1.5us */
1427     struct timespec ts;
1428     char thread_name[32];
1429     char buff[100];
1430
1431     printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());
1432     memset(&sched_param, 0, sizeof(struct sched_param));
1433     /* pin the timing thread to the configured timing core */
1434     sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1435     CPU_ZERO(&cpuset);
1436     CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);
1437
1438     if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)))
1439     {
1440         printf("pthread_setaffinity_np failed: coreId = %d, result1 = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
1441     }
1442     if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
1443     {
1444         printf("priority is not changed: coreId = %d, result1 = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
1445     }
1446
1447     snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id());
1448     if ((res = pthread_setname_np(pthread_self(), thread_name))) {
1449         printf("[core %d] pthread_setname_np = %d\n", rte_lcore_id(), res);
1450     }
1451
1452     printf("TTI interval %ld [us]\n", interval_us);
1453
1454     if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) {
1455         if ((res = xran_timing_create_cbs(args)) < 0) {
1456             return res;
1457         }
1458     }
1459
1460     do {
1461         timespec_get(&ts, TIME_UTC);
1462     } while (ts.tv_nsec > 1500);
1463
1464     struct tm * ptm = gmtime(&ts.tv_sec);
1465     if(ptm) {
1466         strftime(buff, sizeof buff, "%D %T", ptm);
1467         printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n",
1468             (p_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU" : "O-RU"), buff, ts.tv_nsec, interval_us);
1469     }
1470
1471     do {
1472        timespec_get(&ts, TIME_UTC);
1473     }while (ts.tv_nsec == 0);
1474
1475     p_dev_ctx->timing_source_thread_running = 1;
1476     while(1) {
1477
1478         /* Once one-way delay measurement (OWDM) results are available and the interface
1479            is running, adjust the timing windows and create the timing callbacks (one time) */
1480         if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) && (!owdm_init_done) && unlikely(XRAN_RUNNING == xran_if_current_state)) {
1481             /* Adjust windows based on the delay measurement results */
1482             xran_adjust_timing_parameters(p_dev_ctx);
1483             if ((res = xran_timing_create_cbs(args)) < 0) {
1484                 return res;
1485             }
1486             printf("TTI interval %ld [us]\n", interval_us);
1487             owdm_init_done = 1;
1488         }
1489
1490
1491
1492         /* Update Usage Stats */
1493         tWake = xran_tick();
1494         xran_used_tick += tUsed;
1495         if (tWakePrev)
1496         {
1497             xran_total_tick += get_ticks_diff(tWake, tWakePrev);
1498         }
1499         tWakePrev = tWake;
1500         tUsed = 0;
1501
1502         int64_t delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);
1503         if (XRAN_STOPPED == xran_if_current_state)
1504             break;
1505
1506         if (delta > 3E5 && tUsed > 0) /* 300 us, about 9 symbols */
1507         {
1508             print_err("poll_next_tick too long, delta:%ld(ns), tUsed:%ld(tick)", delta, tUsed);
1509         }
1510
1511         if (likely(XRAN_RUNNING == xran_if_current_state)) {
1512             for(xran_port_id =  0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) {
1513                 p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id);
1514                 if(p_dev_ctx_run) {
1515                     if(p_dev_ctx_run->xran_port_id == xran_port_id) {
1516                         if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id])
1517                         {
1518                             sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed);
1519                             xran_lib_ota_sym[xran_port_id]++;
1520                             if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT)
1521                                 xran_lib_ota_sym[xran_port_id]=0;
1522                         }
1523                     }
1524                     else {
1525                         rte_panic("p_dev_ctx_run->xran_port_id != xran_port_id");
1526                     }
1527                 }
1528             }
1529         }
1530     }
1531
1532     xran_timing_destroy_cbs(args);
1533     printf("Closing timing source thread...\n");
1534     return res;
1535 }
1536
1537 /* Handle the eCPRI ethertype: demultiplex received packets into IQ data, C-Plane and delay-measurement groups and dispatch each. */
1538 #define MBUFS_CNT 16
1539
1540 int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num)
1541 {
1542     struct rte_mbuf *pkt;
1543     uint16_t i;
1544     struct rte_ether_hdr* eth_hdr;
1545     struct xran_ecpri_hdr* ecpri_hdr;
1546     unsigned long t1;
1547     int32_t ret = MBUF_FREE;
1548     uint32_t ret_data[MBUFS_CNT] = { [0 ... MBUFS_CNT-1] = MBUF_FREE }; /* initialize every entry to MBUF_FREE */
1549     struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id);
1550     uint16_t num_data = 0, num_control = 0, num_meas = 0;
1551     struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT];
1552     static uint32_t owdm_rx_first_pass = 1;
1553
1554     if (p_dev_ctx == NULL)
1555         return ret;
1556
1557     for (i = 0; i < num; i++)
1558     {
1559         pkt = pkt_q[i];
1560
1561 //        rte_prefetch0(rte_pktmbuf_mtod(pkt, void*));
1562
1563         rte_pktmbuf_adj(pkt, sizeof(*eth_hdr));
1564         ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);
1565
1566         p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
1567
1568         pkt_adj[i] = pkt;
1569         switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type)
1570         {
1571             case ECPRI_IQ_DATA:
1572                 pkt_data[num_data++] = pkt;
1573                 break;
1574             /* For RU emulation */
1575             case ECPRI_RT_CONTROL_DATA:
1576                 pkt_control[num_control++] = pkt;
1577                 break;
1578             case ECPRI_DELAY_MEASUREMENT:
1579                 if (owdm_rx_first_pass != 0)
1580                 {
1581                     /* Initialize and verify that Payload Length is in range */
1582                     xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx);
1583                     owdm_rx_first_pass = 0;
1584
1585                 }
1586                 pkt_meas[num_meas++] = pkt;
1587                 break;
1588             default:
1589                 if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
1590                     print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
1591                 }
1592                 break;
1593         }
1594     }
1595
1596     if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* w/a for Cat A issue */
1597     {
1598         for (i = 0; i < MBUFS_CNT; i++)
1599         {
1600             ret_data[i] = MBUF_FREE;
1601         }
1602
1603         if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
1604         {
1605             if (p_dev_ctx->xran2phy_mem_ready != 0)
1606                 ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data);
1607             for (i = 0; i < MBUFS_CNT; i++)
1608             {
1609                 if (ret_data[i] == MBUF_FREE)
1610                     rte_pktmbuf_free(pkt_data[i]);
1611             }
1612         }
1613         else
1614         {
1615             for (i = 0; i < MBUFS_CNT; i++)
1616             {
1617                 if (ret_data[i] == MBUF_FREE)
1618                     rte_pktmbuf_free(pkt_data[i]);
1619             }
1620             print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
1621         }
1622     }
1623     else
1624     {
1625         for (i = 0; i < num_data; i++)
1626         {
1627             ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid);
1628             if (ret == MBUF_FREE)
1629                 rte_pktmbuf_free(pkt_data[i]);
1630         }
1631
1632         for (i = 0; i < num_control; i++)
1633         {
1634             t1 = MLogXRANTick();
1635             if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
1636             {
1637                 ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
1638                 p_dev_ctx->fh_counters.rx_counter++;
1639                 if (ret == MBUF_FREE)
1640                     rte_pktmbuf_free(pkt_control[i]);
1641             }
1642             else
1643             {
1644                 print_err("O-DU received C-Plane message!");
1645             }
1646             MLogXRANTask(PID_PROCESS_CP_PKT, t1, MLogXRANTick());
1647         }
1648
1649         for (i = 0; i < num_meas; i++)
1650         {
1651
1652             /*if(p_dev_ctx->fh_init.io_cfg.id == O_RU)
1653                 printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64" %d\n", xport_id, (int64_t*)p_dev_ctx, num_meas);*/
1654             t1 = MLogXRANTick();
1655             if(xran_if_current_state != XRAN_RUNNING)
1656                 ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id);
1657             else
1658                 ret = MBUF_FREE;
1659             if (ret == MBUF_FREE)
1660                 rte_pktmbuf_free(pkt_meas[i]);
1661             MLogXRANTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogXRANTick());
1662         }
1663     }
1664
1665     return MBUF_FREE;
1666 }
1667
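/**
 * @brief Worker loop that services DPDK timers: calls rte_timer_manage()
 *        whenever more than TIMER_RESOLUTION_CYCLES TSC cycles have elapsed,
 *        until the interface is stopped.
 */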
1668 int32_t
1669 xran_packet_and_dpdk_timer_thread(void *args)
1670 {
1671     //struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1672
1673     uint64_t prev_tsc = 0;
1674     uint64_t cur_tsc = rte_rdtsc();
1675     uint64_t diff_tsc = cur_tsc - prev_tsc;
1676     struct sched_param sched_param;
1677     int res = 0;
1678     printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());
1679
1680     memset(&sched_param, 0, sizeof(struct sched_param));
1681     sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1682
1683     if ((res  = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
1684     {
1685         printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
1686     }
1687
1688     while(1){
1689
1690         cur_tsc  = rte_rdtsc();
1691         diff_tsc = cur_tsc - prev_tsc;
1692         if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
1693             rte_timer_manage();
1694             prev_tsc = cur_tsc;
1695         }
1696
1697         if (XRAN_STOPPED == xran_if_current_state)
1698             break;
1699     }
1700
1701     printf("Closing pkts timer thread...\n");
1702     return 0;
1703 }
1704
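/**
 * @brief Apply default values for the common eCPRI one-way delay measurement
 *        settings (10 ms response timeout, 8 averaged samples) when the
 *        application has not configured them.
 */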
1705 void xran_initialize_ecpri_owd_meas_cmn( struct xran_io_cfg *ptr)
1706 {
1707 //    ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0
1708 //    ptr->eowd_cmn.filterType = 0;  // 0 Simple average based on number of measurements
1709     // Set default values if the Timeout and numberOfSamples are not set
1710     if ( ptr->eowd_cmn[ptr->id].responseTo == 0)
1711         ptr->eowd_cmn[ptr->id].responseTo = 10E6; // 10 ms timeout expressed in ns
1712     if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0)
1713         ptr->eowd_cmn[ptr->id].numberOfSamples = 8; // Number of samples to be averaged
1714 }
1715 void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr )
1716 {
1717    /* This function initializes one_way delay measurements on a per port basis,
1718       most variables default to zero    */
1719    ptr->eowd_port[ptr->id][i].portid = (uint8_t)i;
1720 }
1721
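/**
 * @brief One-time initialization of the xRAN library: validates the port count,
 *        allocates and initializes a device context per O-xU port, applies I/O
 *        configuration defaults, brings up the DPDK Ethernet layer and registers
 *        the eCPRI ethertype handler.
 *
 * Illustrative call sequence (a sketch only; the variable names and error
 * handling below are the caller's, not part of this API):
 *
 *     struct xran_fh_init fh_init;      // populated by the application
 *     void *xranHandle = NULL;
 *     if (xran_init(argc, argv, &fh_init, "app", &xranHandle) != XRAN_STATUS_SUCCESS)
 *         return -1;
 */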
1722 int32_t
1723 xran_init(int argc, char *argv[],
1724            struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
1725 {
1726     int32_t ret = XRAN_STATUS_SUCCESS;
1727     int32_t i;
1728     int32_t j;
1729     int32_t o_xu_id = 0;
1730     struct xran_io_cfg      *p_io_cfg       = NULL;
1731     struct xran_device_ctx * p_xran_dev_ctx = NULL;
1732     int32_t  lcore_id = 0;
1733     const char *version = rte_version();
1734
1735     if (version == NULL)
1736         rte_panic("version == NULL");
1737
1738     printf("'%s'\n", version);
1739
1740     if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) {
1741         ret = XRAN_STATUS_INVALID_PARAM;
1742         print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret);
1743         return ret;
1744     }
1745     mlogxranenable = p_xran_fh_init->mlogxranenable;
1746     p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
1747
1748     if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) {
1749         print_err("context allocation error [%d]\n", ret);
1750         return ret;
1751     }
1752
1753     for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1754         p_xran_dev_ctx  = xran_dev_get_ctx_by_id(o_xu_id);
1755         memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
1756         p_xran_dev_ctx->xran_port_id  = o_xu_id;
1757
1758         /* copy init config */
1759         p_xran_dev_ctx->fh_init = *p_xran_fh_init;
1760         printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
1761
1762         memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
1763         /* make sure the default send functions are reset */
1764         p_xran_dev_ctx->send_upmbuf2ring    = NULL;
1765         p_xran_dev_ctx->send_cpmbuf2ring    = NULL;
1766         /* initialize eCPRI one-way delay measurement common variables to their default values */
1767         xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
1768     }
1769
1770     /* default values if not set */
1771     if(p_io_cfg->nEthLinePerPort == 0)
1772         p_io_cfg->nEthLinePerPort = 1;
1773
1774     if(p_io_cfg->nEthLineSpeed == 0)
1775         p_io_cfg->nEthLineSpeed = 25;
1776
1777     /** at least 1 RX Q */
1778     if(p_io_cfg->num_rxq == 0)
1779         p_io_cfg->num_rxq = 1;
1780
1781     if (p_io_cfg->id == 1) {
1782         /* 1 HW for O-RU */
1783         p_io_cfg->num_rxq =  1;
1784     }
1785
1786 #if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
1787     if (p_io_cfg->num_rxq > 1){
1788         p_io_cfg->num_rxq =  1;
1789         printf("%s does support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
1790     }
1791 #endif
1792     printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
1793     printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
1794     printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
1795
1796     if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort * (2 - 1 * p_io_cfg->one_vf_cu_plane) != p_io_cfg->num_vfs) {
1797         print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. [provided %d]\n",
1798             p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort,
1799             p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort * (2 - 1 * p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs);
1800     }
1801
1802     xran_if_current_state = XRAN_INIT;
1803     xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
1804     if (p_io_cfg->id == 0)
1805         xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1806                            p_io_cfg,
1807                            &lcore_id,
1808                            (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1809                            (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1810                            p_xran_dev_ctx->fh_init.mtu);
1811     else
1812         xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1813                            p_io_cfg,
1814                            &lcore_id,
1815                            (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1816                            (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1817                            p_xran_dev_ctx->fh_init.mtu);
1818
1819     for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1820         p_xran_dev_ctx  = xran_dev_get_ctx_by_id(o_xu_id);
1821
1822         for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ )
1823             rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]);
1824
1825         rte_timer_init(&p_xran_dev_ctx->sym_timer);
1826         for (i = 0; i < MAX_NUM_OF_DPDK_TIMERS; i++)
1827             rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]);
1828
1829         p_xran_dev_ctx->direct_pool   = socket_direct_pool;
1830         p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
1831
1832
1833         for (j = 0; j < XRAN_NUM_OF_SYMBOL_PER_SLOT; j++) {
1834             LIST_INIT(&p_xran_dev_ctx->sym_cb_list_head[j]);
1835         }
1836
1837     }
1838
1839     for (i = 0; i < XRAN_PORTS_NUM; i++) {
1840         for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++) {
1841             xran_fs_clear_slot_type(i, nCellIdx);
1842         }
1843     }
1844
1845     *pXranLayerHandle = xran_dev_get_ctx();
1846
1847
1848     // The ecpri initialization loop needs to be done per pf and vf (Outer loop pf and inner loop vf)
1849     for (i=0;  i< p_io_cfg->num_vfs; i++)
1850     {
1851         /* Initialize ecpri one-way delay measurement info on a per vf port basis */
1852         xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg);
1853     }
1854
1855     return ret;
1856 }
1857
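/**
 * @brief Allocate and return one sector (CC) handle per requested instance
 *        for the given xRAN port; the handles are also cached in
 *        pLibInstanceHandles for later lookup.
 */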
1858 int32_t
1859 xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances,
1860                xran_cc_handle_t * pSectorInstanceHandles)
1861 {
1862     struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
1863     XranSectorHandleInfo *pCcHandle = NULL;
1864     int32_t i = 0;
1865
1866     pDev += xran_port;
1867
1868     /* Check for the Valid Parameters */
1869     CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
1870
1871     if (!nNumInstances) {
1872         print_dbg("Instance is not assigned for this function !!! \n");
1873         return XRAN_STATUS_INVALID_PARAM;
1874     }
1875
1876     for (i = 0; i < nNumInstances; i++) {
1877
1878         /* Allocate Memory for CC handles */
1879         pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);
1880
1881         if(pCcHandle == NULL)
1882             return XRAN_STATUS_RESOURCE;
1883
1884         memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));
1885
1886         pCcHandle->nIndex    = i;
1887         pCcHandle->nXranPort = pDev->xran_port_id;
1888
1889         printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);
1890         pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;
1891
1892         printf("Handle: %p Instance: %p\n",
1893             &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);
1894     }
1895
1896     return XRAN_STATUS_SUCCESS;
1897 }
1898
1899
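/**
 * @brief Wire the application-provided U-Plane and C-Plane TX/RX buffer lists
 *        into the device context for one CC (per TTI and per antenna),
 *        register the transport notification callback and mark xran2phy
 *        memory as ready.
 */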
1900 int32_t
1901 xran_5g_fronthault_config (void * pHandle,
1902                 struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1903                 struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1904                 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1905                 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1906                 xran_transport_callback_fn pCallback,
1907                 void *pCallbackTag)
1908 {
1909     int j, i = 0, z;
1910     XranSectorHandleInfo* pXranCc = NULL;
1911     struct xran_device_ctx * p_xran_dev_ctx = NULL;
1912
1913     if(NULL == pHandle) {
1914         printf("Handle is NULL!\n");
1915         return XRAN_STATUS_FAIL;
1916     }
1917
1918     pXranCc = (XranSectorHandleInfo*) pHandle;
1919     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1920     if (p_xran_dev_ctx == NULL) {
1921         printf ("p_xran_dev_ctx is NULL\n");
1922         return XRAN_STATUS_FAIL;
1923     }
1924
1925     i = pXranCc->nIndex;
1926
1927     for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
1928         for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
1929             /* U-plane TX */
1930
1931             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
1932             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1933             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1934             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1935             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1936             p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
1937
1938             if(pSrcBuffer[z][j])
1939                 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcBuffer[z][j];
1940             else
1941                 memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j]));
1942
1943
1944             /* C-plane TX */
1945             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1946             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1947             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1948             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1949             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1950             p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
1951
1952             if(pSrcCpBuffer[z][j])
1953                 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcCpBuffer[z][j];
1954             else
1955                 memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j]));
1956             /* U-plane RX */
1957
1958             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
1959             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1960             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1961             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1962             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1963             p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
1964
1965             if(pDstBuffer[z][j])
1966                 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList =   *pDstBuffer[z][j];
1967             else
1968                 memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1969
1970
1971             /* C-plane RX */
1972             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1973             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1974             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1975             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1976             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1977             p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
1978
1979             if(pDstCpBuffer[z][j])
1980                 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];
1981             else
1982                 memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
1983         }
1984     }
1985
1986     p_xran_dev_ctx->pCallback[i]    = pCallback;
1987     p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;
1988     print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
1989         p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]);
1990
1991     p_xran_dev_ctx->xran2phy_mem_ready = 1;
1992
1993     return XRAN_STATUS_SUCCESS;
1994 }
1995
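/**
 * @brief Wire the application-provided beamforming-weight C-Plane RX and TX
 *        buffer lists (the RU-side PRB map buffers, per the comments below)
 *        into the device context for one CC.
 */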
1996 int32_t xran_5g_bfw_config(void * pHandle,
1997                     struct xran_buffer_list *pSrcRxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1998                     struct xran_buffer_list *pSrcTxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1999                     xran_transport_callback_fn pCallback,
2000                     void *pCallbackTag){
2001     int j, i = 0, z;
2002     XranSectorHandleInfo* pXranCc = NULL;
2003     struct xran_device_ctx * p_xran_dev_ctx = NULL;
2004
2005     if(NULL == pHandle) {
2006         printf("Handle is NULL!\n");
2007         return XRAN_STATUS_FAIL;
2008     }
2009     pXranCc = (XranSectorHandleInfo*) pHandle;
2010     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
2011     if (p_xran_dev_ctx == NULL) {
2012         printf ("p_xran_dev_ctx is NULL\n");
2013         return XRAN_STATUS_FAIL;
2014     }
2015
2016     i = pXranCc->nIndex;
2017
2018     for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
2019         for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
2020             /* C-plane RX - RU */
2021             p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
2022             p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2023             p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2024             p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2025             p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
2026             p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
2027
2028             if(pSrcRxCpBuffer[z][j])
2029                 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcRxCpBuffer[z][j];
2030             else
2031                 memset(&p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcRxCpBuffer[z][j]));
2032
2033             /* C-plane TX - RU */
2034             p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
2035             p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2036             p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2037             p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2038             p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
2039             p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
2040
2041             if(pSrcTxCpBuffer[z][j])
2042                 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcTxCpBuffer[z][j];
2043             else
2044                 memset(&p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcTxCpBuffer[z][j]));
2045         }
2046     }
2047     return XRAN_STATUS_SUCCESS;
2048 }
2049
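/**
 * @brief Register PRACH RX buffer lists for one CC and set the PRACH
 *        notification callback. The second buffer set (pDstBufferDecomp)
 *        appears, judging by its name, to carry decompressed PRACH output.
 */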
2050 int32_t
2051 xran_5g_prach_req (void *  pHandle,
2052                 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
2053                 struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],                
2054                 xran_transport_callback_fn pCallback,
2055                 void *pCallbackTag)
2056 {
2057     int j, i = 0, z;
2058     XranSectorHandleInfo* pXranCc = NULL;
2059     struct xran_device_ctx * p_xran_dev_ctx = NULL;
2060
2061     if(NULL == pHandle) {
2062         printf("Handle is NULL!\n");
2063         return XRAN_STATUS_FAIL;
2064     }
2065
2066     pXranCc = (XranSectorHandleInfo*) pHandle;
2067     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
2068     if (p_xran_dev_ctx == NULL) {
2069         printf ("p_xran_dev_ctx is NULL\n");
2070         return XRAN_STATUS_FAIL;
2071     }
2072
2073     i = pXranCc->nIndex;
2074
2075     for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
2076         for(z = 0; z < XRAN_MAX_PRACH_ANT_NUM; z++){
2077             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
2078             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2079             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2080             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2081             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_PRACH_ANT_NUM; /* ant number */
2082             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
2083             if(pDstBuffer[z][j])
2084                 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
2085             else
2086                 memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
2087
2088             p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0];
2089             if(pDstBufferDecomp[z][j])
2090                 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j];
2091         }
2092     }
2093
2094     p_xran_dev_ctx->pPrachCallback[i]    = pCallback;
2095     p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
2096
2097     print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
2098         p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]);
2099
2100     return XRAN_STATUS_SUCCESS;
2101 }
2102
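/**
 * @brief Register SRS U-Plane RX and C-Plane buffer lists per antenna array
 *        element for one CC and set the SRS notification callback.
 */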
2103 int32_t
2104 xran_5g_srs_req (void *  pHandle,
2105                 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
2106                 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
2107                 xran_transport_callback_fn pCallback,
2108                 void *pCallbackTag)
2109 {
2110     int j, i = 0, z;
2111     XranSectorHandleInfo* pXranCc = NULL;
2112     struct xran_device_ctx * p_xran_dev_ctx = NULL;
2113
2114     if(NULL == pHandle) {
2115         printf("Handle is NULL!\n");
2116         return XRAN_STATUS_FAIL;
2117     }
2118
2119     pXranCc = (XranSectorHandleInfo*) pHandle;
2120     p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
2121     if (p_xran_dev_ctx == NULL) {
2122         printf ("p_xran_dev_ctx is NULL\n");
2123         return XRAN_STATUS_FAIL;
2124     }
2125
2126     i = pXranCc->nIndex;
2127
2128     for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
2129         for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){
2130             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
2131             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2132             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2133             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2134             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; /* ant number */
2135             p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
2136             if(pDstBuffer[z][j])
2137                 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
2138             else
2139                 memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
2140
2141             /* C-plane SRS */
2142             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
2143             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2144             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2145             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2146             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
2147             p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z];
2148
2149             if(pDstCpBuffer[z][j])
2150                 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];
2151             else
2152                 memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
2153
2154         }
2155     }
2156
2157     p_xran_dev_ctx->pSrsCallback[i]    = pCallback;
2158     p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
2159
2160     print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
2161         p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]);
2162
2163     return XRAN_STATUS_SUCCESS;
2164 }
2165
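/**
 * @brief Report the timing-thread usage counters: total and used ticks since
 *        the last clear, plus the list of cores used by the library.
 *
 * A minimal usage sketch (variable names are illustrative only); the ratio
 * used/total approximates the timing-core load over the sampling window:
 *
 *     uint64_t total = 0, used = 0;
 *     uint32_t n_cores = 0, cores[64];
 *     xran_get_time_stats(&total, &used, &n_cores, cores, 1); // 1 = clear counters
 *     double load = total ? (double)used / (double)total : 0.0;
 */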
2166 uint32_t
2167 xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear)
2168 {
2169     uint32_t i;
2170
2171     *num_core_used = xran_num_cores_used;
2172     for (i = 0; i < xran_num_cores_used; i++)
2173     {
2174         core_used[i] = xran_core_used[i];
2175     }
2176
2177     *total_time = xran_total_tick;
2178     *used_time = xran_used_tick;
2179
2180     if (clear)
2181     {
2182         xran_total_tick = 0;
2183         xran_used_tick = 0;
2184     }
2185
2186     return 0;
2187 }
2188
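/**
 * @brief Advance a buffer pointer past the mbuf headroom and the C-Plane
 *        section type 1 headers, returning a 64-byte aligned address for the
 *        C-Plane payload.
 */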
2189 uint8_t*
2190 xran_add_cp_hdr_offset(uint8_t  *dst)
2191 {
2192     dst += (RTE_PKTMBUF_HEADROOM +
2193             sizeof(struct xran_ecpri_hdr) +
2194             sizeof(struct xran_cp_radioapp_section1_header) +
2195             sizeof(struct xran_cp_radioapp_section1));
2196
2197     dst = RTE_PTR_ALIGN_CEIL(dst, 64);
2198
2199     return dst;
2200 }
2201
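/**
 * @brief Advance a buffer pointer past the mbuf headroom and the U-Plane
 *        headers (plus the compression header when a compression method is
 *        used), returning a 64-byte aligned address for IQ data.
 */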
2202 uint8_t*
2203 xran_add_hdr_offset(uint8_t  *dst, int16_t compMethod)
2204 {
2205     dst+= (RTE_PKTMBUF_HEADROOM +
2206           sizeof (struct xran_ecpri_hdr) +
2207           sizeof (struct radio_app_common_hdr) +
2208           sizeof(struct data_section_hdr));
2209     if(compMethod != XRAN_COMPMETHOD_NONE)
2210           dst += sizeof (struct data_section_compression_hdr);
2211     dst = RTE_PTR_ALIGN_CEIL(dst, 64);
2212
2213     return dst;
2214 }
2215
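/**
 * @brief Drain up to 16 TX descriptors from a packet-generation ring and run
 *        xran_process_tx_sym_cp_on_opt() for each; returns the number of
 *        entries still left in the ring, or -1 when the interface stops
 *        mid-batch.
 */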
2216 int32_t
2217 xran_pkt_gen_process_ring(struct rte_ring *r)
2218 {
2219     assert(r);
2220     struct rte_mbuf *mbufs[16];
2221     int i;
2222     uint32_t remaining;
2223     uint64_t t1;
2224     struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
2225     const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
2226         RTE_DIM(mbufs), &remaining);
2227
2228
2229     if (!dequeued)
2230         return 0;
2231
2232     t1 = MLogXRANTick();
2233     for (i = 0; i < dequeued; ++i) {
2234         struct cp_up_tx_desc * p_tx_desc =  (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i],  struct cp_up_tx_desc *);
2235         xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
2236                                         p_tx_desc->ctx_id,
2237                                         p_tx_desc->tti,
2238                                         p_tx_desc->start_cc,
2239                                         p_tx_desc->cc_num,
2240                                         p_tx_desc->start_ant,
2241                                         p_tx_desc->ant_num,
2242                                         p_tx_desc->frame_id,
2243                                         p_tx_desc->subframe_id,
2244                                         p_tx_desc->slot_id,
2245                                         p_tx_desc->sym_id,
2246                                         (enum xran_comp_hdr_type)p_tx_desc->compType,
2247                                         (enum xran_pkt_dir) p_tx_desc->direction,
2248                                         p_tx_desc->xran_port_id,
2249                                         (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
2250
2251         xran_pkt_gen_desc_free(p_tx_desc);
2252         if (XRAN_STOPPED == xran_if_current_state){
2253             MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
2254             return -1;
2255         }
2256     }
2257
2258     if(p_io_cfg->io_sleep)
2259        nanosleep(&sleeptime,NULL);
2260
2261     MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
2262
2263     return remaining;
2264 }
2265
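/**
 * @brief Worker task: service DPDK timers, then process the DL packet
 *        generation ring of every xRAN port selected by the port mask packed
 *        into the argument.
 */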
2266 int32_t
2267 xran_dl_pkt_ring_processing_func(void* args)
2268 {
2269     struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
2270     uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
2271     uint16_t current_port;
2272
2273     rte_timer_manage();
2274
2275     for (current_port = 0; current_port < XRAN_PORTS_NUM;  current_port++) {
2276         if( xran_port_mask & (1<<current_port)) {
2277             xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
2278         }
2279     }
2280
2281     if (XRAN_STOPPED == xran_if_current_state)
2282         return -1;
2283
2284     return 0;
2285 }
2286
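/**
 * @brief Combined worker task: run FH RX ring processing first, then U-Plane
 *        DL packet generation for the ports in the given mask; stops as soon
 *        as either stage reports a non-zero status.
 */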
2287 int32_t xran_fh_rx_and_up_tx_processing(void *port_mask)
2288 {
2289     int32_t ret_val=0;
2290
2291     ret_val = ring_processing_func((void *)0);
2292     if(ret_val != 0)
2293        return ret_val;
2294
2295     ret_val = xran_dl_pkt_ring_processing_func(port_mask);
2296     if(ret_val != 0)
2297        return ret_val;
2298
2299     return 0;
2300 }
2301 /** Function to perform service of DPDK timers */
2302 int32_t
2303 xran_processing_timer_only_func(void* args)
2304 {
2305     rte_timer_manage();
2306     if (XRAN_STOPPED == xran_if_current_state)
2307         return -1;
2308
2309     return 0;
2310 }
2311
2312 /** Function to perform parsing of RX packets on all ports and TX and RX on the ETH device */
2313 int32_t
2314 xran_all_tasks(void* arg)
2315 {
2316
2317     ring_processing_func(arg);
2318     process_dpdk_io(arg);
2319     return 0;
2320 }
2321
2322 /** Function to perform TX and RX on the ETH device */
2323 int32_t
2324 xran_eth_trx_tasks(void* arg)
2325 {
2326     process_dpdk_io(arg);
2327     return 0;
2328 }
2329
2330 /** Function to perform RX on the ETH device */
2331 int32_t
2332 xran_eth_rx_tasks(void* arg)
2333 {
2334     process_dpdk_io_rx(arg);
2335     return 0;
2336 }
2337
2338 /** Function to process O-RAN FH packets per port */
2339 int32_t
2340 ring_processing_func_per_port(void* args)
2341 {
2342     struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
2343     int32_t i;
2344     uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
2345     queueid_t qi;
2346
2347     for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
2348         if (ctx->vf2xran_port[i] == port_id) {
2349             for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
2350                 if (process_ring(ctx->rx_ring[i][qi], i, qi))
2351                     return 0;
2352             }
2353         }
2354     }
2355
2356     if (XRAN_STOPPED == xran_if_current_state)
2357         return -1;
2358
2359     return 0;
2360 }
2361
2362 /** Function that generates the worker-thread configuration and creates the threads based on the scenario and the platform in use */
2363 int32_t
2364 xran_spawn_workers(void)
2365 {
2366     uint64_t nWorkerCore = 1LL;
2367     uint32_t coreNum     = sysconf(_SC_NPROCESSORS_CONF);
2368     int32_t  i = 0;
2369     uint32_t total_num_cores  = 1; /*start with timing core */
2370     uint32_t worker_num_cores = 0;
2371     uint32_t icx_cpu = 0;
2372     int32_t core_map[2*sizeof(uint64_t)*8];
2373     uint64_t xran_port_mask = 0;
2374
2375     struct xran_ethdi_ctx  *eth_ctx   = xran_ethdi_get_ctx();
2376     struct xran_device_ctx *p_dev     = NULL;
2377     struct xran_fh_init    *fh_init   = NULL;
2378     struct xran_fh_config  *fh_cfg    = NULL;
2379     struct xran_worker_th_ctx* pThCtx = NULL;
2380     void *worker_ports=NULL;
2381
2382     p_dev =  xran_dev_get_ctx_by_id(0);
2383     if(p_dev == NULL) {
2384         print_err("p_dev\n");
2385         return XRAN_STATUS_FAIL;
2386     }
2387
2388     fh_init = &p_dev->fh_init;
2389     if(fh_init == NULL) {
2390         print_err("fh_init\n");
2391         return XRAN_STATUS_FAIL;
2392     }
2393
2394     fh_cfg = &p_dev->fh_cfg;
2395     if(fh_cfg == NULL) {
2396         print_err("fh_cfg\n");
2397         return XRAN_STATUS_FAIL;
2398     }
2399
2400     for (i = 0; i < coreNum && i < 64; i++) {
2401         if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
2402             core_map[worker_num_cores++] = i;
2403             total_num_cores++;
2404         }
2405         nWorkerCore = nWorkerCore << 1;
2406     }
2407
2408     nWorkerCore = 1LL;
2409     for (i = 64; i < coreNum && i < 128; i++) {
2410         if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
2411             core_map[worker_num_cores++] = i;
2412             total_num_cores++;
2413         }
2414         nWorkerCore = nWorkerCore << 1;
2415     }
2416
2417     extern int _may_i_use_cpu_feature(unsigned __int64);
2418     icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
2419
2420     printf("O-XU      %d\n", eth_ctx->io_cfg.id);
2421     printf("HW        %d\n", icx_cpu);
2422     printf("Num cores %d\n", total_num_cores);
2423     printf("Num ports %d\n", fh_init->xran_ports);
2424     printf("O-RU Cat  %d\n", fh_cfg->ru_conf.xranCat);
2425     printf("O-RU CC   %d\n", fh_cfg->nCC);
2426     printf("O-RU eAxC %d\n", fh_cfg->neAxc);
2427
2428     for (i = 0; i < fh_init->xran_ports; i++){
2429         xran_port_mask |= 1L<<i;
2430     }
2431
2432     for (i = 0; i < fh_init->xran_ports; i++) {
2433         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2434         if(p_dev_update == NULL){
2435             print_err("p_dev_update\n");
2436             return XRAN_STATUS_FAIL;
2437         }
2438         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
2439         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
2440         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2441         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2442     }
2443
2444     if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
2445         switch(total_num_cores) {
2446             case 1: /** only timing core */
2447                 eth_ctx->time_wrk_cfg.f = xran_all_tasks;
2448                 eth_ctx->time_wrk_cfg.arg   = NULL;
2449                 eth_ctx->time_wrk_cfg.state = 1;
2450             break;
2451             case 2:
2452                 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2453                 eth_ctx->time_wrk_cfg.arg   = NULL;
2454                 eth_ctx->time_wrk_cfg.state = 1;
2455
2456                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2457                 if(pThCtx == NULL){
2458                     print_err("pThCtx allocation error\n");
2459                     return XRAN_STATUS_FAIL;
2460                 }
2461                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2462                 pThCtx->worker_id    = 0;
2463                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2464                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2465                 pThCtx->task_func = ring_processing_func;
2466                 pThCtx->task_arg  = NULL;
2467                 eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
2468                 eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
2469             break;
2470             case 3:
2471                 /* timing core */
2472                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2473                 eth_ctx->time_wrk_cfg.arg   = NULL;
2474                 eth_ctx->time_wrk_cfg.state = 1;
2475
2476                 /* workers */
2477                 /** 0 **/
2478                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2479                 if(pThCtx == NULL){
2480                     print_err("pThCtx allocation error\n");
2481                     return XRAN_STATUS_FAIL;
2482                 }
2483                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2484                 pThCtx->worker_id      = 0;
2485                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2486                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2487                 pThCtx->task_func = ring_processing_func;
2488                 pThCtx->task_arg  = NULL;
2489                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2490                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2491
2492                 for (i = 0; i < fh_init->xran_ports; i++) {
2493                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2494                     if(p_dev_update == NULL) {
2495                         print_err("p_dev_update\n");
2496                         return XRAN_STATUS_FAIL;
2497                     }
2498                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2499                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2500                     printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2501                     printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2502                 }
2503
2504                 /** 1 - CP GEN **/
2505                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2506                 if(pThCtx == NULL){
2507                     print_err("pThCtx allocation error\n");
2508                     return XRAN_STATUS_FAIL;
2509                 }
2510                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2511                 pThCtx->worker_id      = 1;
2512                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2513                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2514                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2515                 pThCtx->task_arg  = (void*)xran_port_mask;
2516                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2517                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2518             break;
2519             default:
2520                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2521                 return XRAN_STATUS_FAIL;
2522         }
2523     } else if ((fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1)  || fh_init->io_cfg.bbu_offload) {
2524         switch(total_num_cores) {
2525             case 1: /** only timing core */
2526                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2527                 return XRAN_STATUS_FAIL;
2528             break;
2529             case 2:
2530                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2531                 eth_ctx->time_wrk_cfg.arg   = NULL;
2532                 eth_ctx->time_wrk_cfg.state = 1;
2533
2534                 if (p_dev->fh_init.io_cfg.bbu_offload)
2535                     p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
2536                 else
2537                     p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
2538
2539                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2540                 if(pThCtx == NULL){
2541                     print_err("pThCtx allocation error\n");
2542                     return XRAN_STATUS_FAIL;
2543                 }
2544                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2545                 pThCtx->worker_id    = 0;
2546                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2547                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2548                 pThCtx->task_func = ring_processing_func;
2549                 pThCtx->task_arg  = NULL;
2550                 eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
2551                 eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
2552             break;
2553             case 3:
2554                 if(1) {
2555                     /* timing core */
2556                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2557                     eth_ctx->time_wrk_cfg.arg   = NULL;
2558                     eth_ctx->time_wrk_cfg.state = 1;
2559
2560                     /* workers */
2561                     /** 0 **/
2562                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2563                     if(pThCtx == NULL){
2564                         print_err("pThCtx allocation error\n");
2565                         return XRAN_STATUS_FAIL;
2566                     }
2567                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2568                     pThCtx->worker_id      = 0;
2569                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2570                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2571                     pThCtx->task_func = ring_processing_func;
2572                     pThCtx->task_arg  = NULL;
2573                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2574                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2575
2576                     for (i = 0; i < fh_init->xran_ports; i++) {
2577                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2578                         if(p_dev_update == NULL) {
2579                             print_err("p_dev_update\n");
2580                             return XRAN_STATUS_FAIL;
2581                         }
2582                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2583                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2584                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2585                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2586                     }
2587
2588                     /** 1 - CP GEN **/
2589                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2590                     if(pThCtx == NULL){
2591                         print_err("pThCtx allocation error\n");
2592                         return XRAN_STATUS_FAIL;
2593                     }
2594                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2595                     pThCtx->worker_id      = 1;
2596                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2597                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2598                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2599                     pThCtx->task_arg  = (void*)xran_port_mask;
2600                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2601                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2602                 } else {
2603                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2604                     return XRAN_STATUS_FAIL;
2605                 }
2606             break;
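            /*
             * case 4: worker 0 keeps ring processing, worker 1 ("fh_cp_gen")
             * generates packets for ports 0..2 and worker 2 ("fh_tx_gen")
             * for port 0 only; C-Plane jobs of ports 1..N-1 are mapped to
             * worker 2.
             */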
2607             case 4:
2608                 if(1) {
2609                     /* timing core */
2610                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2611                     eth_ctx->time_wrk_cfg.arg   = NULL;
2612                     eth_ctx->time_wrk_cfg.state = 1;
2613
2614                     /* workers */
2615                     /** 0 **/
2616                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2617                     if(pThCtx == NULL){
2618                         print_err("pThCtx allocation error\n");
2619                         return XRAN_STATUS_FAIL;
2620                     }
2621                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2622                     pThCtx->worker_id      = 0;
2623                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2624                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2625                     pThCtx->task_func = ring_processing_func;
2626                     pThCtx->task_arg  = NULL;
2627                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2628                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2629
2630                     /** 1 - CP GEN **/
2631                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2632                     if(pThCtx == NULL){
2633                         print_err("pThCtx allocation error\n");
2634                         return XRAN_STATUS_FAIL;
2635                     }
2636                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2637                     pThCtx->worker_id      = 1;
2638                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2639                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2640                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2641                     pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
2642                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2643                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2644
2645                     /** 2 UP GEN **/
2646                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2647                     if(pThCtx == NULL){
2648                         print_err("pThCtx allocation error\n");
2649                         return XRAN_STATUS_FAIL;
2650                     }
2651                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2652                     pThCtx->worker_id    = 2;
2653                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2654                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2655                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2656                     pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
2657                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2658                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2659
2660                     for (i = 1; i < fh_init->xran_ports; i++) {
2661                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2662                         if(p_dev_update == NULL) {
2663                             print_err("p_dev_update\n");
2664                             return XRAN_STATUS_FAIL;
2665                         }
2666                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2667                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2668                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2669                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2670                     }
2671                 } else {
2672                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2673                     return XRAN_STATUS_FAIL;
2674                 }
2675                 break;
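            /*
             * case 5: the timing core is reduced to RX-only duty
             * (xran_eth_rx_tasks) and a second U-Plane worker is added;
             * workers 2 and 3 both serve port 0, and C-Plane jobs of ports
             * 1..N-1 are mapped to worker 3.
             */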
2676             case 5:
2677                 if(1) {
2678                     /* timing core */
2679                     eth_ctx->time_wrk_cfg.f     = xran_eth_rx_tasks;
2680                     eth_ctx->time_wrk_cfg.arg   = NULL;
2681                     eth_ctx->time_wrk_cfg.state = 1;
2682
2683                     /* workers */
2684                     /** 0 **/
2685                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2686                     if(pThCtx == NULL){
2687                         print_err("pThCtx allocation error\n");
2688                         return XRAN_STATUS_FAIL;
2689                     }
2690                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2691                     pThCtx->worker_id      = 0;
2692                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2693                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2694                     pThCtx->task_func = ring_processing_func;
2695                     pThCtx->task_arg  = NULL;
2696                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2697                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2698
2699                     /** 1 - CP GEN **/
2700                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2701                     if(pThCtx == NULL){
2702                         print_err("pThCtx allocation error\n");
2703                         return XRAN_STATUS_FAIL;
2704                     }
2705                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2706                     pThCtx->worker_id      = 1;
2707                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2708                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2709                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2710                     pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
2711                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2712                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2713
2714                     /** 2 UP GEN **/
2715                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2716                     if(pThCtx == NULL){
2717                         print_err("pThCtx allocation error\n");
2718                         return XRAN_STATUS_FAIL;
2719                     }
2720                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2721                     pThCtx->worker_id    = 2;
2722                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2723                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2724                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2725                     pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
2726                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2727                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2728
2729                     /** 3 UP GEN **/
2730                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2731                     if(pThCtx == NULL){
2732                         print_err("pThCtx allocation error\n");
2733                         return XRAN_STATUS_FAIL;
2734                     }
2735                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2736                     pThCtx->worker_id    = 3;
2737                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2738                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2739                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2740                     pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
2741                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2742                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2743
2744                     for (i = 1; i < fh_init->xran_ports; i++) {
2745                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2746                         if(p_dev_update == NULL) {
2747                             print_err("p_dev_update\n");
2748                             return XRAN_STATUS_FAIL;
2749                         }
2750                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2751                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2752                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2753                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2754                     }
2755                 } else {
2756                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2757                     return XRAN_STATUS_FAIL;
2758                 }
2759                 break;
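            /*
             * case 6: the layout depends on the role. O-DU adds a dedicated
             * Ethernet TX worker (1) next to C-Plane generation (2) and two
             * U-Plane generators (3, 4) on port 0, and pins all C-Plane jobs
             * to worker 0. O-RU runs an Ethernet RX worker, one FH RX worker
             * per port (0..2) and an Ethernet TX worker.
             */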
2760             case 6:
2761                 if(eth_ctx->io_cfg.id == O_DU) {
2762                     /* timing core */
2763                     eth_ctx->time_wrk_cfg.f     = xran_eth_rx_tasks;
2764                     eth_ctx->time_wrk_cfg.arg   = NULL;
2765                     eth_ctx->time_wrk_cfg.state = 1;
2766
2767                     /* workers */
2768                     /** 0 **/
2769                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2770                     if(pThCtx == NULL){
2771                         print_err("pThCtx allocation error\n");
2772                         return XRAN_STATUS_FAIL;
2773                     }
2774                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2775                     pThCtx->worker_id      = 0;
2776                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2777                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2778                     pThCtx->task_func = ring_processing_func;
2779                     pThCtx->task_arg  = NULL;
2780                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2781                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2782
2783                     /** 1 Eth Tx **/
2784                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2785
2786                     if(pThCtx == NULL){
2787                         print_err("pThCtx allocation error\n");
2788                         return XRAN_STATUS_FAIL;
2789                     }
2790                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2791                     pThCtx->worker_id = 1;
2792                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2793                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2794                     pThCtx->task_func = process_dpdk_io_tx;
2795                     pThCtx->task_arg  = (void*)2;
2796                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2797                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2798
2799                     /** 2 - CP GEN **/
2800                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2801                     if(pThCtx == NULL){
2802                         print_err("pThCtx allocation error\n");
2803                         return XRAN_STATUS_FAIL;
2804                     }
2805                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2806                     pThCtx->worker_id      = 2;
2807                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2808                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2809                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2810                     pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
2811                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2812                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2813
2814                     /** 3 UP GEN **/
2815                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2816                     if(pThCtx == NULL){
2817                         print_err("pThCtx allocation error\n");
2818                         return XRAN_STATUS_FAIL;
2819                     }
2820                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2821                     pThCtx->worker_id    = 3;
2822                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2823                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2824                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2825                     pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
2826                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2827                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2828
2829                     /** 4 UP GEN **/
2830                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2831                     if(pThCtx == NULL){
2832                         print_err("pThCtx allocation error\n");
2833                         return XRAN_STATUS_FAIL;
2834                     }
2835                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2836                     pThCtx->worker_id    = 4;
2837                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2838                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2839                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2840                     pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
2841                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2842                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2843
2844                     for (i = 0; i < fh_init->xran_ports; i++) {
2845                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2846                         if(p_dev_update == NULL) {
2847                             print_err("p_dev_update\n");
2848                             return XRAN_STATUS_FAIL;
2849                         }
2850                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; /* pin C-Plane DL jobs to worker 0, not pThCtx->worker_id */
2851                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; /* pin C-Plane UL jobs to worker 0, not pThCtx->worker_id */
2852                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2853                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2854                     }
2855                 } else if(eth_ctx->io_cfg.id == O_RU) {
2856                     /*** O_RU specific config */
2857                     /* timing core */
2858                     eth_ctx->time_wrk_cfg.f     = NULL;
2859                     eth_ctx->time_wrk_cfg.arg   = NULL;
2860                     eth_ctx->time_wrk_cfg.state = 1;
2861
2862                     /* workers */
2863                     /** 0  Eth RX */
2864                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2865                     if(pThCtx == NULL){
2866                         print_err("pThCtx allocation error\n");
2867                         return XRAN_STATUS_FAIL;
2868                     }
2869                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2870                     pThCtx->worker_id = 0;
2871                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2872                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
2873                     pThCtx->task_func = process_dpdk_io_rx;
2874                     pThCtx->task_arg  = NULL;
2875                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2876                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2877
2878                     /** 1  FH RX and BBDEV */
2879                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2880                     if(pThCtx == NULL){
2881                         print_err("pThCtx allocation error\n");
2882                         return XRAN_STATUS_FAIL;
2883                     }
2884                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2885                     pThCtx->worker_id = 1;
2886                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2887                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
2888                     pThCtx->task_func = ring_processing_func_per_port;
2889                     pThCtx->task_arg  = (void*)0;
2890                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2891                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2892
2893                     /** 2  FH RX and BBDEV */
2894                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2895                     if(pThCtx == NULL){
2896                         print_err("pThCtx allocation error\n");
2897                         return XRAN_STATUS_FAIL;
2898                     }
2899                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2900                     pThCtx->worker_id = 2;
2901                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2902                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
2903                     pThCtx->task_func = ring_processing_func_per_port;
2904                     pThCtx->task_arg  = (void*)1;
2905                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2906                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2907
2908                     /** 3  FH RX and BBDEV */
2909                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2910                     if(pThCtx == NULL){
2911                         print_err("pThCtx allocation error\n");
2912                         return XRAN_STATUS_FAIL;
2913                     }
2914                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2915                     pThCtx->worker_id = 3;
2916                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2917                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
2918                     pThCtx->task_func = ring_processing_func_per_port;
2919                     pThCtx->task_arg  = (void*)2;
2920                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2921                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2922
2923                     /** 4  Eth TX */
2924                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2925                     if(pThCtx == NULL){
2926                         print_err("pThCtx allocation error\n");
2927                         return XRAN_STATUS_FAIL;
2928                     }
2929                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2930                     pThCtx->worker_id = 4;
2931                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2932                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2933                     pThCtx->task_func = process_dpdk_io_tx;
2934                     pThCtx->task_arg  = (void*)2;
2935                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
2936                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
2937                 } else {
2938                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2939                     return XRAN_STATUS_FAIL;
2940                 }
2941                 break;
2942             default:
2943                 print_err("unsupported configuration\n");
2944                 return XRAN_STATUS_FAIL;
2945         }
2946     } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
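        /*
         * Category B with multiple O-RAN ports: the worker split depends on
         * total_num_cores and xran_ports, and for 3 cores also on the CPU
         * type (icx_cpu vs. the csx fallback).
         */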
2947         switch(total_num_cores) {
2948             case 1:
2949                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2950                 return XRAN_STATUS_FAIL;
2951             break;
2952
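            /*
             * case 2: a single worker combines FH RX and U-Plane TX
             * (xran_fh_rx_and_up_tx_processing) for all configured ports;
             * worker_ports is the bitmask of ports it owns, and C-Plane
             * jobs of ports 1..N-1 are mapped to worker 0.
             */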
2953             case 2:
2954                 if(fh_init->xran_ports == 2)
2955                     worker_ports = (void *)((1L<<0 | 1L<<1) & xran_port_mask);
2956                 else if(fh_init->xran_ports == 3)
2957                     worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2) & xran_port_mask);
2958                 else if(fh_init->xran_ports == 4)
2959                     worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2 | 1L<<3) & xran_port_mask);
2960                 else
2961                 {
2962                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2963                     return XRAN_STATUS_FAIL;
2964                 }
2965
2966                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
2967                 eth_ctx->time_wrk_cfg.arg   = NULL;
2968                 eth_ctx->time_wrk_cfg.state = 1;
2969
2970                 /* p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; */
2971
2972                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2973                 if(pThCtx == NULL){
2974                     print_err("pThCtx allocation error\n");
2975                     return XRAN_STATUS_FAIL;
2976                 }
2977                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2978                 pThCtx->worker_id    = 0;
2979                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2980                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2981                 pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
2982                 pThCtx->task_arg  = worker_ports;
2983                 eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
2984                 eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
2985
2986                 for (i = 1; i < fh_init->xran_ports; i++) {
2987                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
2988                     if(p_dev_update == NULL) {
2989                         print_err("p_dev_update\n");
2990                         return XRAN_STATUS_FAIL;
2991                     }
2992                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2993                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2994                     printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2995                     printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2996                 }
2997             break;
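            /*
             * case 3: on ICX the split mirrors the single-port layout (ring
             * processing on worker 0, all-port DL generation on worker 1,
             * C-Plane jobs mapped to worker 0). On CSX worker 0 generates DL
             * packets for ports 0..1 while worker 1 combines FH RX and
             * U-Plane TX for the remaining ports in worker_ports.
             */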
2998             case 3:
2999                 if(icx_cpu) {
3000                     /* timing core */
3001                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
3002                     eth_ctx->time_wrk_cfg.arg   = NULL;
3003                     eth_ctx->time_wrk_cfg.state = 1;
3004
3005                     /* workers */
3006                     /** 0 **/
3007                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3008                     if(pThCtx == NULL){
3009                         print_err("pThCtx allocation error\n");
3010                         return XRAN_STATUS_FAIL;
3011                     }
3012                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3013                     pThCtx->worker_id      = 0;
3014                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3015                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3016                     pThCtx->task_func = ring_processing_func;
3017                     pThCtx->task_arg  = NULL;
3018                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3019                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3020
3021                     for (i = 1; i < fh_init->xran_ports; i++) {
3022                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3023                         if(p_dev_update == NULL) {
3024                             print_err("p_dev_update\n");
3025                             return XRAN_STATUS_FAIL;
3026                         }
3027                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3028                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3029                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3030                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3031                     }
3032
3033                     /** 1 - CP GEN **/
3034                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3035                     if(pThCtx == NULL){
3036                         print_err("pThCtx allocation error\n");
3037                         return XRAN_STATUS_FAIL;
3038                     }
3039                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3040                     pThCtx->worker_id      = 1;
3041                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3042                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3043                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3044                     pThCtx->task_arg  = (void*)xran_port_mask;
3045                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3046                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3047                 }
3048                 else /* csx cpu */
3049                 {
3050                 if(fh_init->xran_ports == 3)
3051                     worker_ports = (void *)(1L<<2 & xran_port_mask);
3052                 else if(fh_init->xran_ports == 4)
3053                     worker_ports = (void *)((1L<<2 | 1L<<3) & xran_port_mask);
3054                 else{
3055                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3056                     return XRAN_STATUS_FAIL;
3057                 }
3058                 /* timing core */
3059                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
3060                 eth_ctx->time_wrk_cfg.arg   = NULL;
3061                 eth_ctx->time_wrk_cfg.state = 1;
3062
3063                 /* workers */
3064                 /** 0 **/
3065                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3066                 if(pThCtx == NULL){
3067                     print_err("pThCtx allocation error\n");
3068                     return XRAN_STATUS_FAIL;
3069                 }
3070                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3071                 pThCtx->worker_id      = 0;
3072                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3073                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3074                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3075                 pThCtx->task_arg  = (void *)((1L<<0|1L<<1) & xran_port_mask);
3076                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3077                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3078
3079                 for (i = 1; i < fh_init->xran_ports; i++) {
3080                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3081                     if(p_dev_update == NULL) {
3082                         print_err("p_dev_update\n");
3083                         return XRAN_STATUS_FAIL;
3084                     }
3085                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3086                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3087                     printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3088                     printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3089                 }
3090
3091                 /** 1 - CP GEN **/
3092                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3093                 if(pThCtx == NULL){
3094                     print_err("pThCtx allocation error\n");
3095                     return XRAN_STATUS_FAIL;
3096                 }
3097                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3098                 pThCtx->worker_id      = 1;
3099                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3100                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3101                 pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
3102                 pThCtx->task_arg  = worker_ports;
3103                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3104                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3105             }
3106
3107             break;
3108
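            /*
             * case 4: worker 0 does ring processing, worker 1 ("fh_cp_gen")
             * generates DL packets for ports 1..2 and worker 2 ("fh_tx_gen")
             * for port 0; C-Plane jobs of ports 1..N-1 are mapped to
             * worker 2.
             */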
3109             case 4:
3110                 if(1) {
3111                     /* timing core */
3112                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
3113                     eth_ctx->time_wrk_cfg.arg   = NULL;
3114                     eth_ctx->time_wrk_cfg.state = 1;
3115
3116                     /* workers */
3117                     /** 0 **/
3118                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3119                     if(pThCtx == NULL){
3120                         print_err("pThCtx allocation error\n");
3121                         return XRAN_STATUS_FAIL;
3122                     }
3123                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3124                     pThCtx->worker_id      = 0;
3125                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3126                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3127                     pThCtx->task_func = ring_processing_func;
3128                     pThCtx->task_arg  = NULL;
3129                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3130                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3131
3132                     /** 1 - CP GEN **/
3133                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3134                     if(pThCtx == NULL){
3135                         print_err("pThCtx allocation error\n");
3136                         return XRAN_STATUS_FAIL;
3137                     }
3138                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3139                     pThCtx->worker_id      = 1;
3140                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3141                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3142                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3143                     pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2)) & xran_port_mask);
3144                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3145                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3146
3147                     /** 2 UP GEN **/
3148                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3149                     if(pThCtx == NULL){
3150                         print_err("pThCtx allocation error\n");
3151                         return XRAN_STATUS_FAIL;
3152                     }
3153                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3154                     pThCtx->worker_id    = 2;
3155                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3156                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3157                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3158                     pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
3159                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3160                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3161
3162                     for (i = 1; i < fh_init->xran_ports; i++) {
3163                         struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3164                         if(p_dev_update == NULL) {
3165                             print_err("p_dev_update\n");
3166                             return XRAN_STATUS_FAIL;
3167                         }
3168                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3169                         p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3170                         printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3171                         printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3172                     }
3173                 }
3174                 else {
3175                     print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3176                     return XRAN_STATUS_FAIL;
3177                 }
3178             break;
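            /*
             * case 5: one U-Plane generation worker per port (port masks
             * 1<<0, 1<<1, 1<<2). On O-DU with dlCpProcBurst == 0, the DL
             * C-Plane job of port i is moved to worker i+1, i.e. to the
             * U-Plane worker that owns that port.
             */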
3179             case 5:
3180                     /* timing core */
3181                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
3182                     eth_ctx->time_wrk_cfg.arg   = NULL;
3183                     eth_ctx->time_wrk_cfg.state = 1;
3184
3185                     /* workers */
3186                     /** 0  FH RX and BBDEV */
3187                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3188                     if(pThCtx == NULL){
3189                         print_err("pThCtx allocation error\n");
3190                         return XRAN_STATUS_FAIL;
3191                     }
3192                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3193                     pThCtx->worker_id = 0;
3194                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3195                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3196                     pThCtx->task_func = ring_processing_func;
3197                     pThCtx->task_arg  = NULL;
3198                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3199                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3200
3201                     /** 1 - CP GEN **/
3202                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3203                     if(pThCtx == NULL){
3204                         print_err("pThCtx allocation error\n");
3205                         return XRAN_STATUS_FAIL;
3206                     }
3207                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3208                     pThCtx->worker_id = 1;
3209                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3210                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3211                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3212                     pThCtx->task_arg  = (void*)((1<<0)  & xran_port_mask);
3213                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3214                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3215
3216                     /** 2 UP GEN **/
3217                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3218                     if(pThCtx == NULL){
3219                         print_err("pThCtx allocation error\n");
3220                         return XRAN_STATUS_FAIL;
3221                     }
3222                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3223                     pThCtx->worker_id = 2;
3224                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3225                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
3226                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3227                     pThCtx->task_arg  = (void*)((1<<1)  & xran_port_mask);
3228                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3229                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3230
3231                     /** 3 UP GEN **/
3232                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3233                     if(pThCtx == NULL){
3234                         print_err("pThCtx allocation error\n");
3235                         return XRAN_STATUS_FAIL;
3236                     }
3237                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3238                     pThCtx->worker_id = 3;
3239                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3240                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
3241                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3242                     pThCtx->task_arg  = (void*)((1<<2)  & xran_port_mask);
3243                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3244                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3245
3247                     if(eth_ctx->io_cfg.id == O_DU && 0 == fh_init->dlCpProcBurst) {
3248                         for (i = 1; i < fh_init->xran_ports; i++) {
3249                             struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3250                             if(p_dev_update == NULL) {
3251                                 print_err("p_dev_update\n");
3252                                 return XRAN_STATUS_FAIL;
3253                             }
3254                             p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = i+1;
3255                             printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3256                         }
3257                     }
3258
3259             break;
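            /*
             * case 6: O-DU runs a timer-only worker (1,
             * xran_processing_timer_only_func) next to ring processing (0)
             * and three per-port U-Plane generators (2..4); the O-RU layout
             * mirrors the 6-core O-RU case of the single-port branch above.
             */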
3260             case 6:
3261                 if(eth_ctx->io_cfg.id == O_DU){
3262                     /* timing core */
3263                     eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
3264                     eth_ctx->time_wrk_cfg.arg   = NULL;
3265                     eth_ctx->time_wrk_cfg.state = 1;
3266
3267                     /* workers */
3268                     /** 0 **/
3269                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3270                     if(pThCtx == NULL){
3271                         print_err("pThCtx allocation error\n");
3272                         return XRAN_STATUS_FAIL;
3273                     }
3274                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3275                     pThCtx->worker_id      = 0;
3276                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3277                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3278                     pThCtx->task_func = ring_processing_func;
3279                     pThCtx->task_arg  = NULL;
3280                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3281                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3282
3283                     /** 1 - CP GEN **/
3284                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3285                     if(pThCtx == NULL){
3286                         print_err("pThCtx allocation error\n");
3287                         return XRAN_STATUS_FAIL;
3288                     }
3289                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3290                     pThCtx->worker_id      = 1;
3291                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3292                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3293                     pThCtx->task_func = xran_processing_timer_only_func;
3294                     pThCtx->task_arg  = NULL;
3295                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3296                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3297
3298                     /** 2 UP GEN **/
3299                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3300                     if(pThCtx == NULL){
3301                         print_err("pThCtx allocation error\n");
3302                         return XRAN_STATUS_FAIL;
3303                     }
3304                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3305                     pThCtx->worker_id    = 2;
3306                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3307                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3308                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3309                     pThCtx->task_arg  = (void*)((1<<0)  & xran_port_mask);
3310                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3311                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3312
3313                     /** 3 UP GEN **/
3314                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3315                     if(pThCtx == NULL){
3316                         print_err("pThCtx allocation error\n");
3317                         return XRAN_STATUS_FAIL;
3318                     }
3319                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3320                     pThCtx->worker_id    = 3;
3321                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3322                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3323                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3324                     pThCtx->task_arg  = (void*)((1<<1)  & xran_port_mask);
3325                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3326                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3327
3328                     /** 4 UP GEN **/
3329                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3330                     if(pThCtx == NULL){
3331                         print_err("pThCtx allocation error\n");
3332                         return XRAN_STATUS_FAIL;
3333                     }
3334                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3335                     pThCtx->worker_id    = 4;
3336                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3337                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3338                     pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3339                     pThCtx->task_arg  = (void*)((1<<2)  & xran_port_mask);
3340                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3341                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3342                 } else {
3343                     /*** O_RU specific config */
3344                     /* timing core */
3345                     eth_ctx->time_wrk_cfg.f     = NULL;
3346                     eth_ctx->time_wrk_cfg.arg   = NULL;
3347                     eth_ctx->time_wrk_cfg.state = 1;
3348
3349                     /* workers */
3350                     /** 0  Eth RX */
3351                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3352                     if(pThCtx == NULL){
3353                         print_err("pThCtx allocation error\n");
3354                         return XRAN_STATUS_FAIL;
3355                     }
3356                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3357                     pThCtx->worker_id = 0;
3358                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3359                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
3360                     pThCtx->task_func = process_dpdk_io_rx;
3361                     pThCtx->task_arg  = NULL;
3362                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3363                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3364
3365                     /** 1  FH RX and BBDEV */
3366                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3367                     if(pThCtx == NULL){
3368                         print_err("pThCtx allocation error\n");
3369                         return XRAN_STATUS_FAIL;
3370                     }
3371                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3372                     pThCtx->worker_id = 1;
3373                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3374                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
3375                     pThCtx->task_func = ring_processing_func_per_port;
3376                     pThCtx->task_arg  = (void*)0;
3377                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3378                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3379
3380                     /** 2  FH RX and BBDEV */
3381                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3382                     if(pThCtx == NULL){
3383                         print_err("pThCtx allocation error\n");
3384                         return XRAN_STATUS_FAIL;
3385                     }
3386                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3387                     pThCtx->worker_id = 2;
3388                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3389                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
3390                     pThCtx->task_func = ring_processing_func_per_port;
3391                     pThCtx->task_arg  = (void*)1;
3392                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3393                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3394
3395                     /** 3  FH RX and BBDEV */
3396                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3397                     if(pThCtx == NULL){
3398                         print_err("pThCtx allocation error\n");
3399                         return XRAN_STATUS_FAIL;
3400                     }
3401                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3402                     pThCtx->worker_id = 3;
3403                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3404                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
3405                     pThCtx->task_func = ring_processing_func_per_port;
3406                     pThCtx->task_arg  = (void*)2;
3407                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3408                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3409
3410                     /** 4  Eth TX */
3411                     pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3412                     if(pThCtx == NULL){
3413                         print_err("pThCtx allocation error\n");
3414                         return XRAN_STATUS_FAIL;
3415                     }
3416                     memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3417                     pThCtx->worker_id = 4;
3418                     pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3419                     snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
3420                     pThCtx->task_func = process_dpdk_io_tx;
3421                     pThCtx->task_arg  = NULL;
3422                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3423                     eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3424                 }
3425             break;
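            /*
             * case 7: O-RU with four ports; an Ethernet RX worker plus one
             * FH RX worker per port (the remainder of the layout continues
             * below).
             */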
3426             case 7:
3427
3428             if((fh_init->xran_ports == 4) && (eth_ctx->io_cfg.id == O_RU))
3429             {
3430                 /*** O_RU specific config */
3431                 /* timing core */
3432                 eth_ctx->time_wrk_cfg.f     = NULL;
3433                 eth_ctx->time_wrk_cfg.arg   = NULL;
3434                 eth_ctx->time_wrk_cfg.state = 1;
3435
3436                 /* workers */
3437                 /** 0  Eth RX */
3438                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3439                 if(pThCtx == NULL){
3440                     print_err("pThCtx allocation error\n");
3441                     return XRAN_STATUS_FAIL;
3442                 }
3443                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3444                 pThCtx->worker_id = 0;
3445                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3446                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
3447                 pThCtx->task_func = process_dpdk_io_rx;
3448                 pThCtx->task_arg  = NULL;
3449                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3450                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3451
3452                 /** 1  FH RX and BBDEV */
3453                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3454                 if(pThCtx == NULL){
3455                     print_err("pThCtx allocation error\n");
3456                     return XRAN_STATUS_FAIL;
3457                 }
3458                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3459                 pThCtx->worker_id = 1;
3460                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3461                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
3462                 pThCtx->task_func = ring_processing_func_per_port;
3463                 pThCtx->task_arg  = (void*)0;
3464                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3465                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3466
3467                 /** 2  FH RX and BBDEV */
3468                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3469                 if(pThCtx == NULL){
3470                     print_err("pThCtx allocation error\n");
3471                     return XRAN_STATUS_FAIL;
3472                 }
3473                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3474                 pThCtx->worker_id = 2;
3475                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3476                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
3477                 pThCtx->task_func = ring_processing_func_per_port;
3478                 pThCtx->task_arg  = (void*)1;
3479                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3480                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3481
3482                 /** 3  FH RX and BBDEV */
3483                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3484                 if(pThCtx == NULL){
3485                     print_err("pThCtx allocation error\n");
3486                     return XRAN_STATUS_FAIL;
3487                 }
3488                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3489                 pThCtx->worker_id = 3;
3490                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3491                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
3492                 pThCtx->task_func = ring_processing_func_per_port;
3493                 pThCtx->task_arg  = (void*)2;
3494                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3495                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3496
3497                 /** 4  FH RX and BBDEV */
3498                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3499                 if(pThCtx == NULL){
3500                     print_err("pThCtx allocation error\n");
3501                     return XRAN_STATUS_FAIL;
3502                 }
3503                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3504                 pThCtx->worker_id = 4;
3505                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3506                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p3", core_map[pThCtx->worker_id]);
3507                 pThCtx->task_func = ring_processing_func_per_port;
3508                 pThCtx->task_arg  = (void*)3;
3509                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3510                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3511
3512                 /** 5  FH TX and BBDEV */
3513                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3514                 if(pThCtx == NULL){
3515                     print_err("pThCtx allocation error\n");
3516                     return XRAN_STATUS_FAIL;
3517                 }
3518                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3519                 pThCtx->worker_id = 5;
3520                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3521                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
3522                 pThCtx->task_func = process_dpdk_io_tx;
3523                 pThCtx->task_arg  = NULL;
3524                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3525                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3526
3527             } /* -- if xran->ports == 4 -- */
3528             else if(eth_ctx->io_cfg.id == O_DU){
3529                 if(fh_init->xran_ports == 3)
3530                     worker_ports = (void *)((1<<2) & xran_port_mask);
3531                 else if(fh_init->xran_ports == 4)
3532                     worker_ports = (void *)((1<<3) & xran_port_mask);
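                /* worker_ports: DL U-Plane port mask handed to the last generator
                 * worker (worker 5 below); bit k selects xRAN port k, masked by
                 * the ports actually enabled in xran_port_mask. */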
3533                 /* timing core */
3534                 eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
3535                 eth_ctx->time_wrk_cfg.arg   = NULL;
3536                 eth_ctx->time_wrk_cfg.state = 1;
3537
3538                 /* workers */
3539                 /** 0 **/
3540                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3541                 if(pThCtx == NULL){
3542                     print_err("pThCtx allocation error\n");
3543                     return XRAN_STATUS_FAIL;
3544                 }
3545                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3546                 pThCtx->worker_id      = 0;
3547                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3548                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3549                 pThCtx->task_func = ring_processing_func;
3550                 pThCtx->task_arg  = NULL;
3551                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3552                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3553
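                /* Route UL C-Plane jobs of the remaining ports to this worker:
                 * job2wrk_id maps a job type to the worker that services it. */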
3554                 for (i = 2; i < fh_init->xran_ports; i++) {
3555                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3556                     if(p_dev_update == NULL) {
3557                         print_err("p_dev_update\n");
3558                         return XRAN_STATUS_FAIL;
3559                     }
3560                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3561                     printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3562                 }
3563
3564                 /** 1 - CP GEN **/
3565                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3566                 if(pThCtx == NULL){
3567                     print_err("pThCtx allocation error\n");
3568                     return XRAN_STATUS_FAIL;
3569                 }
3570                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3571                 pThCtx->worker_id      = 1;
3572                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3573                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3574                 pThCtx->task_func = xran_processing_timer_only_func;
3575                 pThCtx->task_arg  = NULL;
3576                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3577                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3578
3579                 /** 2 UP GEN **/
3580                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3581                 if(pThCtx == NULL){
3582                     print_err("pThCtx allocation error\n");
3583                     return XRAN_STATUS_FAIL;
3584                 }
3585                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3586                 pThCtx->worker_id    = 2;
3587                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3588                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3589                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3590                 pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
3591                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3592                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3593
3594                 for (i = (fh_init->xran_ports-1); i < fh_init->xran_ports; i++) {
3595                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3596                     if(p_dev_update == NULL) {
3597                         print_err("p_dev_update\n");
3598                         return XRAN_STATUS_FAIL;
3599                     }
3600                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3601                     printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3602                 }
3603
3604                 /** 3 UP GEN **/
3605                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3606                 if(pThCtx == NULL){
3607                     print_err("pThCtx allocation error\n");
3608                     return XRAN_STATUS_FAIL;
3609                 }
3610                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3611                 pThCtx->worker_id    = 3;
3612                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3613                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3614                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3615                 pThCtx->task_arg  = (void*)((1<<1) & xran_port_mask);
3616                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3617                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3618
3619                 for (i = (fh_init->xran_ports - 2); i < (fh_init->xran_ports - 1); i++) {
3620                     struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
3621                     if(p_dev_update == NULL) {
3622                         print_err("p_dev_update\n");
3623                         return XRAN_STATUS_FAIL;
3624                     }
3625                     p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3626                     printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3627                 }
3628
3629                 /** 4 UP GEN **/
3630                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3631                 if(pThCtx == NULL){
3632                     print_err("pThCtx allocation error\n");
3633                     return XRAN_STATUS_FAIL;
3634                 }
3635                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3636                 pThCtx->worker_id    = 4;
3637                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3638                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3639                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3640                 pThCtx->task_arg  = (void*)((1<<2) & xran_port_mask);
3641                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3642                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3643
3644                 /** 5 UP GEN **/
3645                 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3646                 if(pThCtx == NULL){
3647                     print_err("pThCtx allocation error\n");
3648                     return XRAN_STATUS_FAIL;
3649                 }
3650                 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3651                 pThCtx->worker_id    = 5;
3652                 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3653                 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3654                 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3655                 pThCtx->task_arg  = worker_ports;
3656                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
3657                 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
3658             }
3659             else {
3660                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3661                 return XRAN_STATUS_FAIL;
3662             }
3663             break;
3664
3665             default:
3666                 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3667                 return XRAN_STATUS_FAIL;
3668         }
3669     } else {
3670         print_err("unsupported configuration\n");
3671         return XRAN_STATUS_FAIL;
3672     }
3673
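    /* io_cfg.pkt_proc_core is a core bitmask: e.g. 0x30 launches the next two
     * configured pkt_wrk_cfg entries on cores 4 and 5, in ascending core order;
     * pkt_proc_core_64_127 covers cores 64-127 the same way. */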
3674     nWorkerCore = 1LL;
3675     if(eth_ctx->io_cfg.pkt_proc_core) {
3676         for (i = 0; i < coreNum && i < 64; i++) {
3677             if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
3678                 xran_core_used[xran_num_cores_used++] = i;
3679                 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
3680                     rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
3681                 eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].state = 1; /* index by worker entry, consistent with .f/.arg above */
3682                 if(eth_ctx->pkt_proc_core_id == 0)
3683                     eth_ctx->pkt_proc_core_id = i;
3684                 printf("spawn worker %d core %d\n", eth_ctx->num_workers, i);
3685                 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
3686             }
3687             nWorkerCore = nWorkerCore << 1;
3688         }
3689     }
3690
3691     nWorkerCore = 1LL;
3692     if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
3693         for (i = 64; i < coreNum && i < 128; i++) {
3694             if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
3695                 xran_core_used[xran_num_cores_used++] = i;
3696                 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
3697                     rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
3698                 eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].state = 1; /* index by worker entry, consistent with .f/.arg above */
3699                 if(eth_ctx->pkt_proc_core_id == 0)
3700                     eth_ctx->pkt_proc_core_id = i;
3701                 printf("spawn worker %d core %d\n", eth_ctx->num_workers, i);
3702                 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
3703             }
3704             nWorkerCore = nWorkerCore << 1;
3705         }
3706     }
3707
3708     return XRAN_STATUS_SUCCESS;
3709 }
3710 int32_t
3711 xran_open(void *pHandle, struct xran_fh_config* pConf)
3712 {
3713     int32_t ret = XRAN_STATUS_SUCCESS;
3714     int32_t i;
3715     uint8_t nNumerology = 0;
3716     struct xran_device_ctx  *p_xran_dev_ctx = NULL;
3717     struct xran_fh_config   *pFhCfg  = NULL;
3718     struct xran_fh_init     *fh_init = NULL;
3719     struct xran_ethdi_ctx   *eth_ctx = xran_ethdi_get_ctx();
3720     int32_t wait_time = 10;
3721     int64_t offset_sec, offset_nsec;
3722
3723     if(pConf->dpdk_port < XRAN_PORTS_NUM) {
3724         p_xran_dev_ctx  = xran_dev_get_ctx_by_id(pConf->dpdk_port);
3725     } else {
3726         print_err("@0x%p [ru %d] pConf->dpdk_port >= XRAN_PORTS_NUM\n", pConf, pConf->dpdk_port);
3727         return XRAN_STATUS_FAIL;
3728     }
3729
3730     if(p_xran_dev_ctx == NULL) {
3731         print_err("[ru %d] p_xran_dev_ctx == NULL ", pConf->dpdk_port);
3732         return XRAN_STATUS_FAIL;
3733     }
3734
3735     pFhCfg = &p_xran_dev_ctx->fh_cfg;
3736     memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
3737
3738     fh_init = &p_xran_dev_ctx->fh_init;
3739     if(fh_init == NULL)
3740         return XRAN_STATUS_FAIL;
3741
3742     if(pConf->log_level) {
3743         printf(" %s: %s Category %s\n", __FUNCTION__,
3744         (pFhCfg->ru_conf.xranTech == XRAN_RAN_5GNR) ? "5G NR" : "LTE",
3745         (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
3746     }
3747
3748     p_xran_dev_ctx->enableCP    = pConf->enableCP;
3749     p_xran_dev_ctx->enablePrach = pConf->prachEnable;
3750     p_xran_dev_ctx->enableSrs   = pConf->srsEnable;
3751     p_xran_dev_ctx->enableSrsCp = pConf->srsEnableCp;
3752     p_xran_dev_ctx->nSrsDelaySym = pConf->SrsDelaySym;
3753     p_xran_dev_ctx->puschMaskEnable = pConf->puschMaskEnable;
3754     p_xran_dev_ctx->puschMaskSlot = pConf->puschMaskSlot;
3755     p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna;
3756     p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable = pConf->RunSlotPrbMapBySymbolEnable;
3757     p_xran_dev_ctx->dssEnable = pConf->dssEnable;
3758     p_xran_dev_ctx->dssPeriod = pConf->dssPeriod;
3759     for(i=0; i<pConf->dssPeriod; i++) {
3760         p_xran_dev_ctx->technology[i] = pConf->technology[i];
3761     }
3762
3763     if(pConf->GPS_Alpha || pConf->GPS_Beta ){
3764         offset_sec = pConf->GPS_Beta / 100;    /* resolution of GPS_Beta is 10 ms (100 units per second) */
3765         offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 10000000LL + pConf->GPS_Alpha; /* remainder in 10 ms units -> ns */
3766         p_xran_dev_ctx->offset_sec = offset_sec;
3767         p_xran_dev_ctx->offset_nsec = offset_nsec;
3768     }else {
3769         p_xran_dev_ctx->offset_sec  = 0;
3770         p_xran_dev_ctx->offset_nsec = 0;
3771     }
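    /* Worked example (illustrative values): GPS_Beta = 250, GPS_Alpha = 500
     * -> offset_sec  = 250 / 100 = 2
     * -> offset_nsec = (250 - 2 * 100) * 1e7 + 500 = 500000500 ns (0.5 s + 500 ns) */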
3772
3773
3774     nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
3775
3776     if (pConf->nCC > XRAN_MAX_SECTOR_NR) {
3777         if(pConf->log_level)
3778             printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);
3779         pConf->nCC = XRAN_MAX_SECTOR_NR;
3780     }
3781
3782     if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER  || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) {
3783         print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);
3784         return XRAN_STATUS_FAIL;
3785     }
3786
3787     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) {
3788         if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) {
3789             return ret;
3790         }
3791     }
3792
3793     /* setup PRACH configuration for C-Plane */
3794     if(pConf->dssEnable) {
3795         if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR)) < 0)
3796             return ret;
3797         if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx)) < 0)
3798             return ret;
3799     }
3800     else {
3801         if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) {
3802             if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR)) < 0) {
3803                 return ret;
3804             }
3805         } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) {
3806             if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx)) < 0) {
3807                 return ret;
3808             }
3809         }
3810     }
3811
3812     if((ret  = xran_init_srs(pConf, p_xran_dev_ctx))< 0){
3813         return ret;
3814     }
3815
3816     if((ret  = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){
3817         return ret;
3818     }
3819
3820     if((ret  = xran_init_sectionid(p_xran_dev_ctx)) < 0){
3821         return ret;
3822     }
3823
3824     if((ret  = xran_init_seqid(p_xran_dev_ctx)) < 0){
3825         return ret;
3826     }
3827
3828     if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
3829         if((ret  = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) {
3830             return ret;
3831         }
3832
3833         if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) {
3834             if((ret  = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) {
3835                 return ret;
3836             }
3837         }
3838     }
3839
3840     if(pConf->ru_conf.xran_max_frame) {
3841        xran_max_frame = pConf->ru_conf.xran_max_frame;
3842        printf("xran_max_frame %d\n", xran_max_frame);
3843     }
3844
3845     p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology);
3846     if (interval_us > p_xran_dev_ctx->interval_us_local)
3847     {
3848         interval_us = xran_fs_get_tti_interval(nNumerology); /* only update interval_us for the maximum numerology, i.e. the shortest TTI */
3849     }
3850
3851     if(pConf->log_level) {
3852         printf("%s: interval_us=%ld, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local);
3853     }
3854     if (nNumerology >= timing_get_numerology())
3855     {
3856         timing_set_numerology(nNumerology);
3857     }
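    /* TTI length per 3GPP numerology: interval = 1000 / 2^mu microseconds,
     * e.g. mu=0 -> 1000 us, mu=1 -> 500 us, mu=3 -> 125 us. */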
3858
3859     for(i = 0; i < pConf->nCC; i++){
3860         xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
3861             pConf->frame_conf.sSlotConfig);
3862     }
3863
3864     xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology));
3865
3866     /* if send_cpmbuf2ring/send_upmbuf2ring need to be changed from the default functions,
3867      * they should be set between xran_init and xran_open */
3868     if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
3869         p_xran_dev_ctx->send_cpmbuf2ring    = xran_ethdi_mbuf_send_cp;
3870     if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
3871         p_xran_dev_ctx->send_upmbuf2ring    = xran_ethdi_mbuf_send;
3872
3873     if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
3874         if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
3875             p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
3876     } else {
3877         if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
3878             p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
3879     }
3880
3881     if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
3882         p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
3883     printf("bbu_offload %d\n", p_xran_dev_ctx->fh_init.io_cfg.bbu_offload);
3884     if(pConf->dpdk_port == 0) {
3885         /* create all thread on open of port 0 */
3886         xran_num_cores_used = 0;
3887         if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
3888             eth_ctx->bbdev_dec = pConf->bbdev_dec;
3889             eth_ctx->bbdev_enc = pConf->bbdev_enc;
3890         }
3891
3892         if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
3893             printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
3894             p_xran_dev_ctx->timing_source_thread_running = 0;
3895             xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
3896             if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
3897                 rte_panic("xran_timing_source_thread() failed to start\n");
3898         } else if(pConf->log_level) {
3899             printf("Eth port was not opened. Processing thread was not started\n");
3900         }
3901     } else {
3902         if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
3903             if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
3904                 return ret;
3905             }
3906         }
3907     }
3908
3909     if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
3910         if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
3911             if((ret = xran_spawn_workers()) < 0) {
3912                 return ret;
3913             }
3914         }
3915         printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, sched_getcpu(), getpid());
3916         printf("Waiting on Timing thread...\n");
3917         while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
3918             usleep(100);
3919         }
3920     }
3921
3922     print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
3923     return ret;
3924 }
3925
3926 int32_t
3927 xran_start(void *pHandle)
3928 {
3929     struct tm * ptm;
3930     /* ToS = Top of Second start +- 1.5us */
3931     struct timespec ts;
3932     char buff[100];
3933     int i;
3934     struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
3935     struct xran_prb_map * prbMap0 = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[0][0][0].sBufferList.pBuffers->pData;
3936     for(i = 0; i < XRAN_MAX_SECTIONS_PER_SLOT && i < prbMap0->nPrbElm; i++)
3937     {
3938         p_xran_dev_ctx->numSetBFWs_arr[i] = prbMap0->prbMap[i].bf_weight.numSetBFWs;
3939     }
3940
3941     if(xran_get_if_state() == XRAN_RUNNING) {
3942         print_err("Already STARTED!!");
3943         return (-1);
3944     }
3945     timespec_get(&ts, TIME_UTC);
3946     ptm = gmtime(&ts.tv_sec);
3947     if(ptm){
3948         strftime(buff, sizeof(buff), "%D %T", ptm);
3949         printf("%s: XRAN start time: %s.%09ld UTC [%ld]\n",
3950             (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
3951     }
3952
3953     if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable)
3954     {
3955         xran_if_current_state = XRAN_OWDM;
3956     }
3957     else
3958     {
3959         xran_if_current_state = XRAN_RUNNING;
3960     }
3961     return 0;
3962 }
3963
3964 int32_t
3965 xran_stop(void *pHandle)
3966 {
3967     if(xran_get_if_state() == XRAN_STOPPED) {
3968         print_err("Already STOPPED!!");
3969         return (-1);
3970     }
3971
3972     xran_if_current_state = XRAN_STOPPED;
3973     return 0;
3974 }
3975
3976 int32_t
3977 xran_close(void *pHandle)
3978 {
3979     int32_t ret = XRAN_STATUS_SUCCESS;
3980     struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
3981
3982     xran_if_current_state = XRAN_STOPPED;
3983     ret = xran_cp_free_sectiondb(p_xran_dev_ctx);
3984
3985     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)
3986         xran_ruemul_release(p_xran_dev_ctx);
3987
3988 #ifdef RTE_LIBRTE_PDUMP
3989     /* uninitialize packet capture framework */
3990     rte_pdump_uninit();
3991 #endif
3992     return ret;
3993 }
3994
3995 /* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open;
3996  * each callback falls back to its default during open if it is left as NULL */
3997 int32_t
3998 xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
3999 {
4000     struct xran_device_ctx *p_xran_dev_ctx;
4001
4002     if(xran_get_if_state() == XRAN_RUNNING) {
4003         print_err("Cannot register callback while running!!\n");
4004         return (-1);
4005     }
4006
4007     p_xran_dev_ctx = xran_dev_get_ctx();
4008
4009     p_xran_dev_ctx->send_cpmbuf2ring    = mbuf_send_cp;
4010     p_xran_dev_ctx->send_upmbuf2ring    = mbuf_send_up;
4011
4012     p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
4013
4014     return (0);
4015 }
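
/* Usage sketch (my_send_cp/my_send_up are hypothetical callbacks of type
 * xran_ethdi_mbuf_send_fn; must run after xran_init and before xran_open):
 *
 *     xran_register_cb_mbuf2ring(my_send_cp, my_send_up);
 */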
4016
4017 int32_t
4018 xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx,  uint32_t *nSlotIdx, uint64_t *nSecond)
4019 {
4020     int32_t tti = 0;
4021     struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
4022     if (!p_xran_dev_ctx)
4023     {
4024         print_err("Null xRAN context on port id %u!!\n", PortId);
4025         return 0;
4026     }
4027
4028     tti           = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT);
4029     *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
4030     *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
4031     *nFrameIdx    = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
4032     *nSecond      = timing_get_current_second();
4033
4034     return tti;
4035 }
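
/* Sketch of the index arithmetic, assuming mu=1 (interval_us_local = 500 us,
 * 2 slots per subframe): xran_lib_ota_sym_idx = 28 -> tti = 28 / 14 = 2
 * -> nSlotIdx = 2 % 2 = 0, nSubframeIdx = (2 / 2) % 10 = 1. */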
4036
4037 int32_t
4038 xran_set_debug_stop(int32_t value, int32_t count)
4039 {
4040     return timing_set_debug_stop(value, count);
4041 }
4042
4043
4044 int32_t xran_get_num_prb_elm(struct xran_prb_map* p_PrbMapIn, uint32_t mtu)
4045 {
4046     int32_t i,j = 0;
4047     int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4048     struct xran_prb_elm *p_prb_elm_src;
4049     int32_t nRBremain;
4052     int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4053     int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4054     uint32_t nRBSize=0;
4055
4056     if (mtu==9600)
4057         nmaxRB--;   /* empirically an MTU of 9600 carries only 195 RBs, not the computed 196, so keep one RB of margin */
4058
4059     for (i = 0; i < p_PrbMapIn->nPrbElm; i++)
4060     {
4061         p_prb_elm_src = &p_PrbMapIn->prbMap[i];
4062         if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
4063         {
4064             j++;
4065         }
4066         else
4067         {
4068             nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4069             j++;
4070             while (nRBremain > 0)
4071             {
4072                 nRBSize = RTE_MIN(nmaxRB, nRBremain);
4073                 nRBremain -= nRBSize;
4074                 j++;
4075             }
4076         }
4077     }
4078
4079     return j;
4080 }
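
/* Counting example: with nmaxRB = 100, one element of nRBSize = 273 yields
 * three sections (100 + 100 + 73); an element that already fits counts as one. */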
4081
4082
4083 int32_t xran_init_PrbMap_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
4084 {
4085     int32_t i,j = 0;
4086     int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4087     struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
4088     int32_t nRBStart_tmp, nRBremain;
4091     int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4092     int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4093
4094     if (mtu==9600)
4095         nmaxRB--;   /* empirically an MTU of 9600 carries only 195 RBs, not the computed 196, so keep one RB of margin */
4096
4097     memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
4098     for (i = 0; i < p_PrbMapIn->nPrbElm; i++)
4099     {
4100         p_prb_elm_src = &p_PrbMapIn->prbMap[i];
4101         p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4102         memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4103
4104         // int32_t nStartSymb, nEndSymb, numSymb, nRBStart, nRBEnd, nRBSize;
4108         {
4109             p_prb_elm_dst->IsNewSect = 1;
4110             p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
4111             p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4112             p_prb_elm_dst->nSectId = i;
4113             j++;
4114         }
4115         else
4116         {
4117             nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
4118             nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4119             p_prb_elm_dst->IsNewSect = 1;
4120             p_prb_elm_dst->UP_nRBSize = nmaxRB;
4121             p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4122             p_prb_elm_dst->nSectId = i;
4123             j++;
4124             while (nRBremain > 0)
4125             {
4126                 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4127                 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4128                 p_prb_elm_dst->IsNewSect = 0;
4129                 p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
4130                 p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
4131                 nRBremain -= p_prb_elm_dst->UP_nRBSize;
4132                 nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
4133                 p_prb_elm_dst->nSectId = i;
4134                 j++;
4135             }
4136         }
4137     }
4138
4139     p_PrbMapOut->nPrbElm = j;
4140     return 0;
4141 }
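
/* Split example: with nmaxRB = 100, an input element covering RBs 0..272
 * becomes three entries: RBs 0-99 (IsNewSect = 1) plus RBs 100-199 and
 * 200-272 (IsNewSect = 0, same nSectId), i.e. one section fragmented
 * across several U-Plane packets. */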
4142
4143
4144 int32_t xran_init_PrbMap_from_cfg_for_rx(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
4145 {
4146     int32_t i,j = 0;
4147     int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4148     struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
4149     int32_t nRBStart_tmp, nRBremain;
4152     int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4153     int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4154
4155     if (mtu==9600)
4156         nmaxRB--;   /* empirically an MTU of 9600 carries only 195 RBs, not the computed 196, so keep one RB of margin */
4157     nmaxRB *= XRAN_MAX_FRAGMENT;    /* RX can reassemble up to XRAN_MAX_FRAGMENT U-Plane fragments per section */
4158
4159     memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
4160     for (i = 0; i < p_PrbMapIn->nPrbElm; i++)
4161     {
4162         p_prb_elm_src = &p_PrbMapIn->prbMap[i];
4163         p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4164         memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4165
4166         if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
4167         {
4168             p_prb_elm_dst->IsNewSect = 1;
4169             p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
4170             p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4171             p_prb_elm_dst->nSectId = j;
4172             j++;
4173         }
4174         else
4175         {
4176             nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
4177             nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4178             p_prb_elm_dst->IsNewSect = 1;
4179             p_prb_elm_dst->nRBSize = nmaxRB;
4180             p_prb_elm_dst->UP_nRBSize = nmaxRB;
4181             p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4182             p_prb_elm_dst->nSectId = j;
4183             j++;
4184             while (nRBremain > 0)
4185             {
4186                 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4187                 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4188                 p_prb_elm_dst->IsNewSect = 1;
4189                 p_prb_elm_dst->nRBSize = RTE_MIN(nmaxRB, nRBremain);
4190                 p_prb_elm_dst->nRBStart = nRBStart_tmp;
4191                 p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
4192                 p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
4193                 nRBremain -= p_prb_elm_dst->UP_nRBSize;
4194                 nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
4195                 p_prb_elm_dst->nSectId = j;
4196                 j++;
4197             }
4198         }
4199     }
4200
4201     p_PrbMapOut->nPrbElm = j;
4202     return 0;
4203 }
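
/* Note: unlike the TX variant above, each split entry here starts a new
 * section (IsNewSect = 1, own nSectId) and nmaxRB is scaled by
 * XRAN_MAX_FRAGMENT, reflecting that the RX side works on reassembled
 * sections rather than individual packets. */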
4204
4205
4206 int32_t xran_init_PrbMap_by_symbol_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu, uint32_t xran_max_prb)
4207 {
4208     int32_t i = 0, j = 0, nPrbElm = 0;
4209     int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4210     struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
4211     struct xran_prb_elm prbMapTemp[XRAN_NUM_OF_SYMBOL_PER_SLOT];
4212     int32_t nRBStart_tmp, nRBremain, nStartSymb, nEndSymb, nRBStart, nRBEnd, nRBSize;
4215     int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4216     int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4217     if (mtu==9600)
4218         nmaxRB--;   /* empirically an MTU of 9600 carries only 195 RBs, not the computed 196, so keep one RB of margin */
4219
4220
4221     memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
4222     for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
4223     {
4224         p_prb_elm_dst = &prbMapTemp[i];
4226         nRBStart = xran_max_prb;
4227         nRBEnd = 0;
4228
4229         for(j = 0; j < p_PrbMapIn->nPrbElm; j++)
4230         {
4231             p_prb_elm_src = &(p_PrbMapIn->prbMap[j]);
4232             nStartSymb = p_prb_elm_src->nStartSymb;
4233             nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
4234
4235             if((i >=  nStartSymb) && (i < nEndSymb))
4236             {
4237                 if(nRBStart > p_prb_elm_src->nRBStart)
4238                 {
4239                     nRBStart = p_prb_elm_src->nRBStart;
4240                 }
4241                 if(nRBEnd < (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize))
4242                 {
4243                     nRBEnd = (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize);
4244                 }
4245
4246                 p_prb_elm_dst->nBeamIndex = p_prb_elm_src->nBeamIndex;
4247                 p_prb_elm_dst->bf_weight_update = p_prb_elm_src->bf_weight_update;
4248                 p_prb_elm_dst->compMethod = p_prb_elm_src->compMethod;
4249                 p_prb_elm_dst->iqWidth = p_prb_elm_src->iqWidth;
4250                 p_prb_elm_dst->ScaleFactor = p_prb_elm_src->ScaleFactor;
4251                 p_prb_elm_dst->reMask = p_prb_elm_src->reMask;
4252                 p_prb_elm_dst->BeamFormingType = p_prb_elm_src->BeamFormingType;
4253             }
4254         }
4255
4256         if(nRBEnd < nRBStart)
4257         {
4258             p_prb_elm_dst->nRBStart = 0;
4259             p_prb_elm_dst->nRBSize = 0;
4260             p_prb_elm_dst->nStartSymb = i;
4261             p_prb_elm_dst->numSymb = 1;
4262         }
4263         else
4264         {
4265             p_prb_elm_dst->nRBStart = nRBStart;
4266             p_prb_elm_dst->nRBSize = nRBEnd - nRBStart;
4267             p_prb_elm_dst->nStartSymb = i;
4268             p_prb_elm_dst->numSymb = 1;
4269         }
4270     }
4271
4272     for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
4273     {
4274         if((prbMapTemp[i].nRBSize != 0))
4275         {
4276             nRBStart = prbMapTemp[i].nRBStart;
4277             nRBSize = prbMapTemp[i].nRBSize;
4278             prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
4279             prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
4280             prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
4281             prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
4282             prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
4283             prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
4284             prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
4285             prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
4286             prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
4287             prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
4288             i++;
4289             break;
4290         }
4291     }
4292
4293     for(; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
4294     {
4295         if((nRBStart == prbMapTemp[i].nRBStart) && (nRBSize == prbMapTemp[i].nRBSize))
4296         {
4297             prbMapTemp[nPrbElm].numSymb++;
4298         }
4299         else
4300         {
4301             nPrbElm++;
4302             prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
4303             prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
4304             prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
4305             prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
4306             prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
4307             prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
4308             prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
4309             prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
4310             prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
4311             prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
4312
4313             nRBStart = prbMapTemp[i].nRBStart;
4314             nRBSize = prbMapTemp[i].nRBSize;
4315         }
4316     }
4317
4318     for(i = 0; i < nPrbElm; i++)
4319     {
4320         if(prbMapTemp[i].nRBSize == 0)
4321             prbMapTemp[i].nRBSize = 1;
4322     }
4323
4324     if(prbMapTemp[nPrbElm].nRBSize != 0)
4325         nPrbElm++;
4326
4327
4328     j = 0;
4329
4330     for (i = 0; i < nPrbElm; i++)
4331     {
4332         p_prb_elm_src = &prbMapTemp[i];
4333         p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4334         memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4335         if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
4336         {
4337             p_prb_elm_dst->IsNewSect = 1;
4338             p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
4339             p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4340             p_prb_elm_dst->nSectId = i;
4341             j++;
4342         }
4343         else
4344         {
4345             nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
4346             nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4347             p_prb_elm_dst->IsNewSect = 1;
4348             p_prb_elm_dst->UP_nRBSize = nmaxRB;
4349             p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4350             p_prb_elm_dst->nSectId = i;
4351             j++;
4352             while (nRBremain > 0)
4353             {
4354                 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4355                 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4356                 p_prb_elm_dst->IsNewSect = 0;
4357                 p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
4358                 p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
4359                 nRBremain -= p_prb_elm_dst->UP_nRBSize;
4360                 nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
4361                 p_prb_elm_dst->nSectId = i;
4362                 j++;
4363             }
4364         }
4365     }
4366
4367     p_PrbMapOut->nPrbElm = j;
4368
4369     return 0;
4370 }
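
/* Merge example (illustrative): element A = RBs 0..99 on symbols 0-6,
 * element B = RBs 50..149 on symbols 4-13. Per-symbol union: symbols 0-3
 * -> RBs 0..99, symbols 4-6 -> RBs 0..149, symbols 7-13 -> RBs 50..149;
 * equal adjacent spans merge into three one-per-span sections, each then
 * fragmented against nmaxRB as above. */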
4371
4372 inline void MLogXRANTask(uint32_t taskid, uint64_t ticksstart, uint64_t ticksstop)
4373 {
4374     if (mlogxranenable)
4375     {
4376         MLogTask(taskid, ticksstart, ticksstop);
4377     }
4378     return;
4379 }
4380
4381 inline uint64_t MLogXRANTick(void)
4382 {
4383     if (mlogxranenable)
4384         return MLogTick();
4385     else
4386         return 0;
4387 }
4388
4389