1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN main functionality module
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
49 #include <rte_version.h>
51 #if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */
52 #include <rte_ecpri.h>
54 #include "xran_fh_o_du.h"
55 #include "xran_fh_o_ru.h"
56 #include "xran_main.h"
59 #include "xran_mem_mgr.h"
60 #include "xran_tx_proc.h"
61 #include "xran_rx_proc.h"
63 #include "xran_up_api.h"
64 #include "xran_cp_api.h"
65 #include "xran_sync_api.h"
66 #include "xran_lib_mlog_tasks_id.h"
67 #include "xran_timer.h"
68 #include "xran_common.h"
70 #include "xran_frame_struct.h"
71 #include "xran_printf.h"
72 #include "xran_cp_proc.h"
73 #include "xran_tx_proc.h"
74 #include "xran_rx_proc.h"
75 #include "xran_cb_proc.h"
76 #include "xran_ecpri_owd_measurements.h"
78 #include "xran_mlog_lnx.h"
/* ---- File-scope state and forward declarations for the XRAN main module ---- */
/* Per-port, per-sector library instance handles returned to the application. */
80 static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {{NULL}};
82 uint64_t interval_us = 1000; //the TTI interval of the cell with maximum numerology
/* Over-the-air timing state, tracked independently per xRAN port. */
84 uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */
85 uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a slot [0:13] */
86 uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1]
87 where TTI is TTI interval in microseconds */
89 uint16_t xran_SFN_at_Sec_Start = 0; /**< SFN at current second start */
90 uint16_t xran_max_frame = 1023; /**< value of max frame used. expected to be 99 (old compatibility mode) and 1023 as per section 9.7.2 System Frame Number Calculation */
/* Tick accounting used for core-utilization statistics. */
92 static uint64_t xran_total_tick = 0, xran_used_tick = 0;
93 static uint32_t xran_num_cores_used = 0;
94 static uint32_t xran_core_used[64] = {0};
/* first_call: set non-zero elsewhere once timing is established; gates C-plane TX paths below. */
95 int32_t first_call = 0;
96 int32_t mlogxranenable = 0;
/* Forward declarations: CP/UP TX descriptor pool helpers and timer callbacks defined in this file. */
98 struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void);
99 int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc);
101 void tti_ota_cb(struct rte_timer *tim, void *arg);
102 void tti_to_phy_cb(struct rte_timer *tim, void *arg);
104 int32_t xran_pkt_gen_process_ring(struct rte_ring *r);
/* Recompute the SFN that aligns with the start of the current second (GPS-based),
 * and roll per-second TX/RX byte counters into *_per_sec snapshots for each O-xU port.
 * Called on second boundaries (caller not visible in this chunk). */
107 xran_updateSfnSecStart(void)
109 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
110 struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
111 int32_t xran_ports = p_xran_dev_ctx->fh_init.xran_ports;
113 uint64_t currentSecond = timing_get_current_second();
114 // Assume always positive
115 uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
116 uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;
/* Frame number wraps at xran_max_frame (1023 per O-RAN CUS 9.7.2, or 99 in compat mode). */
117 uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
118 xran_SFN_at_Sec_Start = sfn;
/* Per-port counter rollover; pCnt is presumably re-pointed to each port's ctx on
 * lines not visible in this chunk — TODO(review) confirm against full source. */
120 for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){
121 pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter;
122 pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter;
123 pCnt->tx_bytes_counter = 0;
124 pCnt->rx_bytes_counter = 0;
126 pCnt = &p_xran_dev_ctx->fh_counters;
/* Return the slot index (within the current second) corresponding to the SFN at
 * second start: frames-at-second-start * slots-per-frame for the given TTI interval. */
131 static inline int32_t
132 xran_getSlotIdxSecond(uint32_t interval)
134 int32_t frameIdxSecond = xran_getSfnSecStart();
135 int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval);
136 return slotIndxSecond;
/* Accessor for the global interface state (started/stopped/configured). */
141 xran_get_if_state(void)
143 return xran_if_current_state;
/* Determine whether (subframe_id, slot_id) is a PRACH occasion for the given port,
 * based on the PRACH C-plane config derived from the 38.211 configuration tables.
 * Returns 1 for a PRACH slot, 0 otherwise (and 0 on unknown port / unsupported numerology). */
146 int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id)
148 int32_t is_prach_slot = 0;
149 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
150 if (p_xran_dev_ctx == NULL)
152 print_err("PortId %d not exist\n", PortId);
153 return is_prach_slot;
155 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
156 uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
158 if (nNumerology < 2){
159 //for FR1, in 38.211 tab 6.3.3.2-2&3 it is subframe index
160 if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){
161 if (pPrachCPConfig->nrofPrachInSlot == 0){
/* Branches for nrofPrachInSlot == 0/2 and slot selection are partially elided in this chunk. */
165 else if (pPrachCPConfig->nrofPrachInSlot == 2)
168 if (nNumerology == 0)
170 else if (slot_id == 1)
174 } else if (nNumerology == 3){
175 //for FR2, 38.211 tab 6.3.3.4 it is slot index of 60kHz slot
/* slotidx counts in the local numerology; >>1 converts to the 60 kHz reference slot. */
177 slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id;
178 if (pPrachCPConfig->nrofPrachInSlot == 2){
179 if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)
/* With one PRACH per 60 kHz slot, only the odd 120 kHz half-slot carries PRACH. */
182 if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){
187 print_err("Numerology %d not supported", nNumerology);
188 return is_prach_slot;
/* Copy SRS-related settings from the user FH config into the device context.
 * Always returns XRAN_STATUS_SUCCESS. */
192 xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
194 struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);
197 p_srs->symbMask = pConf->srs_conf.symbMask; /* deprecated */
198 p_srs->slot = pConf->srs_conf.slot;
/* ndm = non-delay-managed SRS transmission window parameters — TODO(review) confirm naming. */
199 p_srs->ndm_offset = pConf->srs_conf.ndm_offset;
200 p_srs->ndm_txduration = pConf->srs_conf.ndm_txduration;
201 p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset;
203 print_dbg("SRS sym %d\n", p_srs->slot);
204 print_dbg("SRS NDM offset %d\n", p_srs->ndm_offset);
205 print_dbg("SRS NDM Tx %d\n", p_srs->ndm_txduration);
206 print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);
208 return (XRAN_STATUS_SUCCESS);
/* LTE wrapper: initialize PRACH using the shared routine with the LTE RAN technology tag. */
212 xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
214 /* update Rach for LTE */
215 return xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_LTE);
/* Build the PRACH C-plane configuration for a device from the 38.211-derived
 * config tables (mmw / sub6 TDD / sub6 FDD), selected by PRACH config index and
 * duplex type. Also mirrors derived values back into pConf->prach_conf so the
 * application can read them. Returns XRAN_STATUS_SUCCESS. */
219 xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx, enum xran_ran_tech xran_tech)
223 struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);
224 const xRANPrachConfigTableStruct *pxRANPrachConfigTable;
225 uint8_t nNumerology = pConf->frame_conf.nNumerology;
226 uint8_t nPrachConfIdx = -1;// = pPRACHConfig->nPrachConfIdx;
227 struct xran_prach_cp_config *pPrachCPConfig = NULL;
/* With DSS enabled, keep separate PRACH CP configs for 5G NR vs LTE. */
228 if(pConf->dssEnable){
229 /*Check Slot type and */
230 if(xran_tech == XRAN_RAN_5GNR){
231 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
232 nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
235 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
236 nPrachConfIdx = pPRACHConfig->nPrachConfIdxLTE;
240 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
241 nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
/* Table selection: mmWave vs sub-6 TDD (nFrameDuplexType==1) vs sub-6 FDD. */
244 pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
245 else if (pConf->frame_conf.nFrameDuplexType == 1)
246 pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];
248 pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];
250 uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
251 const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];
252 memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));
254 printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);
/* Filter index by preamble format: long formats 0-2, long format 3, or short A/B/C. */
256 if (preambleFmrt <= 2)
258 pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_012; // 1 PRACH preamble format 0 1 2
260 else if (preambleFmrt == 3)
262 pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_3; // 1 PRACH preamble format 3
266 pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC; // 3, PRACH preamble format A1~3, B1~4, C0, C2
268 pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
269 pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
/* Short formats occupy 12 PRBs; long formats occupy 70 PRBs. */
270 pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
271 pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
272 pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
273 pPrachCPConfig->x = pxRANPrachConfigTable->x;
274 pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;
275 pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
276 pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
277 if (preambleFmrt >= FORMAT_A1)
279 pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
280 pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
284 pPrachCPConfig->numSymbol = 1;
285 pPrachCPConfig->occassionsInPrachSlot = 1;
/* Mark every candidate slot from the config table as a PRACH slot. */
289 printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);
290 pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
291 for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
293 slotNr = pxRANPrachConfigTable->slotNr[i];
295 pPrachCPConfig->isPRACHslot[slotNr] = 1;
297 printf(" %u ..", slotNr);
/* Record first/last PRACH symbols per sector for U-plane processing. */
301 for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){
302 p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;
303 p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
305 if(pConf->log_level){
306 printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);
/* PRACH eAxC IDs are allocated immediately after the regular eAxC range. */
309 pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx);
310 print_dbg("PRACH eAxC_offset %d\n", pPrachCPConfig->eAxC_offset);
312 /* Save some configs for app */
313 pPRACHConfig->startSymId = pPrachCPConfig->startSymId;
314 pPRACHConfig->lastSymId = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
315 pPRACHConfig->startPrbc = pPrachCPConfig->startPrbc;
316 pPRACHConfig->numPrbc = pPrachCPConfig->numPrbc;
317 pPRACHConfig->timeOffset = pPrachCPConfig->timeOffset;
318 pPRACHConfig->freqOffset = pPrachCPConfig->freqOffset;
319 pPRACHConfig->eAxC_offset = pPrachCPConfig->eAxC_offset;
321 return (XRAN_STATUS_SUCCESS);
/* Convert between PHY slot id and O-RAN CUS 5.3.2 slot id for the configured
 * numerology. dir==0: PHY -> xran (shift up); dir==1: xran -> PHY (shift down).
 * Shift amounts (2-mu vs 3-mu) depend on branches elided from this chunk —
 * TODO(review) confirm category/numerology conditions in the full source. */
325 xran_slotid_convert(uint16_t slot_id, uint16_t dir) //dir = 0, from PHY slotid to xran spec slotid as defined in 5.3.2, dir=1, from xran slotid to phy slotid
329 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
330 uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
338 return (slot_id << (2-mu));
342 return (slot_id << (3-mu));
349 return (slot_id >> (2-mu));
353 return (slot_id >> (3-mu));
/* Per-symbol OTA callback: at symbol 0 of a slot runs the TTI callback, then
 * drives TX symbol processing and any per-symbol user callbacks registered for
 * the current OTA symbol. Accumulates busy ticks into *used_tick for core-usage stats. */
360 sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)
362 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
363 long t1 = MLogXRANTick(), t2;
/* Slot boundary: symbol index within slot == 0 -> fire the TTI-level callback. */
366 if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){
368 tti_ota_cb(NULL, (void*)p_xran_dev_ctx);
369 *used_tick += get_ticks_diff(xran_tick(), t3);
373 if (xran_process_tx_sym(p_xran_dev_ctx))
375 *used_tick += get_ticks_diff(xran_tick(), t3);
378 /* check if there is call back to do something else on this symbol */
379 struct cb_elem_entry *cb_elm;
380 LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){
/* Each callback gets its own DPDK timer slot; ctx rotates through MAX_NUM_OF_DPDK_TIMERS. */
382 cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx);
383 p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx);
388 MLogXRANTask(PID_SYM_OTA_CB, t1, t2);
/* Map a job type to the lcore that should run it, based on how many worker
 * cores are configured: 0 workers -> everything on the timing core; 1 worker ->
 * OTA on timing core, the rest on worker 0; 2..6 workers -> OTA/deadline/sym-cb
 * on worker 0 and CP DL/UL on per-job worker ids from job2wrk_id. Falls back to
 * the timing core on any unexpected input. Returns the chosen lcore id. */
392 xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx)
394 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
395 uint32_t tim_lcore = eth_ctx->io_cfg.timing_core; /* default to timing core */
398 if(eth_ctx->num_workers == 0) { /* no workers */
399 tim_lcore = eth_ctx->io_cfg.timing_core;
400 } else if (eth_ctx->num_workers == 1) { /* one worker */
403 case XRAN_JOB_TYPE_OTA_CB:
404 tim_lcore = eth_ctx->io_cfg.timing_core;
406 case XRAN_JOB_TYPE_CP_DL:
407 case XRAN_JOB_TYPE_CP_UL:
408 case XRAN_JOB_TYPE_DEADLINE:
409 case XRAN_JOB_TYPE_SYM_CB:
410 tim_lcore = eth_ctx->worker_core[0];
413 print_err("incorrect job type id %d\n", job_type_id);
414 tim_lcore = eth_ctx->io_cfg.timing_core;
417 } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) {
420 case XRAN_JOB_TYPE_OTA_CB:
421 tim_lcore = eth_ctx->worker_core[0];
/* CP DL/UL can be pinned to distinct workers via the per-device job->worker map. */
423 case XRAN_JOB_TYPE_CP_DL:
424 tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]];
426 case XRAN_JOB_TYPE_CP_UL:
427 tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]];
429 case XRAN_JOB_TYPE_DEADLINE:
430 case XRAN_JOB_TYPE_SYM_CB:
431 tim_lcore = eth_ctx->worker_core[0];
434 print_err("incorrect job type id %d\n", job_type_id);
435 tim_lcore = eth_ctx->io_cfg.timing_core;
439 print_err("incorrect eth_ctx->num_workers id %d\n", eth_ctx->num_workers);
440 tim_lcore = eth_ctx->io_cfg.timing_core;
/* TTI boundary callback (runs at the start of every OTA slot). Responsibilities:
 *  - on port 0: register frame/subframe with MLog for trace correlation;
 *  - derive frame/subframe/slot ids for the current and next TTI;
 *  - publish tti_to_process into the double-buffered timer context (O-DU looks
 *    ahead one TTI; O-RU mirrors the previous entry);
 *  - arm the tti_to_phy timer if a PHY TTI callback is registered;
 *  - advance xran_lib_ota_tti with wrap at the max slot count. */
448 tti_ota_cb(struct rte_timer *tim, void *arg)
450 uint32_t frame_id = 0;
451 uint32_t subframe_id = 0;
452 uint32_t slot_id = 0;
453 uint32_t next_tti = 0;
455 uint32_t mlogVar[10];
456 uint32_t mlogVarCnt = 0;
457 uint64_t t1 = MLogTick();
458 uint32_t reg_tti = 0;
459 uint32_t reg_sfn = 0;
461 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
462 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx;
463 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
464 uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
466 unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx);
468 MLogTask(PID_TTI_TIMER, t1, MLogTick());
/* MLog frame registration is done once per TTI, driven by port 0 only. */
470 if(p_xran_dev_ctx->xran_port_id == 0){
472 if(xran_lib_ota_tti[0] == 0)
473 reg_tti = xran_fs_get_max_slot(PortId) - 1;
475 reg_tti = xran_lib_ota_tti[0] -1;
477 MLogIncrementCounter();
478 reg_sfn = XranGetFrameNum(reg_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us))*10 + XranGetSubFrameNum(reg_tti,SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);;
479 /* subframe and slot */
480 MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us)));
484 slot_id = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local));
485 subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
486 frame_id = XranGetFrameNum(xran_lib_ota_tti[PortId],xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
/* Double-buffered timer ctx: write the "other" entry (index toggled by tti parity). */
488 pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId];
490 /** tti as seen from PHY */
493 uint32_t nSubframeIdx;
496 uint8_t Numerlogy = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
497 uint8_t nNrOfSlotInSf = 1<<Numerlogy;
499 xran_get_slot_idx(0, &nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
500 nSfIdx = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*nNrOfSlotInSf
501 + nSubframeIdx*nNrOfSlotInSf
504 mlogVar[mlogVarCnt++] = 0x11111111;
505 mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId];
506 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
507 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId] / 14;
508 mlogVar[mlogVarCnt++] = frame_id;
509 mlogVar[mlogVarCnt++] = subframe_id;
510 mlogVar[mlogVarCnt++] = slot_id;
511 mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId] % XRAN_N_FE_BUF_LEN;
512 mlogVar[mlogVarCnt++] = nSfIdx;
513 mlogVar[mlogVarCnt++] = nSfIdx % XRAN_N_FE_BUF_LEN;
514 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
/* O-DU prepares one TTI ahead; O-RU works on the current TTI. */
517 if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU)
518 next_tti = xran_lib_ota_tti[PortId] + 1;
520 next_tti = xran_lib_ota_tti[PortId];
523 if(next_tti>= xran_fs_get_max_slot(PortId)){
524 print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
528 slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
529 subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
530 frame_id = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
532 print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
534 if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){
535 pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti;
537 pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1)^1].tti_to_process;
/* Hand off the PHY TTI indication to the selected worker/timing core. */
540 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) {
541 p_xran_dev_ctx->phy_tti_cb_done = 0;
542 xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore);
544 //slot index is increased to next slot at the beginning of current OTA slot
545 xran_lib_ota_tti[PortId]++;
546 if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) {
547 print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id);
548 xran_lib_ota_tti[PortId] = 0;
550 MLogXRANTask(PID_TTI_CB, t1, MLogTick());
/* Build and send DL C-plane sections for one slot (nSlotIdx) on the given port,
 * restricted to the requested CC range [nCcStart, nCcStart+nCcNum) and antenna
 * range [nAntStart, nAntStart+nAntNum). Only runs after timing is established
 * (first_call) and when C-plane is enabled. nSymMask/nSymStart/nSymNum appear
 * unused in the visible lines — TODO(review) confirm in full source. */
555 xran_prepare_cp_dl_slot(uint16_t xran_port_id, uint32_t nSlotIdx, uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
556 uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
558 long t1 = MLogXRANTick();
559 int32_t ret = XRAN_STATUS_SUCCESS;
561 uint32_t slot_id, subframe_id, frame_id;
564 uint8_t ant_id, num_eAxc, num_CCPorts;
567 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
568 if(unlikely(!p_xran_dev_ctx))
570 print_err("Null xRAN context!!\n");
573 //struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
574 uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
575 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
576 pHandle = p_xran_dev_ctx;
578 num_eAxc = xran_get_num_eAxc(pHandle);
579 num_CCPorts = xran_get_num_cc(pHandle);
581 if(first_call && p_xran_dev_ctx->enableCP)
583 tti = nSlotIdx ;//pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
584 buf_id = tti % XRAN_N_FE_BUF_LEN;
586 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
587 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
588 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
591 /* Wrap around to next second */
592 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
/* Section DB context rotates per TTI. */
595 ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;
597 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
598 #if defined(__INTEL_COMPILER)
599 #pragma vector always
601 for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum) && ant_id < num_eAxc); ++ant_id) {
602 for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) {
603 /* start new section information list */
604 xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
605 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
606 if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
607 if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData) {
608 /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
609 (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
610 &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]),
611 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
613 print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
616 print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers] \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
618 } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
619 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
620 } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
621 MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick());
/* Timer callback: generate and send DL C-plane sections for the TTI published
 * in the double-buffered timer context, across all CCs and eAxCs. Skipped when
 * BBU offload is enabled (app drives CP via xran_prepare_cp_dl_slot instead).
 * Section info is reset only at the start of a new slot (numSymsRemaining==0). */
627 tx_cp_dl_cb(struct rte_timer *tim, void *arg)
629 long t1 = MLogXRANTick();
631 uint32_t slot_id, subframe_id, frame_id;
634 uint8_t ant_id, num_eAxc, num_CCPorts;
637 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
639 if(unlikely(!p_xran_dev_ctx))
641 print_err("Null xRAN context!!\n");
645 if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
648 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
649 uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
650 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
651 pHandle = p_xran_dev_ctx;
653 num_eAxc = xran_get_num_eAxc(pHandle);
654 num_CCPorts = xran_get_num_cc(pHandle);
656 if(first_call && p_xran_dev_ctx->enableCP)
/* Read the TTI staged by tti_ota_cb in the opposite double-buffer entry. */
658 tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
659 buf_id = tti % XRAN_N_FE_BUF_LEN;
661 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
662 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
663 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
666 /* Wrap around to next second */
667 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
670 ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;
672 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
673 for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
674 for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {
675 if(0== p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id].numSymsRemaining)
676 {/* Start of new slot - reset the section info */
677 xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
679 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
680 if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
681 if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
682 /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
683 (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
684 &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]),
685 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
688 print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
690 } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
691 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
692 } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
693 MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick());
/* Deadline callback for static-schedule SRS reception: fires the per-CC SRS
 * callback for the TTI pulled from the rotating cb_timer_ctx ring. Only used
 * when SRS C-plane is disabled (enableSrsCp == 0). */
698 rx_ul_static_srs_cb(struct rte_timer *tim, void *arg)
700 long t1 = MLogXRANTick();
701 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
702 xran_status_t status = 0;
703 int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
705 //uint32_t nFrameIdx;
706 //uint32_t nSubframeIdx;
709 struct xran_timer_ctx* p_timer_ctx = NULL;
/* Bail out until the xran->PHY shared memory is ready. */
711 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
714 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
716 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
717 p_xran_dev_ctx->timer_put = 0;
719 rx_tti = p_timer_ctx->tti_to_process;
/* Wrap to the last slot of the SFN period when stepping back from tti 0. */
722 rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1);
724 rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
727 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
729 if(0 == p_xran_dev_ctx->enableSrsCp)
731 if(p_xran_dev_ctx->pSrsCallback[cc_id]){
732 struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
734 //pTag->cellId = cc_id;
735 pTag->slotiId = rx_tti;
736 pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */
737 p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
742 MLogXRANTask(PID_UP_STATIC_SRS_DEAD_LINE_CB, t1, MLogXRANTick());
/* Quarter-slot UL RX deadline callback: notifies the per-CC RX callback with
 * XRAN_ONE_FOURTHS_CB_SYM for CCs whose packet-callback tracker is clear, then
 * fires the user half-slot TTI callback (shared with the half/three-fourths
 * deadlines) honoring SkipTti. */
748 rx_ul_deadline_one_fourths_cb(struct rte_timer *tim, void *arg)
750 long t1 = MLogXRANTick();
751 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
752 xran_status_t status;
753 /* half of RX for current TTI as measured against current OTA time */
756 //uint32_t nFrameIdx;
757 //uint32_t nSubframeIdx;
760 struct xran_timer_ctx* p_timer_ctx = NULL;
761 /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
762 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
763 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
765 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
/* Pull the TTI for this deadline from the rotating callback-timer ring. */
768 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
769 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
770 p_xran_dev_ctx->timer_put = 0;
772 rx_tti = p_timer_ctx->tti_to_process;
774 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
775 if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
776 if(p_xran_dev_ctx->pCallback[cc_id]) {
777 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
779 //pTag->cellId = cc_id;
780 pTag->slotiId = rx_tti;
781 pTag->symbol = XRAN_ONE_FOURTHS_CB_SYM;
782 status = XRAN_STATUS_SUCCESS;
784 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
788 p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
/* User half-slot callback, optionally skipped SkipTti times. */
792 if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
793 if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
794 p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
796 p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
800 MLogXRANTask(PID_UP_UL_ONE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick());
/* Half-slot UL RX deadline callback: same flow as the quarter-slot variant but
 * signals XRAN_HALF_CB_SYM to the per-CC RX callbacks, then fires the user
 * half-slot TTI callback honoring SkipTti. */
804 rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)
806 long t1 = MLogXRANTick();
807 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
808 xran_status_t status;
809 /* half of RX for current TTI as measured against current OTA time */
812 //uint32_t nFrameIdx;
813 //uint32_t nSubframeIdx;
816 struct xran_timer_ctx* p_timer_ctx = NULL;
817 /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
818 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
819 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
821 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
/* Pull the TTI for this deadline from the rotating callback-timer ring. */
824 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
825 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
826 p_xran_dev_ctx->timer_put = 0;
828 rx_tti = p_timer_ctx->tti_to_process;
830 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
831 if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
832 if(p_xran_dev_ctx->pCallback[cc_id]) {
833 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
835 //pTag->cellId = cc_id;
836 pTag->slotiId = rx_tti;
837 pTag->symbol = XRAN_HALF_CB_SYM;
838 status = XRAN_STATUS_SUCCESS;
840 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
844 p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
848 if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
849 if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
850 p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
852 p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
856 MLogXRANTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogXRANTick());
/* Three-quarter-slot UL RX deadline callback: same flow as the quarter/half
 * variants but signals XRAN_THREE_FOURTHS_CB_SYM, then fires the user
 * half-slot TTI callback honoring SkipTti. */
860 rx_ul_deadline_three_fourths_cb(struct rte_timer *tim, void *arg)
862 long t1 = MLogXRANTick();
863 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
864 xran_status_t status;
865 /* half of RX for current TTI as measured against current OTA time */
868 //uint32_t nFrameIdx;
869 //uint32_t nSubframeIdx;
872 struct xran_timer_ctx* p_timer_ctx = NULL;
873 /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
874 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
875 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
877 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
/* Pull the TTI for this deadline from the rotating callback-timer ring. */
880 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
881 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
882 p_xran_dev_ctx->timer_put = 0;
884 rx_tti = p_timer_ctx->tti_to_process;
886 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
887 if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
888 if(p_xran_dev_ctx->pCallback[cc_id]) {
889 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
891 //pTag->cellId = cc_id;
892 pTag->slotiId = rx_tti;
893 pTag->symbol = XRAN_THREE_FOURTHS_CB_SYM;
894 status = XRAN_STATUS_SUCCESS;
896 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
900 p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
904 if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
905 if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
906 p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
908 p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
912 MLogXRANTask(PID_UP_UL_THREE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick());
/* Full-slot UL RX deadline callback: signals end of RX for the previous TTI to
 * the per-CC RX, PRACH, and (if SRS C-plane enabled) SRS callbacks with
 * XRAN_FULL_CB_SYM, then fires the user full-slot TTI callback honoring SkipTti. */
916 rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)
918 long t1 = MLogXRANTick();
919 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
920 xran_status_t status = 0;
921 int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
923 //uint32_t nFrameIdx;
924 //uint32_t nSubframeIdx;
927 struct xran_timer_ctx* p_timer_ctx = NULL;
929 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
932 /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
933 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
934 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
/* Pull the TTI for this deadline from the rotating callback-timer ring. */
936 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
938 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
939 p_xran_dev_ctx->timer_put = 0;
941 rx_tti = p_timer_ctx->tti_to_process;
/* Wrap to the last slot of the SFN period when stepping back from tti 0. */
944 rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1);
946 rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
949 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
950 if(p_xran_dev_ctx->pCallback[cc_id]){
951 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
953 //pTag->cellId = cc_id;
954 pTag->slotiId = rx_tti;
955 pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */
956 status = XRAN_STATUS_SUCCESS;
957 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
961 if(p_xran_dev_ctx->pPrachCallback[cc_id]){
962 struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
964 //pTag->cellId = cc_id;
965 pTag->slotiId = rx_tti;
966 pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */
967 p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
/* SRS notification only when SRS is scheduled via C-plane; the static-SRS path
 * (enableSrsCp == 0) is handled by rx_ul_static_srs_cb instead. */
971 if(p_xran_dev_ctx->enableSrsCp)
973 if(p_xran_dev_ctx->pSrsCallback[cc_id]){
974 struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
976 //pTag->cellId = cc_id;
977 pTag->slotiId = rx_tti;
978 pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */
979 p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
985 /* user call backs if any */
986 if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){
987 if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){
988 p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]);
990 p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--;
994 MLogXRANTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogXRANTick());
/*
 * Per-symbol user-callback deadline handler (rte_timer callback).
 * Translates the captured OTA symbol index by the configured +/- symbol
 * offset (sym_diff) to derive the TTI being reported, optionally fills the
 * caller-provided xran_sense_of_time structure, and invokes the registered
 * per-symbol user callback.
 * NOTE(review): the return-type line of the signature is not visible in
 * this chunk.
 */
998 rx_ul_user_sym_cb(struct rte_timer *tim, void *arg)
1000 long t1 = MLogXRANTick();
1001 struct xran_device_ctx * p_dev_ctx = NULL;
1002 struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg;
1003 int32_t rx_tti = 0; //(int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1004 uint32_t interval, ota_sym_idx = 0;
1005 uint8_t nNumerology = 0;
1006 struct xran_timer_ctx* p_timer_ctx = NULL;
/* The callback context must carry its owning device; abort otherwise. */
1008 if(p_sym_cb_ctx->p_dev)
1009 p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev;
1011 rte_panic("p_sym_cb_ctx->p_dev == NULL");
/* Skip delivery until the xRAN<->PHY shared memory is configured. */
1013 if(p_dev_ctx->xran2phy_mem_ready == 0)
1015 nNumerology = xran_get_conf_numerology(p_dev_ctx);
1016 interval = p_dev_ctx->interval_us_local;
/* Pop the next timer context from the per-callback ring buffer. */
1018 p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX];
1019 if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX)
1020 p_sym_cb_ctx->user_timer_get = 0;
1022 rx_tti = p_timer_ctx->tti_to_process;
/* Shift the captured OTA symbol index by sym_diff, wrapping modulo the
 * numerology-dependent maximum symbol count. */
1024 if( p_sym_cb_ctx->sym_diff > 0)
1025 /* + advance TX Wind: at OTA Time we indicating event in future */
1026 ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology));
1027 else if (p_sym_cb_ctx->sym_diff < 0) {
1028 /* - delay RX Win: at OTA Time we indicate event in the past */
1029 if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) {
1030 ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff;
1032 ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology);
1034 } else /* 0 - OTA exact time */
1035 ota_sym_idx = p_timer_ctx->ota_sym_idx;
1037 rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
/* Optionally report the full sense-of-time (frame/subframe/slot/second)
 * derived from the adjusted TTI. */
1039 if(p_sym_cb_ctx->symCbTimeInfo) {
1040 struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo;
1041 p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id;
1042 p_sense_time->nSymIdx = p_sym_cb_ctx->symb_num_req;
1043 p_sense_time->tti_counter = rx_tti;
1044 p_sense_time->nSlotIdx = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval));
1045 p_sense_time->nSubframeIdx = (uint32_t)XranGetSubFrameNum(rx_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
1046 p_sense_time->nFrameIdx = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1047 p_sense_time->nSecond = p_timer_ctx->current_second;
1050 /* user call backs if any */
1051 if(p_sym_cb_ctx->symCb){
1052 p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo);
1055 MLogXRANTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogXRANTick());
/*
 * Build and transmit uplink C-Plane messages for one slot.
 *
 * For the slot given by nSlotIdx on port xran_port_id this routine:
 *  1. sends general UL section messages per (antenna, component carrier)
 *     within the [nAntStart, nAntStart+nAntNum) x [nCcStart, nCcStart+nCcNum)
 *     ranges, clipped to the configured eAxC / CC counts;
 *  2. when PRACH is enabled and this frame/slot is a PRACH opportunity,
 *     generates and sends one C-Plane message per PRACH occasion;
 *  3. when SRS C-Plane is enabled, sends SRS section messages on the
 *     SRS eAxC offset for slots marked XRAN_SLOT_TYPE_SP.
 * Runs only on the first call flag and when C-Plane is enabled.
 * NOTE(review): several brace/else lines are not visible in this chunk;
 * see tx_cp_ul_cb below, which follows the same structure.
 */
1059 xran_prepare_cp_ul_slot(uint16_t xran_port_id, uint32_t nSlotIdx, uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
1060 uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
1062 int32_t ret = XRAN_STATUS_SUCCESS;
1063 long t1 = MLogXRANTick();
1065 uint32_t slot_id, subframe_id, frame_id;
1067 int ant_id, port_id;
1068 uint16_t occasionid;
1070 uint8_t num_eAxc, num_CCPorts;
1077 //struct xran_timer_ctx *pTCtx;
1078 struct xran_buffer_list *pBufList;
1079 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
1080 if(unlikely(!p_xran_dev_ctx))
1082 print_err("Null xRAN context!!\n");
1086 if(first_call && p_xran_dev_ctx->enableCP)
1088 pHandle = p_xran_dev_ctx;
1089 //pTCtx = &p_xran_dev_ctx->timer_ctx[0];
1090 interval = p_xran_dev_ctx->interval_us_local;
1091 PortId = p_xran_dev_ctx->xran_port_id;
1092 tti = nSlotIdx; //pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
/* Derive buffer/context indices and frame timing from the TTI. */
1094 buf_id = tti % XRAN_N_FE_BUF_LEN;
1095 ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;
1096 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1097 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
1098 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1100 /* Wrap around to next second (10-bit frame number space) */
1102 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
/* Category A RUs use the DL eAxC count for UL as well. */
1103 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
1104 num_eAxc = xran_get_num_eAxc(pHandle);
1106 num_eAxc = xran_get_num_eAxcUl(pHandle);
1107 num_CCPorts = xran_get_num_cc(pHandle);
1109 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
1111 /* General Uplink */
1112 #if defined(__INTEL_COMPILER)
1113 #pragma vector always
1115 for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum) && ant_id < num_eAxc); ++ant_id) {
1116 for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) {
1117 /* start new section information list */
1118 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
1119 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1)
1121 pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
1122 if(pBufList->pBuffers && pBufList->pBuffers->pData)
1124 ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
1125 (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
1126 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1130 } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
1133 if(p_xran_dev_ctx->enablePrach)
1135 struct xran_prach_cp_config *pPrachCPConfig = NULL;
1136 //check for dss enable and fill based on technology select the p_xran_dev_ctx->PrachCPConfig NR/LTE.
/* DSS: pick the NR or LTE PRACH config per the technology pattern of
 * this TTI within the DSS period. */
1137 if(p_xran_dev_ctx->dssEnable){
1138 int i = tti % p_xran_dev_ctx->dssPeriod;
1139 if(p_xran_dev_ctx->technology[i]==1) {
1140 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1143 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
1147 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1149 uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
/* PRACH occurs only on frames matching the x/y configuration. */
1151 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
1152 && (is_prach_slot==1))
1154 for(ant_id = 0; ant_id < num_eAxc; ant_id++)
1156 port_id = ant_id + pPrachCPConfig->eAxC_offset;
1157 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1159 /* start new section information list */
1160 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
1161 for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
1163 struct xran_cp_gen_params params;
1164 struct xran_section_gen_info sect_geninfo[8];
1165 struct xran_section_info sectInfo[8];
/* NOTE(review): '§Info' / '¶ms' below look like HTML-entity-mangled
 * '&sectInfo' / '&params' from extraction — verify against upstream. */
1166 for(int secId=0;secId<8;secId++)
1167 sect_geninfo[secId].info = §Info[secId];
1168 struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
1169 uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
1171 beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
1172 ret = generate_cpmsg_prach(pHandle, ¶ms, sect_geninfo, mbuf, p_xran_dev_ctx,
1173 frame_id, subframe_id, slot_id, tti,
1174 beam_id, cc_id, port_id, occasionid, seqid);
1175 if(ret == XRAN_STATUS_SUCCESS)
1176 send_cpmsg(pHandle, mbuf, ¶ms, sect_geninfo,
1177 cc_id, port_id, seqid);
1182 } /* if(p_xran_dev_ctx->enablePrach) */
1185 if(p_xran_dev_ctx->enableSrsCp)
1187 struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
/* SRS uses antenna-element count and its own eAxC offset. */
1189 for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
1191 port_id = ant_id + pSrsCfg->eAxC_offset;
1192 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1194 /* start new section information list */
1195 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
1196 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
1198 pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
1199 if(pBufList->pBuffers && pBufList->pBuffers->pData)
1201 ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
1202 (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
1203 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1208 } /* if(p_xran_dev_ctx->enableSrs) */
1210 MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
1211 } /* if(p_xran_dev_ctx->enableCP) */
/*
 * UL C-Plane transmission timer callback (rte_timer).
 * Timer-driven counterpart of xran_prepare_cp_ul_slot above: determines the
 * TTI to process from the device's double-buffered timer context, then sends
 * general UL sections per (antenna, CC), PRACH occasion C-Plane messages when
 * this frame/slot is a PRACH opportunity, and SRS sections when enabled.
 * Skipped entirely when BBU offload is configured.
 * NOTE(review): this duplicates most of xran_prepare_cp_ul_slot — a shared
 * helper would be a candidate refactor once the full file is visible.
 */
1218 tx_cp_ul_cb(struct rte_timer *tim, void *arg)
1220 long t1 = MLogXRANTick();
1223 uint32_t slot_id, subframe_id, frame_id;
1225 int ant_id, port_id;
1226 uint16_t occasionid = 0;
1228 uint8_t num_eAxc, num_CCPorts;
1235 struct xran_timer_ctx *pTCtx;
1236 struct xran_buffer_list *pBufList;
1237 struct xran_device_ctx *p_xran_dev_ctx;
1241 print_err("Null xRAN context!!\n");
1245 p_xran_dev_ctx = (struct xran_device_ctx *)arg;
/* With BBU offload the C-Plane is produced elsewhere; nothing to do here. */
1247 if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
1251 if(first_call && p_xran_dev_ctx->enableCP)
1253 pHandle = p_xran_dev_ctx;
1254 pTCtx = &p_xran_dev_ctx->timer_ctx[0];
1255 interval = p_xran_dev_ctx->interval_us_local;
1256 PortId = p_xran_dev_ctx->xran_port_id;
/* Pick the "other" half of the ping-pong timer context for this OTA TTI. */
1257 tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
1259 buf_id = tti % XRAN_N_FE_BUF_LEN;
1260 ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;
1261 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1262 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
1263 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1265 /* Wrap around to next second (10-bit frame number space) */
1267 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
/* Category A RUs use the DL eAxC count for UL as well. */
1268 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
1269 num_eAxc = xran_get_num_eAxc(pHandle);
1271 num_eAxc = xran_get_num_eAxcUl(pHandle);
1272 num_CCPorts = xran_get_num_cc(pHandle);
1274 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
1276 /* General Uplink */
1277 for(ant_id = 0; ant_id < num_eAxc; ant_id++)
1279 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1281 /* start new section information list */
1282 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
1283 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1)
1285 pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
1286 if(pBufList->pBuffers && pBufList->pBuffers->pData)
1288 ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
1289 (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
1290 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1294 } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
1297 if(p_xran_dev_ctx->enablePrach)
1299 struct xran_prach_cp_config *pPrachCPConfig = NULL;
1300 //check for dss enable and fill based on technology select the p_xran_dev_ctx->PrachCPConfig NR/LTE.
/* DSS: pick the NR or LTE PRACH config per the technology pattern of
 * this TTI within the DSS period. */
1301 if(p_xran_dev_ctx->dssEnable){
1302 int i = tti % p_xran_dev_ctx->dssPeriod;
1303 if(p_xran_dev_ctx->technology[i]==1) {
1304 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1307 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
1311 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1314 uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
/* PRACH occurs only on frames matching the x/y configuration. */
1316 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
1317 && (is_prach_slot==1))
1319 for(ant_id = 0; ant_id < num_eAxc; ant_id++)
1321 port_id = ant_id + pPrachCPConfig->eAxC_offset;
1322 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1324 /* start new section information list */
1325 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
1327 //for FCN only send C-P for first occasion
1328 for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
1331 struct xran_cp_gen_params params;
1332 struct xran_section_gen_info sect_geninfo[8];
1333 struct xran_section_info sectInfo[8];
/* NOTE(review): '§Info' / '¶ms' below look like HTML-entity-mangled
 * '&sectInfo' / '&params' from extraction — verify against upstream. */
1334 for(int secId=0;secId<8;secId++)
1335 sect_geninfo[secId].info = §Info[secId];
1337 struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
1338 uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
1340 beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
1341 ret = generate_cpmsg_prach(pHandle, ¶ms, sect_geninfo, mbuf, p_xran_dev_ctx,
1342 frame_id, subframe_id, slot_id, tti,
1343 beam_id, cc_id, port_id, occasionid, seqid);
1344 if (ret == XRAN_STATUS_SUCCESS)
1345 send_cpmsg(pHandle, mbuf, ¶ms, sect_geninfo,
1346 cc_id, port_id, seqid);
1351 } /* if(p_xran_dev_ctx->enablePrach) */
1354 if(p_xran_dev_ctx->enableSrsCp)
1356 struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
/* SRS uses antenna-element count and its own eAxC offset. */
1358 for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
1360 port_id = ant_id + pSrsCfg->eAxC_offset;
1361 for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
1363 /* start new section information list */
1364 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
1365 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
1367 pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
1368 if(pBufList->pBuffers && pBufList->pBuffers->pData)
1370 ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
1371 (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
1372 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1377 } /* if(p_xran_dev_ctx->enableSrs) */
1379 MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
1380 } /* if(p_xran_dev_ctx->enableCP) */
/*
 * Per-TTI notification callback to the PHY (rte_timer).
 * Marks the TTI callback as done, invokes the registered XRAN_CB_TTI user
 * callback unless it is being skipped (SkipTti > 0), and checks whether the
 * current TTI is the last slot of the frame period.
 * NOTE(review): else-branches and the body of the last-slot check are on
 * lines not visible in this chunk.
 */
1384 tti_to_phy_cb(struct rte_timer *tim, void *arg)
1386 long t1 = MLogTick();
1387 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
1388 uint32_t interval = p_xran_dev_ctx->interval_us_local;
1390 p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */
1392 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
1393 if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){
1394 p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);
1396 p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;
/* Derive frame/subframe/slot from the port's OTA symbol counter to detect
 * the frame-period boundary. */
1400 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
1401 int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT);
1402 uint32_t slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
1403 uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
1404 uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
1405 if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME(interval)-1)) { //(tti == xran_fs_get_max_slot()-1)
1411 MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
/*
 * Main timing thread for the fronthaul library.
 * Pins itself to the configured timing core with SCHED_FIFO priority, waits
 * for the top-of-second boundary, then loops: polls for the next symbol tick
 * and dispatches sym_ota_cb for every active port whose OTA symbol counter
 * advanced. Timing callbacks are created up front, or deferred until the
 * one-way delay measurement (owdm) completes when owdm is enabled.
 * Exits when xran_if_current_state becomes XRAN_STOPPED.
 */
1415 xran_timing_source_thread(void *args)
1420 uint32_t xran_port_id = 0;
1421 static int owdm_init_done = 0;
1422 struct sched_param sched_param;
1423 struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args ;
1424 uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;
1425 struct xran_device_ctx * p_dev_ctx_run = NULL;
1426 /* ToS = Top of Second start +- 1.5us */
1428 char thread_name[32];
1431 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
1432 memset(&sched_param, 0, sizeof(struct sched_param));
1433 /* set main thread affinity mask to CPU2 */
1434 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1436 CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);
1438 if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)))
1440 printf("pthread_setaffinity_np failed: coreId = 2, result1 = %d\n",result1);
1442 if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
1444 printf("priority is not changed: coreId = 2, result1 = %d\n",result1);
1447 snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id());
1448 if ((res = pthread_setname_np(pthread_self(), thread_name))) {
1449 printf("[core %d] pthread_setname_np = %d\n",rte_lcore_id(), res);
1452 printf("TTI interval %ld [us]\n", interval_us);
/* Without owdm, timing callbacks can be created immediately; with owdm
 * they are created later, after delay measurement adjusts the windows. */
1454 if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) {
1455 if ((res = xran_timing_create_cbs(args)) < 0){
/* Busy-wait to the top-of-second boundary (tv_nsec <= 1500 ns). */
1461 timespec_get(&ts, TIME_UTC);
1462 }while (ts.tv_nsec >1500);
1464 struct tm * ptm = gmtime(&ts.tv_sec);
1466 strftime(buff, sizeof buff, "%D %T", ptm);
1467 printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n",
1468 (p_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
1472 timespec_get(&ts, TIME_UTC);
1473 }while (ts.tv_nsec == 0);
1475 p_dev_ctx->timing_source_thread_running = 1;
1478 /* Check if owdm finished to create the timing cbs based on measurement results */
1479 if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable)&&(!owdm_init_done)&&unlikely(XRAN_RUNNING == xran_if_current_state)) {
1480 // Adjust Windows based on Delay Measurement results
1481 xran_adjust_timing_parameters(p_dev_ctx);
1482 if ((res = xran_timing_create_cbs(args)) < 0){
1485 printf("TTI interval %ld [us]\n", interval_us);
1492 /* Update Usage Stats */
1493 tWake = xran_tick();
1494 xran_used_tick += tUsed;
1497 xran_total_tick += get_ticks_diff(tWake, tWakePrev);
/* Sleep/poll until the next symbol boundary (TTI / symbols-per-slot). */
1502 int64_t delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);
1503 if (XRAN_STOPPED == xran_if_current_state)
1506 if (delta > 3E5 && tUsed > 0)//300us about 9 symbols
1508 print_err("poll_next_tick too long, delta:%ld(ns), tUsed:%ld(tick)", delta, tUsed);
/* Drive the per-port OTA symbol state machine for every active port. */
1511 if (likely(XRAN_RUNNING == xran_if_current_state)) {
1512 for(xran_port_id = 0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) {
1513 p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id);
1515 if(p_dev_ctx_run->xran_port_id == xran_port_id) {
1516 if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id])
1518 sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed);
1519 xran_lib_ota_sym[xran_port_id]++;
1520 if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT)
1521 xran_lib_ota_sym[xran_port_id]=0;
1525 rte_panic("p_dev_ctx_run == xran_port_id");
1532 xran_timing_destroy_cbs(args);
1533 printf("Closing timing source thread...\n");
1537 /* Handle ecpri format. */
1538 #define MBUFS_CNT 16
/*
 * Receive-side classifier/dispatcher for eCPRI frames.
 * Strips the Ethernet header from each mbuf, bins the packets by eCPRI
 * message type (IQ data / real-time control / delay measurement), then:
 *  - U-plane data: batch-processed via process_mbuf_batch when a full
 *    MBUFS_CNT batch arrived on a Category B RU (w/a for Cat A issue),
 *    otherwise one-by-one via process_mbuf;
 *  - C-plane control: processed on the O-RU side only (O-DU logs an error);
 *  - delay measurement: handled via process_delay_meas while the interface
 *    is not yet in the RUNNING state.
 * Frees every mbuf whose handler returned MBUF_FREE. Returns MBUF_FREE
 * status to the caller.
 */
1540 int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num)
1542 struct rte_mbuf *pkt;
1544 struct rte_ether_hdr* eth_hdr;
1545 struct xran_ecpri_hdr* ecpri_hdr;
1547 int32_t ret = MBUF_FREE;
/* NOTE(review): this initializer sets only ret_data[0]; the remaining
 * entries are zero-initialized, which equals MBUF_FREE only if MBUF_FREE
 * is 0 — confirm against the enum definition. */
1548 uint32_t ret_data[MBUFS_CNT] = { MBUFS_CNT * MBUF_FREE };
1549 struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id);
1550 uint16_t num_data = 0, num_control = 0, num_meas = 0;
1551 struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT];
1552 static uint32_t owdm_rx_first_pass = 1;
1554 if (p_dev_ctx == NULL)
/* Classification pass: strip Ethernet header, bin by eCPRI message type. */
1557 for (i = 0; i < num; i++)
1561 // rte_prefetch0(rte_pktmbuf_mtod(pkt, void*));
1563 rte_pktmbuf_adj(pkt, sizeof(*eth_hdr));
1564 ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);
1566 p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
1569 switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type)
1572 pkt_data[num_data++] = pkt;
1575 case ECPRI_RT_CONTROL_DATA:
1576 pkt_control[num_control++] = pkt;
1578 case ECPRI_DELAY_MEASUREMENT:
1579 if (owdm_rx_first_pass != 0)
1581 // Initialize and verify that Payload Length is in range */
1582 xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx);
1583 owdm_rx_first_pass = 0;
1586 pkt_meas[num_meas++] = pkt;
1589 if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
1590 print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
/* Full batch of U-plane data on a Cat B RU: process in one batch call. */
1596 if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* w/a for Cat A issue */
1598 for (i = 0; i < MBUFS_CNT; i++)
1600 ret_data[i] = MBUF_FREE;
1603 if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
1605 if (p_dev_ctx->xran2phy_mem_ready != 0)
1606 ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data );
1607 for (i = 0; i < MBUFS_CNT; i++)
1609 if (ret_data[i] == MBUF_FREE)
1610 rte_pktmbuf_free(pkt_data[i]);
1615 for (i = 0; i < MBUFS_CNT; i++)
1617 if (ret_data[i] == MBUF_FREE)
1618 rte_pktmbuf_free(pkt_data[i]);
1620 print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
/* Partial batch (or Cat A): process U-plane packets one at a time. */
1625 for (i = 0; i < num_data; i++)
1627 ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid);
1628 if (ret == MBUF_FREE)
1629 rte_pktmbuf_free(pkt_data[i]);
/* C-plane packets are valid only at the O-RU. */
1632 for (i = 0; i < num_control; i++)
1634 t1 = MLogXRANTick();
1635 if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
1637 ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
1638 p_dev_ctx->fh_counters.rx_counter++;
1639 if (ret == MBUF_FREE)
1640 rte_pktmbuf_free(pkt_control[i]);
1644 print_err("O-DU recevied C-Plane message!");
1646 MLogXRANTask(PID_PROCESS_CP_PKT, t1, MLogXRANTick());
/* One-way delay measurement packets, handled before RUNNING state. */
1649 for (i = 0; i < num_meas; i++)
1652 /*if(p_dev_ctx->fh_init.io_cfg.id == O_RU)
1653 printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64" %d\n", xport_id,(int64_t*)p_dev_ctx, num_meas) ;*/
1654 t1 = MLogXRANTick();
1655 if(xran_if_current_state != XRAN_RUNNING)
1656 ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id);
1659 if (ret == MBUF_FREE)
1660 rte_pktmbuf_free(pkt_meas[i]);
1661 MLogXRANTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogXRANTick());
/*
 * Worker thread that services DPDK timers.
 * Raises itself to SCHED_FIFO priority, then loops calling the rte_timer
 * manage path whenever TIMER_RESOLUTION_CYCLES TSC cycles have elapsed
 * (the manage call itself is on a line not visible in this chunk).
 * Exits when xran_if_current_state becomes XRAN_STOPPED.
 */
1669 xran_packet_and_dpdk_timer_thread(void *args)
1671 //struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1673 uint64_t prev_tsc = 0;
1674 uint64_t cur_tsc = rte_rdtsc();
1675 uint64_t diff_tsc = cur_tsc - prev_tsc;
1676 struct sched_param sched_param;
1678 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
1680 memset(&sched_param, 0, sizeof(struct sched_param));
1681 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1683 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
1685 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
/* Rate-limit timer management to the configured TSC resolution. */
1690 cur_tsc = rte_rdtsc();
1691 diff_tsc = cur_tsc - prev_tsc;
1692 if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
1697 if (XRAN_STOPPED == xran_if_current_state)
1701 printf("Closing pkts timer thread...\n");
1705 void xran_initialize_ecpri_owd_meas_cmn( struct xran_io_cfg *ptr)
1707 // ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0
1708 // ptr->eowd_cmn.filterType = 0; // 0 Simple average based on number of measurements
1709 // Set default values if the Timeout and numberOfSamples are not set
1710 if ( ptr->eowd_cmn[ptr->id].responseTo == 0)
1711 ptr->eowd_cmn[ptr->id].responseTo = 10E6; // 10 ms timeout expressed in ns
1712 if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0)
1713 ptr->eowd_cmn[ptr->id].numberOfSamples = 8; // Number of samples to be averaged
1715 void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr )
1717 /* This function initializes one_way delay measurements on a per port basis,
1718 most variables default to zero */
1719 ptr->eowd_port[ptr->id][i].portid = (uint8_t)i;
/*
 * Library entry point: initialize the xRAN fronthaul layer.
 * Validates the port count, allocates one device context per O-xU port,
 * copies the caller's fh_init/eAxC configuration into each context, applies
 * I/O config defaults (Ethernet lines, line speed, RX queues), validates the
 * VF count against the port/line topology, registers the eCPRI ethertype
 * handler, brings up DPDK I/O (address order depends on O-DU vs O-RU role),
 * initializes all per-context DPDK timers and symbol-callback lists, clears
 * per-cell slot-type tables, and returns the context array via
 * pXranLayerHandle. Returns XRAN_STATUS_SUCCESS or a negative status.
 * NOTE(review): several brace/return lines are not visible in this chunk.
 */
1723 xran_init(int argc, char *argv[],
1724 struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
1726 int32_t ret = XRAN_STATUS_SUCCESS;
1729 int32_t o_xu_id = 0;
1730 struct xran_io_cfg *p_io_cfg = NULL;
1731 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1732 int32_t lcore_id = 0;
1733 const char *version = rte_version();
1735 if (version == NULL)
1736 rte_panic("version == NULL");
1738 printf("'%s'\n", version);
/* Sanity-check the requested number of O-xU ports. */
1740 if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) {
1741 ret = XRAN_STATUS_INVALID_PARAM;
1742 print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret);
1745 mlogxranenable = p_xran_fh_init->mlogxranenable;
1746 p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
1748 if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) {
1749 print_err("context allocation error [%d]\n", ret);
/* Populate each device context from the caller's init structure. */
1753 for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1754 p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
1755 memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
1756 p_xran_dev_ctx->xran_port_id = o_xu_id;
1759 p_xran_dev_ctx->fh_init = *p_xran_fh_init;
1760 printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
1762 memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
1763 /* To make sure to set default functions */
1764 p_xran_dev_ctx->send_upmbuf2ring = NULL;
1765 p_xran_dev_ctx->send_cpmbuf2ring = NULL;
1766 // Ecpri initialization for One Way delay measurements common variables to default values
1767 xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
1770 /* default values if not set */
1771 if(p_io_cfg->nEthLinePerPort == 0)
1772 p_io_cfg->nEthLinePerPort = 1;
1774 if(p_io_cfg->nEthLineSpeed == 0)
1775 p_io_cfg->nEthLineSpeed = 25;
1777 /** at least 1 RX Q */
1778 if(p_io_cfg->num_rxq == 0)
1779 p_io_cfg->num_rxq = 1;
/* NOTE(review): id == 1 presumably means O-RU here (forces a single RX
 * queue) — confirm against the O_DU/O_RU enum values. */
1781 if (p_io_cfg->id == 1) {
1783 p_io_cfg->num_rxq = 1;
1786 #if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
1787 if (p_io_cfg->num_rxq > 1){
1788 p_io_cfg->num_rxq = 1;
1789 printf("%s does support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
1792 printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
1793 printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
1794 printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
/* Expect 2 VFs per Ethernet line (C-plane + U-plane), or 1 when
 * one_vf_cu_plane collapses both planes onto a single VF. */
1796 if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane) != p_io_cfg->num_vfs) {
1797 print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. [provided %d]\n",
1798 p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort,
1799 p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs);
1802 xran_if_current_state = XRAN_INIT;
1803 xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
/* DPDK I/O bring-up: O-DU (id == 0) uses DU->RU address order, the RU
 * side uses the reverse. */
1804 if (p_io_cfg->id == 0)
1805 xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1808 (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1809 (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1810 p_xran_dev_ctx->fh_init.mtu);
1812 xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1815 (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1816 (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1817 p_xran_dev_ctx->fh_init.mtu);
/* Second pass: initialize timers, mbuf pools, and per-symbol callback
 * lists for every device context. */
1819 for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1820 p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
1822 for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ )
1823 rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]);
1825 rte_timer_init(&p_xran_dev_ctx->sym_timer);
1826 for (i = 0; i< MAX_NUM_OF_DPDK_TIMERS; i++)
1827 rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]);
1829 p_xran_dev_ctx->direct_pool = socket_direct_pool;
1830 p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
1833 for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){
1834 LIST_INIT (&p_xran_dev_ctx->sym_cb_list_head[j]);
1839 for (i=0; i<XRAN_PORTS_NUM; i++){
1840 for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++){
1841 xran_fs_clear_slot_type(i,nCellIdx);
1845 *pXranLayerHandle = xran_dev_get_ctx();
1848 // The ecpri initialization loop needs to be done per pf and vf (Outer loop pf and inner loop vf)
1849 for (i=0; i< p_io_cfg->num_vfs; i++)
1851 /* Initialize ecpri one-way delay measurement info on a per vf port basis */
1852 xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg);
/*
 * Allocate per-sector (component carrier) handle objects.
 * For each of the nNumInstances requested CCs, allocates a 64-byte-aligned,
 * zeroed XranSectorHandleInfo, records its index and owning xRAN port, and
 * stores the handle both in the caller's pSectorInstanceHandles array and in
 * the module-level pLibInstanceHandles table.
 * Returns XRAN_STATUS_SUCCESS, XRAN_STATUS_INVALID_PARAM on bad arguments,
 * or XRAN_STATUS_RESOURCE on allocation failure (handles allocated for
 * earlier indices are not released on that path).
 */
1859 xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances,
1860 xran_cc_handle_t * pSectorInstanceHandles)
1862 struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
1863 XranSectorHandleInfo *pCcHandle = NULL;
1868 /* Check for the Valid Parameters */
1869 CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
1871 if (!nNumInstances) {
1872 print_dbg("Instance is not assigned for this function !!! \n");
1873 return XRAN_STATUS_INVALID_PARAM;
1876 for (i = 0; i < nNumInstances; i++) {
1878 /* Allocate Memory for CC handles */
1879 pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);
1881 if(pCcHandle == NULL)
1882 return XRAN_STATUS_RESOURCE;
1884 memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));
1886 pCcHandle->nIndex = i;
1887 pCcHandle->nXranPort = pDev->xran_port_id;
1889 printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);
/* Publish the handle to both the caller and the library-wide table. */
1890 pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;
1892 printf("Handle: %p Instance: %p\n",
1893 &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);
1896 return XRAN_STATUS_SUCCESS;
/*
 * Register the PHY's fronthaul I/O buffers and slot-completion callback for
 * one component carrier.
 * For every (buffer index, antenna) pair this wires four buffer-control
 * groups in the device context — TX U-plane, TX PRB map (C-plane), RX
 * U-plane, and RX PRB map — copying the caller's xran_buffer_list when
 * provided and zeroing the entry otherwise. Finally stores the per-CC
 * callback/tag and marks xran2phy shared memory as ready.
 * Returns XRAN_STATUS_SUCCESS or XRAN_STATUS_FAIL on a NULL handle/context.
 */
1901 xran_5g_fronthault_config (void * pHandle,
1902 struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1903 struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1904 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1905 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1906 xran_transport_callback_fn pCallback,
1910 XranSectorHandleInfo* pXranCc = NULL;
1911 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1913 if(NULL == pHandle) {
1914 printf("Handle is NULL!\n");
1915 return XRAN_STATUS_FAIL;
1918 pXranCc = (XranSectorHandleInfo*) pHandle;
1919 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1920 if (p_xran_dev_ctx == NULL) {
1921 printf ("p_xran_dev_ctx is NULL\n");
1922 return XRAN_STATUS_FAIL;
/* i = component-carrier index this sector handle was created for. */
1925 i = pXranCc->nIndex;
1927 for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
1928 for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
/* TX U-plane (DL IQ data from PHY). */
1931 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
1932 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1933 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1934 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1935 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1936 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
1938 if(pSrcBuffer[z][j])
1939 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j];
1941 memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j]));
/* TX PRB map (DL C-plane section source). */
1945 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1946 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1947 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1948 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1949 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1950 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
1952 if(pSrcCpBuffer[z][j])
1953 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j];
1955 memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j]));
/* RX U-plane (UL IQ data towards PHY). */
1958 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
1959 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1960 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1961 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1962 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1963 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
1965 if(pDstBuffer[z][j])
1966 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
1968 memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
/* RX PRB map (UL C-plane section source). */
1972 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1973 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1974 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1975 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1976 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1977 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
1979 if(pDstCpBuffer[z][j])
1980 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
1982 memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
/* Register the per-CC slot completion callback and mark memory ready. */
1986 p_xran_dev_ctx->pCallback[i] = pCallback;
1987 p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;
1988 print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
1989 p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]);
1991 p_xran_dev_ctx->xran2phy_mem_ready = 1;
1993 return XRAN_STATUS_SUCCESS;
1996 int32_t xran_5g_bfw_config(void * pHandle,
1997 struct xran_buffer_list *pSrcRxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1998 struct xran_buffer_list *pSrcTxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1999 xran_transport_callback_fn pCallback,
2000 void *pCallbackTag){
2002 XranSectorHandleInfo* pXranCc = NULL;
2003 struct xran_device_ctx * p_xran_dev_ctx = NULL;
2005 if(NULL == pHandle) {
2006 printf("Handle is NULL!\n");
2007 return XRAN_STATUS_FAIL;
2009 pXranCc = (XranSectorHandleInfo*) pHandle;
2010 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
2011 if (p_xran_dev_ctx == NULL) {
2012 printf ("p_xran_dev_ctx is NULL\n");
2013 return XRAN_STATUS_FAIL;
2016 i = pXranCc->nIndex;
2018 for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
2019 for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
2020 /* C-plane RX - RU */
2021 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
2022 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2023 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2024 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2025 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
2026 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
2028 if(pSrcRxCpBuffer[z][j])
2029 p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcRxCpBuffer[z][j];
2031 memset(&p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcRxCpBuffer[z][j]));
2033 /* C-plane TX - RU */
2034 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
2035 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2036 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2037 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2038 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
2039 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
2041 if(pSrcTxCpBuffer[z][j])
2042 p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcTxCpBuffer[z][j];
2044 memset(&p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcTxCpBuffer[z][j]));
2047 return XRAN_STATUS_SUCCESS;
2051 xran_5g_prach_req (void * pHandle,
2052 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
2053 struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
2054 xran_transport_callback_fn pCallback,
2058 XranSectorHandleInfo* pXranCc = NULL;
2059 struct xran_device_ctx * p_xran_dev_ctx = NULL;
2061 if(NULL == pHandle) {
2062 printf("Handle is NULL!\n");
2063 return XRAN_STATUS_FAIL;
2066 pXranCc = (XranSectorHandleInfo*) pHandle;
2067 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
2068 if (p_xran_dev_ctx == NULL) {
2069 printf ("p_xran_dev_ctx is NULL\n");
2070 return XRAN_STATUS_FAIL;
2073 i = pXranCc->nIndex;
2075 for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
2076 for(z = 0; z < XRAN_MAX_PRACH_ANT_NUM; z++){
2077 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
2078 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2079 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2080 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2081 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_PRACH_ANT_NUM; // ant number.
2082 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
2083 if(pDstBuffer[z][j])
2084 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
2086 memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
2088 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0];
2089 if(pDstBufferDecomp[z][j])
2090 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j];
2094 p_xran_dev_ctx->pPrachCallback[i] = pCallback;
2095 p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
2097 print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
2098 p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]);
2100 return XRAN_STATUS_SUCCESS;
2104 xran_5g_srs_req (void * pHandle,
2105 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
2106 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
2107 xran_transport_callback_fn pCallback,
2111 XranSectorHandleInfo* pXranCc = NULL;
2112 struct xran_device_ctx * p_xran_dev_ctx = NULL;
2114 if(NULL == pHandle) {
2115 printf("Handle is NULL!\n");
2116 return XRAN_STATUS_FAIL;
2119 pXranCc = (XranSectorHandleInfo*) pHandle;
2120 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
2121 if (p_xran_dev_ctx == NULL) {
2122 printf ("p_xran_dev_ctx is NULL\n");
2123 return XRAN_STATUS_FAIL;
2126 i = pXranCc->nIndex;
2128 for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
2129 for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){
2130 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
2131 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2132 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2133 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2134 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number.
2135 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
2136 if(pDstBuffer[z][j])
2137 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
2139 memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
2142 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
2143 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
2144 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
2145 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
2146 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
2147 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z];
2149 if(pDstCpBuffer[z][j])
2150 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
2152 memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
2157 p_xran_dev_ctx->pSrsCallback[i] = pCallback;
2158 p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
2160 print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
2161 p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]);
2163 return XRAN_STATUS_SUCCESS;
2167 xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear)
2171 *num_core_used = xran_num_cores_used;
2172 for (i = 0; i < xran_num_cores_used; i++)
2174 core_used[i] = xran_core_used[i];
2177 *total_time = xran_total_tick;
2178 *used_time = xran_used_tick;
2182 xran_total_tick = 0;
2190 xran_add_cp_hdr_offset(uint8_t *dst)
2192 dst += (RTE_PKTMBUF_HEADROOM +
2193 sizeof(struct xran_ecpri_hdr) +
2194 sizeof(struct xran_cp_radioapp_section1_header) +
2195 sizeof(struct xran_cp_radioapp_section1));
2197 dst = RTE_PTR_ALIGN_CEIL(dst, 64);
2203 xran_add_hdr_offset(uint8_t *dst, int16_t compMethod)
2205 dst+= (RTE_PKTMBUF_HEADROOM +
2206 sizeof (struct xran_ecpri_hdr) +
2207 sizeof (struct radio_app_common_hdr) +
2208 sizeof(struct data_section_hdr));
2209 if(compMethod != XRAN_COMPMETHOD_NONE)
2210 dst += sizeof (struct data_section_compression_hdr);
2211 dst = RTE_PTR_ALIGN_CEIL(dst, 64);
/* Drain up to 16 cp_up_tx_desc descriptors from ring 'r'; for each one,
 * generate the TX symbol packets via xran_process_tx_sym_cp_on_opt() and
 * free the descriptor. MLog task PID_PROCESS_TX_SYM brackets the batch.
 * Optionally sleeps (io_sleep) after the batch to yield the core.
 * NOTE(review): this listing is an elided extraction — the return type,
 * local declarations (i, remaining, t1, sleeptime), several arguments of
 * xran_process_tx_sym_cp_on_opt (between pHandle/start_cc/start_ant/
 * frame_id and slot/symbol ids) and the return statements are missing. */
2217 xran_pkt_gen_process_ring(struct rte_ring *r)
2220     struct rte_mbuf *mbufs[16];
2224     struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
2225     const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
2226             RTE_DIM(mbufs), &remaining);
2232     t1 = MLogXRANTick();
2233     for (i = 0; i < dequeued; ++i) {
/* descriptor travels inside the mbuf data area */
2234         struct cp_up_tx_desc * p_tx_desc = (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i], struct cp_up_tx_desc *);
2235         xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
2238                             p_tx_desc->start_cc,
2240                             p_tx_desc->start_ant,
2242                             p_tx_desc->frame_id,
2243                             p_tx_desc->subframe_id,
2246                             (enum xran_comp_hdr_type)p_tx_desc->compType,
2247                             (enum xran_pkt_dir) p_tx_desc->direction,
2248                             p_tx_desc->xran_port_id,
2249                             (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
/* descriptor ownership ends here; return it to its pool */
2251         xran_pkt_gen_desc_free(p_tx_desc);
2252         if (XRAN_STOPPED == xran_if_current_state){
2253             MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
/* optional core yield between batches */
2258     if(p_io_cfg->io_sleep)
2259         nanosleep(&sleeptime,NULL);
2261     MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
/* Worker entry: service the DL packet-generation ring of every xRAN port
 * selected by the low 16 bits of 'args' (a port bitmask), then bail out if
 * the library has been stopped.
 * NOTE(review): elided extraction — the return type, any statements between
 * the local declarations and the port loop, and the return values of the
 * XRAN_STOPPED check and of the normal path are not visible here. */
2267 xran_dl_pkt_ring_processing_func(void* args)
2269     struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
/* args carries a bitmask of ports to service, not a pointer */
2270     uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
2271     uint16_t current_port;
2275     for (current_port = 0; current_port < XRAN_PORTS_NUM; current_port++) {
2276         if( xran_port_mask & (1<<current_port)) {
2277             xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
2281     if (XRAN_STOPPED == xran_if_current_state)
/* Combined worker: run FH RX ring processing (for port 0 argument) and then
 * DL U-plane packet generation for the ports in 'port_mask'.
 * NOTE(review): elided extraction — the ret_val declaration, the error
 * checks after each call and the final return are not visible here. */
2287 int32_t xran_fh_rx_and_up_tx_processing(void *port_mask)
2291     ret_val = ring_processing_func((void *)0);
2295     ret_val = xran_dl_pkt_ring_processing_func(port_mask);
/** Function that performs servicing of the DPDK timers */
/* Timer-only worker loop body: exits once the library enters XRAN_STOPPED.
 * NOTE(review): elided extraction — the return type, the timer-servicing
 * statements and the return values are not visible here. */
2303 xran_processing_timer_only_func(void* args)
2306     if (XRAN_STOPPED == xran_if_current_state)
/** Function that performs parsing of RX packets on all ports and does TX and RX on the ETH device */
/**
 * Single-core worker body: process the fronthaul rings and then service the
 * Ethernet device TX/RX in the same pass.
 *
 * @param arg opaque argument forwarded to ring_processing_func()
 * @return 0 (worker continues)
 */
int32_t
xran_all_tasks(void* arg)
{
    ring_processing_func(arg);
    process_dpdk_io(arg);
    return 0;
}
/** Function that performs TX and RX on the ETH device */
/**
 * Worker body that services both TX and RX on the Ethernet device.
 *
 * @param arg opaque argument forwarded to process_dpdk_io()
 * @return 0 (worker continues)
 */
int32_t
xran_eth_trx_tasks(void* arg)
{
    process_dpdk_io(arg);
    return 0;
}
/** Function that performs RX on the ETH device */
/**
 * Worker body that services only RX on the Ethernet device.
 *
 * @param arg opaque argument forwarded to process_dpdk_io_rx()
 * @return 0 (worker continues)
 */
int32_t
xran_eth_rx_tasks(void* arg)
{
    process_dpdk_io_rx(arg);
    return 0;
}
/** Function that processes O-RAN FH packets for a single port */
/* Per-port worker: for every VF mapped to 'port_id' (low 16 bits of args),
 * drain each of its RX queues through process_ring(); exits when the
 * library enters XRAN_STOPPED.
 * NOTE(review): elided extraction — the return type, the declarations of
 * i/qi, the action taken when process_ring() returns non-zero, and the
 * return statements are not visible here. */
2340 ring_processing_func_per_port(void* args)
2342     struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
/* args carries the xRAN port id, not a pointer */
2344     uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
2347     for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
2348         if (ctx->vf2xran_port[i] == port_id) {
2349             for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
2350                 if (process_ring(ctx->rx_ring[i][qi], i, qi))
2356     if (XRAN_STOPPED == xran_if_current_state)
/** Function that generates the configuration of worker threads and creates them based on the scenario and the platform in use */
2364 xran_spawn_workers(void)
2366 uint64_t nWorkerCore = 1LL;
2367 uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
2369 uint32_t total_num_cores = 1; /*start with timing core */
2370 uint32_t worker_num_cores = 0;
2371 uint32_t icx_cpu = 0;
2372 int32_t core_map[2*sizeof(uint64_t)*8];
2373 uint64_t xran_port_mask = 0;
2375 struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
2376 struct xran_device_ctx *p_dev = NULL;
2377 struct xran_fh_init *fh_init = NULL;
2378 struct xran_fh_config *fh_cfg = NULL;
2379 struct xran_worker_th_ctx* pThCtx = NULL;
2380 void *worker_ports=NULL;
2382 p_dev = xran_dev_get_ctx_by_id(0);
2384 print_err("p_dev\n");
2385 return XRAN_STATUS_FAIL;
2388 fh_init = &p_dev->fh_init;
2389 if(fh_init == NULL) {
2390 print_err("fh_init\n");
2391 return XRAN_STATUS_FAIL;
2394 fh_cfg = &p_dev->fh_cfg;
2395 if(fh_cfg == NULL) {
2396 print_err("fh_cfg\n");
2397 return XRAN_STATUS_FAIL;
2400 for (i = 0; i < coreNum && i < 64; i++) {
2401 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
2402 core_map[worker_num_cores++] = i;
2405 nWorkerCore = nWorkerCore << 1;
2409 for (i = 64; i < coreNum && i < 128; i++) {
2410 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
2411 core_map[worker_num_cores++] = i;
2414 nWorkerCore = nWorkerCore << 1;
2417 extern int _may_i_use_cpu_feature(unsigned __int64);
2418 icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
2420 printf("O-XU %d\n", eth_ctx->io_cfg.id);
2421 printf("HW %d\n", icx_cpu);
2422 printf("Num cores %d\n", total_num_cores);
2423 printf("Num ports %d\n", fh_init->xran_ports);
2424 printf("O-RU Cat %d\n", fh_cfg->ru_conf.xranCat);
2425 printf("O-RU CC %d\n", fh_cfg->nCC);
2426 printf("O-RU eAxC %d\n", fh_cfg->neAxc);
2428 for (i = 0; i < fh_init->xran_ports; i++){
2429 xran_port_mask |= 1L<<i;
2432 for (i = 0; i < fh_init->xran_ports; i++) {
2433 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2434 if(p_dev_update == NULL){
2435 print_err("p_dev_update\n");
2436 return XRAN_STATUS_FAIL;
2438 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
2439 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
2440 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2441 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2444 if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
2445 switch(total_num_cores) {
2446 case 1: /** only timing core */
2447 eth_ctx->time_wrk_cfg.f = xran_all_tasks;
2448 eth_ctx->time_wrk_cfg.arg = NULL;
2449 eth_ctx->time_wrk_cfg.state = 1;
2452 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2453 eth_ctx->time_wrk_cfg.arg = NULL;
2454 eth_ctx->time_wrk_cfg.state = 1;
2456 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2458 print_err("pThCtx allocation error\n");
2459 return XRAN_STATUS_FAIL;
2461 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2462 pThCtx->worker_id = 0;
2463 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2464 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2465 pThCtx->task_func = ring_processing_func;
2466 pThCtx->task_arg = NULL;
2467 eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
2468 eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
2472 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2473 eth_ctx->time_wrk_cfg.arg = NULL;
2474 eth_ctx->time_wrk_cfg.state = 1;
2478 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2480 print_err("pThCtx allocation error\n");
2481 return XRAN_STATUS_FAIL;
2483 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2484 pThCtx->worker_id = 0;
2485 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2486 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2487 pThCtx->task_func = ring_processing_func;
2488 pThCtx->task_arg = NULL;
2489 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2490 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2492 for (i = 0; i < fh_init->xran_ports; i++) {
2493 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2494 if(p_dev_update == NULL) {
2495 print_err("p_dev_update\n");
2496 return XRAN_STATUS_FAIL;
2498 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2499 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2500 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2501 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2505 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2507 print_err("pThCtx allocation error\n");
2508 return XRAN_STATUS_FAIL;
2510 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2511 pThCtx->worker_id = 1;
2512 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2513 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2514 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2515 pThCtx->task_arg = (void*)xran_port_mask;
2516 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2517 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2520 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2521 return XRAN_STATUS_FAIL;
2523 } else if ((fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) || fh_init->io_cfg.bbu_offload) {
2524 switch(total_num_cores) {
2525 case 1: /** only timing core */
2526 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2527 return XRAN_STATUS_FAIL;
2530 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2531 eth_ctx->time_wrk_cfg.arg = NULL;
2532 eth_ctx->time_wrk_cfg.state = 1;
2534 if (p_dev->fh_init.io_cfg.bbu_offload)
2535 p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
2537 p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
2539 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2541 print_err("pThCtx allocation error\n");
2542 return XRAN_STATUS_FAIL;
2544 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2545 pThCtx->worker_id = 0;
2546 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2547 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2548 pThCtx->task_func = ring_processing_func;
2549 pThCtx->task_arg = NULL;
2550 eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
2551 eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
2556 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2557 eth_ctx->time_wrk_cfg.arg = NULL;
2558 eth_ctx->time_wrk_cfg.state = 1;
2562 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2564 print_err("pThCtx allocation error\n");
2565 return XRAN_STATUS_FAIL;
2567 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2568 pThCtx->worker_id = 0;
2569 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2570 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2571 pThCtx->task_func = ring_processing_func;
2572 pThCtx->task_arg = NULL;
2573 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2574 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2576 for (i = 0; i < fh_init->xran_ports; i++) {
2577 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2578 if(p_dev_update == NULL) {
2579 print_err("p_dev_update\n");
2580 return XRAN_STATUS_FAIL;
2582 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2583 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2584 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2585 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2589 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2591 print_err("pThCtx allocation error\n");
2592 return XRAN_STATUS_FAIL;
2594 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2595 pThCtx->worker_id = 1;
2596 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2597 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2598 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2599 pThCtx->task_arg = (void*)xran_port_mask;
2600 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2601 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2603 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2604 return XRAN_STATUS_FAIL;
2610 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2611 eth_ctx->time_wrk_cfg.arg = NULL;
2612 eth_ctx->time_wrk_cfg.state = 1;
2616 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2618 print_err("pThCtx allocation error\n");
2619 return XRAN_STATUS_FAIL;
2621 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2622 pThCtx->worker_id = 0;
2623 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2624 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2625 pThCtx->task_func = ring_processing_func;
2626 pThCtx->task_arg = NULL;
2627 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2628 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2631 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2633 print_err("pThCtx allocation error\n");
2634 return XRAN_STATUS_FAIL;
2636 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2637 pThCtx->worker_id = 1;
2638 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2639 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2640 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2641 pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
2642 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2643 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2646 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2648 print_err("pThCtx allocation error\n");
2649 return XRAN_STATUS_FAIL;
2651 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2652 pThCtx->worker_id = 2;
2653 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2654 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2655 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2656 pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
2657 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2658 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2660 for (i = 1; i < fh_init->xran_ports; i++) {
2661 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2662 if(p_dev_update == NULL) {
2663 print_err("p_dev_update\n");
2664 return XRAN_STATUS_FAIL;
2666 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2667 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2668 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2669 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2672 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2673 return XRAN_STATUS_FAIL;
2679 eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
2680 eth_ctx->time_wrk_cfg.arg = NULL;
2681 eth_ctx->time_wrk_cfg.state = 1;
2685 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2687 print_err("pThCtx allocation error\n");
2688 return XRAN_STATUS_FAIL;
2690 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2691 pThCtx->worker_id = 0;
2692 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2693 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2694 pThCtx->task_func = ring_processing_func;
2695 pThCtx->task_arg = NULL;
2696 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2697 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2700 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2702 print_err("pThCtx allocation error\n");
2703 return XRAN_STATUS_FAIL;
2705 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2706 pThCtx->worker_id = 1;
2707 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2708 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2709 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2710 pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
2711 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2712 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2715 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2717 print_err("pThCtx allocation error\n");
2718 return XRAN_STATUS_FAIL;
2720 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2721 pThCtx->worker_id = 2;
2722 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2723 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2724 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2725 pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
2726 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2727 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2730 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2732 print_err("pThCtx allocation error\n");
2733 return XRAN_STATUS_FAIL;
2735 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2736 pThCtx->worker_id = 3;
2737 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2738 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2739 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2740 pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
2741 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2742 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2744 for (i = 1; i < fh_init->xran_ports; i++) {
2745 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2746 if(p_dev_update == NULL) {
2747 print_err("p_dev_update\n");
2748 return XRAN_STATUS_FAIL;
2750 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2751 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2752 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2753 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2756 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2757 return XRAN_STATUS_FAIL;
2761 if(eth_ctx->io_cfg.id == O_DU) {
2763 eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
2764 eth_ctx->time_wrk_cfg.arg = NULL;
2765 eth_ctx->time_wrk_cfg.state = 1;
2769 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2771 print_err("pThCtx allocation error\n");
2772 return XRAN_STATUS_FAIL;
2774 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2775 pThCtx->worker_id = 0;
2776 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2777 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2778 pThCtx->task_func = ring_processing_func;
2779 pThCtx->task_arg = NULL;
2780 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2781 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2784 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2787 print_err("pThCtx allocation error\n");
2788 return XRAN_STATUS_FAIL;
2790 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2791 pThCtx->worker_id = 1;
2792 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2793 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2794 pThCtx->task_func = process_dpdk_io_tx;
2795 pThCtx->task_arg = (void*)2;
2796 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2797 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2800 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2802 print_err("pThCtx allocation error\n");
2803 return XRAN_STATUS_FAIL;
2805 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2806 pThCtx->worker_id = 2;
2807 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2808 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2809 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2810 pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
2811 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2812 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2815 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2817 print_err("pThCtx allocation error\n");
2818 return XRAN_STATUS_FAIL;
2820 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2821 pThCtx->worker_id = 3;
2822 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2823 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2824 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2825 pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
2826 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2827 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2830 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2832 print_err("pThCtx allocation error\n");
2833 return XRAN_STATUS_FAIL;
2835 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2836 pThCtx->worker_id = 4;
2837 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2838 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2839 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2840 pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
2841 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2842 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2844 for (i = 0; i < fh_init->xran_ports; i++) {
2845 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2846 if(p_dev_update == NULL) {
2847 print_err("p_dev_update\n");
2848 return XRAN_STATUS_FAIL;
2850 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id;
2851 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id;
2852 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2853 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2855 } else if(eth_ctx->io_cfg.id == O_RU) {
2856 /*** O_RU specific config */
2858 eth_ctx->time_wrk_cfg.f = NULL;
2859 eth_ctx->time_wrk_cfg.arg = NULL;
2860 eth_ctx->time_wrk_cfg.state = 1;
2864 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2866 print_err("pThCtx allocation error\n");
2867 return XRAN_STATUS_FAIL;
2869 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2870 pThCtx->worker_id = 0;
2871 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2872 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
2873 pThCtx->task_func = process_dpdk_io_rx;
2874 pThCtx->task_arg = NULL;
2875 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2876 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2878 /** 1 FH RX and BBDEV */
2879 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2881 print_err("pThCtx allocation error\n");
2882 return XRAN_STATUS_FAIL;
2884 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2885 pThCtx->worker_id = 1;
2886 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2887 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
2888 pThCtx->task_func = ring_processing_func_per_port;
2889 pThCtx->task_arg = (void*)0;
2890 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2891 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2893 /** 2 FH RX and BBDEV */
2894 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2896 print_err("pThCtx allocation error\n");
2897 return XRAN_STATUS_FAIL;
2899 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2900 pThCtx->worker_id = 2;
2901 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2902 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
2903 pThCtx->task_func = ring_processing_func_per_port;
2904 pThCtx->task_arg = (void*)1;
2905 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2906 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2908 /** 3 FH RX and BBDEV */
2909 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2911 print_err("pThCtx allocation error\n");
2912 return XRAN_STATUS_FAIL;
2914 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2915 pThCtx->worker_id = 3;
2916 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2917 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
2918 pThCtx->task_func = ring_processing_func_per_port;
2919 pThCtx->task_arg = (void*)2;
2920 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2921 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2923 /** FH TX and BBDEV */
2924 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2926 print_err("pThCtx allocation error\n");
2927 return XRAN_STATUS_FAIL;
2929 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2930 pThCtx->worker_id = 4;
2931 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2932 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2933 pThCtx->task_func = process_dpdk_io_tx;
2934 pThCtx->task_arg = (void*)2;
2935 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2936 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2938 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2939 return XRAN_STATUS_FAIL;
2943 print_err("unsupported configuration\n");
2944 return XRAN_STATUS_FAIL;
2946 } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
2947 switch(total_num_cores) {
2949 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2950 return XRAN_STATUS_FAIL;
2954 if(fh_init->xran_ports == 2)
2955 worker_ports = (void *)((1L<<0 | 1L<<1) & xran_port_mask);
2956 else if(fh_init->xran_ports == 3)
2957 worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2) & xran_port_mask);
2958 else if(fh_init->xran_ports == 4)
2959 worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2 | 1L<<3) & xran_port_mask);
2962 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2963 return XRAN_STATUS_FAIL;
2966 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2967 eth_ctx->time_wrk_cfg.arg = NULL;
2968 eth_ctx->time_wrk_cfg.state = 1;
2970 /* p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; */
2972 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2974 print_err("pThCtx allocation error\n");
2975 return XRAN_STATUS_FAIL;
2977 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2978 pThCtx->worker_id = 0;
2979 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2980 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2981 pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
2982 pThCtx->task_arg = worker_ports;
2983 eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
2984 eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
2986 for (i = 1; i < fh_init->xran_ports; i++) {
2987 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2988 if(p_dev_update == NULL) {
2989 print_err("p_dev_update\n");
2990 return XRAN_STATUS_FAIL;
2992 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2993 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2994 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2995 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3001 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
3002 eth_ctx->time_wrk_cfg.arg = NULL;
3003 eth_ctx->time_wrk_cfg.state = 1;
3007 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3009 print_err("pThCtx allocation error\n");
3010 return XRAN_STATUS_FAIL;
3012 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3013 pThCtx->worker_id = 0;
3014 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3015 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3016 pThCtx->task_func = ring_processing_func;
3017 pThCtx->task_arg = NULL;
3018 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3019 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3021 for (i = 1; i < fh_init->xran_ports; i++) {
3022 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3023 if(p_dev_update == NULL) {
3024 print_err("p_dev_update\n");
3025 return XRAN_STATUS_FAIL;
3027 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3028 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3029 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3030 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3034 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3036 print_err("pThCtx allocation error\n");
3037 return XRAN_STATUS_FAIL;
3039 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3040 pThCtx->worker_id = 1;
3041 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3042 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3043 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3044 pThCtx->task_arg = (void*)xran_port_mask;
3045 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3046 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3050 if(fh_init->xran_ports == 3)
3051 worker_ports = (void *)(1L<<2 & xran_port_mask);
3052 else if(fh_init->xran_ports == 4)
3053 worker_ports = (void *)((1L<<2 | 1L<<3) & xran_port_mask);
3055 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3056 return XRAN_STATUS_FAIL;
3059 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
3060 eth_ctx->time_wrk_cfg.arg = NULL;
3061 eth_ctx->time_wrk_cfg.state = 1;
3065 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3067 print_err("pThCtx allocation error\n");
3068 return XRAN_STATUS_FAIL;
3070 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3071 pThCtx->worker_id = 0;
3072 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3073 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3074 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3075 pThCtx->task_arg = (void *)((1L<<0|1L<<1) & xran_port_mask);
3076 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3077 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3079 for (i = 1; i < fh_init->xran_ports; i++) {
3080 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3081 if(p_dev_update == NULL) {
3082 print_err("p_dev_update\n");
3083 return XRAN_STATUS_FAIL;
3085 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3086 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3087 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3088 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3092 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3094 print_err("pThCtx allocation error\n");
3095 return XRAN_STATUS_FAIL;
3097 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3098 pThCtx->worker_id = 1;
3099 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3100 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3101 pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
3102 pThCtx->task_arg = worker_ports;
3103 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3104 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3112 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
3113 eth_ctx->time_wrk_cfg.arg = NULL;
3114 eth_ctx->time_wrk_cfg.state = 1;
3118 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3120 print_err("pThCtx allocation error\n");
3121 return XRAN_STATUS_FAIL;
3123 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3124 pThCtx->worker_id = 0;
3125 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3126 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3127 pThCtx->task_func = ring_processing_func;
3128 pThCtx->task_arg = NULL;
3129 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3130 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3133 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3135 print_err("pThCtx allocation error\n");
3136 return XRAN_STATUS_FAIL;
3138 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3139 pThCtx->worker_id = 1;
3140 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3141 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3142 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3143 pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2)) & xran_port_mask);
3144 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3145 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3148 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3150 print_err("pThCtx allocation error\n");
3151 return XRAN_STATUS_FAIL;
3153 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3154 pThCtx->worker_id = 2;
3155 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3156 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3157 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3158 pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
3159 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3160 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3162 for (i = 1; i < fh_init->xran_ports; i++) {
3163 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3164 if(p_dev_update == NULL) {
3165 print_err("p_dev_update\n");
3166 return XRAN_STATUS_FAIL;
3168 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3169 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3170 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3171 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3175 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3176 return XRAN_STATUS_FAIL;
3181 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
3182 eth_ctx->time_wrk_cfg.arg = NULL;
3183 eth_ctx->time_wrk_cfg.state = 1;
3186 /** 0 FH RX and BBDEV */
3187 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3189 print_err("pThCtx allocation error\n");
3190 return XRAN_STATUS_FAIL;
3192 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3193 pThCtx->worker_id = 0;
3194 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3195 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3196 pThCtx->task_func = ring_processing_func;
3197 pThCtx->task_arg = NULL;
3198 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3199 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3202 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3204 print_err("pThCtx allocation error\n");
3205 return XRAN_STATUS_FAIL;
3207 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3208 pThCtx->worker_id = 1;
3209 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3210 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3211 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3212 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
3213 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3214 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3217 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3219 print_err("pThCtx allocation error\n");
3220 return XRAN_STATUS_FAIL;
3222 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3223 pThCtx->worker_id = 2;
3224 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3225 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
3226 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3227 pThCtx->task_arg = (void*)((1<<1) & xran_port_mask);
3228 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3229 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3232 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3234 print_err("pThCtx allocation error\n");
3235 return XRAN_STATUS_FAIL;
3237 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3238 pThCtx->worker_id = 3;
3239 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3240 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
3241 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3242 pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
3243 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3244 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3247 if(eth_ctx->io_cfg.id == O_DU && 0 == fh_init->dlCpProcBurst) {
3248 for (i = 1; i < fh_init->xran_ports; i++) {
3249 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3250 if(p_dev_update == NULL) {
3251 print_err("p_dev_update\n");
3252 return XRAN_STATUS_FAIL;
3254 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = i+1;
3255 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3261 if(eth_ctx->io_cfg.id == O_DU){
3263 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
3264 eth_ctx->time_wrk_cfg.arg = NULL;
3265 eth_ctx->time_wrk_cfg.state = 1;
3269 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3271 print_err("pThCtx allocation error\n");
3272 return XRAN_STATUS_FAIL;
3274 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3275 pThCtx->worker_id = 0;
3276 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3277 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3278 pThCtx->task_func = ring_processing_func;
3279 pThCtx->task_arg = NULL;
3280 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3281 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3284 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3286 print_err("pThCtx allocation error\n");
3287 return XRAN_STATUS_FAIL;
3289 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3290 pThCtx->worker_id = 1;
3291 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3292 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3293 pThCtx->task_func = xran_processing_timer_only_func;
3294 pThCtx->task_arg = NULL;
3295 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3296 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3299 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3301 print_err("pThCtx allocation error\n");
3302 return XRAN_STATUS_FAIL;
3304 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3305 pThCtx->worker_id = 2;
3306 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3307 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3308 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3309 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
3310 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3311 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3314 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3316 print_err("pThCtx allocation error\n");
3317 return XRAN_STATUS_FAIL;
3319 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3320 pThCtx->worker_id = 3;
3321 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3322 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3323 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3324 pThCtx->task_arg = (void*)((1<<1) & xran_port_mask);
3325 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3326 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3329 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3331 print_err("pThCtx allocation error\n");
3332 return XRAN_STATUS_FAIL;
3334 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3335 pThCtx->worker_id = 4;
3336 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3337 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3338 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3339 pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
3340 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3341 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3343 /*** O_RU specific config */
3345 eth_ctx->time_wrk_cfg.f = NULL;
3346 eth_ctx->time_wrk_cfg.arg = NULL;
3347 eth_ctx->time_wrk_cfg.state = 1;
3351 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3353 print_err("pThCtx allocation error\n");
3354 return XRAN_STATUS_FAIL;
3356 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3357 pThCtx->worker_id = 0;
3358 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3359 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
3360 pThCtx->task_func = process_dpdk_io_rx;
3361 pThCtx->task_arg = NULL;
3362 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3363 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3365 /** 1 FH RX and BBDEV */
3366 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3368 print_err("pThCtx allocation error\n");
3369 return XRAN_STATUS_FAIL;
3371 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3372 pThCtx->worker_id = 1;
3373 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3374 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
3375 pThCtx->task_func = ring_processing_func_per_port;
3376 pThCtx->task_arg = (void*)0;
3377 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3378 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3380 /** 2 FH RX and BBDEV */
3381 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3383 print_err("pThCtx allocation error\n");
3384 return XRAN_STATUS_FAIL;
3386 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3387 pThCtx->worker_id = 2;
3388 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3389 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
3390 pThCtx->task_func = ring_processing_func_per_port;
3391 pThCtx->task_arg = (void*)1;
3392 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3393 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3395 /** 3 FH RX and BBDEV */
3396 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3398 print_err("pThCtx allocation error\n");
3399 return XRAN_STATUS_FAIL;
3401 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3402 pThCtx->worker_id = 3;
3403 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3404 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
3405 pThCtx->task_func = ring_processing_func_per_port;
3406 pThCtx->task_arg = (void*)2;
3407 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3408 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3410 /** FH TX and BBDEV */
3411 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3413 print_err("pThCtx allocation error\n");
3414 return XRAN_STATUS_FAIL;
3416 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3417 pThCtx->worker_id = 4;
3418 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3419 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
3420 pThCtx->task_func = process_dpdk_io_tx;
3421 pThCtx->task_arg = NULL;
3422 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3423 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3427 /*** O_RU specific config */
3428 if((fh_init->xran_ports == 4) && (eth_ctx->io_cfg.id == O_RU))
3430 /*** O_RU specific config */
3432 eth_ctx->time_wrk_cfg.f = NULL;
3433 eth_ctx->time_wrk_cfg.arg = NULL;
3434 eth_ctx->time_wrk_cfg.state = 1;
3438 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3440 print_err("pThCtx allocation error\n");
3441 return XRAN_STATUS_FAIL;
3443 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3444 pThCtx->worker_id = 0;
3445 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3446 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
3447 pThCtx->task_func = process_dpdk_io_rx;
3448 pThCtx->task_arg = NULL;
3449 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3450 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3452 /** 1 FH RX and BBDEV */
3453 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3455 print_err("pThCtx allocation error\n");
3456 return XRAN_STATUS_FAIL;
3458 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3459 pThCtx->worker_id = 1;
3460 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3461 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
3462 pThCtx->task_func = ring_processing_func_per_port;
3463 pThCtx->task_arg = (void*)0;
3464 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3465 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3467 /** 2 FH RX and BBDEV */
3468 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3470 print_err("pThCtx allocation error\n");
3471 return XRAN_STATUS_FAIL;
3473 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3474 pThCtx->worker_id = 2;
3475 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3476 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
3477 pThCtx->task_func = ring_processing_func_per_port;
3478 pThCtx->task_arg = (void*)1;
3479 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3480 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3482 /** 3 FH RX and BBDEV */
3483 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3485 print_err("pThCtx allocation error\n");
3486 return XRAN_STATUS_FAIL;
3488 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3489 pThCtx->worker_id = 3;
3490 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3491 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
3492 pThCtx->task_func = ring_processing_func_per_port;
3493 pThCtx->task_arg = (void*)2;
3494 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3495 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3497 /** 4 FH RX and BBDEV */
3498 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3500 print_err("pThCtx allocation error\n");
3501 return XRAN_STATUS_FAIL;
3503 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3504 pThCtx->worker_id = 4;
3505 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3506 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p3", core_map[pThCtx->worker_id]);
3507 pThCtx->task_func = ring_processing_func_per_port;
3508 pThCtx->task_arg = (void*)3;
3509 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3510 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3512 /** FH TX and BBDEV */
3513 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3515 print_err("pThCtx allocation error\n");
3516 return XRAN_STATUS_FAIL;
3518 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3519 pThCtx->worker_id = 5;
3520 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3521 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
3522 pThCtx->task_func = process_dpdk_io_tx;
3523 pThCtx->task_arg = NULL;
3524 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3525 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3527 } /* -- if xran->ports == 4 -- */
3528 else if(eth_ctx->io_cfg.id == O_DU){
3529 if(fh_init->xran_ports == 3)
3530 worker_ports = (void *)((1<<2) & xran_port_mask);
3531 else if(fh_init->xran_ports == 4)
3532 worker_ports = (void *)((1<<3) & xran_port_mask);
3534 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
3535 eth_ctx->time_wrk_cfg.arg = NULL;
3536 eth_ctx->time_wrk_cfg.state = 1;
3540 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3542 print_err("pThCtx allocation error\n");
3543 return XRAN_STATUS_FAIL;
3545 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3546 pThCtx->worker_id = 0;
3547 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3548 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
3549 pThCtx->task_func = ring_processing_func;
3550 pThCtx->task_arg = NULL;
3551 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3552 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3554 for (i = 2; i < fh_init->xran_ports; i++) {
3555 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3556 if(p_dev_update == NULL) {
3557 print_err("p_dev_update\n");
3558 return XRAN_STATUS_FAIL;
3560 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
3561 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
3565 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3567 print_err("pThCtx allocation error\n");
3568 return XRAN_STATUS_FAIL;
3570 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3571 pThCtx->worker_id = 1;
3572 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3573 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
3574 pThCtx->task_func = xran_processing_timer_only_func;
3575 pThCtx->task_arg = NULL;
3576 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3577 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3580 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3582 print_err("pThCtx allocation error\n");
3583 return XRAN_STATUS_FAIL;
3585 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3586 pThCtx->worker_id = 2;
3587 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3588 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3589 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3590 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
3591 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3592 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3594 for (i = (fh_init->xran_ports-1); i < fh_init->xran_ports; i++) {
3595 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3596 if(p_dev_update == NULL) {
3597 print_err("p_dev_update\n");
3598 return XRAN_STATUS_FAIL;
3600 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3601 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3605 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3607 print_err("pThCtx allocation error\n");
3608 return XRAN_STATUS_FAIL;
3610 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3611 pThCtx->worker_id = 3;
3612 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3613 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3614 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3615 pThCtx->task_arg = (void*)((1<<1) & xran_port_mask);
3616 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3617 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3619 for (i = (fh_init->xran_ports - 2); i < (fh_init->xran_ports - 1); i++) {
3620 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
3621 if(p_dev_update == NULL) {
3622 print_err("p_dev_update\n");
3623 return XRAN_STATUS_FAIL;
3625 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
3626 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
3630 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3632 print_err("pThCtx allocation error\n");
3633 return XRAN_STATUS_FAIL;
3635 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3636 pThCtx->worker_id = 4;
3637 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3638 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3639 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3640 pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
3641 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3642 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3645 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
3647 print_err("pThCtx allocation error\n");
3648 return XRAN_STATUS_FAIL;
3650 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
3651 pThCtx->worker_id = 5;
3652 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
3653 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
3654 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
3655 pThCtx->task_arg = worker_ports;
3656 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
3657 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
3660 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3661 return XRAN_STATUS_FAIL;
3666 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
3667 return XRAN_STATUS_FAIL;
3670 print_err("unsupported configuration\n");
3671 return XRAN_STATUS_FAIL;
3675 if(eth_ctx->io_cfg.pkt_proc_core) {
3676 for (i = 0; i < coreNum && i < 64; i++) {
3677 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
3678 xran_core_used[xran_num_cores_used++] = i;
3679 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
3680 rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
3681 eth_ctx->pkt_wrk_cfg[i].state = 1;
3682 if(eth_ctx->pkt_proc_core_id == 0)
3683 eth_ctx->pkt_proc_core_id = i;
3684 printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
3685 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
3687 nWorkerCore = nWorkerCore << 1;
3692 if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
3693 for (i = 64; i < coreNum && i < 128; i++) {
3694 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
3695 xran_core_used[xran_num_cores_used++] = i;
3696 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
3697 rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
3698 eth_ctx->pkt_wrk_cfg[i].state = 1;
3699 if(eth_ctx->pkt_proc_core_id == 0)
3700 eth_ctx->pkt_proc_core_id = i;
3701 printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
3702 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
3704 nWorkerCore = nWorkerCore << 1;
3708 return XRAN_STATUS_SUCCESS;
3711 xran_open(void *pHandle, struct xran_fh_config* pConf)
3713 int32_t ret = XRAN_STATUS_SUCCESS;
3715 uint8_t nNumerology = 0;
3716 struct xran_device_ctx *p_xran_dev_ctx = NULL;
3717 struct xran_fh_config *pFhCfg = NULL;
3718 struct xran_fh_init *fh_init = NULL;
3719 struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
3720 int32_t wait_time = 10;
3721 int64_t offset_sec, offset_nsec;
3723 if(pConf->dpdk_port < XRAN_PORTS_NUM) {
3724 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pConf->dpdk_port);
3726 print_err("@0x%p [ru %d ] pConf->dpdk_port > XRAN_PORTS_NUM\n", pConf, pConf->dpdk_port);
3727 return XRAN_STATUS_FAIL;
3730 if(p_xran_dev_ctx == NULL) {
3731 print_err("[ru %d] p_xran_dev_ctx == NULL ", pConf->dpdk_port);
3732 return XRAN_STATUS_FAIL;
3735 pFhCfg = &p_xran_dev_ctx->fh_cfg;
3736 memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
3738 fh_init = &p_xran_dev_ctx->fh_init;
3740 return XRAN_STATUS_FAIL;
3742 if(pConf->log_level) {
3743 printf(" %s: %s Category %s\n", __FUNCTION__,
3744 (pFhCfg->ru_conf.xranTech == XRAN_RAN_5GNR) ? "5G NR" : "LTE",
3745 (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
3748 p_xran_dev_ctx->enableCP = pConf->enableCP;
3749 p_xran_dev_ctx->enablePrach = pConf->prachEnable;
3750 p_xran_dev_ctx->enableSrs = pConf->srsEnable;
3751 p_xran_dev_ctx->enableSrsCp = pConf->srsEnableCp;
3752 p_xran_dev_ctx->nSrsDelaySym = pConf->SrsDelaySym;
3753 p_xran_dev_ctx->puschMaskEnable = pConf->puschMaskEnable;
3754 p_xran_dev_ctx->puschMaskSlot = pConf->puschMaskSlot;
3755 p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna;
3756 p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable = pConf->RunSlotPrbMapBySymbolEnable;
3757 p_xran_dev_ctx->dssEnable = pConf->dssEnable;
3758 p_xran_dev_ctx->dssPeriod = pConf->dssPeriod;
3759 for(i=0; i<pConf->dssPeriod; i++) {
3760 p_xran_dev_ctx->technology[i] = pConf->technology[i];
3763 if(pConf->GPS_Alpha || pConf->GPS_Beta ){
3764 offset_sec = pConf->GPS_Beta / 100; /* resolution of beta is 10ms */
3765 offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 1e7 + pConf->GPS_Alpha;
3766 p_xran_dev_ctx->offset_sec = offset_sec;
3767 p_xran_dev_ctx->offset_nsec = offset_nsec;
3769 p_xran_dev_ctx->offset_sec = 0;
3770 p_xran_dev_ctx->offset_nsec = 0;
3774 nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
3776 if (pConf->nCC > XRAN_MAX_SECTOR_NR) {
3777 if(pConf->log_level)
3778 printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);
3779 pConf->nCC = XRAN_MAX_SECTOR_NR;
3782 if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) {
3783 print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);
3784 return XRAN_STATUS_FAIL;
3787 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) {
3788 if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) {
3793 /* setup PRACH configuration for C-Plane */
3794 if(pConf->dssEnable){
3795 if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR))< 0)
3797 if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0)
3801 if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) {
3802 if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR))< 0){
3805 } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) {
3806 if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0){
3812 if((ret = xran_init_srs(pConf, p_xran_dev_ctx))< 0){
3816 if((ret = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){
3820 if((ret = xran_init_sectionid(p_xran_dev_ctx)) < 0){
3824 if((ret = xran_init_seqid(p_xran_dev_ctx)) < 0){
3828 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
3829 if((ret = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) {
3833 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) {
3834 if((ret = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) {
3840 if(pConf->ru_conf.xran_max_frame) {
3841 xran_max_frame = pConf->ru_conf.xran_max_frame;
3842 printf("xran_max_frame %d\n", xran_max_frame);
3845 p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology);
3846 if (interval_us > p_xran_dev_ctx->interval_us_local)
3848 interval_us = xran_fs_get_tti_interval(nNumerology); //only update interval_us based on maximum numerology
3851 // if(pConf->log_level){
3852 printf("%s: interval_us=%ld, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local);
3854 if (nNumerology >= timing_get_numerology())
3856 timing_set_numerology(nNumerology);
3859 for(i = 0 ; i <pConf->nCC; i++){
3860 xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
3861 pConf->frame_conf.sSlotConfig);
3864 xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology));
3866 /* if send_xpmbuf2ring needs to be changed from default functions,
3867 * then those should be set between xran_init and xran_open */
3868 if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
3869 p_xran_dev_ctx->send_cpmbuf2ring = xran_ethdi_mbuf_send_cp;
3870 if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
3871 p_xran_dev_ctx->send_upmbuf2ring = xran_ethdi_mbuf_send;
3873 if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
3874 if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
3875 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
3877 if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
3878 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
3881 if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
3882 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
3883 printf("bbu_offload %d\n", p_xran_dev_ctx->fh_init.io_cfg.bbu_offload);
3884 if(pConf->dpdk_port == 0) {
3885 /* create all thread on open of port 0 */
3886 xran_num_cores_used = 0;
3887 if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
3888 eth_ctx->bbdev_dec = pConf->bbdev_dec;
3889 eth_ctx->bbdev_enc = pConf->bbdev_enc;
3892 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
3893 printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
3894 p_xran_dev_ctx->timing_source_thread_running = 0;
3895 xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
3896 if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
3897 rte_panic("thread_run() failed to start\n");
3898 } else if(pConf->log_level) {
3899 printf("Eth port was not open. Processing thread was not started\n");
3902 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
3903 if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
3909 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
3910 if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
3911 if((ret = xran_spawn_workers()) < 0) {
3915 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, sched_getcpu(), getpid());
3916 printf("Waiting on Timing thread...\n");
3917 while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
3922 print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
/**
 * @brief Start xRAN traffic processing.
 *
 * Snapshots the per-section beamforming-weight counts from the port-0 RX
 * PRB map, logs the UTC start time, then transitions the interface state
 * to XRAN_OWDM when one-way delay measurement is enabled for this role,
 * otherwise to XRAN_RUNNING.
 *
 * @param pHandle opaque library instance handle
 */
3927 xran_start(void *pHandle)
3930 /* ToS = Top of Second start +- 1.5us */
3934 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
3935 struct xran_prb_map * prbMap0 = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[0][0][0].sBufferList.pBuffers->pData;
/* Cache numSetBFWs per PRB element so later processing need not re-read the map. */
3936 for(i = 0; i < XRAN_MAX_SECTIONS_PER_SLOT && i < prbMap0->nPrbElm; i++)
3938 p_xran_dev_ctx->numSetBFWs_arr[i] = prbMap0->prbMap[i].bf_weight.numSetBFWs;
/* Starting twice is rejected. */
3941 if(xran_get_if_state() == XRAN_RUNNING) {
3942 print_err("Already STARTED!!");
/* Log the wall-clock (UTC) start time and the active TTI interval. */
3945 timespec_get(&ts, TIME_UTC);
3946 ptm = gmtime(&ts.tv_sec);
3948 strftime(buff, sizeof(buff), "%D %T", ptm);
3949 printf("%s: XRAN start time: %s.%09ld UTC [%ld]\n",
3950 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
/* eCPRI one-way delay measurement, when enabled for this role (O-DU/O-RU),
 * runs first: start in the OWDM state instead of RUNNING. */
3953 if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable)
3955 xran_if_current_state = XRAN_OWDM;
3959 xran_if_current_state = XRAN_RUNNING;
/**
 * @brief Stop xRAN traffic processing by moving the interface state to
 * XRAN_STOPPED. Stopping an already-stopped interface is reported as an error.
 *
 * @param pHandle opaque library instance handle
 */
3965 xran_stop(void *pHandle)
3967 if(xran_get_if_state() == XRAN_STOPPED) {
3968 print_err("Already STOPPED!!");
3972 xran_if_current_state = XRAN_STOPPED;
/**
 * @brief Close the xRAN device: force the interface state to XRAN_STOPPED,
 * free the C-Plane section database, and release O-RU emulation resources
 * when running in the O-RU role.
 *
 * @param pHandle opaque library instance handle
 * @return status from the section-database teardown
 */
3977 xran_close(void *pHandle)
3979 int32_t ret = XRAN_STATUS_SUCCESS;
3980 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
3982 xran_if_current_state = XRAN_STOPPED;
3983 ret = xran_cp_free_sectiondb(p_xran_dev_ctx);
3985 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)
3986 xran_ruemul_release(p_xran_dev_ctx);
3988 #ifdef RTE_LIBRTE_PDUMP
3989 /* uninitialize packet capture framework */
3995 /* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open
3996 * each cb will be set by default during open if it is set to NULL */
/**
 * @brief Register custom mbuf-send callbacks for C-Plane (mbuf_send_cp) and
 * U-Plane (mbuf_send_up) transmission. Registration is rejected while the
 * interface is in the RUNNING state.
 */
3998 xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
4000 struct xran_device_ctx *p_xran_dev_ctx;
4002 if(xran_get_if_state() == XRAN_RUNNING) {
4003 print_err("Cannot register callback while running!!\n");
4007 p_xran_dev_ctx = xran_dev_get_ctx();
4009 p_xran_dev_ctx->send_cpmbuf2ring = mbuf_send_cp;
4010 p_xran_dev_ctx->send_upmbuf2ring = mbuf_send_up;
/* NOTE(review): registering callbacks also forces the CP-on TX symbol path;
 * confirm this reset is intended for Cat B configurations as well. */
4012 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
/**
 * @brief Report the current over-the-air position for a port.
 *
 * Derives the TTI from the port's OTA symbol counter, then decomposes it
 * into slot, subframe and frame indices for the port's local TTI interval,
 * and reads the current second from the timing module.
 *
 * @param PortId       xRAN port (device context) index
 * @param nFrameIdx    [out] frame number
 * @param nSubframeIdx [out] subframe number within the frame
 * @param nSlotIdx     [out] slot number within the subframe
 * @param nSecond      [out] current second from the timing source
 */
4018 xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond)
4021 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
4022 if (!p_xran_dev_ctx)
4024 print_err("Null xRAN context on port id %u!!\n", PortId);
4028 tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT);
4029 *nSlotIdx = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
4030 *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
4031 *nFrameIdx = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
4032 *nSecond = timing_get_current_second();
/**
 * @brief Thin wrapper: forward the debug-stop request (value, count) to the
 * timing module.
 */
4038 xran_set_debug_stop(int32_t value, int32_t count)
4040 return timing_set_debug_stop(value, count);
/**
 * @brief Count how many PRB elements (U-Plane sections) the map will need
 * once elements wider than the per-packet RB budget are fragmented to fit
 * the given MTU.
 *
 * The RB budget per packet (nmaxRB) is computed from the MTU minus the
 * eth/xRAN headers, divided by the size of one RB at the map's iqWidth
 * (plus one data-section header per RB budget slice).
 *
 * @param p_PrbMapIn input PRB map (iqWidth taken from element 0)
 * @param mtu        Ethernet MTU in bytes
 * @return number of PRB elements after MTU fragmentation
 */
4044 int32_t xran_get_num_prb_elm(struct xran_prb_map* p_PrbMapIn, uint32_t mtu)
4047 int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4048 struct xran_prb_elm *p_prb_elm_src;
4050 // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
4051 // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
4052 int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4053 int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4057 nmaxRB--; //for some reason when mtu is 9600, only 195 RB can be sent, not 196
4059 for (i = 0;i < p_PrbMapIn->nPrbElm; i++)
4061 p_prb_elm_src = &p_PrbMapIn->prbMap[i];
4062 if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
/* Element exceeds the budget: one full-size fragment plus however many
 * fragments the remainder needs. */
4068 nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4070 while (nRBremain > 0)
4072 nRBSize = RTE_MIN(nmaxRB, nRBremain);
4073 nRBremain -= nRBSize;
/**
 * @brief Build a TX-side PRB map from a configured map, fragmenting any
 * element wider than the per-packet RB budget implied by the MTU.
 *
 * The first fragment of an element keeps IsNewSect=1; continuation
 * fragments are marked IsNewSect=0 and advance UP_nRBStart. All fragments
 * of source element i share nSectId = i. Only the UP_* (U-Plane) fields
 * are rewritten; the C-Plane nRBStart/nRBSize stay as copied.
 *
 * @param p_PrbMapIn  source PRB map (iqWidth taken from element 0)
 * @param p_PrbMapOut destination map; header copied from source, prbMap[]
 *                    repopulated, nPrbElm set to the fragment count
 * @param mtu         Ethernet MTU in bytes
 */
4083 int32_t xran_init_PrbMap_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
4086 int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4087 struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
4088 int32_t nRBStart_tmp, nRBremain;
4089 // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
4090 // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
4091 int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4092 int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4095 nmaxRB--; //for some reason when mtu is 9600, only 195 RB can be sent, not 196
/* Copy the map header/metadata, then rebuild the element array. */
4097 memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
4098 for (i = 0;i < p_PrbMapIn->nPrbElm; i++)
4100 p_prb_elm_src = &p_PrbMapIn->prbMap[i];
4101 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4102 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4104 // int32_t nStartSymb, nEndSymb, numSymb, nRBStart, nRBEnd, nRBSize;
4105 // nStartSymb = p_prb_elm_src->nStartSymb;
4106 // nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
4107 if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
4109 p_prb_elm_dst->IsNewSect = 1;
4110 p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
4111 p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4112 p_prb_elm_dst->nSectId = i;
/* Fragmentation: first fragment carries nmaxRB RBs and opens the section. */
4117 nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
4118 nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4119 p_prb_elm_dst->IsNewSect = 1;
4120 p_prb_elm_dst->UP_nRBSize = nmaxRB;
4121 p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4122 p_prb_elm_dst->nSectId = i;
/* Continuation fragments (IsNewSect=0) cover the remainder. */
4124 while (nRBremain > 0)
4126 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4127 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4128 p_prb_elm_dst->IsNewSect = 0;
4129 p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
4130 p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
4131 nRBremain -= p_prb_elm_dst->UP_nRBSize;
4132 nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
4133 p_prb_elm_dst->nSectId = i;
4139 p_PrbMapOut->nPrbElm = j;
/**
 * @brief Build an RX-side PRB map from a configured map.
 *
 * Differs from the TX variant in three ways: the per-packet RB budget is
 * multiplied by XRAN_MAX_FRAGMENT (RX reassembles up to that many U-Plane
 * fragments per section), every split element keeps IsNewSect=1 (each split
 * is its own section on receive), and both the C-Plane fields
 * (nRBStart/nRBSize) and the U-Plane fields (UP_*) are rewritten. Section
 * IDs are assigned from the output index j rather than the source index.
 *
 * @param p_PrbMapIn  source PRB map (iqWidth taken from element 0)
 * @param p_PrbMapOut destination map; nPrbElm set to the resulting count
 * @param mtu         Ethernet MTU in bytes
 */
4144 int32_t xran_init_PrbMap_from_cfg_for_rx(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
4147 int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4148 struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
4149 int32_t nRBStart_tmp, nRBremain;
4150 // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
4151 // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
4152 int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4153 int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4156 nmaxRB--; //for some reason when mtu is 9600, only 195 RB can be sent, not 196
/* RX can reassemble up to XRAN_MAX_FRAGMENT fragments per section, so the
 * effective per-section RB budget is that many packets' worth. */
4157 nmaxRB *= XRAN_MAX_FRAGMENT;
4159 memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
4160 for (i = 0;i < p_PrbMapIn->nPrbElm; i++)
4162 p_prb_elm_src = &p_PrbMapIn->prbMap[i];
4163 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4164 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4166 if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
4168 p_prb_elm_dst->IsNewSect = 1;
4169 p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
4170 p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4171 p_prb_elm_dst->nSectId = j;
/* Split: first piece takes the full budget; note nRBSize (C-Plane view)
 * is updated as well, unlike the TX variant. */
4176 nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
4177 nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4178 p_prb_elm_dst->IsNewSect = 1;
4179 p_prb_elm_dst->nRBSize = nmaxRB;
4180 p_prb_elm_dst->UP_nRBSize = nmaxRB;
4181 p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4182 p_prb_elm_dst->nSectId = j;
/* Remaining pieces are each an independent section (IsNewSect=1). */
4184 while (nRBremain > 0)
4186 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4187 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4188 p_prb_elm_dst->IsNewSect = 1;
4189 p_prb_elm_dst->nRBSize = RTE_MIN(nmaxRB, nRBremain);
4190 p_prb_elm_dst->nRBStart = nRBStart_tmp;
4191 p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
4192 p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
4193 nRBremain -= p_prb_elm_dst->UP_nRBSize;
4194 nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
4195 p_prb_elm_dst->nSectId = j;
4201 p_PrbMapOut->nPrbElm = j;
/**
 * @brief Build a per-symbol PRB map from a configured map.
 *
 * Three passes:
 *  1. For each of the XRAN_NUM_OF_SYMBOL_PER_SLOT symbols, compute the
 *     union [min nRBStart, max nRBStart+nRBSize) of all input elements
 *     covering that symbol (one temp element per symbol, numSymb=1).
 *  2. Merge runs of consecutive symbols that share the same RB range into
 *     single elements (numSymb accumulates).
 *  3. Fragment the merged elements to the MTU budget, exactly as in
 *     xran_init_PrbMap_from_cfg (first fragment IsNewSect=1, continuations
 *     IsNewSect=0, nSectId from the merged-element index).
 *
 * @param p_PrbMapIn   source PRB map (iqWidth taken from element 0)
 * @param p_PrbMapOut  destination map; nPrbElm set to the fragment count
 * @param mtu          Ethernet MTU in bytes
 * @param xran_max_prb upper bound used as the initial "min start" sentinel
 */
4206 int32_t xran_init_PrbMap_by_symbol_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu, uint32_t xran_max_prb)
4208 int32_t i = 0, j = 0, nPrbElm = 0;
4209 int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
4210 struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
4211 struct xran_prb_elm prbMapTemp[XRAN_NUM_OF_SYMBOL_PER_SLOT];
4212 int32_t nRBStart_tmp, nRBremain, nStartSymb, nEndSymb, nRBStart, nRBEnd, nRBSize;
4213 // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
4214 // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
4215 int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
4216 int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
4218 nmaxRB--; //for some reason when mtu is 9600, only 195 RB can be sent, not 196
4221 memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
/* Pass 1: per-symbol union of RB ranges across all covering elements. */
4222 for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
4224 p_prb_elm_dst = &prbMapTemp[i];
/* Sentinel: start at xran_max_prb so any real element lowers it. */
4226 nRBStart = xran_max_prb;
4229 for(j = 0; j < p_PrbMapIn->nPrbElm; j++)
4231 p_prb_elm_src = &(p_PrbMapIn->prbMap[j]);
4232 nStartSymb = p_prb_elm_src->nStartSymb;
4233 nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
4235 if((i >= nStartSymb) && (i < nEndSymb))
4237 if(nRBStart > p_prb_elm_src->nRBStart)
4239 nRBStart = p_prb_elm_src->nRBStart;
4241 if(nRBEnd < (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize))
4243 nRBEnd = (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize);
/* Non-range attributes are taken from the last element covering symbol i. */
4246 p_prb_elm_dst->nBeamIndex = p_prb_elm_src->nBeamIndex;
4247 p_prb_elm_dst->bf_weight_update = p_prb_elm_src->bf_weight_update;
4248 p_prb_elm_dst->compMethod = p_prb_elm_src->compMethod;
4249 p_prb_elm_dst->iqWidth = p_prb_elm_src->iqWidth;
4250 p_prb_elm_dst->ScaleFactor = p_prb_elm_src->ScaleFactor;
4251 p_prb_elm_dst->reMask = p_prb_elm_src->reMask;
4252 p_prb_elm_dst->BeamFormingType = p_prb_elm_src->BeamFormingType;
/* Symbol not covered by any element: record an empty (size 0) range. */
4256 if(nRBEnd < nRBStart)
4258 p_prb_elm_dst->nRBStart = 0;
4259 p_prb_elm_dst->nRBSize = 0;
4260 p_prb_elm_dst->nStartSymb = i;
4261 p_prb_elm_dst->numSymb = 1;
4265 p_prb_elm_dst->nRBStart = nRBStart;
4266 p_prb_elm_dst->nRBSize = nRBEnd - nRBStart;
4267 p_prb_elm_dst->nStartSymb = i;
4268 p_prb_elm_dst->numSymb = 1;
/* Pass 2: merge consecutive symbols sharing an identical RB range,
 * compacting prbMapTemp in place (nPrbElm counts merged elements). */
4272 for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
4274 if((prbMapTemp[i].nRBSize != 0))
4276 nRBStart = prbMapTemp[i].nRBStart;
4277 nRBSize = prbMapTemp[i].nRBSize;
4278 prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
4279 prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
4280 prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
4281 prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
4282 prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
4283 prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
4284 prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
4285 prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
4286 prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
4287 prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
/* Extend the current merged element over following identical symbols. */
4293 for(; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
4295 if((nRBStart == prbMapTemp[i].nRBStart) && (nRBSize == prbMapTemp[i].nRBSize))
4297 prbMapTemp[nPrbElm].numSymb++;
/* Range changed: start a new merged element from this symbol. */
4302 prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
4303 prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
4304 prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
4305 prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
4306 prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
4307 prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
4308 prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
4309 prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
4310 prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
4311 prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
4313 nRBStart = prbMapTemp[i].nRBStart;
4314 nRBSize = prbMapTemp[i].nRBSize;
/* Zero-size merged elements are bumped to 1 RB.
 * NOTE(review): presumably so a section is still signaled — confirm. */
4318 for(i = 0; i < nPrbElm; i++)
4320 if(prbMapTemp[i].nRBSize == 0)
4321 prbMapTemp[i].nRBSize = 1;
4324 if(prbMapTemp[nPrbElm].nRBSize != 0)
/* Pass 3: MTU fragmentation of the merged elements (same scheme as
 * xran_init_PrbMap_from_cfg). */
4330 for (i = 0;i < nPrbElm; i++)
4332 p_prb_elm_src = &prbMapTemp[i];
4333 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4334 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4335 if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
4337 p_prb_elm_dst->IsNewSect = 1;
4338 p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
4339 p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4340 p_prb_elm_dst->nSectId = i;
4345 nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
4346 nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
4347 p_prb_elm_dst->IsNewSect = 1;
4348 p_prb_elm_dst->UP_nRBSize = nmaxRB;
4349 p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
4350 p_prb_elm_dst->nSectId = i;
4352 while (nRBremain > 0)
4354 p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
4355 memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
4356 p_prb_elm_dst->IsNewSect = 0;
4357 p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
4358 p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
4359 nRBremain -= p_prb_elm_dst->UP_nRBSize;
4360 nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
4361 p_prb_elm_dst->nSectId = i;
4367 p_PrbMapOut->nPrbElm = j;
/**
 * @brief Thin wrapper: record one xRAN task span (taskid, start/stop tick
 * timestamps) into the MLog trace facility.
 */
4372 inline void MLogXRANTask(uint32_t taskid, uint64_t ticksstart, uint64_t ticksstop)
4376 MLogTask(taskid, ticksstart, ticksstop);
4381 inline uint64_t MLogXRANTick(void)