1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN main functionality module
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
49 #include <rte_version.h>
51 #if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */
52 #include <rte_ecpri.h>
54 #include "xran_fh_o_du.h"
55 #include "xran_main.h"
58 #include "xran_mem_mgr.h"
59 #include "xran_tx_proc.h"
60 #include "xran_rx_proc.h"
62 #include "xran_up_api.h"
63 #include "xran_cp_api.h"
64 #include "xran_sync_api.h"
65 #include "xran_lib_mlog_tasks_id.h"
66 #include "xran_timer.h"
67 #include "xran_common.h"
69 #include "xran_frame_struct.h"
70 #include "xran_printf.h"
71 #include "xran_app_frag.h"
72 #include "xran_cp_proc.h"
73 #include "xran_tx_proc.h"
74 #include "xran_rx_proc.h"
75 #include "xran_cb_proc.h"
76 #include "xran_ecpri_owd_measurements.h"
78 #include "xran_mlog_lnx.h"
80 static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {NULL};
82 uint64_t interval_us = 1000; //the TTI interval of the cell with maximum numerology
84 uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */
85 uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Symbol index in a slot [0:13] */
86 uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1]
87 where TTI is TTI interval in microseconds */
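/*
 * Illustrative note (added commentary): the three OTA counters above are tied
 * together by the TTI interval of the highest-numerology cell. For example, with
 * numerology 1 the TTI is 500 us, so within one second xran_lib_ota_tti counts
 * 0..1999, xran_lib_ota_sym counts 0..13 inside each slot, and
 * xran_lib_ota_sym_idx counts 0..(14 * 2000 - 1) = 0..27999.
 */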
89 uint16_t xran_SFN_at_Sec_Start = 0; /**< SFN at current second start */
90 uint16_t xran_max_frame = 1023;   /**< value of max frame used; expected to be 99 (old compatibility mode) or 1023 as per section 9.7.2 System Frame Number Calculation */
92 static uint64_t xran_total_tick = 0, xran_used_tick = 0;
93 static uint32_t xran_num_cores_used = 0;
94 static uint32_t xran_core_used[64] = {0};
95 static int32_t first_call = 0;
97 struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void);
98 int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc);
100 void tti_ota_cb(struct rte_timer *tim, void *arg);
101 void tti_to_phy_cb(struct rte_timer *tim, void *arg);
103 int32_t xran_pkt_gen_process_ring(struct rte_ring *r);
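/*
 * Added commentary: xran_updateSfnSecStart() below converts the current UNIX
 * second into the SFN at the start of that second and also snapshots the
 * per-second TX/RX byte counters. Worked example (assuming
 * NUM_OF_FRAMES_PER_SECOND == 100, i.e. 10 ms radio frames, and
 * xran_max_frame == 1023): a GPS second of 1000 gives nFrames = 100000 and
 * sfn = 100000 % 1024 = 672.
 */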
106 xran_updateSfnSecStart(void)
108 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
109 struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
110 int32_t xran_ports = p_xran_dev_ctx->fh_init.xran_ports;
112 uint64_t currentSecond = timing_get_current_second();
113 // Assume always positive
114 uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
115 uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;
116 uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
117 xran_SFN_at_Sec_Start = sfn;
119 for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){
120 pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter;
121 pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter;
122 pCnt->tx_bytes_counter = 0;
123 pCnt->rx_bytes_counter = 0;
125 pCnt = &p_xran_dev_ctx->fh_counters;
129 static inline int32_t
130 xran_getSlotIdxSecond(uint32_t interval)
132 int32_t frameIdxSecond = xran_getSfnSecStart();
133 int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval);
134 return slotIndxSecond;
138 xran_get_if_state(void)
140 return xran_if_current_state;
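/*
 * Added commentary on xran_is_prach_slot(): for FR1 numerologies (mu < 2) the
 * lookup below is indexed by subframe, per 38.211 tables 6.3.3.2-2/3, while for
 * mu == 3 (FR2) it is indexed by the corresponding 60 kHz slot, which is why the
 * slot index is halved (slotidx >> 1). The frame-level periodicity check
 * (frame_id % x == y[0]) is applied separately by the caller, see tx_cp_ul_cb().
 */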
143 int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id)
145 int32_t is_prach_slot = 0;
146 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
147 if (p_xran_dev_ctx == NULL)
149 print_err("PortId %d not exist\n", PortId);
150 return is_prach_slot;
152 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
153 uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
155 if (nNumerology < 2){
156 //for FR1, in 38.211 tab 6.3.3.2-2&3 it is subframe index
157 if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){
158 if (pPrachCPConfig->nrofPrachInSlot == 0){
162 else if (pPrachCPConfig->nrofPrachInSlot == 2)
165 if (nNumerology == 0)
167 else if (slot_id == 1)
171 } else if (nNumerology == 3){
172 //for FR2, 38.211 tab 6.3.3.4 it is slot index of 60kHz slot
174 slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id;
175 if (pPrachCPConfig->nrofPrachInSlot == 2){
176 if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)
179 if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){
184 print_err("Numerology %d not supported", nNumerology);
185 return is_prach_slot;
189 xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
191 struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);
194 p_srs->symbMask = pConf->srs_conf.symbMask;
195 p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset;
196 print_dbg("SRS sym %d\n", p_srs->symbMask );
197 print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);
199 return (XRAN_STATUS_SUCCESS);
203 xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
205 /* update RACH for LTE */
206 return xran_init_prach(pConf, p_xran_dev_ctx);
210 xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
214 struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);
215 const xRANPrachConfigTableStruct *pxRANPrachConfigTable;
216 uint8_t nNumerology = pConf->frame_conf.nNumerology;
217 uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
218 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
221 pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
222 else if (pConf->frame_conf.nFrameDuplexType == 1)
223 pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];
225 pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];
227 uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
228 const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];
229 memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));
231 printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);
233 pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC; // 3, PRACH preamble format A1~3, B1~4, C0, C2
234 pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
235 pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
236 pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
237 pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
238 pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
239 pPrachCPConfig->x = pxRANPrachConfigTable->x;
240 pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;
241 pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
242 pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
243 if (preambleFmrt >= FORMAT_A1)
245 pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
246 pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
250 pPrachCPConfig->numSymbol = 1;
251 pPrachCPConfig->occassionsInPrachSlot = 1;
255 printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);
256 pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
257 for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
259 slotNr = pxRANPrachConfigTable->slotNr[i];
261 pPrachCPConfig->isPRACHslot[slotNr] = 1;
263 printf(" %u ..", slotNr);
267 for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){
268 p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;
269 p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
271 if(pConf->log_level){
272 printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);
275 pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx);
276 print_dbg("PRACH eAxC_offset %d\n", pPrachCPConfig->eAxC_offset);
278 /* Save some configs for app */
279 pPRACHConfig->startSymId = pPrachCPConfig->startSymId;
280 pPRACHConfig->lastSymId = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
281 pPRACHConfig->startPrbc = pPrachCPConfig->startPrbc;
282 pPRACHConfig->numPrbc = pPrachCPConfig->numPrbc;
283 pPRACHConfig->timeOffset = pPrachCPConfig->timeOffset;
284 pPRACHConfig->freqOffset = pPrachCPConfig->freqOffset;
285 pPRACHConfig->eAxC_offset = pPrachCPConfig->eAxC_offset;
287 return (XRAN_STATUS_SUCCESS);
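/*
 * Worked example (added commentary): with a PRACH configuration whose table entry
 * gives x = 1 and y[0] = 0, every radio frame contains PRACH occasions, and the
 * slots flagged in isPRACHslot[] are the candidates checked by xran_is_prach_slot().
 * numPrbc is 12 for short preamble formats (>= FORMAT_A1) and 70 for long formats,
 * matching the branch above.
 */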
291 xran_slotid_convert(uint16_t slot_id, uint16_t dir) //dir = 0, from PHY slotid to xran spec slotid as defined in 5.3.2, dir=1, from xran slotid to phy slotid
295 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
296 uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
304 return (slot_id << (2-mu));
308 return (slot_id << (3-mu));
315 return (slot_id >> (2-mu));
319 return (slot_id >> (3-mu));
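/*
 * Added commentary: the conversion above shifts between PHY slot numbering and the
 * xran-spec (5.3.2) slot numbering. Illustrative example under the first branch
 * shown above: with mu = 1 and dir = 0 a PHY slot id of 3 becomes 3 << (2 - 1) = 6;
 * dir = 1 applies the inverse right shift. Which shift (2-mu or 3-mu) is taken
 * depends on conditions elided from this listing.
 */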
326 sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)
328 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
329 long t1 = MLogTick(), t2;
332 if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){
334 tti_ota_cb(NULL, (void*)p_xran_dev_ctx);
335 *used_tick += get_ticks_diff(xran_tick(), t3);
339 if (xran_process_tx_sym(p_xran_dev_ctx))
341 *used_tick += get_ticks_diff(xran_tick(), t3);
344 /* check if there is a callback to do something else on this symbol */
345 struct cb_elem_entry *cb_elm;
346 LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){
348 cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx);
349 p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx);
354 MLogTask(PID_SYM_OTA_CB, t1, t2);
358 xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx)
360 struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
361 uint32_t tim_lcore = eth_ctx->io_cfg.timing_core; /* default to timing core */
364 if(eth_ctx->num_workers == 0) { /* no workers */
365 tim_lcore = eth_ctx->io_cfg.timing_core;
366 } else if (eth_ctx->num_workers == 1) { /* one worker */
369 case XRAN_JOB_TYPE_OTA_CB:
370 tim_lcore = eth_ctx->io_cfg.timing_core;
372 case XRAN_JOB_TYPE_CP_DL:
373 case XRAN_JOB_TYPE_CP_UL:
374 case XRAN_JOB_TYPE_DEADLINE:
375 case XRAN_JOB_TYPE_SYM_CB:
376 tim_lcore = eth_ctx->worker_core[0];
379 print_err("incorrect job type id %d\n", job_type_id);
380 tim_lcore = eth_ctx->io_cfg.timing_core;
383 } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) {
386 case XRAN_JOB_TYPE_OTA_CB:
387 tim_lcore = eth_ctx->worker_core[0];
389 case XRAN_JOB_TYPE_CP_DL:
390 tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]];
392 case XRAN_JOB_TYPE_CP_UL:
393 tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]];
395 case XRAN_JOB_TYPE_DEADLINE:
396 case XRAN_JOB_TYPE_SYM_CB:
397 tim_lcore = eth_ctx->worker_core[0];
400 print_err("incorrect job type id %d\n", job_type_id);
401 tim_lcore = eth_ctx->io_cfg.timing_core;
405 print_err("incorrect eth_ctx->num_workers id %d\n", eth_ctx->num_workers);
406 tim_lcore = eth_ctx->io_cfg.timing_core;
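/*
 * Added commentary: xran_schedule_to_worker() maps a job type to an lcore.
 * Summary of the logic above:
 *   - no workers         : everything runs on the timing core
 *   - one worker         : OTA callback stays on the timing core; CP DL/UL,
 *                          deadline and symbol callbacks go to worker_core[0]
 *   - two to six workers : OTA callback moves to worker_core[0]; CP DL and CP UL
 *                          are routed via job2wrk_id[], deadline/symbol callbacks
 *                          stay on worker_core[0]
 * Unknown job types fall back to the timing core.
 */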
414 tti_ota_cb(struct rte_timer *tim, void *arg)
416 uint32_t frame_id = 0;
417 uint32_t subframe_id = 0;
418 uint32_t slot_id = 0;
419 uint32_t next_tti = 0;
421 uint32_t mlogVar[10];
422 uint32_t mlogVarCnt = 0;
423 uint64_t t1 = MLogTick();
425 uint32_t reg_tti = 0;
426 uint32_t reg_sfn = 0;
429 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
430 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx;
431 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
432 uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
434 unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx);
436 MLogTask(PID_TTI_TIMER, t1, MLogTick());
438 if(p_xran_dev_ctx->xran_port_id == 0){
440 if(xran_lib_ota_tti[0] == 0)
441 reg_tti = xran_fs_get_max_slot(PortId) - 1;
443 reg_tti = xran_lib_ota_tti[0] -1;
445 MLogIncrementCounter();
446 reg_sfn = XranGetFrameNum(reg_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us))*10 + XranGetSubFrameNum(reg_tti,SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);
447 /* subframe and slot */
448 MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us)));
452 slot_id = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local));
453 subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
454 frame_id = XranGetFrameNum(xran_lib_ota_tti[PortId],xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
456 pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId];
458 mlogVar[mlogVarCnt++] = 0x11111111;
459 mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId];
460 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
461 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId] / 14;
462 mlogVar[mlogVarCnt++] = frame_id;
463 mlogVar[mlogVarCnt++] = subframe_id;
464 mlogVar[mlogVarCnt++] = slot_id;
465 mlogVar[mlogVarCnt++] = 0;
466 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
469 if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU)
470 next_tti = xran_lib_ota_tti[PortId] + 1;
472 next_tti = xran_lib_ota_tti[PortId];
475 if(next_tti>= xran_fs_get_max_slot(PortId)){
476 print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
480 slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
481 subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
482 frame_id = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
484 print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
486 if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){
487 pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti;
489 pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1)^1].tti_to_process;
492 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) {
493 p_xran_dev_ctx->phy_tti_cb_done = 0;
494 xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore);
496 /* slot index is advanced to the next slot at the beginning of the current OTA slot */
497 xran_lib_ota_tti[PortId]++;
498 if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) {
499 print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id);
500 xran_lib_ota_tti[PortId] = 0;
502 MLogTask(PID_TTI_CB, t1, MLogTick());
506 tx_cp_dl_cb(struct rte_timer *tim, void *arg)
508 long t1 = MLogTick();
510 uint32_t slot_id, subframe_id, frame_id;
513 uint8_t ant_id, num_eAxc, num_CCPorts;
516 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
519 print_err("Null xRAN context!!\n");
522 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
523 uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
524 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
525 pHandle = p_xran_dev_ctx;
527 num_eAxc = xran_get_num_eAxc(pHandle);
528 num_CCPorts = xran_get_num_cc(pHandle);
530 if(first_call && p_xran_dev_ctx->enableCP) {
532 tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
533 buf_id = tti % XRAN_N_FE_BUF_LEN;
535 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
536 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
537 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
539 /* Wrap around to next second */
540 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
543 ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval_us_local)) % XRAN_MAX_SECTIONDB_CTX;
545 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
546 for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
547 for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {
548 /* start new section information list */
549 xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
550 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
551 if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
552 if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
553 num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
554 (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
555 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
557 print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
560 print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers] \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
562 } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
563 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
564 } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
565 MLogTask(PID_CP_DL_CB, t1, MLogTick());
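/*
 * Added commentary: tx_cp_dl_cb() generates DL C-Plane sections for the TTI taken
 * from the ping-pong timer context filled in tti_ota_cb() (tti_to_process). The
 * frame number is wrapped with the 0x3ff mask when crossing a second boundary, and
 * ctx_id selects one of XRAN_MAX_SECTIONDB_CTX section database contexts used to
 * store the generated section information for later U-Plane transmission.
 */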
570 rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)
572 long t1 = MLogTick();
573 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
574 xran_status_t status;
575 /* half of RX for current TTI as measured against current OTA time */
579 uint32_t nSubframeIdx;
582 struct xran_timer_ctx* p_timer_ctx = NULL;
583 /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
584 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
585 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
587 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
590 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
591 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
592 p_xran_dev_ctx->timer_put = 0;
594 rx_tti = p_timer_ctx->tti_to_process;
596 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
597 if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
598 if(p_xran_dev_ctx->pCallback[cc_id]) {
599 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
601 //pTag->cellId = cc_id;
602 pTag->slotiId = rx_tti;
603 pTag->symbol = 0; /* 0 indicates only the first half of the slot (first 7 symbols) is complete */
604 status = XRAN_STATUS_SUCCESS;
606 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
610 p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
614 if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
615 if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
616 p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
618 p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
622 MLogTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogTick());
626 rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)
628 long t1 = MLogTick();
629 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
630 xran_status_t status = 0;
631 int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
634 uint32_t nSubframeIdx;
637 struct xran_timer_ctx* p_timer_ctx = NULL;
639 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
642 /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
643 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
644 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
646 p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
648 if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
649 p_xran_dev_ctx->timer_put = 0;
651 rx_tti = p_timer_ctx->tti_to_process;
654 rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1);
656 rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
659 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
660 if(p_xran_dev_ctx->pCallback[cc_id]){
661 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
663 //pTag->cellId = cc_id;
664 pTag->slotiId = rx_tti;
665 pTag->symbol = 7; /* last 7 sym means full slot of Symb */
666 status = XRAN_STATUS_SUCCESS;
667 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
671 if(p_xran_dev_ctx->pPrachCallback[cc_id]){
672 struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
674 //pTag->cellId = cc_id;
675 pTag->slotiId = rx_tti;
676 pTag->symbol = 7; /* last 7 sym means full slot of Symb */
677 p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
681 if(p_xran_dev_ctx->pSrsCallback[cc_id]){
682 struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
684 //pTag->cellId = cc_id;
685 pTag->slotiId = rx_tti;
686 pTag->symbol = 7; /* last 7 sym means full slot of Symb */
687 p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
692 /* user call backs if any */
693 if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){
694 if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){
695 p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]);
697 p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--;
701 MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick());
705 rx_ul_user_sym_cb(struct rte_timer *tim, void *arg)
707 long t1 = MLogTick();
708 struct xran_device_ctx * p_dev_ctx = NULL;
709 struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg;
710 xran_status_t status = 0;
711 int32_t rx_tti = 0; //(int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
714 uint32_t nSubframeIdx;
717 uint32_t interval, ota_sym_idx = 0;
718 uint8_t nNumerology = 0;
719 struct xran_timer_ctx* p_timer_ctx = NULL;
721 if(p_sym_cb_ctx->p_dev)
722 p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev;
724 rte_panic("p_sym_cb_ctx->p_dev == NULL");
726 if(p_dev_ctx->xran2phy_mem_ready == 0)
728 nNumerology = xran_get_conf_numerology(p_dev_ctx);
729 interval = p_dev_ctx->interval_us_local;
731 p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX];
732 if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX)
733 p_sym_cb_ctx->user_timer_get = 0;
735 rx_tti = p_timer_ctx->tti_to_process;
737 if( p_sym_cb_ctx->sym_diff > 0)
738 /* + advance TX window: at OTA time we indicate an event in the future */
739 ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology));
740 else if (p_sym_cb_ctx->sym_diff < 0) {
741 /* - delay RX window: at OTA time we indicate an event in the past */
742 if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) {
743 ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff;
745 ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology);
747 } else /* 0 - OTA exact time */
748 ota_sym_idx = p_timer_ctx->ota_sym_idx;
750 rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
752 if(p_sym_cb_ctx->symCbTimeInfo) {
753 struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo;
754 p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id;
755 p_sense_time->nSymIdx = p_sym_cb_ctx->symb_num_req;
756 p_sense_time->tti_counter = rx_tti;
757 p_sense_time->nSlotIdx = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval));
758 p_sense_time->nSubframeIdx = (uint32_t)XranGetSubFrameNum(rx_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
759 p_sense_time->nFrameIdx = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
760 p_sense_time->nSecond = p_timer_ctx->current_second;
763 /* user call backs if any */
764 if(p_sym_cb_ctx->symCb){
765 p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo);
768 MLogTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogTick());
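/*
 * Worked example (added commentary): the wrap-around arithmetic above keeps
 * ota_sym_idx inside [0, xran_max_ota_sym_idx(mu)). Assuming
 * xran_max_ota_sym_idx(0) == 14 * 1000 = 14000 (numerology 0, 1 ms TTI), an
 * ota_sym_idx of 5 with sym_diff = -10 yields (14000 + 5 - 10) % 14000 = 13995,
 * i.e. an event 10 symbols in the past relative to OTA time.
 */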
772 tx_cp_ul_cb(struct rte_timer *tim, void *arg)
774 long t1 = MLogTick();
777 uint32_t slot_id, subframe_id, frame_id;
779 int ant_id, prach_port_id;
782 uint8_t num_eAxc, num_CCPorts;
788 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
791 print_err("Null xRAN context!!\n");
794 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
795 struct xran_timer_ctx *pTCtx = &p_xran_dev_ctx->timer_ctx[0];
796 uint32_t interval = p_xran_dev_ctx->interval_us_local;
797 uint8_t PortId = p_xran_dev_ctx->xran_port_id;
799 tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
800 buf_id = tti % XRAN_N_FE_BUF_LEN;
801 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
802 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
803 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
805 //Wrap around to next second
806 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
808 ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval)) % XRAN_MAX_SECTIONDB_CTX;
810 pHandle = p_xran_dev_ctx;
811 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
812 num_eAxc = xran_get_num_eAxc(pHandle);
814 num_eAxc = xran_get_num_eAxcUl(pHandle);
815 num_CCPorts = xran_get_num_cc(pHandle);
817 if(first_call && p_xran_dev_ctx->enableCP) {
819 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
821 for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
822 for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
823 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1
824 /* || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) == 1*/ ) {
825 /* start new section information list */
826 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
827 if(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers){
828 if(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
829 num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
830 (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
831 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
838 if(p_xran_dev_ctx->enablePrach) {
839 uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
840 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0]) && (is_prach_slot==1)) { //is prach slot
841 for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
842 for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
843 for (occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++) {
844 struct xran_cp_gen_params params;
845 struct xran_section_gen_info sect_geninfo[8];
846 struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
847 prach_port_id = ant_id + num_eAxc;
848 /* start new section information list */
849 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, ctx_id);
851 beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id);
852 ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
853 frame_id, subframe_id, slot_id,
854 beam_id, cc_id, prach_port_id, occasionid,
855 xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));
856 if (ret == XRAN_STATUS_SUCCESS)
857 send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
858 cc_id, prach_port_id, xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));
864 } /* if(p_xran_dev_ctx->enableCP) */
866 MLogTask(PID_CP_UL_CB, t1, MLogTick());
870 tti_to_phy_cb(struct rte_timer *tim, void *arg)
872 long t1 = MLogTick();
873 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
874 uint32_t interval = p_xran_dev_ctx->interval_us_local;
876 p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */
878 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
879 if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){
880 p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);
882 p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;
886 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
887 int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT);
888 uint32_t slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
889 uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
890 uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
891 if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME(interval)-1)) { //(tti == xran_fs_get_max_slot()-1)
897 MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
901 xran_timing_source_thread(void *args)
905 int32_t do_reset = 0;
910 uint32_t xran_port_id = 0;
911 static int owdm_init_done = 0;
913 struct sched_param sched_param;
914 struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args ;
915 uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;
916 struct cb_elem_entry * cb_elm = NULL;
918 struct xran_device_ctx * p_dev_ctx_run = NULL;
919 /* ToS = Top of Second start +- 1.5us */
921 char thread_name[32];
924 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
925 memset(&sched_param, 0, sizeof(struct sched_param));
926 /* set timing thread affinity to the configured timing core */
927 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
929 CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);
931 if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)))
933 printf("pthread_setaffinity_np failed: core = %d, result = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
935 if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
937 printf("priority is not changed: core = %d, result = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
940 snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id());
941 if ((res = pthread_setname_np(pthread_self(), thread_name))) {
942 printf("[core %d] pthread_setname_np = %d\n",rte_lcore_id(), res);
945 printf("TTI interval %ld [us]\n", interval_us);
947 if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) {
948 if ((res = xran_timing_create_cbs(args)) < 0){
954 timespec_get(&ts, TIME_UTC);
955 }while (ts.tv_nsec >1500);
957 struct tm * ptm = gmtime(&ts.tv_sec);
959 strftime(buff, sizeof buff, "%D %T", ptm);
960 printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n",
961 (p_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
965 timespec_get(&ts, TIME_UTC);
966 }while (ts.tv_nsec == 0);
968 p_dev_ctx->timing_source_thread_running = 1;
971 /* Once the one-way delay measurement has completed, create the timing cbs based on the measurement results */
972 if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable)&&(!owdm_init_done)&&unlikely(XRAN_RUNNING == xran_if_current_state)) {
973 // Adjust Windows based on Delay Measurement results
974 xran_adjust_timing_parameters(p_dev_ctx);
975 if ((res = xran_timing_create_cbs(args)) < 0){
978 printf("TTI interval %ld [us]\n", interval_us);
985 /* Update Usage Stats */
987 xran_used_tick += tUsed;
990 xran_total_tick += get_ticks_diff(tWake, tWakePrev);
995 delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);
996 if (XRAN_STOPPED == xran_if_current_state)
999 if (likely(XRAN_RUNNING == xran_if_current_state)) {
1000 for(xran_port_id = 0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) {
1001 p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id);
1003 if(p_dev_ctx_run->xran_port_id == xran_port_id) {
1004 if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id])
1006 sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed);
1007 xran_lib_ota_sym[xran_port_id]++;
1008 if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT)
1009 xran_lib_ota_sym[xran_port_id]=0;
1013 rte_panic("p_dev_ctx_run == xran_port_id");
1020 xran_timing_destroy_cbs(args);
1021 printf("Closing timing source thread...\n");
1025 /* Handle ecpri format. */
1026 #define MBUFS_CNT 16
1028 int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num)
1030 struct rte_mbuf* pkt, * pkt0;
1032 struct rte_ether_hdr* eth_hdr;
1033 struct xran_ecpri_hdr* ecpri_hdr;
1034 union xran_ecpri_cmn_hdr* ecpri_cmn;
1036 int32_t ret = MBUF_FREE;
1037 uint32_t ret_data[MBUFS_CNT] = { MBUFS_CNT * MBUF_FREE };
1038 struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id);
1039 uint16_t num_data = 0, num_control = 0, num_meas = 0;
1040 struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT];
1041 static uint32_t owdm_rx_first_pass = 1;
1043 if (p_dev_ctx == NULL)
1046 for (i = 0; i < num; i++)
1050 // rte_prefetch0(rte_pktmbuf_mtod(pkt, void*));
1052 rte_pktmbuf_adj(pkt, sizeof(*eth_hdr));
1053 ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);
1055 p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
1058 switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type)
1061 pkt_data[num_data++] = pkt;
1064 case ECPRI_RT_CONTROL_DATA:
1065 pkt_control[num_control++] = pkt;
1067 case ECPRI_DELAY_MEASUREMENT:
1068 if (owdm_rx_first_pass != 0)
1070 /* Initialize and verify that Payload Length is in range */
1071 xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx);
1072 owdm_rx_first_pass = 0;
1075 pkt_meas[num_meas++] = pkt;
1078 if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
1079 print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
1085 if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* w/a for Cat A issue */
1087 for (i = 0; i < MBUFS_CNT; i++)
1089 ret_data[i] = MBUF_FREE; /* default: free the mbuf unless batch processing overrides */
1092 if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
1094 if (p_dev_ctx->xran2phy_mem_ready != 0)
1095 ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data );
1096 for (i = 0; i < MBUFS_CNT; i++)
1098 if (ret_data[i] == MBUF_FREE)
1099 rte_pktmbuf_free(pkt_data[i]);
1104 for (i = 0; i < MBUFS_CNT; i++)
1106 if (ret_data[i] == MBUF_FREE)
1107 rte_pktmbuf_free(pkt_data[i]);
1109 print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
1114 for (i = 0; i < num_data; i++)
1116 ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid);
1117 if (ret == MBUF_FREE)
1118 rte_pktmbuf_free(pkt_data[i]);
1121 for (i = 0; i < num_control; i++)
1124 if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
1126 ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
1127 p_dev_ctx->fh_counters.rx_counter++;
1128 if (ret == MBUF_FREE)
1129 rte_pktmbuf_free(pkt_control[i]);
1133 print_err("O-DU recevied C-Plane message!");
1135 MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick());
1138 for (i = 0; i < num_meas; i++)
1141 ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id);
1142 // printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64"\n", xport_id,(int64_t*)p_dev_ctx) ;
1143 if (ret == MBUF_FREE)
1144 rte_pktmbuf_free(pkt_meas[i]);
1145 MLogTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogTick());
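/*
 * Added commentary: handle_ecpri_ethertype() is the receive entry point registered
 * for ETHER_TYPE_ECPRI in xran_init() below. It strips the Ethernet header and
 * dispatches on ecpri_mesg_type: IQ data goes to process_mbuf()/process_mbuf_batch(),
 * real-time control (C-Plane) is handled by process_cplane() on the O-RU side only,
 * and delay-measurement messages go to process_delay_meas().
 */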
1153 xran_packet_and_dpdk_timer_thread(void *args)
1155 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1157 uint64_t prev_tsc = 0;
1158 uint64_t cur_tsc = rte_rdtsc();
1159 uint64_t diff_tsc = cur_tsc - prev_tsc;
1161 struct sched_param sched_param;
1163 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
1165 memset(&sched_param, 0, sizeof(struct sched_param));
1166 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1168 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
1170 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
1175 cur_tsc = rte_rdtsc();
1176 diff_tsc = cur_tsc - prev_tsc;
1177 if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
1182 if (XRAN_STOPPED == xran_if_current_state)
1186 printf("Closing pkts timer thread...\n");
1190 void xran_initialize_ecpri_owd_meas_cmn( struct xran_io_cfg *ptr)
1192 // ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0
1193 // ptr->eowd_cmn.filterType = 0; // 0 Simple average based on number of measurements
1194 // Set default values if the Timeout and numberOfSamples are not set
1195 if ( ptr->eowd_cmn[ptr->id].responseTo == 0)
1196 ptr->eowd_cmn[ptr->id].responseTo = 10E6; // 10 ms timeout expressed in ns
1197 if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0)
1198 ptr->eowd_cmn[ptr->id].numberOfSamples = 8; // Number of samples to be averaged
1200 void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr )
1202 /* This function initializes one-way delay measurements on a per-port basis;
1203 most variables default to zero */
1204 ptr->eowd_port[ptr->id][i].portid = (uint8_t)i;
1208 xran_init(int argc, char *argv[],
1209 struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
1211 int32_t ret = XRAN_STATUS_SUCCESS;
1214 int32_t o_xu_id = 0;
1216 struct xran_io_cfg *p_io_cfg = NULL;
1217 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1219 int32_t lcore_id = 0;
1222 const char *version = rte_version();
1224 if (version == NULL)
1225 rte_panic("version == NULL");
1227 printf("'%s'\n", version);
1229 if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) {
1230 ret = XRAN_STATUS_INVALID_PARAM;
1231 print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret);
1235 p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
1237 if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) {
1238 print_err("context allocation error [%d]\n", ret);
1242 for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1243 p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
1244 memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
1245 p_xran_dev_ctx->xran_port_id = o_xu_id;
1248 p_xran_dev_ctx->fh_init = *p_xran_fh_init;
1249 printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
1251 memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
1252 /* To make sure to set default functions */
1253 p_xran_dev_ctx->send_upmbuf2ring = NULL;
1254 p_xran_dev_ctx->send_cpmbuf2ring = NULL;
1255 // Ecpri initialization for One Way delay measurements common variables to default values
1256 xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
1259 /* default values if not set */
1260 if(p_io_cfg->nEthLinePerPort == 0)
1261 p_io_cfg->nEthLinePerPort = 1;
1263 if(p_io_cfg->nEthLineSpeed == 0)
1264 p_io_cfg->nEthLineSpeed = 25;
1266 /** at least 1 RX Q */
1267 if(p_io_cfg->num_rxq == 0)
1268 p_io_cfg->num_rxq = 1;
1270 if (p_io_cfg->id == 1) {
1272 p_io_cfg->num_rxq = 1;
1275 #if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
1276 if (p_io_cfg->num_rxq > 1){
1277 p_io_cfg->num_rxq = 1;
1278 printf("%s does support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
1281 printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
1282 printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
1283 printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
1285 if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane) != p_io_cfg->num_vfs) {
1286 print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. [provided %d]\n",
1287 p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort,
1288 p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs);
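/* Example (added commentary): one O-xU port with one Ethernet line and
 * one_vf_cu_plane == 0 (separate C-Plane and U-Plane VFs) expects
 * 1 * 1 * (2 - 0) = 2 VFs; with one_vf_cu_plane == 1 a single VF carries
 * both planes, so only 1 VF is expected. */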
1291 xran_if_current_state = XRAN_INIT;
1292 xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
1293 if (p_io_cfg->id == 0)
1294 xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1297 (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1298 (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1299 p_xran_dev_ctx->fh_init.mtu);
1301 xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
1304 (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
1305 (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
1306 p_xran_dev_ctx->fh_init.mtu);
1308 for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
1309 p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
1311 for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ )
1312 rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]);
1314 rte_timer_init(&p_xran_dev_ctx->sym_timer);
1315 for (i = 0; i< MAX_NUM_OF_DPDK_TIMERS; i++)
1316 rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]);
1318 p_xran_dev_ctx->direct_pool = socket_direct_pool;
1319 p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
1322 for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){
1323 LIST_INIT (&p_xran_dev_ctx->sym_cb_list_head[j]);
1328 for (i=0; i<XRAN_PORTS_NUM; i++){
1329 for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++){
1330 xran_fs_clear_slot_type(i,nCellIdx);
1334 *pXranLayerHandle = xran_dev_get_ctx();
1337 // The eCPRI initialization loop needs to be done per PF and VF (outer loop over PFs, inner loop over VFs)
1338 for (i=0; i< p_io_cfg->num_vfs; i++)
1340 /* Initialize ecpri one-way delay measurement info on a per vf port basis */
1341 xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg);
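/*
 * Added commentary: xran_init() order of operations as shown above: validate
 * xran_ports, create per-port device contexts, copy fh_init and the eAxC ID
 * configuration, apply io_cfg defaults (Ethernet lines, line speed, RX queues),
 * register the eCPRI ethertype handler, initialize DPDK I/O with the O-DU/O-RU
 * MAC addresses, initialize the per-port rte timers and symbol callback lists,
 * clear slot types, and finally set up per-VF one-way delay measurement state.
 */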
1348 xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances,
1349 xran_cc_handle_t * pSectorInstanceHandles)
1351 xran_status_t nStatus = XRAN_STATUS_FAIL;
1352 struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
1353 XranSectorHandleInfo *pCcHandle = NULL;
1358 /* Check for the Valid Parameters */
1359 CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
1361 if (!nNumInstances) {
1362 print_dbg("Instance is not assigned for this function !!! \n");
1363 return XRAN_STATUS_INVALID_PARAM;
1366 for (i = 0; i < nNumInstances; i++) {
1368 /* Allocate Memory for CC handles */
1369 pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);
1371 if(pCcHandle == NULL)
1372 return XRAN_STATUS_RESOURCE;
1374 memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));
1376 pCcHandle->nIndex = i;
1377 pCcHandle->nXranPort = pDev->xran_port_id;
1379 printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);
1380 pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;
1382 printf("Handle: %p Instance: %p\n",
1383 &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);
1386 return XRAN_STATUS_SUCCESS;
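/*
 * Usage sketch (added commentary, illustrative only): after xran_init() returns the
 * device handle through pXranLayerHandle, the application typically requests one
 * handle per configured CC, e.g.
 *
 *   xran_cc_handle_t cc_handles[XRAN_MAX_SECTOR_NR];
 *   if (xran_sector_get_instances(0, xranDevHandle, nCC, cc_handles) == XRAN_STATUS_SUCCESS) {
 *       // pass cc_handles[cc] as pHandle to xran_5g_fronthault_config(),
 *       // xran_5g_prach_req() and xran_5g_srs_req() below
 *   }
 *
 * xranDevHandle and nCC are placeholders for the handle obtained from xran_init()
 * and the number of component carriers configured by the application.
 */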
1391 xran_5g_fronthault_config (void * pHandle,
1392 struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1393 struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1394 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1395 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1396 xran_transport_callback_fn pCallback,
1400 XranSectorHandleInfo* pXranCc = NULL;
1401 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1403 if(NULL == pHandle) {
1404 printf("Handle is NULL!\n");
1405 return XRAN_STATUS_FAIL;
1408 pXranCc = (XranSectorHandleInfo*) pHandle;
1409 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1410 if (p_xran_dev_ctx == NULL) {
1411 printf ("p_xran_dev_ctx is NULL\n");
1412 return XRAN_STATUS_FAIL;
1415 i = pXranCc->nIndex;
1417 for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
1418 for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
1421 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
1422 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1423 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1424 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1425 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1426 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
1428 if(pSrcBuffer[z][j])
1429 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j];
1431 memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j]));
1435 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1436 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1437 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1438 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1439 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1440 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
1442 if(pSrcCpBuffer[z][j])
1443 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j];
1445 memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j]));
1448 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
1449 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1450 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1451 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1452 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1453 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
1455 if(pDstBuffer[z][j])
1456 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
1458 memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1462 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1463 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1464 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1465 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1466 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1467 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
1469 if(pDstCpBuffer[z][j])
1470 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
1472 memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
1477 p_xran_dev_ctx->pCallback[i] = pCallback;
1478 p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;
1479 print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
1480 p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]);
1482 p_xran_dev_ctx->xran2phy_mem_ready = 1;
1484 return XRAN_STATUS_SUCCESS;
1488 xran_5g_prach_req (void * pHandle,
1489 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1490 struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
1491 xran_transport_callback_fn pCallback,
1495 XranSectorHandleInfo* pXranCc = NULL;
1496 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1498 if(NULL == pHandle) {
1499 printf("Handle is NULL!\n");
1500 return XRAN_STATUS_FAIL;
1503 pXranCc = (XranSectorHandleInfo*) pHandle;
1504 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1505 if (p_xran_dev_ctx == NULL) {
1506 printf ("p_xran_dev_ctx is NULL\n");
1507 return XRAN_STATUS_FAIL;
1510 i = pXranCc->nIndex;
1512 for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
1513 for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
1514 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
1515 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1516 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1517 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1518 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number.
1519 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
1520 if(pDstBuffer[z][j])
1521 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
1523 memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1525 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0];
1526 if(pDstBufferDecomp[z][j])
1527 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j];
1532 p_xran_dev_ctx->pPrachCallback[i] = pCallback;
1533 p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
1535 print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
1536 p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]);
1538 return XRAN_STATUS_SUCCESS;
1542 xran_5g_srs_req (void * pHandle,
1543 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
1544 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
1545 xran_transport_callback_fn pCallback,
1549 XranSectorHandleInfo* pXranCc = NULL;
1550 struct xran_device_ctx * p_xran_dev_ctx = NULL;
1552 if(NULL == pHandle) {
1553 printf("Handle is NULL!\n");
1554 return XRAN_STATUS_FAIL;
1557 pXranCc = (XranSectorHandleInfo*) pHandle;
1558 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
1559 if (p_xran_dev_ctx == NULL) {
1560 printf ("p_xran_dev_ctx is NULL\n");
1561 return XRAN_STATUS_FAIL;
1564 i = pXranCc->nIndex;
1566 for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
1567 for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){
1568 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
1569 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1570 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1571 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1572 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number.
1573 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
1574 if(pDstBuffer[z][j])
1575 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
1577 memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
1580 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
1581 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
1582 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
1583 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
1584 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
1585 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z];
1587 if(pDstCpBuffer[z][j])
1588 p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
1590 memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
1595 p_xran_dev_ctx->pSrsCallback[i] = pCallback;
1596 p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
1598 print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
1599 p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]);
1601 return XRAN_STATUS_SUCCESS;
1605 xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear)
1609 *num_core_used = xran_num_cores_used;
1610 for (i = 0; i < xran_num_cores_used; i++)
1612 core_used[i] = xran_core_used[i];
1615 *total_time = xran_total_tick;
1616 *used_time = xran_used_tick;
1620 xran_total_tick = 0;
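/*
 * Usage sketch (added commentary, illustrative only): an application can poll
 * these counters to estimate timing/worker core load, e.g.
 *
 *   uint64_t total = 0, used = 0;
 *   uint32_t n_cores = 0, cores[64];
 *   xran_get_time_stats(&total, &used, &n_cores, cores, 1);   // read and clear
 *   double busy_pct = total ? (100.0 * used) / total : 0.0;   // percent busy
 */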
1628 xran_add_cp_hdr_offset(uint8_t *dst)
1630 dst += (RTE_PKTMBUF_HEADROOM +
1631 sizeof(struct xran_ecpri_hdr) +
1632 sizeof(struct xran_cp_radioapp_section1_header) +
1633 sizeof(struct xran_cp_radioapp_section1));
1635 dst = RTE_PTR_ALIGN_CEIL(dst, 64);
1641 xran_add_hdr_offset(uint8_t *dst, int16_t compMethod)
1643 dst+= (RTE_PKTMBUF_HEADROOM +
1644 sizeof (struct xran_ecpri_hdr) +
1645 sizeof (struct radio_app_common_hdr) +
1646 sizeof(struct data_section_hdr));
1647 if(compMethod != XRAN_COMPMETHOD_NONE)
1648 dst += sizeof (struct data_section_compression_hdr);
1649 dst = RTE_PTR_ALIGN_CEIL(dst, 64);
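/*
 * Added commentary: both offset helpers above advance the destination pointer past
 * the mbuf headroom plus the eCPRI, radio application and section headers that will
 * later be prepended (plus the compression header when compMethod is not
 * XRAN_COMPMETHOD_NONE), and then round the pointer up to a 64-byte boundary so
 * that the IQ payload starts cache-line aligned.
 */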
1655 xran_pkt_gen_process_ring(struct rte_ring *r)
1659 struct rte_mbuf *mbufs[16];
1663 struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
1664 const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
1665 RTE_DIM(mbufs), &remaining);
1671 for (i = 0; i < dequeued; ++i) {
1672 struct cp_up_tx_desc * p_tx_desc = (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i], struct cp_up_tx_desc *);
1673 retval = xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
1678 p_tx_desc->frame_id,
1679 p_tx_desc->subframe_id,
1682 (enum xran_comp_hdr_type)p_tx_desc->compType,
1683 (enum xran_pkt_dir) p_tx_desc->direction,
1684 p_tx_desc->xran_port_id,
1685 (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
1687 xran_pkt_gen_desc_free(p_tx_desc);
1688 if (XRAN_STOPPED == xran_if_current_state){
1689 MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
1694 if(p_io_cfg->io_sleep)
1695 nanosleep(&sleeptime,NULL);
1697 MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
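/*
 * Added commentary: each mbuf dequeued above carries a cp_up_tx_desc describing one
 * deferred DL U-Plane generation job; xran_process_tx_sym_cp_on_opt() generates the
 * packets for that slot/port and the descriptor is then released via
 * xran_pkt_gen_desc_free(). When io_sleep is set the worker nanosleeps between
 * bursts to reduce busy-polling.
 */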
1703 xran_dl_pkt_ring_processing_func(void* args)
1705 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1706 uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
1707 uint16_t current_port;
1711 for (current_port = 0; current_port < XRAN_PORTS_NUM; current_port++) {
1712 if( xran_port_mask & (1<<current_port)) {
1713 xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
1717 if (XRAN_STOPPED == xran_if_current_state)
1723 /** Function that services DPDK timers */
1725 xran_processing_timer_only_func(void* args)
1728 if (XRAN_STOPPED == xran_if_current_state)
1734 /** Function that performs parsing of RX packets on all ports and does TX and RX on the ETH device */
1736 xran_all_tasks(void* arg)
1739 ring_processing_func(arg);
1740 process_dpdk_io(arg);
1744 /** Function to perform TX and RX on the ETH device */
1746 xran_eth_trx_tasks(void* arg)
1748 process_dpdk_io(arg);
1752 /** Function to perform RX on the ETH device */
1754 xran_eth_rx_tasks(void* arg)
1756 process_dpdk_io_rx(arg);
1760 /** Function to process O-RAN FH packets per port */
1762 ring_processing_func_per_port(void* args)
1764 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1765 int16_t retPoll = 0;
1768 uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
1771 for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
1772 if (ctx->vf2xran_port[i] == port_id) {
1773 for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
1774 if (process_ring(ctx->rx_ring[i][qi], i, qi))
1780 if (XRAN_STOPPED == xran_if_current_state)
1786 /** Function generates the configuration of worker threads and creates them based on the deployment scenario (RU category, number of ports, available cores) and the platform used */
1788 xran_spawn_workers(void)
1790 uint64_t nWorkerCore = 1LL;
1791 uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
1793 uint32_t total_num_cores = 1; /*start with timing core */
1794 uint32_t worker_num_cores = 0;
1795 uint32_t icx_cpu = 0;
1796 int32_t core_map[2*sizeof(uint64_t)*8];
1797 uint32_t xran_port_mask = 0;
1799 struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
1800 struct xran_device_ctx *p_dev = NULL;
1801 struct xran_fh_init *fh_init = NULL;
1802 struct xran_fh_config *fh_cfg = NULL;
1803 struct xran_worker_th_ctx* pThCtx = NULL;
1805 p_dev = xran_dev_get_ctx_by_id(0);
1807 print_err("p_dev\n");
1808 return XRAN_STATUS_FAIL;
1811 fh_init = &p_dev->fh_init;
1812 if(fh_init == NULL) {
1813 print_err("fh_init\n");
1814 return XRAN_STATUS_FAIL;
1817 fh_cfg = &p_dev->fh_cfg;
1818 if(fh_cfg == NULL) {
1819 print_err("fh_cfg\n");
1820 return XRAN_STATUS_FAIL;
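    /* Build the list of candidate worker cores: io_cfg.pkt_proc_core is a
     * bitmask for cores 0..63 and io_cfg.pkt_proc_core_64_127 covers cores
     * 64..127; each set bit appends the corresponding core id to core_map[]
     * in ascending order. */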
1823 for (i = 0; i < coreNum && i < 64; i++) {
1824 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
1825 core_map[worker_num_cores++] = i;
1828 nWorkerCore = nWorkerCore << 1;
1832 for (i = 64; i < coreNum && i < 128; i++) {
1833 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
1834 core_map[worker_num_cores++] = i;
1837 nWorkerCore = nWorkerCore << 1;
1840 extern int _may_i_use_cpu_feature(unsigned __int64);
1841 icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
1843 printf("O-XU %d\n", eth_ctx->io_cfg.id);
1844 printf("HW %d\n", icx_cpu);
1845 printf("Num cores %d\n", total_num_cores);
1846 printf("Num ports %d\n", fh_init->xran_ports);
1847 printf("O-RU Cat %d\n", fh_cfg->ru_conf.xranCat);
1848 printf("O-RU CC %d\n", fh_cfg->nCC);
1849 printf("O-RU eAxC %d\n", fh_cfg->neAxc);
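    /* xran_port_mask gets one bit per configured xRAN port; subsets of this
     * mask are later passed as task_arg to the DL packet-generation workers
     * to tell each worker which ports it serves. */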
1851 for (i = 0; i < fh_init->xran_ports; i++){
1852 xran_port_mask |= 1<<i;
1855 for (i = 0; i < fh_init->xran_ports; i++) {
1856 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
1857 if(p_dev_update == NULL){
1858 print_err("p_dev_update\n");
1859 return XRAN_STATUS_FAIL;
1861 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
1862 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
1863 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
1864 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
1867 if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
1868 switch(total_num_cores) {
1869 case 1: /** only timing core */
1870 eth_ctx->time_wrk_cfg.f = xran_all_tasks;
1871 eth_ctx->time_wrk_cfg.arg = NULL;
1872 eth_ctx->time_wrk_cfg.state = 1;
1875 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
1876 eth_ctx->time_wrk_cfg.arg = NULL;
1877 eth_ctx->time_wrk_cfg.state = 1;
1879 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1881 print_err("pThCtx allocation error\n");
1882 return XRAN_STATUS_FAIL;
1884 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1885 pThCtx->worker_id = 0;
1886 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1887 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1888 pThCtx->task_func = ring_processing_func;
1889 pThCtx->task_arg = NULL;
1890 eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
1891 eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
1895 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
1896 eth_ctx->time_wrk_cfg.arg = NULL;
1897 eth_ctx->time_wrk_cfg.state = 1;
1901 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1903 print_err("pThCtx allocation error\n");
1904 return XRAN_STATUS_FAIL;
1906 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1907 pThCtx->worker_id = 0;
1908 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1909 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1910 pThCtx->task_func = ring_processing_func;
1911 pThCtx->task_arg = NULL;
1912 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
1913 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
1915 for (i = 0; i < fh_init->xran_ports; i++) {
1916 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
1917 if(p_dev_update == NULL) {
1918 print_err("p_dev_update\n");
1919 return XRAN_STATUS_FAIL;
1921 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
1922 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
1923 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
1924 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
1928 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1930 print_err("pThCtx allocation error\n");
1931 return XRAN_STATUS_FAIL;
1933 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1934 pThCtx->worker_id = 1;
1935 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1936 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
1937 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
1938 pThCtx->task_arg = (void*)xran_port_mask;
1939 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
1940 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
1943 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
1944 return XRAN_STATUS_FAIL;
1946 } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) {
1947 switch(total_num_cores) {
1948 case 1: /** only timing core */
1949 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
1950 return XRAN_STATUS_FAIL;
1953 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
1954 eth_ctx->time_wrk_cfg.arg = NULL;
1955 eth_ctx->time_wrk_cfg.state = 1;
1957 p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
1959 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1961 print_err("pThCtx allocation error\n");
1962 return XRAN_STATUS_FAIL;
1964 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1965 pThCtx->worker_id = 0;
1966 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1967 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1968 pThCtx->task_func = ring_processing_func;
1969 pThCtx->task_arg = NULL;
1970 eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
1971 eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
1976 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
1977 eth_ctx->time_wrk_cfg.arg = NULL;
1978 eth_ctx->time_wrk_cfg.state = 1;
1982 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
1984 print_err("pThCtx allocation error\n");
1985 return XRAN_STATUS_FAIL;
1987 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
1988 pThCtx->worker_id = 0;
1989 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
1990 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
1991 pThCtx->task_func = ring_processing_func;
1992 pThCtx->task_arg = NULL;
1993 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
1994 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
1996 for (i = 0; i < fh_init->xran_ports; i++) {
1997 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
1998 if(p_dev_update == NULL) {
1999 print_err("p_dev_update\n");
2000 return XRAN_STATUS_FAIL;
2002 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2003 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2004 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2005 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2009 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2011 print_err("pThCtx allocation error\n");
2012 return XRAN_STATUS_FAIL;
2014 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2015 pThCtx->worker_id = 1;
2016 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2017 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2018 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2019 pThCtx->task_arg = (void*)xran_port_mask;
2020 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2021 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2023 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2024 return XRAN_STATUS_FAIL;
2030 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2031 eth_ctx->time_wrk_cfg.arg = NULL;
2032 eth_ctx->time_wrk_cfg.state = 1;
2036 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2038 print_err("pThCtx allocation error\n");
2039 return XRAN_STATUS_FAIL;
2041 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2042 pThCtx->worker_id = 0;
2043 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2044 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2045 pThCtx->task_func = ring_processing_func;
2046 pThCtx->task_arg = NULL;
2047 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2048 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2051 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2053 print_err("pThCtx allocation error\n");
2054 return XRAN_STATUS_FAIL;
2056 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2057 pThCtx->worker_id = 1;
2058 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2059 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2060 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2061 pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
2062 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2063 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2066 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2068 print_err("pThCtx allocation error\n");
2069 return XRAN_STATUS_FAIL;
2071 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2072 pThCtx->worker_id = 2;
2073 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2074 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2075 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2076 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
2077 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2078 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2080 for (i = 1; i < fh_init->xran_ports; i++) {
2081 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2082 if(p_dev_update == NULL) {
2083 print_err("p_dev_update\n");
2084 return XRAN_STATUS_FAIL;
2086 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2087 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2088 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2089 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2092 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2093 return XRAN_STATUS_FAIL;
2099 eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
2100 eth_ctx->time_wrk_cfg.arg = NULL;
2101 eth_ctx->time_wrk_cfg.state = 1;
2105 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2107 print_err("pThCtx allocation error\n");
2108 return XRAN_STATUS_FAIL;
2110 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2111 pThCtx->worker_id = 0;
2112 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2113 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2114 pThCtx->task_func = ring_processing_func;
2115 pThCtx->task_arg = NULL;
2116 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2117 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2120 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2122 print_err("pThCtx allocation error\n");
2123 return XRAN_STATUS_FAIL;
2125 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2126 pThCtx->worker_id = 1;
2127 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2128 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2129 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2130 pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
2131 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2132 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2135 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2137 print_err("pThCtx allocation error\n");
2138 return XRAN_STATUS_FAIL;
2140 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2141 pThCtx->worker_id = 2;
2142 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2143 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2144 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2145 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
2146 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2147 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2150 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2152 print_err("pThCtx allocation error\n");
2153 return XRAN_STATUS_FAIL;
2155 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2156 pThCtx->worker_id = 3;
2157 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2158 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2159 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2160 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
2161 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2162 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2164 for (i = 1; i < fh_init->xran_ports; i++) {
2165 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2166 if(p_dev_update == NULL) {
2167 print_err("p_dev_update\n");
2168 return XRAN_STATUS_FAIL;
2170 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2171 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2172 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2173 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2176 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2177 return XRAN_STATUS_FAIL;
2181 if(eth_ctx->io_cfg.id == O_DU) {
2183 eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
2184 eth_ctx->time_wrk_cfg.arg = NULL;
2185 eth_ctx->time_wrk_cfg.state = 1;
2189 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2191 print_err("pThCtx allocation error\n");
2192 return XRAN_STATUS_FAIL;
2194 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2195 pThCtx->worker_id = 0;
2196 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2197 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2198 pThCtx->task_func = ring_processing_func;
2199 pThCtx->task_arg = NULL;
2200 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2201 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2204 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2207 print_err("pThCtx allocation error\n");
2208 return XRAN_STATUS_FAIL;
2210 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2211 pThCtx->worker_id = 1;
2212 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2213 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2214 pThCtx->task_func = process_dpdk_io_tx;
2215 pThCtx->task_arg = (void*)2;
2216 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2217 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2220 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2222 print_err("pThCtx allocation error\n");
2223 return XRAN_STATUS_FAIL;
2225 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2226 pThCtx->worker_id = 2;
2227 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2228 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2229 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2230 pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
2231 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2232 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2235 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2237 print_err("pThCtx allocation error\n");
2238 return XRAN_STATUS_FAIL;
2240 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2241 pThCtx->worker_id = 3;
2242 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2243 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2244 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2245 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
2246 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2247 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2250 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2252 print_err("pThCtx allocation error\n");
2253 return XRAN_STATUS_FAIL;
2255 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2256 pThCtx->worker_id = 4;
2257 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2258 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2259 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2260 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
2261 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2262 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2264 for (i = 0; i < fh_init->xran_ports; i++) {
2265 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2266 if(p_dev_update == NULL) {
2267 print_err("p_dev_update\n");
2268 return XRAN_STATUS_FAIL;
2270 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id;
2271 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id;
2272 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2273 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2275 } else if(eth_ctx->io_cfg.id == O_RU) {
2276 /*** O_RU specific config */
2278 eth_ctx->time_wrk_cfg.f = NULL;
2279 eth_ctx->time_wrk_cfg.arg = NULL;
2280 eth_ctx->time_wrk_cfg.state = 1;
2284 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2286 print_err("pThCtx allocation error\n");
2287 return XRAN_STATUS_FAIL;
2289 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2290 pThCtx->worker_id = 0;
2291 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2292 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
2293 pThCtx->task_func = process_dpdk_io_rx;
2294 pThCtx->task_arg = NULL;
2295 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2296 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2298 /** 1 FH RX and BBDEV */
2299 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2301 print_err("pThCtx allocation error\n");
2302 return XRAN_STATUS_FAIL;
2304 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2305 pThCtx->worker_id = 1;
2306 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2307 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
2308 pThCtx->task_func = ring_processing_func_per_port;
2309 pThCtx->task_arg = (void*)0;
2310 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2311 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2313 /** 2 FH RX and BBDEV */
2314 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2316 print_err("pThCtx allocation error\n");
2317 return XRAN_STATUS_FAIL;
2319 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2320 pThCtx->worker_id = 2;
2321 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2322 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
2323 pThCtx->task_func = ring_processing_func_per_port;
2324 pThCtx->task_arg = (void*)1;
2325 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2326 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2328 /** 3 FH RX and BBDEV */
2329 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2331 print_err("pThCtx allocation error\n");
2332 return XRAN_STATUS_FAIL;
2334 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2335 pThCtx->worker_id = 3;
2336 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2337 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
2338 pThCtx->task_func = ring_processing_func_per_port;
2339 pThCtx->task_arg = (void*)2;
2340 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2341 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2343 /** FH TX and BBDEV */
2344 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2346 print_err("pThCtx allocation error\n");
2347 return XRAN_STATUS_FAIL;
2349 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2350 pThCtx->worker_id = 4;
2351 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2352 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2353 pThCtx->task_func = process_dpdk_io_tx;
2354 pThCtx->task_arg = (void*)2;
2355 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2356 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2358 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2359 return XRAN_STATUS_FAIL;
2363 print_err("unsupported configuration\n");
2364 return XRAN_STATUS_FAIL;
2366 } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
2367 switch(total_num_cores) {
2370 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2371 return XRAN_STATUS_FAIL;
2376 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2377 eth_ctx->time_wrk_cfg.arg = NULL;
2378 eth_ctx->time_wrk_cfg.state = 1;
2382 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2384 print_err("pThCtx allocation error\n");
2385 return XRAN_STATUS_FAIL;
2387 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2388 pThCtx->worker_id = 0;
2389 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2390 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2391 pThCtx->task_func = ring_processing_func;
2392 pThCtx->task_arg = NULL;
2393 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2394 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2396 for (i = 1; i < fh_init->xran_ports; i++) {
2397 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2398 if(p_dev_update == NULL) {
2399 print_err("p_dev_update\n");
2400 return XRAN_STATUS_FAIL;
2402 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2403 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2404 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2405 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2409 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2411 print_err("pThCtx allocation error\n");
2412 return XRAN_STATUS_FAIL;
2414 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2415 pThCtx->worker_id = 1;
2416 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2417 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2418 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2419 pThCtx->task_arg = (void*)xran_port_mask;
2420 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2421 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2423 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2424 return XRAN_STATUS_FAIL;
2430 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2431 eth_ctx->time_wrk_cfg.arg = NULL;
2432 eth_ctx->time_wrk_cfg.state = 1;
2436 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2438 print_err("pThCtx allocation error\n");
2439 return XRAN_STATUS_FAIL;
2441 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2442 pThCtx->worker_id = 0;
2443 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2444 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2445 pThCtx->task_func = ring_processing_func;
2446 pThCtx->task_arg = NULL;
2447 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2448 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2451 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2453 print_err("pThCtx allocation error\n");
2454 return XRAN_STATUS_FAIL;
2456 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2457 pThCtx->worker_id = 1;
2458 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2459 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2460 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2461 pThCtx->task_arg = (void*)(((1<<1) | (1<<2)) & xran_port_mask);
2462 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2463 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2466 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2468 print_err("pThCtx allocation error\n");
2469 return XRAN_STATUS_FAIL;
2471 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2472 pThCtx->worker_id = 2;
2473 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2474 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2475 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2476 pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
2477 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2478 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2480 for (i = 1; i < fh_init->xran_ports; i++) {
2481 struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
2482 if(p_dev_update == NULL) {
2483 print_err("p_dev_update\n");
2484 return XRAN_STATUS_FAIL;
2486 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
2487 p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
2488 printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
2489 printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
2492 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2493 return XRAN_STATUS_FAIL;
2498 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2499 eth_ctx->time_wrk_cfg.arg = NULL;
2500 eth_ctx->time_wrk_cfg.state = 1;
2503 /** 0 FH RX and BBDEV */
2504 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2506 print_err("pThCtx allocation error\n");
2507 return XRAN_STATUS_FAIL;
2509 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2510 pThCtx->worker_id = 0;
2511 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2512 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2513 pThCtx->task_func = ring_processing_func;
2514 pThCtx->task_arg = NULL;
2515 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2516 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2519 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2521 print_err("pThCtx allocation error\n");
2522 return XRAN_STATUS_FAIL;
2524 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2525 pThCtx->worker_id = 1;
2526 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2527 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2528 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2529 pThCtx->task_arg = (void*)(1<<0);
2530 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2531 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2534 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2536 print_err("pThCtx allocation error\n");
2537 return XRAN_STATUS_FAIL;
2539 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2540 pThCtx->worker_id = 2;
2541 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2542 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
2543 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2544 pThCtx->task_arg = (void*)(1<<1);
2545 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2546 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2549 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2551 print_err("pThCtx allocation error\n");
2552 return XRAN_STATUS_FAIL;
2554 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2555 pThCtx->worker_id = 3;
2556 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2557 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
2558 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2559 pThCtx->task_arg = (void*)(1<<2);
2560 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2561 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2564 if(eth_ctx->io_cfg.id == O_DU){
2566 eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
2567 eth_ctx->time_wrk_cfg.arg = NULL;
2568 eth_ctx->time_wrk_cfg.state = 1;
2572 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2574 print_err("pThCtx allocation error\n");
2575 return XRAN_STATUS_FAIL;
2577 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2578 pThCtx->worker_id = 0;
2579 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2580 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
2581 pThCtx->task_func = ring_processing_func;
2582 pThCtx->task_arg = NULL;
2583 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2584 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2587 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2589 print_err("pThCtx allocation error\n");
2590 return XRAN_STATUS_FAIL;
2592 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2593 pThCtx->worker_id = 1;
2594 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2595 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
2596 pThCtx->task_func = xran_processing_timer_only_func;
2597 pThCtx->task_arg = NULL;
2598 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2599 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2602 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2604 print_err("pThCtx allocation error\n");
2605 return XRAN_STATUS_FAIL;
2607 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2608 pThCtx->worker_id = 2;
2609 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2610 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2611 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2612 pThCtx->task_arg = (void*)(1<<0);
2613 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2614 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2617 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2619 print_err("pThCtx allocation error\n");
2620 return XRAN_STATUS_FAIL;
2622 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2623 pThCtx->worker_id = 3;
2624 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2625 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2626 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2627 pThCtx->task_arg = (void*)(1<<1);
2628 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2629 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2632 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2634 print_err("pThCtx allocation error\n");
2635 return XRAN_STATUS_FAIL;
2637 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2638 pThCtx->worker_id = 4;
2639 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2640 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
2641 pThCtx->task_func = xran_dl_pkt_ring_processing_func;
2642 pThCtx->task_arg = (void*)(1<<2);
2643 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2644 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2646 /*** O_RU specific config */
2648 eth_ctx->time_wrk_cfg.f = NULL;
2649 eth_ctx->time_wrk_cfg.arg = NULL;
2650 eth_ctx->time_wrk_cfg.state = 1;
2654 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2656 print_err("pThCtx allocation error\n");
2657 return XRAN_STATUS_FAIL;
2659 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2660 pThCtx->worker_id = 0;
2661 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2662 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
2663 pThCtx->task_func = process_dpdk_io_rx;
2664 pThCtx->task_arg = NULL;
2665 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2666 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2668 /** 1 FH RX and BBDEV */
2669 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2671 print_err("pThCtx allocation error\n");
2672 return XRAN_STATUS_FAIL;
2674 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2675 pThCtx->worker_id = 1;
2676 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2677 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
2678 pThCtx->task_func = ring_processing_func_per_port;
2679 pThCtx->task_arg = (void*)0;
2680 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2681 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2683 /** 2 FH RX and BBDEV */
2684 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2686 print_err("pThCtx allocation error\n");
2687 return XRAN_STATUS_FAIL;
2689 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2690 pThCtx->worker_id = 2;
2691 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2692 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
2693 pThCtx->task_func = ring_processing_func_per_port;
2694 pThCtx->task_arg = (void*)1;
2695 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2696 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2698 /** 3 FH RX and BBDEV */
2699 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2701 print_err("pThCtx allocation error\n");
2702 return XRAN_STATUS_FAIL;
2704 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2705 pThCtx->worker_id = 3;
2706 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2707 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
2708 pThCtx->task_func = ring_processing_func_per_port;
2709 pThCtx->task_arg = (void*)2;
2710 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2711 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2713 /** FH TX and BBDEV */
2714 pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
2716 print_err("pThCtx allocation error\n");
2717 return XRAN_STATUS_FAIL;
2719 memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
2720 pThCtx->worker_id = 4;
2721 pThCtx->worker_core_id = core_map[pThCtx->worker_id];
2722 snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
2723 pThCtx->task_func = process_dpdk_io_tx;
2724 pThCtx->task_arg = (void*)2;
2725 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
2726 eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
2730 print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
2731 return XRAN_STATUS_FAIL;
2734 print_err("unsupported configuration\n");
2735 return XRAN_STATUS_FAIL;
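    /* All worker descriptors are prepared above; now launch one EAL thread per
     * core selected by the pkt_proc_core / pkt_proc_core_64_127 masks using
     * rte_eal_remote_launch() and record the cores in use. */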
2739 if(eth_ctx->io_cfg.pkt_proc_core) {
2740 for (i = 0; i < coreNum && i < 64; i++) {
2741 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
2742 xran_core_used[xran_num_cores_used++] = i;
2743 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
2744 rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
2745 eth_ctx->pkt_wrk_cfg[i].state = 1;
2746 if(eth_ctx->pkt_proc_core_id == 0)
2747 eth_ctx->pkt_proc_core_id = i;
2748 printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
2749 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
2751 nWorkerCore = nWorkerCore << 1;
2756 if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
2757 for (i = 64; i < coreNum && i < 128; i++) {
2758 if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
2759 xran_core_used[xran_num_cores_used++] = i;
2760 if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
2761 rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
2762 eth_ctx->pkt_wrk_cfg[i].state = 1;
2763 if(eth_ctx->pkt_proc_core_id == 0)
2764 eth_ctx->pkt_proc_core_id = i;
2765 printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
2766 eth_ctx->worker_core[eth_ctx->num_workers++] = i;
2768 nWorkerCore = nWorkerCore << 1;
2772 return XRAN_STATUS_SUCCESS;
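/**
 * Open one xRAN port: copy the FH configuration into the device context, set
 * up PRACH/SRS and the C-Plane section database, apply VF and RX-queue
 * mappings, install the default mbuf-send and TX symbol generation callbacks,
 * and (for dpdk_port 0) start the timing thread; worker threads are spawned
 * when the last configured port is opened.
 */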
2775 xran_open(void *pHandle, struct xran_fh_config* pConf)
2777 int32_t ret = XRAN_STATUS_SUCCESS;
2779 uint8_t nNumerology = 0;
2780 int32_t lcore_id = 0;
2781 struct xran_device_ctx *p_xran_dev_ctx = NULL;
2782 struct xran_fh_config *pFhCfg = NULL;
2783 struct xran_fh_init *fh_init = NULL;
2784 struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
2785 int32_t wait_time = 10;
2786 int64_t offset_sec, offset_nsec;
2788 if(pConf->dpdk_port < XRAN_PORTS_NUM) {
2789 p_xran_dev_ctx = xran_dev_get_ctx_by_id(pConf->dpdk_port);
2791 print_err("@0x%08p [ru %d ] pConf->dpdk_port > XRAN_PORTS_NUM\n", pConf, pConf->dpdk_port);
2792 return XRAN_STATUS_FAIL;
2795 if(p_xran_dev_ctx == NULL) {
2796 print_err("[ru %d] p_xran_dev_ctx == NULL ", pConf->dpdk_port);
2797 return XRAN_STATUS_FAIL;
2800 pFhCfg = &p_xran_dev_ctx->fh_cfg;
2801 memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
2803 fh_init = &p_xran_dev_ctx->fh_init;
2805 return XRAN_STATUS_FAIL;
2807 if(pConf->log_level) {
2808 printf(" %s: %s Category %s\n", __FUNCTION__,
2809 (pFhCfg->ru_conf.xranTech == XRAN_RAN_5GNR) ? "5G NR" : "LTE",
2810 (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
2813 p_xran_dev_ctx->enableCP = pConf->enableCP;
2814 p_xran_dev_ctx->enablePrach = pConf->prachEnable;
2815 p_xran_dev_ctx->enableSrs = pConf->srsEnable;
2816 p_xran_dev_ctx->puschMaskEnable = pConf->puschMaskEnable;
2817 p_xran_dev_ctx->puschMaskSlot = pConf->puschMaskSlot;
2818 p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna;
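    /* GPS_Beta is expressed in 10 ms units while GPS_Alpha is added directly
     * in nanoseconds; e.g. (purely illustrative values) GPS_Beta = 250 and
     * GPS_Alpha = 500 give offset_sec = 2 and
     * offset_nsec = 50 * 1e7 + 500 = 500000500 ns. */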
2820 if(pConf->GPS_Alpha || pConf->GPS_Beta ){
2821 offset_sec = pConf->GPS_Beta / 100; /* resolution of beta is 10ms */
2822 offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 1e7 + pConf->GPS_Alpha;
2823 p_xran_dev_ctx->offset_sec = offset_sec;
2824 p_xran_dev_ctx->offset_nsec = offset_nsec;
2826 p_xran_dev_ctx->offset_sec = 0;
2827 p_xran_dev_ctx->offset_nsec = 0;
2831 nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
2833 if (pConf->nCC > XRAN_MAX_SECTOR_NR) {
2834 if(pConf->log_level)
2835 printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);
2836 pConf->nCC = XRAN_MAX_SECTOR_NR;
2839 if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) {
2840 print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);
2841 return XRAN_STATUS_FAIL;
2844 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) {
2845 if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) {
2850 /* setup PRACH configuration for C-Plane */
2851 if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) {
2852 if((ret = xran_init_prach(pConf, p_xran_dev_ctx))< 0){
2855 } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) {
2856 if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0){
2861 if((ret = xran_init_srs(pConf, p_xran_dev_ctx))< 0){
2865 if((ret = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){
2869 if((ret = xran_init_sectionid(p_xran_dev_ctx)) < 0){
2873 if((ret = xran_init_seqid(p_xran_dev_ctx)) < 0){
2877 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
2878 if((ret = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) {
2882 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) {
2883 if((ret = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) {
2889 if(pConf->ru_conf.xran_max_frame) {
2890 xran_max_frame = pConf->ru_conf.xran_max_frame;
2891 printf("xran_max_frame %d\n", xran_max_frame);
2894 p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology);
2895 if (interval_us > p_xran_dev_ctx->interval_us_local)
2897 interval_us = xran_fs_get_tti_interval(nNumerology); //only update interval_us based on maximum numerology
2900 // if(pConf->log_level){
2901 printf("%s: interval_us=%ld, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local);
2903 if (nNumerology >= timing_get_numerology())
2905 timing_set_numerology(nNumerology);
2908 for(i = 0 ; i <pConf->nCC; i++){
2909 xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
2910 pConf->frame_conf.sSlotConfig);
2913 xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology));
2915 /* if send_xpmbuf2ring needs to be changed from default functions,
2916 * then those should be set between xran_init and xran_open */
2917 if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
2918 p_xran_dev_ctx->send_cpmbuf2ring = xran_ethdi_mbuf_send_cp;
2919 if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
2920 p_xran_dev_ctx->send_upmbuf2ring = xran_ethdi_mbuf_send;
2922 if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
2923 if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
2924 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
2926 if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
2927 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
2930 if(pConf->dpdk_port == 0) {
2931 /* create all threads on open of port 0 */
2932 xran_num_cores_used = 0;
2933 if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
2934 eth_ctx->bbdev_dec = pConf->bbdev_dec;
2935 eth_ctx->bbdev_enc = pConf->bbdev_enc;
2938 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
2939 printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
2940 p_xran_dev_ctx->timing_source_thread_running = 0;
2941 xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
2942 if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
2943 rte_panic("thread_run() failed to start\n");
2944 } else if(pConf->log_level) {
2945 printf("Eth port was not open. Processing thread was not started\n");
2948 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
2949 if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
2955 if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
2956 if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
2957 if((ret = xran_spawn_workers()) < 0) {
2961 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, sched_getcpu(), getpid());
2962 printf("Waiting on Timing thread...\n");
2963 while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
2968 print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
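/**
 * Start the fronthaul interface: logs the UTC start time and moves the
 * interface state to XRAN_OWDM when eCPRI one-way delay measurement is
 * enabled on this side, otherwise straight to XRAN_RUNNING.
 */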
2973 xran_start(void *pHandle)
2976 /* ToS = Top of Second start +- 1.5us */
2980 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
2981 if(xran_get_if_state() == XRAN_RUNNING) {
2982 print_err("Already STARTED!!");
2985 timespec_get(&ts, TIME_UTC);
2986 ptm = gmtime(&ts.tv_sec);
2988 strftime(buff, sizeof(buff), "%D %T", ptm);
2989 printf("%s: XRAN start time: %s.%09ld UTC [%ld]\n",
2990 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
2993 if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable)
2995 xran_if_current_state = XRAN_OWDM;
2999 xran_if_current_state = XRAN_RUNNING;
3005 xran_stop(void *pHandle)
3007 if(xran_get_if_state() == XRAN_STOPPED) {
3008 print_err("Already STOPPED!!");
3012 xran_if_current_state = XRAN_STOPPED;
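/**
 * Close the interface: force the state to XRAN_STOPPED, release the C-Plane
 * section database and, on the O-RU side, the RU emulation resources.
 */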
3017 xran_close(void *pHandle)
3019 int32_t ret = XRAN_STATUS_SUCCESS;
3020 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
3022 xran_if_current_state = XRAN_STOPPED;
3023 ret = xran_cp_free_sectiondb(p_xran_dev_ctx);
3025 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)
3026 xran_ruemul_release(p_xran_dev_ctx);
3028 #ifdef RTE_LIBRTE_PDUMP
3029 /* uninitialize packet capture framework */
3035 /* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open
3036 * each cb will be set to its default during open if it is left as NULL */
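/* Illustrative usage (the callback names below are hypothetical application
 * code):
 *     xran_register_cb_mbuf2ring(my_send_cp_mbuf, my_send_up_mbuf);
 * where both functions match the xran_ethdi_mbuf_send_fn prototype. The call
 * must be made while the interface is not running, i.e. after xran_init()
 * and before xran_open(). */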
3038 xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
3040 struct xran_device_ctx *p_xran_dev_ctx;
3042 if(xran_get_if_state() == XRAN_RUNNING) {
3043 print_err("Cannot register callback while running!!\n");
3047 p_xran_dev_ctx = xran_dev_get_ctx();
3049 p_xran_dev_ctx->send_cpmbuf2ring = mbuf_send_cp;
3050 p_xran_dev_ctx->send_upmbuf2ring = mbuf_send_up;
3052 p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
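/**
 * Derive the current frame/subframe/slot for the given port from the
 * free-running OTA symbol counter: the TTI number is obtained from
 * xran_lib_ota_sym_idx (XRAN_NUM_OF_SYMBOL_PER_SLOT symbols per slot), then
 * slot, subframe and frame numbers are computed using the port's local TTI
 * interval (numerology); the current second is also returned.
 */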
3058 xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond)
3061 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
3062 if (!p_xran_dev_ctx)
3064 print_err("Null xRAN context on port id %u!!\n", PortId);
3068 tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT);
3069 *nSlotIdx = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
3070 *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
3071 *nFrameIdx = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
3072 *nSecond = timing_get_current_second();
3078 xran_set_debug_stop(int32_t value, int32_t count)
3080 return timing_set_debug_stop(value, count);