+ else
+ memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
+
+ p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0];
+ if(pDstBufferDecomp[z][j])
+ p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j];
+
+ }
+ }
+
+ p_xran_dev_ctx->pPrachCallback[i] = pCallback;
+ p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
+
+ print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
+ p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]);
+
+ return XRAN_STATUS_SUCCESS;
+}
+
/**
 * Register receive buffer lists and the transport callback for SRS
 * (Sounding Reference Signal) reception on one component carrier.
 *
 * @param pHandle      Sector handle (XranSectorHandleInfo*) identifying the
 *                     xRAN port and CC index; must not be NULL.
 * @param pDstBuffer   Per-antenna-array-element, per-TTI U-plane SRS RX
 *                     buffer lists; a NULL entry leaves that slot zeroed.
 * @param pDstCpBuffer Per-antenna-array-element, per-TTI C-plane SRS PRB-map
 *                     buffer lists; a NULL entry leaves that slot zeroed.
 * @param pCallback    Transport callback invoked on SRS reception.
 * @param pCallbackTag Opaque tag passed back to pCallback.
 * @return XRAN_STATUS_SUCCESS on success, XRAN_STATUS_FAIL on bad handle or
 *         missing device context.
 */
int32_t
xran_5g_srs_req (void * pHandle,
                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
                struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
                xran_transport_callback_fn pCallback,
                void *pCallbackTag)
{
    int j, i = 0, z;
    XranSectorHandleInfo* pXranCc = NULL;
    struct xran_device_ctx * p_xran_dev_ctx = NULL;

    if(NULL == pHandle) {
        printf("Handle is NULL!\n");
        return XRAN_STATUS_FAIL;
    }

    pXranCc = (XranSectorHandleInfo*) pHandle;
    p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
    if (p_xran_dev_ctx == NULL) {
        printf ("p_xran_dev_ctx is NULL\n");
        return XRAN_STATUS_FAIL;
    }

    /* i is the component-carrier index within this xRAN port */
    i = pXranCc->nIndex;

    for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
        for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){
            /* Reset per-slot U-plane SRS I/O control state */
            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number.
            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
            /* NOTE(review): when pDstBuffer[z][j] is NULL the memset below
             * zeroes the whole sBufferList, wiping the pBuffers pointer set
             * just above — confirm this is the intended "slot disabled"
             * semantics. sizeof(*pDstBuffer[z][j]) is safe on a NULL entry
             * because sizeof does not evaluate its operand. */
            if(pDstBuffer[z][j])
                p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
            else
                memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));

            /* C-plane SRS */
            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z];

            /* Same overwrite-or-zero pattern as the U-plane list above */
            if(pDstCpBuffer[z][j])
                p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
            else
                memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));

        }
    }

    /* Callback is registered per component carrier */
    p_xran_dev_ctx->pSrsCallback[i] = pCallback;
    p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;

    print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__,
        p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]);

    return XRAN_STATUS_SUCCESS;
}
+
+uint32_t
+xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear)
+{
+ uint32_t i;
+
+ *num_core_used = xran_num_cores_used;
+ for (i = 0; i < xran_num_cores_used; i++)
+ {
+ core_used[i] = xran_core_used[i];
+ }
+
+ *total_time = xran_total_tick;
+ *used_time = xran_used_tick;
+
+ if (clear)
+ {
+ xran_total_tick = 0;
+ xran_used_tick = 0;
+ }
+
+ return 0;
+}
+
+uint8_t*
+xran_add_cp_hdr_offset(uint8_t *dst)
+{
+ dst += (RTE_PKTMBUF_HEADROOM +
+ sizeof(struct xran_ecpri_hdr) +
+ sizeof(struct xran_cp_radioapp_section1_header) +
+ sizeof(struct xran_cp_radioapp_section1));
+
+ dst = RTE_PTR_ALIGN_CEIL(dst, 64);
+
+ return dst;
+}
+
+uint8_t*
+xran_add_hdr_offset(uint8_t *dst, int16_t compMethod)
+{
+ dst+= (RTE_PKTMBUF_HEADROOM +
+ sizeof (struct xran_ecpri_hdr) +
+ sizeof (struct radio_app_common_hdr) +
+ sizeof(struct data_section_hdr));
+ if(compMethod != XRAN_COMPMETHOD_NONE)
+ dst += sizeof (struct data_section_compression_hdr);
+ dst = RTE_PTR_ALIGN_CEIL(dst, 64);
+
+ return dst;
+}
+
+int32_t
+xran_pkt_gen_process_ring(struct rte_ring *r)
+{
+ assert(r);
+ int32_t retval = 0;
+ struct rte_mbuf *mbufs[16];
+ int i;
+ uint32_t remaining;
+ uint64_t t1;
+ struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
+ const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
+ RTE_DIM(mbufs), &remaining);
+
+ if (!dequeued)
+ return 0;
+
+ t1 = MLogTick();
+ for (i = 0; i < dequeued; ++i) {
+ struct cp_up_tx_desc * p_tx_desc = (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i], struct cp_up_tx_desc *);
+ retval = xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
+ p_tx_desc->ctx_id,
+ p_tx_desc->tti,
+ p_tx_desc->cc_id,
+ p_tx_desc->ant_id,
+ p_tx_desc->frame_id,
+ p_tx_desc->subframe_id,
+ p_tx_desc->slot_id,
+ p_tx_desc->sym_id,
+ (enum xran_comp_hdr_type)p_tx_desc->compType,
+ (enum xran_pkt_dir) p_tx_desc->direction,
+ p_tx_desc->xran_port_id,
+ (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
+
+ xran_pkt_gen_desc_free(p_tx_desc);
+ if (XRAN_STOPPED == xran_if_current_state){
+ MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
+ return -1;
+ }
+ }
+
+ if(p_io_cfg->io_sleep)
+ nanosleep(&sleeptime,NULL);
+
+ MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
+
+ return remaining;
+}
+
+int32_t
+xran_dl_pkt_ring_processing_func(void* args)
+{
+ struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+ uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
+ uint16_t current_port;
+
+ rte_timer_manage();
+
+ for (current_port = 0; current_port < XRAN_PORTS_NUM; current_port++) {
+ if( xran_port_mask & (1<<current_port)) {
+ xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
+ }
+ }
+
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
+/** Function to peforms serves of DPDK times */
+int32_t
+xran_processing_timer_only_func(void* args)
+{
+ rte_timer_manage();
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
/** Combined worker task: parse received FH packets on all ports, then
 *  perform TX and RX on the Ethernet device. */
int32_t
xran_all_tasks(void* arg)
{
    ring_processing_func(arg);
    process_dpdk_io(arg);
    return 0;
}
+
/** Worker task: perform TX and RX on the Ethernet device. */
int32_t
xran_eth_trx_tasks(void* arg)
{
    process_dpdk_io(arg);
    return 0;
}
+
/** Worker task: perform RX only on the Ethernet device. */
int32_t
xran_eth_rx_tasks(void* arg)
{
    process_dpdk_io_rx(arg);
    return 0;
}
+
+/** Function to porcess ORAN FH packet per port */
+int32_t
+ring_processing_func_per_port(void* args)
+{
+ struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+ int16_t retPoll = 0;
+ int32_t i;
+ uint64_t t1, t2;
+ uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
+ queueid_t qi;
+
+ for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
+ if (ctx->vf2xran_port[i] == port_id) {
+ for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
+ if (process_ring(ctx->rx_ring[i][qi], i, qi))
+ return 0;
+ }
+ }
+ }
+
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
+/** Fucntion generate configuration of worker threads and creates them base on sceanrio and used platform */
+int32_t
+xran_spawn_workers(void)
+{
+ uint64_t nWorkerCore = 1LL;
+ uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
+ int32_t i = 0;
+ uint32_t total_num_cores = 1; /*start with timing core */
+ uint32_t worker_num_cores = 0;
+ uint32_t icx_cpu = 0;
+ int32_t core_map[2*sizeof(uint64_t)*8];
+ uint32_t xran_port_mask = 0;
+
+ struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
+ struct xran_device_ctx *p_dev = NULL;
+ struct xran_fh_init *fh_init = NULL;
+ struct xran_fh_config *fh_cfg = NULL;
+ struct xran_worker_th_ctx* pThCtx = NULL;
+
+ p_dev = xran_dev_get_ctx_by_id(0);
+ if(p_dev == NULL) {
+ print_err("p_dev\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ fh_init = &p_dev->fh_init;
+ if(fh_init == NULL) {
+ print_err("fh_init\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ fh_cfg = &p_dev->fh_cfg;
+ if(fh_cfg == NULL) {
+ print_err("fh_cfg\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ for (i = 0; i < coreNum && i < 64; i++) {
+ if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+ core_map[worker_num_cores++] = i;
+ total_num_cores++;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+
+ nWorkerCore = 1LL;
+ for (i = 64; i < coreNum && i < 128; i++) {
+ if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+ core_map[worker_num_cores++] = i;
+ total_num_cores++;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+
+ extern int _may_i_use_cpu_feature(unsigned __int64);
+ icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
+
+ printf("O-XU %d\n", eth_ctx->io_cfg.id);
+ printf("HW %d\n", icx_cpu);
+ printf("Num cores %d\n", total_num_cores);
+ printf("Num ports %d\n", fh_init->xran_ports);
+ printf("O-RU Cat %d\n", fh_cfg->ru_conf.xranCat);
+ printf("O-RU CC %d\n", fh_cfg->nCC);
+ printf("O-RU eAxC %d\n", fh_cfg->neAxc);
+
+ for (i = 0; i < fh_init->xran_ports; i++){
+ xran_port_mask |= 1<<i;
+ }
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL){
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+ switch(total_num_cores) {
+ case 1: /** only timing core */
+ eth_ctx->time_wrk_cfg.f = xran_all_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+ break;
+ case 2:
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
+ break;
+ case 3:
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)xran_port_mask;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ break;
+ default:
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) {
+ switch(total_num_cores) {
+ case 1: /** only timing core */
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ break;
+ case 2:
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
+ break;
+ case 3:
+ if(icx_cpu) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)xran_port_mask;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 4:
+ if(icx_cpu) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 5:
+ if(icx_cpu) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 6:
+ if(eth_ctx->io_cfg.id == O_DU) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 Eth Tx **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 4 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else if(eth_ctx->io_cfg.id == O_RU) {
+ /*** O_RU specific config */
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = NULL;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 Eth RX */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_rx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)0;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)1;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** FH TX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ default:
+ print_err("unsupported configuration\n");
+ return XRAN_STATUS_FAIL;
+ }
+ } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
+ switch(total_num_cores) {
+ case 1:
+ case 2:
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ break;
+ case 3:
+ if(icx_cpu) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)xran_port_mask;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 4:
+ if(icx_cpu) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1<<1) | (1<<2)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 5:
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(1<<0);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(1<<1);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(1<<2);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ break;
+ case 6:
+ if(eth_ctx->io_cfg.id == O_DU){
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_processing_timer_only_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(1<<0);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(1<<1);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 4 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(1<<2);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ /*** O_RU specific config */
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = NULL;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 Eth RX */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_rx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)0;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)1;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** FH TX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ }
+ break;
+ default:
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;