+/** Function to perform RX on the Ethernet device */
+int32_t
+xran_eth_rx_tasks(void* arg)
+{
+ process_dpdk_io_rx(arg);
+ return 0;
+}
+
+/** Function to process O-RAN FH packets per port */
+int32_t
+ring_processing_func_per_port(void* args)
+{
+ struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+ int32_t i;
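+ /* The xRAN port id is packed into the low 16 bits of the opaque thread argument */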
+ uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
+ queueid_t qi;
+
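+ /* Poll the RX rings of every VF and queue mapped to this xRAN port */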
+ for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
+ if (ctx->vf2xran_port[i] == port_id) {
+ for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
+ if (process_ring(ctx->rx_ring[i][qi], i, qi))
+ return 0;
+ }
+ }
+ }
+
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
+/** Function generates the configuration of worker threads and creates them based on the scenario and the platform in use */
+int32_t
+xran_spawn_workers(void)
+{
+ uint64_t nWorkerCore = 1LL;
+ uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
+ int32_t i = 0;
+ uint32_t total_num_cores = 1; /*start with timing core */
+ uint32_t worker_num_cores = 0;
+ uint32_t icx_cpu = 0;
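+ /* core_map holds the logical core id of each worker; sized for two 64-bit core masks (128 cores) */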
+ int32_t core_map[2*sizeof(uint64_t)*8];
+ uint64_t xran_port_mask = 0;
+
+ struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
+ struct xran_device_ctx *p_dev = NULL;
+ struct xran_fh_init *fh_init = NULL;
+ struct xran_fh_config *fh_cfg = NULL;
+ struct xran_worker_th_ctx* pThCtx = NULL;
+ void *worker_ports=NULL;
+
+ p_dev = xran_dev_get_ctx_by_id(0);
+ if(p_dev == NULL) {
+ print_err("p_dev\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ fh_init = &p_dev->fh_init;
+ if(fh_init == NULL) {
+ print_err("fh_init\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ fh_cfg = &p_dev->fh_cfg;
+ if(fh_cfg == NULL) {
+ print_err("fh_cfg\n");
+ return XRAN_STATUS_FAIL;
+ }
+
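+ /* Collect worker cores 0..63 from the pkt_proc_core bitmask */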
+ for (i = 0; i < coreNum && i < 64; i++) {
+ if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+ core_map[worker_num_cores++] = i;
+ total_num_cores++;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+
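+ /* Collect worker cores 64..127 from the pkt_proc_core_64_127 bitmask */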
+ nWorkerCore = 1LL;
+ for (i = 64; i < coreNum && i < 128; i++) {
+ if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+ core_map[worker_num_cores++] = i;
+ total_num_cores++;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+
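+ /* Use AVX512-IFMA support as an indicator of an ICX-class (or newer) CPU */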
+ extern int _may_i_use_cpu_feature(unsigned __int64);
+ icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
+
+ printf("O-XU %d\n", eth_ctx->io_cfg.id);
+ printf("HW %d\n", icx_cpu);
+ printf("Num cores %d\n", total_num_cores);
+ printf("Num ports %d\n", fh_init->xran_ports);
+ printf("O-RU Cat %d\n", fh_cfg->ru_conf.xranCat);
+ printf("O-RU CC %d\n", fh_cfg->nCC);
+ printf("O-RU eAxC %d\n", fh_cfg->neAxc);
+
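+ /* Build a bitmask with one bit set per configured xRAN port */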
+ for (i = 0; i < fh_init->xran_ports; i++){
+ xran_port_mask |= 1L<<i;
+ }
+
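+ /* Default mapping: CP DL/UL jobs of every port go to worker 1; refined by the per-scenario setup below */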
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL){
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
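+ /* Category A O-RU: choose the worker layout from the number of available cores */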
+ if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+ switch(total_num_cores) {
+ case 1: /** only timing core */
+ eth_ctx->time_wrk_cfg.f = xran_all_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+ break;
+ case 2:
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
+ break;
+ case 3:
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)xran_port_mask;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ break;
+ default:
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ } else if ((fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) || fh_init->io_cfg.bbu_offload) {
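+ /* Category B with a single xRAN port, or BBU offload enabled */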
+ switch(total_num_cores) {
+ case 1: /** only timing core */
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ break;
+ case 2:
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ if (p_dev->fh_init.io_cfg.bbu_offload)
+ p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
+ else
+ p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
+ break;
+ case 3:
+ if(1) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)xran_port_mask;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 4:
+ if(1) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 5:
+ if(1) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 6:
+ if(eth_ctx->io_cfg.id == O_DU) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 Eth Tx **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 4 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ } else if(eth_ctx->io_cfg.id == O_RU) {
+ /*** O_RU specific config */
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = NULL;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 Eth RX */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_rx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)0;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)1;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** FH TX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ default:
+ print_err("unsupported configuration\n");
+ return XRAN_STATUS_FAIL;
+ }
+ } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
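+ /* Category B with multiple xRAN ports */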
+ switch(total_num_cores) {
+ case 1:
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ break;
+
+ case 2:
+ if(fh_init->xran_ports == 2)
+ worker_ports = (void *)((1L<<0 | 1L<<1) & xran_port_mask);
+ else if(fh_init->xran_ports == 3)
+ worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2) & xran_port_mask);
+ else if(fh_init->xran_ports == 4)
+ worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2 | 1L<<3) & xran_port_mask);
+ else
+ {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; */
+
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
+ pThCtx->task_arg = worker_ports;
+ eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ break;
+ case 3:
+ if(icx_cpu) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)xran_port_mask;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ }
+ else /* csx cpu */
+ {
+ if(fh_init->xran_ports == 3)
+ worker_ports = (void *)(1L<<2 & xran_port_mask);
+ else if(fh_init->xran_ports == 4)
+ worker_ports = (void *)((1L<<2 | 1L<<3) & xran_port_mask);
+ else{
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void *)((1L<<0|1L<<1) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
+ pThCtx->task_arg = worker_ports;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ }
+
+ break;
+
+ case 4:
+ if(1) {
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2)) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+ }
+ else {
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+ case 5:
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<1) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ if(eth_ctx->io_cfg.id == O_DU && 0 == fh_init->dlCpProcBurst) {
+ for (i = 1; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = i+1;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ }
+ }
+
+ break;
+ case 6:
+ if(eth_ctx->io_cfg.id == O_DU){
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_processing_timer_only_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<1) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 4 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ } else {
+ /*** O_RU specific config */
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = NULL;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 Eth RX */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_rx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)0;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)1;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** FH TX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ }
+ break;
+ case 7:
+ /* 7 cores: O-RU with 4 ports, or O-DU multi-port configuration */
+ if((fh_init->xran_ports == 4) && (eth_ctx->io_cfg.id == O_RU))
+ {
+ /*** O_RU specific config */
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = NULL;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 Eth RX */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_rx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 1 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)0;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)1;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 3 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)2;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 4 FH RX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p3", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func_per_port;
+ pThCtx->task_arg = (void*)3;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** FH TX and BBDEV */
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 5;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = process_dpdk_io_tx;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ } /* -- if xran->ports == 4 -- */
+ else if(eth_ctx->io_cfg.id == O_DU){
+ if(fh_init->xran_ports == 3)
+ worker_ports = (void *)((1<<2) & xran_port_mask);
+ else if(fh_init->xran_ports == 4)
+ worker_ports = (void *)((1<<3) & xran_port_mask);
+ /* timing core */
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ /* workers */
+ /** 0 **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = 2; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ /** 1 - CP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 1;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_processing_timer_only_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 2 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 2;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = (fh_init->xran_ports-1); i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ }
+
+ /** 3 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 3;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<1) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ for (i = (fh_init->xran_ports - 2); i < (fh_init->xran_ports - 1); i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL) {
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ }
+
+ /** 4 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 4;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+ /** 5 UP GEN **/
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 5;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+ pThCtx->task_arg = worker_ports;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+ eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+ }
+ else{
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ break;
+
+ default:
+ print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+ return XRAN_STATUS_FAIL;
+ }
+ } else {
+ print_err("unsupported configuration\n");
+ return XRAN_STATUS_FAIL;
+ }