* INTC Contribution to the O-RAN F Release for O-DU Low
[o-du/phy.git] / fhi_lib / lib / src / xran_main.c
index 0f4bf8f..7c472d7 100644
-/******************************************************************************\r
-*\r
-*   Copyright (c) 2019 Intel.\r
-*\r
-*   Licensed under the Apache License, Version 2.0 (the "License");\r
-*   you may not use this file except in compliance with the License.\r
-*   You may obtain a copy of the License at\r
-*\r
-*       http://www.apache.org/licenses/LICENSE-2.0\r
-*\r
-*   Unless required by applicable law or agreed to in writing, software\r
-*   distributed under the License is distributed on an "AS IS" BASIS,\r
-*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-*   See the License for the specific language governing permissions and\r
-*   limitations under the License.\r
-*\r
-*******************************************************************************/\r
-\r
-/**\r
- * @brief XRAN main functionality module\r
- * @file xran_main.c\r
- * @ingroup group_source_xran\r
- * @author Intel Corporation\r
- **/\r
-\r
-#define _GNU_SOURCE\r
-#include <sched.h>\r
-#include <assert.h>\r
-#include <err.h>\r
-#include <libgen.h>\r
-#include <sys/time.h>\r
-#include <sys/queue.h>\r
-#include <time.h>\r
-#include <unistd.h>\r
-#include <stdio.h>\r
-#include <pthread.h>\r
-#include <malloc.h>\r
-\r
-#include <rte_common.h>\r
-#include <rte_eal.h>\r
-#include <rte_errno.h>\r
-#include <rte_lcore.h>\r
-#include <rte_cycles.h>\r
-#include <rte_memory.h>\r
-#include <rte_memzone.h>\r
-#include <rte_mbuf.h>\r
-#include <rte_ring.h>\r
-\r
-#include "xran_fh_o_du.h"\r
-\r
-#include "ethdi.h"\r
-#include "xran_pkt.h"\r
-#include "xran_up_api.h"\r
-#include "xran_cp_api.h"\r
-#include "xran_sync_api.h"\r
-#include "xran_lib_mlog_tasks_id.h"\r
-#include "xran_timer.h"\r
-#include "xran_common.h"\r
-#include "xran_frame_struct.h"\r
-#include "xran_printf.h"\r
-#include "xran_app_frag.h"\r
-\r
-#include "xran_mlog_lnx.h"\r
-\r
-#define DIV_ROUND_OFFSET(X,Y) ( (X)/(Y) + (((X)%(Y))?1:0) )\r
-\r
-#define XranOffsetSym(offSym, otaSym, numSymTotal)  (((int32_t)offSym > (int32_t)otaSym) ? \\r
-                            ((int32_t)otaSym + ((int32_t)numSymTotal) - (int32_t)offSym) : \\r
-                            (((int32_t)otaSym - (int32_t)offSym) >= numSymTotal) ?  \\r
-                                    (((int32_t)otaSym - (int32_t)offSym) - numSymTotal) : \\r
-                                    ((int32_t)otaSym - (int32_t)offSym))\r
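-\r
-/* XranOffsetSym() computes (otaSym - offSym) modulo numSymTotal; for example,\r
-   XranOffsetSym(3, 1, 14) = 1 + 14 - 3 = 12 and XranOffsetSym(2, 5, 14) = 3 */\r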
-\r
-#define MAX_NUM_OF_XRAN_CTX          (2)\r
-#define XranIncrementCtx(ctx)                             ((ctx >= (MAX_NUM_OF_XRAN_CTX-1)) ? 0 : (ctx+1))\r
-#define XranDecrementCtx(ctx)                             ((ctx == 0) ? (MAX_NUM_OF_XRAN_CTX-1) : (ctx-1))\r
-\r
-#define MAX_NUM_OF_DPDK_TIMERS       (10)\r
-#define DpdkTimerIncrementCtx(ctx)           ((ctx >= (MAX_NUM_OF_DPDK_TIMERS-1)) ? 0 : (ctx+1))\r
-#define DpdkTimerDecrementCtx(ctx)           ((ctx == 0) ? (MAX_NUM_OF_DPDK_TIMERS-1) : (ctx-1))\r
-\r
-/* Difference between Unix seconds and GPS seconds\r
-   GPS epoch: 1980.1.6 00:00:00 (UTC); Unix time epoch: 1970.1.1 00:00:00 UTC\r
-   The value was calculated on Sep. 6, 2019 and needs to be changed if the International\r
-   Earth Rotation and Reference Systems Service (IERS) adds more leap seconds\r
-   1970.1.1 - 1980.1.6: 3657 days\r
-   3657*24*3600=315 964 800 seconds (Unix seconds value at 1980.1.6 00:00:00 (UTC))\r
-   18 leap seconds have been inserted since 1980.1.6 00:00:00 (UTC), which means\r
-   GPS time is 18 seconds ahead: 315 964 800 - 18 = 315 964 782\r
-*/\r
-#define UNIX_TO_GPS_SECONDS_OFFSET 315964782UL\r
-#define NUM_OF_FRAMES_PER_SECOND 100\r
-\r
-//#define XRAN_CREATE_RBMAP /**< generate slot map based on symbols */\r
-\r
-\r
-struct xran_timer_ctx {\r
-    uint32_t    tti_to_process;\r
-};\r
-\r
-static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {NULL};\r
-static struct xran_device_ctx g_xran_dev_ctx[XRAN_PORTS_NUM] = { 0 };\r
-\r
-struct xran_timer_ctx timer_ctx[MAX_NUM_OF_XRAN_CTX];\r
-\r
-static struct rte_timer tti_to_phy_timer[10];\r
-static struct rte_timer sym_timer;\r
-static struct rte_timer dpdk_timer[MAX_NUM_OF_DPDK_TIMERS];\r
-\r
-uint64_t interval_us = 1000;\r
-\r
-uint32_t xran_lib_ota_tti        = 0; /**< Slot index in a second [0:(1000000/TTI-1)] */\r
-uint32_t xran_lib_ota_sym        = 0; /**< Symbol index in a slot [0:13] */\r
-uint32_t xran_lib_ota_sym_idx    = 0; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1]\r
-                                                where TTI is TTI interval in microseconds */\r
-uint16_t xran_SFN_at_Sec_Start   = 0; /**< SFN at current second start */\r
-uint16_t xran_max_frame          = 1023; /**< maximum frame value in use; expected to be 99 (old compatibility mode) or 1023, per section 9.7.2 System Frame Number Calculation */\r
-\r
-static uint8_t xran_cp_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_DIR_MAX][XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR]; /* XRAN_MAX_ANTENNA_NR * 2 for PUSCH and PRACH */\r
-static uint8_t xran_updl_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR];\r
-static uint8_t xran_upul_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR]; /**< PUSCH, PRACH, SRS for Cat B */\r
-\r
-static uint8_t xran_section_id_curslot[XRAN_DIR_MAX][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2+ XRAN_MAX_ANT_ARRAY_ELM_NR];\r
-static uint16_t xran_section_id[XRAN_DIR_MAX][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2+ XRAN_MAX_ANT_ARRAY_ELM_NR];\r
-static uint64_t xran_total_tick = 0, xran_used_tick = 0;\r
-static uint32_t xran_core_used = 0;\r
-static int32_t first_call = 0;\r
-\r
-\r
-static void\r
-extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)\r
-{\r
-}\r
-\r
-static struct rte_mbuf_ext_shared_info share_data[XRAN_N_FE_BUF_LEN];\r
-\r
-void xran_timer_arm(struct rte_timer *tim, void* arg);\r
-\r
-int32_t xran_process_tx_sym(void *arg);\r
-\r
-int32_t xran_process_rx_sym(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id,\r
-                        uint32_t *mb_free);\r
-\r
-int32_t xran_process_prach_sym(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id,\r
-                        uint32_t *mb_free);\r
-\r
-int32_t xran_process_srs_sym(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id,\r
-                        uint32_t *mb_free);\r
-\r
-\r
-void tti_ota_cb(struct rte_timer *tim, void *arg);\r
-void tti_to_phy_cb(struct rte_timer *tim, void *arg);\r
-void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore);\r
-\r
-// Return SFN at current second start, 10 bits, [0, 1023]\r
-static inline uint16_t xran_getSfnSecStart(void)\r
-{\r
-    return xran_SFN_at_Sec_Start;\r
-}\r
-void xran_updateSfnSecStart(void)\r
-{\r
-    uint64_t currentSecond = timing_get_current_second();\r
-    // Assume always positive\r
-    uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;\r
-    uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;\r
-    uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));\r
-    xran_SFN_at_Sec_Start = sfn;\r
-\r
-    tx_bytes_per_sec = tx_bytes_counter;\r
-    rx_bytes_per_sec = rx_bytes_counter;\r
-    tx_bytes_counter = 0;\r
-    rx_bytes_counter = 0;\r
-}\r
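-\r
-/* Worked example (assuming xran_max_frame = 1023): at Unix second 1567728000\r
-   (2019-09-06 00:00:00 UTC), gpsSecond = 1567728000 - 315964782 = 1251763218,\r
-   nFrames = 125176321800, and the SFN at second start = 125176321800 % 1024 = 776 */\r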
-\r
-static inline int32_t xran_getSlotIdxSecond(void)\r
-{\r
-    int32_t frameIdxSecond = xran_getSfnSecStart();\r
-    int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME;\r
-    return slotIndxSecond;\r
-}\r
-\r
-struct xran_device_ctx *xran_dev_get_ctx(void)\r
-{\r
-    return &g_xran_dev_ctx[0];\r
-}\r
-\r
-static inline struct xran_fh_config *xran_lib_get_ctx_fhcfg(void)\r
-{\r
-    return (&(xran_dev_get_ctx()->fh_cfg));\r
-}\r
-\r
-uint16_t xran_get_beamid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id)\r
-{\r
-    return (0);     // NO BEAMFORMING\r
-}\r
-\r
-enum xran_if_state xran_get_if_state(void)\r
-{\r
-    return xran_if_current_state;\r
-}\r
-\r
-int xran_is_prach_slot(uint32_t subframe_id, uint32_t slot_id)\r
-{\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);\r
-    int32_t is_prach_slot = 0;\r
-\r
-    if (p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology < 2){\r
-        //for FR1, per 38.211 tab 6.3.3.2-2&3 the table index is the subframe index\r
-        if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){\r
-            if (pPrachCPConfig->nrofPrachInSlot != 1)\r
-                is_prach_slot = 1;\r
-            else{\r
-                if (p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology == 0)\r
-                    is_prach_slot = 1;\r
-                else if (slot_id == 1)\r
-                    is_prach_slot = 1;\r
-            }\r
-        }\r
-    } else if (p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology == 3){\r
-        //for FR2, per 38.211 tab 6.3.3.4 the table index is the slot index on the 60kHz slot grid\r
-        uint32_t slotidx;\r
-        slotidx = subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;\r
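-        /* nNumerology 3 gives 120kHz slots, i.e. two slots per 60kHz slot of\r
-           the config table, hence slotidx>>1 below maps to the table index */\r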
-        if (pPrachCPConfig->nrofPrachInSlot == 2){\r
-            if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)\r
-                is_prach_slot = 1;\r
-        } else {\r
-            if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){\r
-                is_prach_slot = 1;\r
-            }\r
-        }\r
-    } else\r
-        print_err("Numerology %d not supported", p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology);\r
-    return is_prach_slot;\r
-}\r
-\r
-int xran_init_sectionid(void *pHandle)\r
-{\r
-  int cell, ant, dir;\r
-\r
-    for (dir = 0; dir < XRAN_DIR_MAX; dir++){\r
-        for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) {\r
-            for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) {\r
-                xran_section_id[dir][cell][ant] = 0;\r
-                xran_section_id_curslot[dir][cell][ant] = 255;\r
-            }\r
-        }\r
-    }\r
-\r
-    return (0);\r
-}\r
-\r
-int xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)\r
-{\r
-    struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);\r
-\r
-    if(p_srs){\r
-        p_srs->symbMask = pConf->srs_conf.symbMask;\r
-        p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset;\r
-        print_dbg("SRS sym         %d\n", p_srs->symbMask );\r
-        print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);\r
-    }\r
-    return (XRAN_STATUS_SUCCESS);\r
-}\r
-\r
-\r
-int xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)\r
-{\r
-    int32_t i;\r
-    uint8_t slotNr;\r
-    struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);\r
-    const xRANPrachConfigTableStruct *pxRANPrachConfigTable;\r
-    uint8_t nNumerology = pConf->frame_conf.nNumerology;\r
-    uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx;\r
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);\r
-\r
-    if (nNumerology > 2)\r
-        pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];\r
-    else if (pConf->frame_conf.nFrameDuplexType == 1)\r
-        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];\r
-    else\r
-        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];\r
-\r
-    uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];\r
-    const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];\r
-    memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));\r
-    if(pConf->log_level)\r
-        printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);\r
-\r
-    pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC;         // 3, PRACH preamble format A1~3, B1~4, C0, C2\r
-    pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;\r
-    pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;\r
-    pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;\r
-    pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;\r
-    pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);\r
-    pPrachCPConfig->x = pxRANPrachConfigTable->x;\r
-    pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;\r
-    pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];\r
-    pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];\r
-    if (preambleFmrt >= FORMAT_A1)\r
-    {\r
-        pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;\r
-        pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;\r
-    }\r
-    else\r
-    {\r
-        pPrachCPConfig->numSymbol = 1;\r
-        pPrachCPConfig->occassionsInPrachSlot = 1;\r
-    }\r
-\r
-    if(pConf->log_level)\r
-        printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);\r
-    pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;\r
-    for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)\r
-    {\r
-        slotNr = pxRANPrachConfigTable->slotNr[i];\r
-        if (slotNr > 0){\r
-            pPrachCPConfig->isPRACHslot[slotNr] = 1;\r
-            if(pConf->log_level)\r
-                printf(" %u ..", slotNr);\r
-        }\r
-    }\r
-    printf("\n");\r
-    for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){\r
-        p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;\r
-        p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;\r
-    }\r
-    if(pConf->log_level){\r
-        printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);\r
-    }\r
-\r
-    pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(NULL);\r
-    print_dbg("PRACH eAxC_offset %d\n",  pPrachCPConfig->eAxC_offset);\r
-\r
-    return (XRAN_STATUS_SUCCESS);\r
-}\r
-\r
-inline uint16_t xran_alloc_sectionid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id)\r
-{\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (0);\r
-        }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {  //for PRACH, ant_id starts from num_ant\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (0);\r
-    }\r
-\r
-    /* if new slot has been started,\r
-     * then initializes section id again for new start */\r
-    if(xran_section_id_curslot[dir][cc_id][ant_id] != slot_id) {\r
-        xran_section_id[dir][cc_id][ant_id] = 0;\r
-        xran_section_id_curslot[dir][cc_id][ant_id] = slot_id;\r
-    }\r
-\r
-    return(xran_section_id[dir][cc_id][ant_id]++);\r
-}\r
-\r
-int xran_init_seqid(void *pHandle)\r
-{\r
-    int cell, dir, ant;\r
-\r
-    for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) {\r
-        for(dir=0; dir < XRAN_DIR_MAX; dir++) {\r
-            for(ant=0; ant < XRAN_MAX_ANTENNA_NR * 2; ant++)\r
-                xran_cp_seq_id_num[cell][dir][ant] = 0;\r
-        }\r
-        for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++)\r
-            xran_updl_seq_id_num[cell][ant] = 0;\r
-        for(ant=0; ant < XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR; ant++)\r
-            xran_upul_seq_id_num[cell][ant] = 0;\r
-    }\r
-\r
-    return (0);\r
-}\r
-\r
-static inline uint8_t xran_get_cp_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id)\r
-{\r
-    if(dir >= XRAN_DIR_MAX) {\r
-        print_err("Invalid direction - %d", dir);\r
-        return (0);\r
-        }\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (0);\r
-        }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (0);\r
-        }\r
-\r
-    return(xran_cp_seq_id_num[cc_id][dir][ant_id]++);\r
-}\r
-static inline uint8_t xran_get_updl_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id)\r
-{\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (0);\r
-        }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (0);\r
-        }\r
-\r
-    /* Only U-Plane DL needs to get sequence ID in O-DU */\r
-    return(xran_updl_seq_id_num[cc_id][ant_id]++);\r
-}\r
-static inline uint8_t *xran_get_updl_seqid_addr(void *pHandle, uint8_t cc_id, uint8_t ant_id)\r
-{\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (NULL);\r
-    }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (NULL);\r
-    }\r
-\r
-    /* Only U-Plane DL needs to get sequence ID in O-DU */\r
-    return(&xran_updl_seq_id_num[cc_id][ant_id]);\r
-}\r
-static inline int8_t xran_check_upul_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id, uint8_t seq_id)\r
-{\r
-\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (-1);\r
-    }\r
-\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (-1);\r
-    }\r
-\r
-    /* O-DU needs to check the sequence ID of U-Plane UL from O-RU */\r
-    xran_upul_seq_id_num[cc_id][ant_id]++;\r
-    if(xran_upul_seq_id_num[cc_id][ant_id] == seq_id) { /* expected sequence */\r
-        return (XRAN_STATUS_SUCCESS);\r
-    } else {\r
-        print_err("expected seqid %u received %u, slot %u, ant %u cc %u", xran_upul_seq_id_num[cc_id][ant_id], seq_id, slot_id, ant_id, cc_id);\r
-        xran_upul_seq_id_num[cc_id][ant_id] = seq_id; // resync to the received value for the next packet\r
-        return (-1);\r
-    }\r
-}\r
-\r
-//////////////////////////////////////////\r
-// For RU emulation\r
-static inline uint8_t xran_get_upul_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id)\r
-{\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (0);\r
-        }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (0);\r
-        }\r
-\r
-    return(xran_upul_seq_id_num[cc_id][ant_id]++);\r
-}\r
-static inline uint8_t *xran_get_upul_seqid_addr(void *pHandle, uint8_t cc_id, uint8_t ant_id)\r
-{\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (0);\r
-        }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR * 2) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (0);\r
-        }\r
-\r
-    return(&xran_upul_seq_id_num[cc_id][ant_id]);\r
-}\r
-static inline int8_t xran_check_cp_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t seq_id)\r
-{\r
-    if(dir >= XRAN_DIR_MAX) {\r
-        print_err("Invalid direction - %d", dir);\r
-        return (-1);\r
-        }\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (-1);\r
-        }\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR * 2) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (-1);\r
-        }\r
-\r
-    xran_cp_seq_id_num[cc_id][dir][ant_id]++;\r
-    if(xran_cp_seq_id_num[cc_id][dir][ant_id] == seq_id) { /* expected sequence */\r
-        return (0);\r
-        }\r
-    else {\r
-        xran_cp_seq_id_num[cc_id][dir][ant_id] = seq_id;\r
-        return (-1);\r
-        }\r
-}\r
-static inline int8_t xran_check_updl_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id, uint8_t seq_id)\r
-{\r
-    if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {\r
-        print_err("Invalid CC ID - %d", cc_id);\r
-        return (-1);\r
-    }\r
-\r
-    if(ant_id >= XRAN_MAX_ANTENNA_NR) {\r
-        print_err("Invalid antenna ID - %d", ant_id);\r
-        return (-1);\r
-    }\r
-\r
-    /* O-RU needs to check the sequence ID of U-Plane DL from O-DU */\r
-    xran_updl_seq_id_num[cc_id][ant_id]++;\r
-    if(xran_updl_seq_id_num[cc_id][ant_id] == seq_id) {\r
-        /* expected sequence */\r
-        /*print_dbg("ant %u  cc_id %u : slot_id %u : seq_id %u : expected seq_id %u\n",\r
-            ant_id, cc_id, slot_id, seq_id, xran_updl_seq_id_num[cc_id][ant_id]);*/\r
-        return (0);\r
-    } else {\r
-       /* print_err("ant %u  cc_id %u : slot_id %u : seq_id %u : expected seq_id %u\n",\r
-            ant_id, cc_id, slot_id, seq_id, xran_updl_seq_id_num[cc_id][ant_id]);*/\r
-\r
-        xran_updl_seq_id_num[cc_id][ant_id] = seq_id;\r
-\r
-        return (-1);\r
-    }\r
-}\r
-\r
-\r
-static struct xran_section_gen_info cpSections[XRAN_MAX_NUM_SECTIONS];\r
-static struct xran_cp_gen_params cpInfo;\r
-int process_cplane(struct rte_mbuf *pkt)\r
-{\r
-  struct xran_recv_packet_info recv;\r
-\r
-    cpInfo.sections = cpSections;\r
-    xran_parse_cp_pkt(pkt, &cpInfo, &recv);\r
-\r
-    return (MBUF_FREE);\r
-}\r
-//////////////////////////////////////////\r
-\r
-void sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)\r
-{\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;\r
-    long t1 = MLogTick(), t2;\r
-    long t3;\r
-    static int32_t ctx = 0;\r
-\r
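-    /* symbol 0 marks the slot boundary: run the per-TTI processing first */\r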
-    if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){\r
-        t3 = xran_tick();\r
-        tti_ota_cb(NULL, arg);\r
-        *used_tick += get_ticks_diff(xran_tick(), t3);\r
-    }\r
-\r
-    if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 3){\r
-        if(p_xran_dev_ctx->phy_tti_cb_done == 0){\r
-            /* rearm timer to deliver TTI event to PHY */\r
-            t3 = xran_tick();\r
-            p_xran_dev_ctx->phy_tti_cb_done = 0;\r
-            xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_dev_ctx->fh_init.io_cfg.timing_core);\r
-            *used_tick += get_ticks_diff(xran_tick(), t3);\r
-        }\r
-    }\r
-\r
-    t3 = xran_tick();\r
-    if (xran_process_tx_sym(timer_ctx))\r
-    {\r
-        *used_tick += get_ticks_diff(xran_tick(), t3);\r
-    }\r
-\r
-    /* check if there is call back to do something else on this symbol */\r
-\r
-    struct cb_elem_entry *cb_elm;\r
-    LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[0][xran_lib_ota_sym], pointers){\r
-        if(cb_elm){\r
-            cb_elm->pSymCallback(&dpdk_timer[ctx], cb_elm->pSymCallbackTag);\r
-            ctx = DpdkTimerIncrementCtx(ctx);\r
-        }\r
-    }\r
-\r
-    // This counter is incremented in advance, before the time of the next symbol actually arrives\r
-    xran_lib_ota_sym++;\r
-    if(xran_lib_ota_sym >= N_SYM_PER_SLOT){\r
-        xran_lib_ota_sym=0;\r
-    }\r
-\r
-    t2 = MLogTick();\r
-    MLogTask(PID_SYM_OTA_CB, t1, t2);\r
-}\r
-\r
-void tti_ota_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    uint32_t    frame_id    = 0;\r
-    uint32_t    subframe_id = 0;\r
-    uint32_t    slot_id     = 0;\r
-    uint32_t    next_tti    = 0;\r
-\r
-    uint32_t mlogVar[10];\r
-    uint32_t mlogVarCnt = 0;\r
-    uint64_t t1 = MLogTick();\r
-    uint64_t t3 = 0;\r
-    uint32_t reg_tti  = 0;\r
-    uint32_t reg_sfn  = 0;\r
-    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    MLogTask(PID_TTI_TIMER, t1, MLogTick());\r
-\r
-    /* To match TTbox */\r
-    if(xran_lib_ota_tti == 0)\r
-        reg_tti = xran_fs_get_max_slot() - 1;\r
-    else\r
-        reg_tti = xran_lib_ota_tti -1;\r
-    MLogIncrementCounter();\r
-    reg_sfn    = XranGetFrameNum(reg_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME)*10 + XranGetSubFrameNum(reg_tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    /* subframe and slot */\r
-    MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME));\r
-    MLogMark(1, t1);\r
-\r
-    slot_id     = XranGetSlotNum(xran_lib_ota_tti, SLOTNUM_PER_SUBFRAME);\r
-    subframe_id = XranGetSubFrameNum(xran_lib_ota_tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    frame_id    = XranGetFrameNum(xran_lib_ota_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-\r
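-    /* pTCtx is a two-entry ping-pong buffer: entry ((tti & 1) ^ 1) holds the TTI\r
-       being processed now, entry (tti & 1) the TTI prepared for the next slot */\r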
-    pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process = xran_lib_ota_tti;\r
-\r
-    mlogVar[mlogVarCnt++] = 0x11111111;\r
-    mlogVar[mlogVarCnt++] = xran_lib_ota_tti;\r
-    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx;\r
-    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx / 14;\r
-    mlogVar[mlogVarCnt++] = frame_id;\r
-    mlogVar[mlogVarCnt++] = subframe_id;\r
-    mlogVar[mlogVarCnt++] = slot_id;\r
-    mlogVar[mlogVarCnt++] = 0;\r
-    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());\r
-\r
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_LLS_CU)\r
-        next_tti = xran_lib_ota_tti + 1;\r
-    else\r
-        next_tti = xran_lib_ota_tti;\r
-\r
-    if(next_tti>= xran_fs_get_max_slot()){\r
-        print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);\r
-        next_tti=0;\r
-    }\r
-\r
-    slot_id     = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME);\r
-    subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    frame_id    = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-\r
-    print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);\r
-\r
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_LLS_CU){\r
-        pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = next_tti;\r
-    } else {\r
-        pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti & 1)^1].tti_to_process;\r
-    }\r
-\r
-    p_xran_dev_ctx->phy_tti_cb_done = 0;\r
-    xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_dev_ctx->fh_init.io_cfg.timing_core);\r
-\r
-    //the slot index is advanced to the next slot at the beginning of the current OTA slot\r
-    xran_lib_ota_tti++;\r
-    if(xran_lib_ota_tti >= xran_fs_get_max_slot()){\r
-        print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti, frame_id, subframe_id, slot_id);\r
-        xran_lib_ota_tti=0;\r
-    }\r
-    MLogTask(PID_TTI_CB, t1, MLogTick());\r
-}\r
-\r
-void xran_timer_arm(struct rte_timer *tim, void* arg)\r
-{\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    uint64_t t3 = MLogTick();\r
-\r
-    if (xran_if_current_state == XRAN_RUNNING){\r
-        rte_timer_cb_t fct = (rte_timer_cb_t)arg;\r
-        rte_timer_init(tim);\r
-        rte_timer_reset_sync(tim, 0, SINGLE, p_xran_dev_ctx->fh_init.io_cfg.timing_core, fct, &timer_ctx[0]);\r
-    }\r
-    MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick());\r
-}\r
-\r
-void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore)\r
-{\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    uint64_t t3 = MLogTick();\r
-\r
-    if (xran_if_current_state == XRAN_RUNNING){\r
-        rte_timer_cb_t fct = (rte_timer_cb_t)CbFct;\r
-        rte_timer_init(tim);\r
-        rte_timer_reset_sync(tim, 0, SINGLE, tim_lcore, fct, CbArg);\r
-    }\r
-    MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick());\r
-}\r
-\r
-int xran_cp_create_and_send_section(void *pHandle, uint8_t ru_port_id, int dir, int tti, int cc_id,\r
-        struct xran_prb_map *prbMap, enum xran_category category,  uint8_t ctx_id)\r
-{\r
-    struct xran_device_ctx *p_x_ctx = xran_dev_get_ctx();\r
-    struct xran_cp_gen_params params;\r
-    struct xran_section_gen_info sect_geninfo[1];\r
-    struct rte_mbuf *mbuf;\r
-    int ret = 0;\r
-    uint32_t i, j, loc_sym;\r
-    uint32_t nsection = 0;\r
-    struct xran_prb_elm *pPrbMapElem = NULL;\r
-    struct xran_prb_elm *pPrbMapElemPrev = NULL;\r
-    uint32_t slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);\r
-    uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    uint32_t frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-\r
-    frame_id = (frame_id & 0xff); /* ORAN frameId, 8 bits, [0, 255] */\r
-    uint8_t seq_id = 0;\r
-\r
-    struct xran_sectionext1_info m_ext1;\r
-\r
-    if(prbMap) {\r
-        nsection = prbMap->nPrbElm;\r
-        pPrbMapElem = &prbMap->prbMap[0];\r
-        if (nsection < 1){\r
-            print_dbg("cp[%d:%d:%d] ru_port_id %d dir=%d nsection %d\n",\r
-                                   frame_id, subframe_id, slot_id, ru_port_id, dir, nsection);\r
-        }\r
-    } else {\r
-        print_err("prbMap is NULL\n");\r
-        return (-1);\r
-    }\r
-    for (i=0; i<nsection; i++)\r
-    {\r
-        pPrbMapElem                 = &prbMap->prbMap[i];\r
-        params.dir                  = dir;\r
-        params.sectionType          = XRAN_CP_SECTIONTYPE_1;        /* Most DL/UL Radio Channels */\r
-        params.hdr.filterIdx        = XRAN_FILTERINDEX_STANDARD;\r
-        params.hdr.frameId          = frame_id;\r
-        params.hdr.subframeId       = subframe_id;\r
-        params.hdr.slotId           = slot_id;\r
-        params.hdr.startSymId       = pPrbMapElem->nStartSymb;\r
-        params.hdr.iqWidth          = pPrbMapElem->iqWidth; /*xran_get_conf_iqwidth(pHandle);*/\r
-        params.hdr.compMeth         = pPrbMapElem->compMethod;\r
-\r
-        print_dbg("cp[%d:%d:%d] ru_port_id %d dir=%d\n",\r
-                               frame_id, subframe_id, slot_id, ru_port_id, dir);\r
-\r
-        seq_id = xran_get_cp_seqid(pHandle, XRAN_DIR_DL, cc_id, ru_port_id);\r
-\r
-        sect_geninfo[0].info.type        = params.sectionType;       // for database\r
-        sect_geninfo[0].info.startSymId  = params.hdr.startSymId;    // for database\r
-        sect_geninfo[0].info.iqWidth     = params.hdr.iqWidth;       // for database\r
-        sect_geninfo[0].info.compMeth    = params.hdr.compMeth;      // for database\r
-        sect_geninfo[0].info.id          = i; /*xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, slot_id);*/\r
-\r
-        if(sect_geninfo[0].info.id > 7)\r
-            print_err("sectinfo->id %d\n", sect_geninfo[0].info.id);\r
-\r
-        if (dir == XRAN_DIR_UL) {\r
-            for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++){\r
-                struct xran_section_desc *p_sec_desc =  pPrbMapElem->p_sec_desc[loc_sym];\r
-                if(p_sec_desc) {\r
-                    p_sec_desc->section_id   = sect_geninfo[0].info.id;\r
-                    if(p_sec_desc->pCtrl) {\r
-                        rte_pktmbuf_free(p_sec_desc->pCtrl);\r
-                        p_sec_desc->pCtrl = NULL;\r
-                        p_sec_desc->pData = NULL;\r
-                    }\r
-                } else {\r
-                    print_err("section desc is NULL\n");\r
-                }\r
-            }\r
-        }\r
-\r
-        sect_geninfo[0].info.rb          = XRAN_RBIND_EVERY;\r
-        sect_geninfo[0].info.startPrbc   = pPrbMapElem->nRBStart;\r
-        sect_geninfo[0].info.numPrbc     = pPrbMapElem->nRBSize;\r
-        sect_geninfo[0].info.numSymbol   = pPrbMapElem->numSymb;\r
-        sect_geninfo[0].info.reMask      = 0xfff;\r
-        sect_geninfo[0].info.beamId      = pPrbMapElem->nBeamIndex;\r
-\r
-        for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++){\r
-            struct xran_section_desc *p_sec_desc =  pPrbMapElem->p_sec_desc[loc_sym];\r
-            if(p_sec_desc) {\r
-                p_sec_desc->section_id   = sect_geninfo[0].info.id;\r
-\r
-                sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_offset = p_sec_desc->iq_buffer_offset;\r
-                sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_len    = p_sec_desc->iq_buffer_len;\r
-            } else {\r
-                print_err("section desc is NULL\n");\r
-            }\r
-        }\r
-\r
-        if (i==0)\r
-            sect_geninfo[0].info.symInc      = XRAN_SYMBOLNUMBER_NOTINC;\r
-        else\r
-        {\r
-            pPrbMapElemPrev = &prbMap->prbMap[i-1];\r
-            if (pPrbMapElemPrev->nStartSymb == pPrbMapElem->nStartSymb)\r
-            {\r
-                sect_geninfo[0].info.symInc      = XRAN_SYMBOLNUMBER_NOTINC;\r
-                if (pPrbMapElemPrev->numSymb != pPrbMapElem->numSymb)\r
-                    print_err("section info error: previous numSymb %d not equal to current numSymb %d\n", pPrbMapElemPrev->numSymb, pPrbMapElem->numSymb);\r
-            }\r
-            else\r
-            {\r
-                sect_geninfo[0].info.symInc      = XRAN_SYMBOLNUMBER_INC;\r
-                if (pPrbMapElem->nStartSymb != (pPrbMapElemPrev->nStartSymb + pPrbMapElemPrev->numSymb))\r
-                    print_err("section info error: current startSym %d not equal to previous endSymb %d\n", pPrbMapElem->nStartSymb, pPrbMapElemPrev->nStartSymb + pPrbMapElemPrev->numSymb);\r
-            }\r
-        }\r
-\r
-        if(category == XRAN_CATEGORY_A){\r
-            /* no extension sections for Category A */\r
-            sect_geninfo[0].info.ef          = 0;\r
-            sect_geninfo[0].exDataSize       = 0;\r
-        } else if (category == XRAN_CATEGORY_B) {\r
-            /* add extension section for BF weights if an update is needed */\r
-            if(pPrbMapElem->bf_weight_update){\r
-                memset(&m_ext1, 0, sizeof (struct xran_sectionext1_info));\r
-                m_ext1.bfwNumber      = pPrbMapElem->bf_weight.nAntElmTRx;\r
-                m_ext1.bfwiqWidth     = pPrbMapElem->iqWidth;\r
-                m_ext1.bfwCompMeth    = pPrbMapElem->compMethod;\r
-                m_ext1.p_bfwIQ        = (int16_t*)pPrbMapElem->bf_weight.p_ext_section;\r
-                m_ext1.bfwIQ_sz       = pPrbMapElem->bf_weight.ext_section_sz;\r
-\r
-                sect_geninfo[0].exData[0].type = XRAN_CP_SECTIONEXTCMD_1;\r
-                sect_geninfo[0].exData[0].len  = sizeof(m_ext1);\r
-                sect_geninfo[0].exData[0].data = &m_ext1;\r
-\r
-                sect_geninfo[0].info.ef       = 1;\r
-                sect_geninfo[0].exDataSize    = 1;\r
-            } else {\r
-                sect_geninfo[0].info.ef          = 0;\r
-                sect_geninfo[0].exDataSize       = 0;\r
-            }\r
-        } else {\r
-            print_err("Unsupported Category %d\n", category);\r
-            return (-1);\r
-        }\r
-\r
-        params.numSections          = 1;//nsection;\r
-        params.sections             = sect_geninfo;\r
-\r
-        mbuf = xran_ethdi_mbuf_alloc();\r
-        if(unlikely(mbuf == NULL)) {\r
-            print_err("Alloc fail!\n");\r
-            return (-1);\r
-        }\r
-\r
-        ret = xran_prepare_ctrl_pkt(mbuf, &params, cc_id, ru_port_id, seq_id);\r
-        if(ret < 0) {\r
-            print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",\r
-                        frame_id, subframe_id, slot_id, dir);\r
-        } else {\r
-            /* prepend room for the Ethernet header */\r
-            struct ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));\r
-            tx_counter++;\r
-            tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);\r
-            p_x_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI);\r
-\r
-            /*for(i=0; i<nsection; i++)*/\r
-                xran_cp_add_section_info(pHandle,\r
-                        dir, cc_id, ru_port_id,\r
-                        ctx_id,\r
-                        &sect_geninfo[0].info);\r
-        }\r
-    }\r
-\r
-    return ret;\r
-}\r
-\r
-void tx_cp_dl_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    long t1 = MLogTick();\r
-    int tti, buf_id;\r
-    int i, ret;\r
-    uint32_t slot_id, subframe_id, frame_id;\r
-    int cc_id;\r
-    uint8_t ctx_id;\r
-    uint8_t ant_id, num_eAxc, num_CCPorts;\r
-    void *pHandle;\r
-    int num_list;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;\r
-\r
-    pHandle = NULL;     // TODO: temporary implementation\r
-    num_eAxc    = xran_get_num_eAxc(pHandle);\r
-    num_CCPorts = xran_get_num_cc(pHandle);\r
-\r
-    if(first_call && p_xran_dev_ctx->enableCP) {\r
-\r
-        tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process;\r
-        buf_id = tti % XRAN_N_FE_BUF_LEN;\r
-\r
-        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);\r
-        subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-        frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-        if (tti == 0){\r
-            /* Wrap around to next second */\r
-            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;\r
-        }\r
-\r
-        ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME) % XRAN_MAX_SECTIONDB_CTX;\r
-\r
-        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);\r
-        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {\r
-            for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {\r
-                /* start new section information list */\r
-                xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);\r
-                if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {\r
-                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){\r
-                        num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,\r
-                            (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,\r
-                            p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);\r
-                    } else {\r
-                        print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);\r
-                    }\r
-                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */\r
-            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */\r
-        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */\r
-        MLogTask(PID_CP_DL_CB, t1, MLogTick());\r
-    }\r
-}\r
-\r
-void rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    long t1 = MLogTick();\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    xran_status_t status;\r
-    /* half of RX for current TTI as measured against current OTA time */\r
-    int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);\r
-    int32_t cc_id;\r
-    uint32_t nFrameIdx;\r
-    uint32_t nSubframeIdx;\r
-    uint32_t nSlotIdx;\r
-    uint64_t nSecond;\r
-\r
-    uint32_t nXranTime  = xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);\r
-    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME\r
-           + nSubframeIdx*SLOTNUM_PER_SUBFRAME\r
-           + nSlotIdx;\r
-\r
-    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)\r
-        return;\r
-\r
-    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {\r
-        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){\r
-            struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];\r
-            pTag->slotiId = rx_tti;\r
-            pTag->symbol  = 0; /* symbol 0 indicates the first half of the slot */\r
-            status = XRAN_STATUS_SUCCESS;\r
-            if(p_xran_dev_ctx->pCallback[cc_id])\r
-               p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);\r
-        } else {\r
-            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;\r
-        }\r
-    }\r
-    MLogTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogTick());\r
-}\r
-\r
-void rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    long t1 = MLogTick();\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    xran_status_t status = 0;\r
-    int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);\r
-    int32_t cc_id = 0;\r
-    uint32_t nFrameIdx;\r
-    uint32_t nSubframeIdx;\r
-    uint32_t nSlotIdx;\r
-    uint64_t nSecond;\r
-\r
-    uint32_t nXranTime  = xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);\r
-    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME\r
-        + nSubframeIdx*SLOTNUM_PER_SUBFRAME\r
-        + nSlotIdx;\r
-\r
-    if(rx_tti == 0)\r
-       rx_tti = (xran_fs_get_max_slot_SFN()-1);\r
-    else\r
-       rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */\r
-\r
-    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)\r
-        return;\r
-\r
-    /* U-Plane */\r
-    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {\r
-        struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];\r
-        pTag->slotiId = rx_tti;\r
-        pTag->symbol  = 7; /* symbol 7 indicates a full slot of symbols was received */\r
-        status = XRAN_STATUS_SUCCESS;\r
-        if(p_xran_dev_ctx->pCallback[cc_id])\r
-            p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);\r
-\r
-        if(p_xran_dev_ctx->pPrachCallback[cc_id]){\r
-            struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];\r
-            pTag->slotiId = rx_tti;\r
-            pTag->symbol  = 7; /* symbol 7 indicates a full slot of symbols was received */\r
-            p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);\r
-        }\r
-    }\r
-\r
-    MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick());\r
-}\r
-\r
-\r
-void tx_cp_ul_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    long t1 = MLogTick();\r
-    int tti, buf_id;\r
-    int i, ret;\r
-    uint32_t slot_id, subframe_id, frame_id;\r
-    int32_t cc_id;\r
-    int ant_id, prach_port_id;\r
-    uint16_t beam_id;\r
-    uint8_t num_eAxc, num_CCPorts;\r
-    uint8_t ctx_id;\r
-\r
-    void *pHandle;\r
-    int num_list;\r
-\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);\r
-    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;\r
-\r
-    pHandle     = NULL;     // TODO: temporary implementation\r
-\r
-    if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)\r
-        num_eAxc    = xran_get_num_eAxc(pHandle);\r
-    else\r
-        num_eAxc    = xran_get_num_eAxcUl(pHandle);\r
-\r
-    num_CCPorts = xran_get_num_cc(pHandle);\r
-    tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process;\r
-    buf_id = tti % XRAN_N_FE_BUF_LEN;\r
-    slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);\r
-    subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-    if (tti == 0) {\r
-        //Wrap around to next second\r
-        frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;\r
-    }\r
-    ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME) % XRAN_MAX_SECTIONDB_CTX;\r
-\r
-    if(first_call && p_xran_dev_ctx->enableCP) {\r
-\r
-        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);\r
-\r
-        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {\r
-            for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {\r
-                if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) == 1 ||\r
-                    xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) == 1 ){\r
-                    /* start new section information list */\r
-                    xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);\r
-                    num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,\r
-                        (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,\r
-                        p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);\r
-                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) == 1 */\r
-            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */\r
-        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */\r
-\r
-        if(p_xran_dev_ctx->enablePrach) {\r
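-            /* PRACH occurs only in frames where SFN % x == y[0], with x and y\r
-               taken from the 38.211 PRACH configuration tables */\r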
-            uint32_t is_prach_slot = xran_is_prach_slot(subframe_id, slot_id);\r
-            if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0]) && (is_prach_slot==1)) {   //is prach slot\r
-                for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {\r
-                    for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {\r
-                        struct xran_cp_gen_params params;\r
-                        struct xran_section_gen_info sect_geninfo[8];\r
-                        struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();\r
-                        prach_port_id = ant_id + num_eAxc;\r
-                        /* start new section information list */\r
-                        xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, ctx_id);\r
-\r
-                        beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id);\r
-                        ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,\r
-                                    frame_id, subframe_id, slot_id,\r
-                                    beam_id, cc_id, prach_port_id,\r
-                                    xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));\r
-                        if (ret == XRAN_STATUS_SUCCESS)\r
-                            send_cpmsg(pHandle, mbuf, &params, sect_geninfo,\r
-                                cc_id, prach_port_id, xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));\r
-                    }\r
-                }\r
-            }\r
-        }\r
-    } /* if(p_xran_dev_ctx->enableCP) */\r
-\r
-    MLogTask(PID_CP_UL_CB, t1, MLogTick());\r
-}\r
-\r
-void ul_up_full_slot_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    long t1 = MLogTick();\r
-    rte_pause();\r
-    MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());\r
-}\r
-\r
-void tti_to_phy_cb(struct rte_timer *tim, void *arg)\r
-{\r
-    long t1 = MLogTick();\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */\r
-    if (first_call){\r
-        if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){\r
-            if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){\r
-                p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);\r
-            }else{\r
-                p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;\r
-            }\r
-        }\r
-    } else {\r
-        if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){\r
-            int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);\r
-            uint32_t slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);\r
-            uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-            uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-            if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME-1)) {  //(tti == xran_fs_get_max_slot()-1)\r
-                first_call = 1;\r
-            }\r
-        }\r
-    }\r
-\r
-    MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());\r
-}\r
-\r
-int xran_timing_source_thread(void *args)\r
-{\r
-    int res = 0;\r
-    cpu_set_t cpuset;\r
-    int32_t   do_reset = 0;\r
-    uint64_t  t1 = 0;\r
-    uint64_t  delta;\r
-    int32_t   result1,i,j;\r
-    uint32_t delay_cp_dl;\r
-    uint32_t delay_cp_ul;\r
-    uint32_t delay_up;\r
-    uint32_t delay_up_ul;\r
-    uint32_t delay_cp2up;\r
-    uint32_t sym_cp_dl;\r
-    uint32_t sym_cp_ul;\r
-    uint32_t sym_up_ul;\r
-    int32_t sym_up;\r
-    struct sched_param sched_param;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;\r
-    struct cb_elem_entry * cb_elm = NULL;\r
-\r
-    /* ToS = Top of Second start +- 1.5us */\r
-    struct timespec ts;\r
-\r
-    char buff[100];\r
-\r
-    xran_core_used = rte_lcore_id();\r
-    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());\r
-\r
-    /* pin the timing thread to the configured timing core */\r
-    sched_param.sched_priority = 98;\r
-\r
-    CPU_ZERO(&cpuset);\r
-    CPU_SET(p_xran_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);\r
-    if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)))\r
-    {\r
-        printf("pthread_setaffinity_np failed: coreId = %d, result1 = %d\n", p_xran_dev_ctx->fh_init.io_cfg.timing_core, result1);\r
-    }\r
-    if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))\r
-    {\r
-        printf("priority is not changed: coreId = %d, result1 = %d\n", p_xran_dev_ctx->fh_init.io_cfg.timing_core, result1);\r
-    }\r
-\r
-    if (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {\r
-        do {\r
-           timespec_get(&ts, TIME_UTC);\r
-        }while (ts.tv_nsec >1500);\r
-        struct tm * ptm = gmtime(&ts.tv_sec);\r
-        if(ptm){\r
-            strftime(buff, sizeof buff, "%D %T", ptm);\r
-            printf("O-DU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us);\r
-        }\r
-\r
-        delay_cp_dl = interval_us - p_xran_dev_ctx->fh_init.T1a_max_cp_dl;\r
-        delay_cp_ul = interval_us - p_xran_dev_ctx->fh_init.T1a_max_cp_ul;\r
-        delay_up    = p_xran_dev_ctx->fh_init.T1a_max_up;\r
-        delay_up_ul = p_xran_dev_ctx->fh_init.Ta4_max;\r
-\r
-        delay_cp2up = delay_up-delay_cp_dl;\r
-\r
-        sym_cp_dl = delay_cp_dl*1000/(interval_us*1000/N_SYM_PER_SLOT)+1;\r
-        sym_cp_ul = delay_cp_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1;\r
-        sym_up_ul = delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT);\r
-        p_xran_dev_ctx->sym_up = sym_up = -(delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT));\r
-        p_xran_dev_ctx->sym_up_ul = sym_up_ul = (delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1);\r
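-\r
-        /* Example (mu 0: interval_us = 1000, N_SYM_PER_SLOT = 14): one symbol is\r
-           1000*1000/14 = 71428 ns; if T1a_max_cp_dl were 470 us, delay_cp_dl\r
-           would be 530 us and sym_cp_dl = 530000/71428 + 1 = 8 */\r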
-\r
-        printf("Start C-plane DL %d us after TTI  [trigger on sym %d]\n", delay_cp_dl, sym_cp_dl);\r
-        printf("Start C-plane UL %d us after TTI  [trigger on sym %d]\n", delay_cp_ul, sym_cp_ul);\r
-        printf("Start U-plane DL %d us before OTA [offset  in sym %d]\n", delay_up, sym_up);\r
-        printf("Start U-plane UL %d us OTA        [offset  in sym %d]\n", delay_up_ul, sym_up_ul);\r
-\r
-        printf("C-plane to U-plane delay %d us after TTI\n", delay_cp2up);\r
-        printf("Start Sym timer %ld ns\n", TX_TIMER_INTERVAL/N_SYM_PER_SLOT);\r
-\r
-        cb_elm = xran_create_cb(xran_timer_arm, tx_cp_dl_cb);\r
-        if(cb_elm){\r
-            LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_cp_dl],\r
-                             cb_elm,\r
-                             pointers);\r
-        } else {\r
-            print_err("cb_elm is NULL\n");\r
-            res =  -1;\r
-            goto err0;\r
-        }\r
-\r
-        cb_elm = xran_create_cb(xran_timer_arm, tx_cp_ul_cb);\r
-        if(cb_elm){\r
-            LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_cp_ul],\r
-                             cb_elm,\r
-                             pointers);\r
-        } else {\r
-            print_err("cb_elm is NULL\n");\r
-            res =  -1;\r
-            goto err0;\r
-        }\r
-\r
-        /* Full slot UL OTA + delay_up_ul */\r
-        cb_elm = xran_create_cb(xran_timer_arm, rx_ul_deadline_full_cb);\r
-        if(cb_elm){\r
-            LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_up_ul],\r
-                             cb_elm,\r
-                             pointers);\r
-        } else {\r
-            print_err("cb_elm is NULL\n");\r
-            res =  -1;\r
-            goto err0;\r
-        }\r
-\r
-        /* Half slot UL OTA + delay_up_ul*/\r
-        cb_elm = xran_create_cb(xran_timer_arm, rx_ul_deadline_half_cb);\r
-        if(cb_elm){\r
-            LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_up_ul + N_SYM_PER_SLOT/2],\r
-                         cb_elm,\r
-                         pointers);\r
-        } else {\r
-            print_err("cb_elm is NULL\n");\r
-            res =  -1;\r
-            goto err0;\r
-        }\r
-    } else {    // APP_O_RU\r
-        /* calculate when to send UL U-plane */\r
-        delay_up = p_xran_dev_ctx->fh_init.Ta3_min;\r
-        p_xran_dev_ctx->sym_up = sym_up = delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT)+1;\r
-        printf("Start UL U-plane %d us after OTA [offset in sym %d]\n", delay_up, sym_up);\r
-        do {\r
-           timespec_get(&ts, TIME_UTC);\r
-        }while (ts.tv_nsec >1500);\r
-        struct tm * ptm = gmtime(&ts.tv_sec);\r
-        if(ptm){\r
-            strftime(buff, sizeof buff, "%D %T", ptm);\r
-            printf("RU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us);\r
-        }\r
-    }\r
-\r
-    printf("interval_us %ld\n", interval_us);\r
-    do {\r
-       timespec_get(&ts, TIME_UTC);\r
-    }while (ts.tv_nsec == 0);\r
-\r
-    while(1) {\r
-        /* Update Usage Stats */\r
-        tWake = xran_tick();\r
-        xran_used_tick += tUsed;\r
-        if (tWakePrev)\r
-        {\r
-            xran_total_tick += get_ticks_diff(tWake, tWakePrev);\r
-        }\r
-        tWakePrev = tWake;\r
-        tUsed = 0;\r
-\r
-        delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);\r
-        if (XRAN_STOPPED == xran_if_current_state)\r
-            break;\r
-\r
-        if (likely(XRAN_RUNNING == xran_if_current_state))\r
-            sym_ota_cb(&sym_timer, timer_ctx, &tUsed);\r
-    }\r
-\r
-    err0:\r
-    for (i = 0; i< XRAN_MAX_SECTOR_NR; i++){\r
-        for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){\r
-            struct cb_elem_entry *cb_elm;\r
-            LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[i][j], pointers){\r
-                if(cb_elm){\r
-                    LIST_REMOVE(cb_elm, pointers);\r
-                    xran_destroy_cb(cb_elm);\r
-                }\r
-            }\r
-        }\r
-    }\r
-\r
-    printf("Closing timing source thread...tx counter %lu, rx counter %lu\n", tx_counter, rx_counter);\r
-    return res;\r
-}\r
-\r
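-/* Usage-stats sketch (illustrative helper, not part of the library API): the\r
-   busy/total tick counters accumulated in the loop above yield the timing\r
-   thread's core utilisation as a simple ratio. */\r
-static inline double xran_example_core_usage(uint64_t used_ticks, uint64_t total_ticks)\r
-{\r
-    /* guard against division by zero before the first full polling interval */\r
-    return (total_ticks > 0) ? (double)used_ticks / (double)total_ticks : 0.0;\r
-}\r
-\r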
-/* Handle the eCPRI ethertype. */\r
-int handle_ecpri_ethertype(struct rte_mbuf *pkt, uint64_t rx_time)\r
-{\r
-    const struct xran_ecpri_hdr *ecpri_hdr;\r
-    unsigned long t1;\r
-    int32_t ret = MBUF_FREE;\r
-\r
-    if (rte_pktmbuf_data_len(pkt) < sizeof(struct xran_ecpri_hdr)) {\r
-        print_err("Packet too short - %d bytes", rte_pktmbuf_data_len(pkt));\r
-        return MBUF_FREE;\r
-    }\r
-\r
-    /* check eCPRI header. */\r
-    ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);\r
-    if(ecpri_hdr == NULL){\r
-        print_err("ecpri_hdr error\n");\r
-        return MBUF_FREE;\r
-    }\r
-\r
-    rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);\r
-    switch(ecpri_hdr->cmnhdr.ecpri_mesg_type) {\r
-        case ECPRI_IQ_DATA:\r
-           // t1 = MLogTick();\r
-            ret = process_mbuf(pkt);\r
-          //  MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());\r
-            break;\r
-        // For RU emulation\r
-        case ECPRI_RT_CONTROL_DATA:\r
-            t1 = MLogTick();\r
-            if(xran_dev_get_ctx()->fh_init.io_cfg.id == O_RU) {\r
-                ret = process_cplane(pkt);\r
-            } else {\r
-                print_err("O-DU received C-Plane message!");\r
-            }\r
-            MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick());\r
-            break;\r
-        default:\r
-            print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.ecpri_mesg_type);\r
-    }\r
-\r
-    return ret;\r
-}\r
-\r
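-/* Note: this handler is registered in xran_init() below via\r
-   xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);\r
-   its return value (MBUF_FREE/MBUF_KEEP) tells the caller whether the mbuf\r
-   may be freed after processing. */\r
-\r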
-int xran_process_prach_sym(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id,\r
-                        uint32_t *mb_free)\r
-{\r
-    char        *pos = NULL;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    uint8_t symb_id_offset;\r
-    uint32_t tti = 0;\r
-    xran_status_t status;\r
-    void *pHandle = NULL;\r
-    struct rte_mbuf *mb;\r
-\r
-    uint16_t iq_sample_size_bits = 16;\r
-\r
-    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)\r
-        return 0;\r
-\r
-    tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;\r
-\r
-    status = tti << 16 | symb_id;\r
-\r
-    if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){\r
-        symb_id_offset = symb_id - p_xran_dev_ctx->prach_start_symbol[CC_ID]; // store PRACH symbols from index 0 for ease of processing within the PHY\r
-        pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData;\r
-        if(pos && iq_data_start && size){\r
-            if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {\r
-                int idx = 0;\r
-                uint16_t *psrc = (uint16_t *)iq_data_start;\r
-                uint16_t *pdst = (uint16_t *)pos;\r
-                /* network byte (be) order of IQ to CPU byte order (le) */\r
-                for (idx = 0; idx < size/sizeof(int16_t); idx++){\r
-                    pdst[idx]  = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);\r
-                }\r
-                *mb_free = MBUF_FREE;\r
-            }else {\r
-                mb = p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl;\r
-                if(mb){\r
-                   rte_pktmbuf_free(mb);\r
-                }else{\r
-                   print_err("mb==NULL\n");\r
-                }\r
-                p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData = iq_data_start;\r
-                p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl = mbuf;\r
-                *mb_free = MBUF_KEEP;\r
-            }\r
-        } else {\r
-            print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);\r
-        }\r
-    } else {\r
-        print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);\r
-    }\r
-\r
-/*    if (symb_id == p_xran_dev_ctx->prach_last_symbol[CC_ID] ){\r
-        p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id]++;\r
-        if(p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] >= xran_get_num_eAxc(pHandle)){\r
-            if(p_xran_dev_ctx->pPrachCallback[0])\r
-               p_xran_dev_ctx->pPrachCallback[0](p_xran_dev_ctx->pPrachCallbackTag[0], status);\r
-            p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] = 0;\r
-        }\r
-    }\r
-*/\r
-    return size;\r
-}\r
-\r
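-/* The per-sample swap above is equivalent to rte_be_to_cpu_16() applied\r
-   element-wise: network (big-endian) IQ words become CPU (little-endian)\r
-   words. A minimal standalone sketch of the same conversion (hypothetical\r
-   helper, not part of the API): */\r
-static inline void xran_example_be_to_le_iq(uint16_t *dst, const uint16_t *src, size_t n)\r
-{\r
-    size_t i;\r
-    for (i = 0; i < n; i++)\r
-        dst[i] = (uint16_t)((src[i] >> 8) | (src[i] << 8)); /* swap the two bytes */\r
-}\r
-\r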
-int32_t xran_process_srs_sym(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id,\r
-                        uint32_t *mb_free)\r
-{\r
-    char        *pos = NULL;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    uint32_t tti = 0;\r
-    xran_status_t status;\r
-    void *pHandle = NULL;\r
-    struct rte_mbuf *mb = NULL;\r
-\r
-    uint16_t iq_sample_size_bits = 16;\r
-\r
-    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)\r
-        return 0;\r
-\r
-    tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;\r
-\r
-    status = tti << 16 | symb_id;\r
-\r
-    if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < p_xran_dev_ctx->fh_cfg.nAntElmTRx && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT) {\r
-        pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;\r
-        pos += start_prbu * N_SC_PER_PRB*(iq_sample_size_bits/8)*2;\r
-        if(pos && iq_data_start && size){\r
-            if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {\r
-                int idx = 0;\r
-                uint16_t *psrc = (uint16_t *)iq_data_start;\r
-                uint16_t *pdst = (uint16_t *)pos;\r
-                rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x%016lx\n", (long)mb);\r
-                /* network byte (be) order of IQ to CPU byte order (le) */\r
-                for (idx = 0; idx < size/sizeof(int16_t); idx++){\r
-                    pdst[idx]  = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);\r
-                }\r
-            } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){\r
-                if (likely (p_xran_dev_ctx->fh_init.mtu >=\r
-                              p_xran_dev_ctx->fh_cfg.nULRBs * N_SC_PER_PRB*(iq_sample_size_bits/8)*2)) {\r
-                    /* no fragmentation */\r
-                    mb = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;\r
-                    if(mb){\r
-                       rte_pktmbuf_free(mb);\r
-                    }else{\r
-                       print_err("mb==NULL\n");\r
-                    }\r
-                    p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData = iq_data_start;\r
-                    p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;\r
-                    *mb_free = MBUF_KEEP;\r
-                } else {\r
-                    /* packet may be fragmented; copy the RBs */\r
-                    rte_memcpy(pos, iq_data_start, size);\r
-                    *mb_free = MBUF_FREE;\r
-                }\r
-            }\r
-        } else {\r
-            print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);\r
-        }\r
-    } else {\r
-        print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);\r
-    }\r
-\r
-    return size;\r
-}\r
-\r
-int32_t xran_pkt_validate(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        struct ecpri_seq_id *seq_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id)\r
-{\r
-    struct xran_device_ctx * pctx = xran_dev_get_ctx();\r
-    struct xran_common_counters *pCnt = &pctx->fh_counters;\r
-\r
-    if(pctx->fh_init.io_cfg.id == O_DU) {\r
-        if(xran_check_upul_seqid(NULL, CC_ID, Ant_ID, slot_id, seq_id->seq_id) != XRAN_STATUS_SUCCESS) {\r
-            pCnt->Rx_pkt_dupl++;\r
-            return (XRAN_STATUS_FAIL);\r
-        }\r
-    }else if(pctx->fh_init.io_cfg.id == O_RU) {\r
-        if(xran_check_updl_seqid(NULL, CC_ID, Ant_ID, slot_id, seq_id->seq_id) != XRAN_STATUS_SUCCESS) {\r
-            pCnt->Rx_pkt_dupl++;\r
-            return (XRAN_STATUS_FAIL);\r
-        }\r
-    }else {\r
-        print_err("incorrect dev type %d\n", pctx->fh_init.io_cfg.id);\r
-    }\r
-\r
-    rx_counter++;\r
-\r
-    pCnt->Rx_on_time++;\r
-    pCnt->Total_msgs_rcvd++;\r
-\r
-    return XRAN_STATUS_SUCCESS;\r
-}\r
-\r
-int32_t xran_process_rx_sym(void *arg,\r
-                        struct rte_mbuf *mbuf,\r
-                        void *iq_data_start,\r
-                        uint16_t size,\r
-                        uint8_t CC_ID,\r
-                        uint8_t Ant_ID,\r
-                        uint8_t frame_id,\r
-                        uint8_t subframe_id,\r
-                        uint8_t slot_id,\r
-                        uint8_t symb_id,\r
-                        uint16_t num_prbu,\r
-                        uint16_t start_prbu,\r
-                        uint16_t sym_inc,\r
-                        uint16_t rb,\r
-                        uint16_t sect_id,\r
-                        uint32_t *mb_free)\r
-{\r
-    char        *pos = NULL;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    uint32_t tti = 0;\r
-    xran_status_t status;\r
-    void *pHandle = NULL;\r
-    struct rte_mbuf *mb = NULL;\r
-    struct xran_prb_map * pRbMap    = NULL;\r
-    struct xran_prb_elm * prbMapElm = NULL;\r
-\r
-    uint16_t iq_sample_size_bits = 16;\r
-\r
-    tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;\r
-\r
-    status = tti << 16 | symb_id;\r
-\r
-    if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){\r
-        pos = (char*) p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;\r
-        pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;\r
-        if(pRbMap){\r
-            if(sect_id >= pRbMap->nPrbElm) {\r
-                print_err("sect_id %d >= pRbMap->nPrbElm %d\n", sect_id, pRbMap->nPrbElm);\r
-                *mb_free = MBUF_FREE;\r
-                return size;\r
-            }\r
-            prbMapElm = &pRbMap->prbMap[sect_id];\r
-        } else {\r
-            print_err("pRbMap==NULL\n");\r
-            *mb_free = MBUF_FREE;\r
-            return size;\r
-        }\r
-\r
-        pos += start_prbu * N_SC_PER_PRB*(iq_sample_size_bits/8)*2;\r
-        if(pos && iq_data_start && size){\r
-            if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {\r
-                int idx = 0;\r
-                uint16_t *psrc = (uint16_t *)iq_data_start;\r
-                uint16_t *pdst = (uint16_t *)pos;\r
-                rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x%016lx\n", (long)mb);\r
-                /* network byte (be) order of IQ to CPU byte order (le) */\r
-                for (idx = 0; idx < size/sizeof(int16_t); idx++){\r
-                    pdst[idx]  = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);\r
-                }\r
-            } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){\r
-                if (/*likely (p_xran_dev_ctx->fh_init.mtu >=\r
-                              p_xran_dev_ctx->fh_cfg.nULRBs * N_SC_PER_PRB*(iq_sample_size_bits/8)*2)\r
-                              &&  p_xran_dev_ctx->fh_init.io_cfg.id == O_DU*/ 1) {\r
-                    if (pRbMap->nPrbElm == 1){\r
-                        /* no fragmentation */\r
-                        mb = p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;\r
-                        if(mb){\r
-                           rte_pktmbuf_free(mb);\r
-                        }else{\r
-                           print_err("mb==NULL\n");\r
-                        }\r
-                        p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData = iq_data_start;\r
-                        p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;\r
-                        *mb_free = MBUF_KEEP;\r
-                    } else {\r
-                        prbMapElm = &pRbMap->prbMap[sect_id];\r
-                        struct xran_section_desc *p_sec_desc =  prbMapElm->p_sec_desc[symb_id];\r
-                        if(p_sec_desc){\r
-                            mb = p_sec_desc->pCtrl;\r
-                            if(mb){\r
-                               rte_pktmbuf_free(mb);\r
-                            }\r
-                            p_sec_desc->pData         = iq_data_start;\r
-                            p_sec_desc->pCtrl         = mbuf;\r
-                            p_sec_desc->iq_buffer_len = size;\r
-                            p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);\r
-                        } else {\r
-                            print_err("p_sec_desc==NULL\n");\r
-                            *mb_free = MBUF_FREE;\r
-                            return size;\r
-                        }\r
-                        *mb_free = MBUF_KEEP;\r
-                    }\r
-                } else {\r
-                    /* packet may be fragmented; copy the RBs */\r
-                    rte_memcpy(pos, iq_data_start, size);\r
-                    *mb_free = MBUF_FREE;\r
-                }\r
-            }\r
-        } else {\r
-            print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);\r
-        }\r
-    } else {\r
-        print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);\r
-    }\r
-\r
-    return size;\r
-}\r
-\r
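-/* Zero-copy hand-off pattern used above, in miniature: rather than copying IQ\r
-   payload out of the packet, the descriptor's previous mbuf is released, the\r
-   received mbuf takes its place, and MBUF_KEEP tells the caller the packet is\r
-   now owned by the buffer. Hedged sketch with a hypothetical descriptor type: */\r
-struct xran_example_iq_desc {\r
-    void            *pData;  /* IQ payload inside the mbuf */\r
-    struct rte_mbuf *pCtrl;  /* owning mbuf */\r
-};\r
-static inline uint32_t\r
-xran_example_swap_in_mbuf(struct xran_example_iq_desc *d, struct rte_mbuf *pkt, void *iq)\r
-{\r
-    if (d->pCtrl)\r
-        rte_pktmbuf_free(d->pCtrl);  /* drop the stale buffer */\r
-    d->pData = iq;\r
-    d->pCtrl = pkt;\r
-    return MBUF_KEEP;                /* caller must not free pkt */\r
-}\r
-\r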
-/* Send burst of packets on an output interface */\r
-static inline int\r
-xran_send_burst(struct xran_device_ctx *dev, uint16_t n, uint16_t port)\r
-{\r
-    struct rte_mbuf **m_table;\r
-    int32_t i   = 0;\r
-    int32_t ret = 0;\r
-\r
-    m_table = (struct rte_mbuf **)dev->tx_mbufs[port].m_table;\r
-\r
-    for(i = 0; i < n; i++){\r
-        rte_mbuf_sanity_check(m_table[i], 0);\r
-        /*rte_pktmbuf_dump(stdout, m_table[i], 256);*/\r
-        tx_counter++;\r
-        tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);\r
-        ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI);\r
-    }\r
-\r
-\r
-    if (unlikely(ret < n)) {\r
-        print_err("ret < n\n");\r
-    }\r
-\r
-    return 0;\r
-}\r
-\r
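-/* Note: xran_send_burst() drains dev->tx_mbufs[port] after\r
-   xran_process_tx_sym_cp_on() below has staged (and, if the section exceeds\r
-   the MTU, fragmented) the U-Plane packets for one section. */\r
-\r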
-int32_t xran_process_tx_sym_cp_off(uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id,\r
-    int32_t do_srs)\r
-{\r
-    int32_t     retval = 0;\r
-    uint64_t    t1 = MLogTick();\r
-\r
-    void        *pHandle = NULL;\r
-    char        *pos = NULL;\r
-    char        *p_sec_iq = NULL;\r
-    char        *p_sect_iq = NULL;\r
-    void        *mb  = NULL;\r
-    int         prb_num = 0;\r
-    uint16_t    iq_sample_size_bits = 16; // TODO: make dynamic per\r
-\r
-    struct xran_prb_map *prb_map = NULL;\r
-    uint8_t  num_ant_elm  = 0;\r
-\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);\r
-    struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);\r
-    num_ant_elm = xran_get_num_ant_elm(pHandle);\r
-    enum xran_pkt_dir direction;\r
-\r
-    struct rte_mbuf *eth_oran_hdr = NULL;\r
-    char        *ext_buff = NULL;\r
-    uint16_t    ext_buff_len = 0;\r
-    struct rte_mbuf *tmp = NULL;\r
-    rte_iova_t ext_buff_iova = 0;\r
-\r
-    struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN];\r
-\r
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {\r
-        direction = XRAN_DIR_DL; /* O-DU */\r
-        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;\r
-    } else {\r
-        direction = XRAN_DIR_UL; /* RU */\r
-        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;\r
-    }\r
-\r
-    if(xran_fs_get_slot_type(cc_id, tti, ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SLOT_TYPE_DL : XRAN_SLOT_TYPE_UL)) ==  1\r
-            || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1\r
-            || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_FDD) ==  1){\r
-\r
-        if(xran_fs_get_symbol_type(cc_id, tti, sym_id) == ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SYMBOL_TYPE_DL : XRAN_SYMBOL_TYPE_UL)\r
-           || xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){\r
-\r
-            if(iq_sample_size_bits != 16)\r
-                print_err("Incorrect iqWidth %d\n", iq_sample_size_bits );\r
-\r
-            pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;\r
-            mb  = (void*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;\r
-            prb_map  = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;\r
-\r
-\r
-            if(prb_map){\r
-                int32_t elmIdx = 0;\r
-                for (elmIdx = 0; elmIdx < prb_map->nPrbElm; elmIdx++){\r
-                    uint16_t sec_id  = elmIdx;\r
-                    struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];\r
-                    struct xran_section_desc * p_sec_desc = NULL;\r
-\r
-                    if(prb_map_elm == NULL){\r
-                        rte_panic("p_sec_desc == NULL\n");\r
-                    }\r
-\r
-                    p_sec_desc =  prb_map_elm->p_sec_desc[sym_id];\r
-\r
-                    if(p_sec_desc == NULL){\r
-                        rte_panic("p_sec_desc == NULL\n");\r
-                    }\r
-\r
-#if 1\r
-                    p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);\r
-\r
-                    /* calculate offset for external buffer */\r
-                    ext_buff_len = p_sec_desc->iq_buffer_len;\r
-                    ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +\r
-                                    sizeof (struct xran_ecpri_hdr) +\r
-                                    sizeof (struct radio_app_common_hdr) +\r
-                                    sizeof(struct data_section_hdr));\r
-\r
-                    ext_buff_len += RTE_PKTMBUF_HEADROOM +\r
-                                    sizeof (struct xran_ecpri_hdr) +\r
-                                    sizeof (struct radio_app_common_hdr) +\r
-                                    sizeof(struct data_section_hdr) + 18;\r
-\r
-                    if(prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE){\r
-                        ext_buff     -= sizeof (struct data_section_compression_hdr);\r
-                        ext_buff_len += sizeof (struct data_section_compression_hdr);\r
-                    }\r
-\r
-                    eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_small);\r
-\r
-                    if (unlikely(eth_oran_hdr == NULL)) {\r
-                        rte_panic("Failed rte_pktmbuf_alloc\n");\r
-                    }\r
-\r
-                    p_share_data->free_cb = extbuf_free_callback;\r
-                    p_share_data->fcb_opaque = NULL;\r
-                    rte_mbuf_ext_refcnt_set(p_share_data, 1);\r
-\r
-                    ext_buff_iova = rte_mempool_virt2iova(mb);\r
-                    if (unlikely(ext_buff_iova == 0)) {\r
-                        rte_panic("Failed rte_mempool_virt2iova\n");\r
-                    }\r
-\r
-                    if (unlikely(ext_buff_iova == RTE_BAD_IOVA)) {\r
-                        rte_panic("Failed rte_mempool_virt2iova RTE_BAD_IOVA\n");\r
-                    }\r
-\r
-                    rte_pktmbuf_attach_extbuf(eth_oran_hdr,\r
-                                              ext_buff,\r
-                                              ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),\r
-                                              ext_buff_len,\r
-                                              p_share_data);\r
-\r
-                    rte_pktmbuf_reset_headroom(eth_oran_hdr);\r
-\r
-                    tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct ether_hdr));\r
-                    if (unlikely(tmp == NULL)) {\r
-                        rte_panic("Failed rte_pktmbuf_prepend\n");\r
-                    }\r
-                    mb = eth_oran_hdr;\r
-\r
-                    /* first all PRBs */\r
-                    prepare_symbol_ex(direction, sec_id,\r
-                                      mb,\r
-                                      (struct rb_map *)p_sec_iq,\r
-                                      prb_map_elm->compMethod,\r
-                                      prb_map_elm->iqWidth,\r
-                                      p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                                      frame_id, subframe_id, slot_id, sym_id,\r
-                                      prb_map_elm->nRBStart, prb_map_elm->nRBSize,\r
-                                      cc_id, ant_id,\r
-                                      (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                                          xran_get_updl_seqid(pHandle, cc_id, ant_id) :\r
-                                          xran_get_upul_seqid(pHandle, cc_id, ant_id),\r
-                                      0);\r
-\r
-                    rte_mbuf_sanity_check((struct rte_mbuf *)mb, 0);\r
-                    tx_counter++;\r
-                    tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)mb);\r
-                    p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)mb, ETHER_TYPE_ECPRI);\r
-#else\r
-        p_sect_iq = pos + p_sec_desc->iq_buffer_offset;\r
-        prb_num = prb_map_elm->nRBSize;\r
-\r
-        if( prb_num > 136 || prb_num == 0) {\r
-            /* first 136 PRBs */\r
-            rte_panic("first 136 PRBs\n");\r
-            send_symbol_ex(direction,\r
-                            sec_id,\r
-                            NULL,\r
-                            (struct rb_map *)p_sect_iq,\r
-                            p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                            frame_id, subframe_id, slot_id, sym_id,\r
-                            0, 136,\r
-                            cc_id, ant_id,\r
-                            (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                                xran_get_updl_seqid(pHandle, cc_id, ant_id) :\r
-                                xran_get_upul_seqid(pHandle, cc_id, ant_id));\r
-\r
-             pos += 136 * N_SC_PER_PRB * (iq_sample_size_bits/8)*2;\r
-             /* last 137 PRBs */\r
-             send_symbol_ex(direction, sec_id,\r
-                             NULL,\r
-                             (struct rb_map *)p_sect_iq,\r
-                             p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                             frame_id, subframe_id, slot_id, sym_id,\r
-                             136, 137,\r
-                             cc_id, ant_id,\r
-                             (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                                xran_get_updl_seqid(pHandle, cc_id, ant_id) :\r
-                                xran_get_upul_seqid(pHandle,  cc_id, ant_id));\r
-            retval = 1;\r
-        } else {\r
-            send_symbol_ex(direction,\r
-                    sec_id, /* xran_alloc_sectionid(pHandle, direction, cc_id, ant_id, slot_id)*/\r
-                    /*(struct rte_mbuf *)mb*/ NULL,\r
-                    (struct rb_map *)p_sect_iq,\r
-                    p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                    frame_id, subframe_id, slot_id, sym_id,\r
-                    prb_map_elm->nRBStart, prb_map_elm->nRBSize,\r
-                    cc_id, ant_id,\r
-                    (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                        xran_get_updl_seqid(pHandle, cc_id, ant_id) :\r
-                        xran_get_upul_seqid(pHandle, cc_id, ant_id));\r
-            retval = 1;\r
-        }\r
-\r
-#endif\r
-\r
-                }\r
-            } else {\r
-                printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, sym_id);\r
-            }\r
-\r
-            if(p_xran_dev_ctx->enablePrach\r
-              && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)) {   /* Only RU needs to send PRACH I/Q */\r
-                uint32_t is_prach_slot = xran_is_prach_slot(subframe_id, slot_id);\r
-                if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])\r
-                        && (is_prach_slot == 1)\r
-                        && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])\r
-                        && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) {  //is prach slot\r
-                        int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;\r
-                        pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pData;\r
-                        pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;\r
-                        mb  = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;\r
-\r
-                        send_symbol_ex(direction,\r
-                                xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, slot_id),\r
-                                (struct rte_mbuf *)mb,\r
-                                (struct rb_map *)pos,\r
-                                p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                                frame_id, subframe_id, slot_id, sym_id,\r
-                                pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,\r
-                                cc_id, prach_port_id,\r
-                                xran_get_upul_seqid(pHandle, cc_id, prach_port_id));\r
-                        retval = 1;\r
-                } /* if((frame_id % pPrachCPConfig->x == pPrachCPConfig->y[0]) .... */\r
-            } /* if(p_xran_dev_ctx->enablePrach ..... */\r
-\r
-\r
-            if(p_xran_dev_ctx->enableSrs && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)){\r
-                if( p_srs_cfg->symbMask & (1 << sym_id) /* is SRS symbol */\r
-                    && do_srs) {\r
-                    int32_t ant_elm_id = 0;\r
-\r
-                    for (ant_elm_id = 0; ant_elm_id < num_ant_elm; ant_elm_id++){\r
-                        int32_t ant_elm_eAxC_id = ant_elm_id + p_srs_cfg->eAxC_offset;\r
-\r
-                        pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_elm_id].sBufferList.pBuffers[sym_id].pData;\r
-                        mb  = (void*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_elm_id].sBufferList.pBuffers[sym_id].pCtrl;\r
-\r
-                        if( prb_num > 136 || prb_num == 0) {\r
-                            uint16_t sec_id  = xran_alloc_sectionid(pHandle, direction, cc_id, ant_id, slot_id);\r
-                            /* first 136 PRBs */\r
-                            send_symbol_ex(direction,\r
-                                            sec_id,\r
-                                            NULL,\r
-                                            (struct rb_map *)pos,\r
-                                            p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                                            frame_id, subframe_id, slot_id, sym_id,\r
-                                            0, 136,\r
-                                            cc_id, ant_elm_eAxC_id,\r
-                                            (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                                                xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :\r
-                                                xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id));\r
-\r
-                             pos += 136 * N_SC_PER_PRB * (iq_sample_size_bits/8)*2;\r
-                             /* last 137 PRBs */\r
-                             send_symbol_ex(direction, sec_id,\r
-                                             NULL,\r
-                                             (struct rb_map *)pos,\r
-                                             p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                                             frame_id, subframe_id, slot_id, sym_id,\r
-                                             136, 137,\r
-                                             cc_id, ant_elm_eAxC_id,\r
-                                             (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                                                xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :\r
-                                                xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id));\r
-                        } else {\r
-                            send_symbol_ex(direction,\r
-                                    xran_alloc_sectionid(pHandle, direction, cc_id, ant_elm_eAxC_id, slot_id),\r
-                                    (struct rte_mbuf *)mb,\r
-                                    (struct rb_map *)pos,\r
-                                    p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                                    frame_id, subframe_id, slot_id, sym_id,\r
-                                    0, prb_num,\r
-                                    cc_id, ant_elm_eAxC_id,\r
-                                    (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?\r
-                                        xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :\r
-                                        xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id));\r
-                            retval = 1;\r
-                        }\r
-                    } /* for ant elem */\r
-                } /* SRS symbol */\r
-            } /* SRS enabled */\r
-        } /* RU mode or C-Plane is not used */\r
-    }\r
-\r
-    return retval;\r
-}\r
-\r
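-/* External-buffer TX pattern used above, reduced to its essence: a small mbuf\r
-   is allocated for the Ethernet/eCPRI headers, and the IQ region that already\r
-   lives in fronthaul buffer memory is attached as an external segment, so no\r
-   IQ copy happens on the transmit path. Hedged sketch; the pool and shared-\r
-   info arguments mirror the ones used above: */\r
-static inline struct rte_mbuf *\r
-xran_example_attach_iq(struct rte_mempool *hdr_pool, char *iq_ext_buff,\r
-                       uint16_t iq_ext_len, rte_iova_t iq_iova,\r
-                       struct rte_mbuf_ext_shared_info *shinfo)\r
-{\r
-    struct rte_mbuf *m = rte_pktmbuf_alloc(hdr_pool);\r
-    if (m == NULL)\r
-        return NULL;\r
-    rte_mbuf_ext_refcnt_set(shinfo, 1);                           /* single user */\r
-    rte_pktmbuf_attach_extbuf(m, iq_ext_buff, iq_iova, iq_ext_len, shinfo);\r
-    rte_pktmbuf_reset_headroom(m);                                /* room for hdrs */\r
-    return m;\r
-}\r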
-\r
-int32_t xran_process_tx_sym_cp_on(uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,\r
-    uint32_t slot_id, uint32_t sym_id)\r
-{\r
-    int32_t     retval = 0;\r
-    uint64_t    t1 = MLogTick();\r
-\r
-    struct rte_mbuf *eth_oran_hdr = NULL;\r
-    char        *ext_buff = NULL;\r
-    uint16_t    ext_buff_len = 0;\r
-    struct rte_mbuf *tmp = NULL;\r
-    rte_iova_t ext_buff_iova = 0;\r
-    void        *pHandle  = NULL;\r
-    char        *pos      = NULL;\r
-    char        *p_sec_iq = NULL;\r
-    void        *mb  = NULL;\r
-    int         prb_num = 0;\r
-    uint16_t    iq_sample_size_bits = 16; // TODO: make dynamic per\r
-    uint32_t    next = 0;\r
-    int32_t     num_sections = 0;\r
-\r
-    struct xran_section_info *sectinfo = NULL;\r
-    struct xran_device_ctx   *p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);\r
-    struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);\r
-    enum xran_pkt_dir direction;\r
-\r
-    struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN];\r
-\r
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {\r
-        direction = XRAN_DIR_DL; /* O-DU */\r
-        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;\r
-    } else {\r
-        direction = XRAN_DIR_UL; /* RU */\r
-        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;\r
-    }\r
-\r
-    next = 0;\r
-    num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);\r
-    /* iterate C-Plane configuration to generate corresponding U-Plane */\r
-    while(next < num_sections) {\r
-        sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);\r
-\r
-        if(sectinfo == NULL)\r
-            break;\r
-\r
-        if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) {   /* only supports type 1 */\r
-            print_err("Invalid section type in section DB - %d", sectinfo->type);\r
-            continue;\r
-        }\r
-\r
-        /* skip, if not scheduled */\r
-        if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)\r
-            continue;\r
-\r
-        if(sectinfo->compMeth)\r
-            iq_sample_size_bits = sectinfo->iqWidth;\r
-\r
-        print_dbg(">>> sym %2d [%d] type%d, id %d, startPrbc=%d, numPrbc=%d, numSymbol=%d\n", sym_id, next,\r
-                        sectinfo->type, sectinfo->id, sectinfo->startPrbc,\r
-                        sectinfo->numPrbc, sectinfo->numSymbol);\r
-\r
-        p_xran_dev_ctx->tx_mbufs[0].len = 0;\r
-        uint16_t len  = p_xran_dev_ctx->tx_mbufs[0].len;\r
-        int16_t len2 = 0;\r
-        uint16_t i    = 0;\r
-\r
-        // Added for Klocwork\r
-        if (len >= MBUF_TABLE_SIZE)\r
-            len = MBUF_TABLE_SIZE - 1;\r
-\r
-        pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;\r
-        mb  = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;\r
-\r
-#if 1\r
-        p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);\r
-\r
-        /* calculate offset for external buffer */\r
-        ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;\r
-        ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +\r
-                        sizeof (struct xran_ecpri_hdr) +\r
-                        sizeof (struct radio_app_common_hdr) +\r
-                        sizeof(struct data_section_hdr));\r
-\r
-        ext_buff_len += RTE_PKTMBUF_HEADROOM +\r
-                        sizeof (struct xran_ecpri_hdr) +\r
-                        sizeof (struct radio_app_common_hdr) +\r
-                        sizeof(struct data_section_hdr) + 18;\r
-\r
-        if(sectinfo->compMeth != XRAN_COMPMETHOD_NONE){\r
-            ext_buff     -= sizeof (struct data_section_compression_hdr);\r
-            ext_buff_len += sizeof (struct data_section_compression_hdr);\r
-        }\r
-\r
-        eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_small);\r
-\r
-        if (unlikely(eth_oran_hdr == NULL)) {\r
-            rte_panic("Failed rte_pktmbuf_alloc\n");\r
-        }\r
-\r
-        p_share_data->free_cb = extbuf_free_callback;\r
-        p_share_data->fcb_opaque = NULL;\r
-        rte_mbuf_ext_refcnt_set(p_share_data, 1);\r
-\r
-        ext_buff_iova = rte_mempool_virt2iova(mb);\r
-        if (unlikely(ext_buff_iova == 0)) {\r
-            rte_panic("Failed rte_mempool_virt2iova\n");\r
-        }\r
-\r
-        if (unlikely(ext_buff_iova == RTE_BAD_IOVA)) {\r
-            rte_panic("Failed rte_mempool_virt2iova RTE_BAD_IOVA\n");\r
-        }\r
-\r
-        rte_pktmbuf_attach_extbuf(eth_oran_hdr,\r
-                                  ext_buff,\r
-                                  ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),\r
-                                  ext_buff_len,\r
-                                  p_share_data);\r
-\r
-        rte_pktmbuf_reset_headroom(eth_oran_hdr);\r
-\r
-        tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct ether_hdr));\r
-        if (unlikely(tmp == NULL)) {\r
-            rte_panic("Failed rte_pktmbuf_prepend\n");\r
-        }\r
-        mb = eth_oran_hdr;\r
-#else\r
-        rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */\r
-#endif\r
-        /* first all PRBs */\r
-        prepare_symbol_ex(direction, sectinfo->id,\r
-                          mb,\r
-                          (struct rb_map *)p_sec_iq,\r
-                          sectinfo->compMeth,\r
-                          sectinfo->iqWidth,\r
-                          p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,\r
-                          frame_id, subframe_id, slot_id, sym_id,\r
-                          sectinfo->startPrbc, sectinfo->numPrbc,\r
-                          cc_id, ant_id,\r
-                          xran_get_updl_seqid(pHandle, cc_id, ant_id),\r
-                          0);\r
-\r
-        /* if we don't need to do any fragmentation */\r
-        if (likely (p_xran_dev_ctx->fh_init.mtu >=\r
-                        sectinfo->numPrbc * (3*iq_sample_size_bits + 1))) {\r
-            /* no fragmentation */\r
-            p_xran_dev_ctx->tx_mbufs[0].m_table[len] = mb;\r
-            len2 = 1;\r
-        } else {\r
-            /* fragmentation */\r
-            uint8_t * seq_num = xran_get_updl_seqid_addr(pHandle, cc_id, ant_id);\r
-            if(seq_num)\r
-                (*seq_num)--;\r
-            else\r
-                rte_panic("pointer to seq number is NULL [CC %d Ant %d]\n", cc_id, ant_id);\r
-\r
-            len2 = xran_app_fragment_packet(mb,\r
-                                        &p_xran_dev_ctx->tx_mbufs[0].m_table[len],\r
-                                        (uint16_t)(MBUF_TABLE_SIZE - len),\r
-                                        p_xran_dev_ctx->fh_init.mtu,\r
-                                        p_xran_dev_ctx->direct_pool,\r
-                                        p_xran_dev_ctx->indirect_pool,\r
-                                        sectinfo,\r
-                                        seq_num);\r
-\r
-            /* Free input packet */\r
-            rte_pktmbuf_free(mb);\r
-\r
-            /* If we fail to fragment the packet */\r
-            if (unlikely (len2 < 0)){\r
-                print_err("len2= %d\n", len2);\r
-                return 0;\r
-            }\r
-        }\r
-\r
-        if(len2 > 1){\r
-            for (i = len; i < len + len2; i ++) {\r
-                struct rte_mbuf *m;\r
-                m = p_xran_dev_ctx->tx_mbufs[0].m_table[i];\r
-                struct ether_hdr *eth_hdr = (struct ether_hdr *)\r
-                    rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));\r
-                if (eth_hdr == NULL) {\r
-                    rte_panic("No headroom in mbuf.\n");\r
-                }\r
-            }\r
-        }\r
-\r
-        len += len2;\r
-\r
-        if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {\r
-              rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");\r
-        }\r
-\r
-        /* Transmit packets */\r
-        xran_send_burst(p_xran_dev_ctx, (uint16_t)len, 0);\r
-        p_xran_dev_ctx->tx_mbufs[0].len = 0;\r
-        retval = 1;\r
-    } /* while(section) */\r
-\r
-    return retval;\r
-}\r
-\r
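-/* Sizing note for the MTU check above (a back-of-envelope reading, assuming\r
-   uncompressed 16-bit IQ): one PRB carries 12 subcarriers x 2 (I/Q) x 2 bytes\r
-   = 48 bytes, and the guard numPrbc * (3*iqWidth + 1) evaluates to\r
-   numPrbc * 49 for iqWidth = 16, i.e. the exact payload plus one spare byte\r
-   per PRB; sections whose estimate exceeds the MTU take the\r
-   xran_app_fragment_packet() path instead. */\r
-\r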
-int32_t xran_process_tx_sym(void *arg)\r
-{\r
-    int32_t     retval = 0;\r
-    uint32_t    tti=0;\r
-#if XRAN_MLOG_VAR\r
-    uint32_t    mlogVar[10];\r
-    uint32_t    mlogVarCnt = 0;\r
-#endif\r
-    unsigned long t1 = MLogTick();\r
-\r
-    void        *pHandle = NULL;\r
-    int32_t     ant_id   = 0;\r
-    int32_t     cc_id    = 0;\r
-    uint8_t     num_eAxc = 0;\r
-    uint8_t     num_CCPorts = 0;\r
-    uint8_t     num_ant_elm = 0;\r
-    uint32_t    frame_id    = 0;\r
-    uint32_t    subframe_id = 0;\r
-    uint32_t    slot_id     = 0;\r
-    uint32_t    sym_id      = 0;\r
-    uint32_t    sym_idx     = 0;\r
-\r
-    uint8_t     ctx_id;\r
-    enum xran_pkt_dir  direction;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)\r
-        return 0;\r
-\r
-    /* O-RU: send symb after OTA time with delay (UL) */\r
-    /* O-DU: send symb in advance of OTA time (DL) */\r
-    sym_idx     = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME*1000);\r
-\r
-    tti         = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);\r
-    slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);\r
-    subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-    // ORAN frameId, 8 bits, [0, 255]\r
-    frame_id = (frame_id & 0xff);\r
-\r
-    sym_id      = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);\r
-    ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME) % XRAN_MAX_SECTIONDB_CTX;\r
-\r
-    print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);\r
-\r
-#if XRAN_MLOG_VAR\r
-    mlogVar[mlogVarCnt++] = 0xAAAAAAAA;\r
-    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx;\r
-    mlogVar[mlogVarCnt++] = sym_idx;\r
-    mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);\r
-    mlogVar[mlogVarCnt++] = tti;\r
-    mlogVar[mlogVarCnt++] = frame_id;\r
-    mlogVar[mlogVarCnt++] = subframe_id;\r
-    mlogVar[mlogVarCnt++] = slot_id;\r
-    mlogVar[mlogVarCnt++] = sym_id;\r
-    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());\r
-#endif\r
-\r
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {\r
-            num_eAxc    = xran_get_num_eAxcUl(pHandle);\r
-    } else {\r
-            num_eAxc    = xran_get_num_eAxc(pHandle);\r
-    }\r
-\r
-    num_CCPorts = xran_get_num_cc(pHandle);\r
-    /* U-Plane */\r
-    for(ant_id = 0; ant_id < num_eAxc; ant_id++) {\r
-        for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {\r
-            if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP){\r
-                retval = xran_process_tx_sym_cp_on(ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);\r
-            } else {\r
-                retval = xran_process_tx_sym_cp_off(ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, (ant_id == (num_eAxc - 1)));\r
-            }\r
-        } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */\r
-    } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */\r
-\r
-    MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());\r
-    return retval;\r
-}\r
-\r
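-/* Worked example for the index math above (illustrative values, assuming the\r
-   XranGet* helpers perform the usual division/modulo reductions): with\r
-   XRAN_NUM_OF_SYMBOL_PER_SLOT = 14, sym_idx = 30 gives tti = 30 / 14 = 2 and\r
-   sym_id = 30 % 14 = 2; slot, subframe and frame numbers follow by successive\r
-   division of tti, and the O-RAN frameId is truncated to 8 bits. */\r
-\r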
-int xran_packet_and_dpdk_timer_thread(void *args)\r
-{\r
-    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();\r
-\r
-    uint64_t prev_tsc = 0;\r
-    uint64_t cur_tsc = rte_rdtsc();\r
-    uint64_t diff_tsc = cur_tsc - prev_tsc;\r
-    cpu_set_t cpuset;\r
-    struct sched_param sched_param;\r
-    int res = 0;\r
-    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());\r
-\r
-    sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;\r
-\r
-    if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))\r
-    {\r
-        printf("priority is not changed: coreId = %d, result = %d\n", rte_lcore_id(), res);\r
-    }\r
-\r
-    while(1){\r
-\r
-        cur_tsc  = rte_rdtsc();\r
-        diff_tsc = cur_tsc - prev_tsc;\r
-        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {\r
-            rte_timer_manage();\r
-            prev_tsc = cur_tsc;\r
-        }\r
-\r
-        if (XRAN_STOPPED == xran_if_current_state)\r
-            break;\r
-    }\r
-\r
-    printf("Closing pkts timer thread...\n");\r
-    return 0;\r
-}\r
-\r
-\r
-int32_t xran_init(int argc, char *argv[],\r
-           struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)\r
-{\r
-    int32_t i;\r
-    int32_t j;\r
-\r
-    struct xran_io_loop_cfg *p_io_cfg = (struct xran_io_loop_cfg *)&p_xran_fh_init->io_cfg;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    cpu_set_t system_cpuset;\r
-    pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &system_cpuset);\r
-    /* pick the lowest-numbered core in this thread's affinity mask as the lcore */\r
-    for (j = 0; j < CPU_SETSIZE; j++)\r
-        if (CPU_ISSET(j, &system_cpuset))\r
-            break;\r
-\r
-    int32_t  lcore_id = j;\r
-\r
-    char filename[64];\r
-    int64_t offset_sec, offset_nsec;\r
-\r
-    memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));\r
-\r
-    /* copy init */\r
-    p_xran_dev_ctx->fh_init = *p_xran_fh_init;\r
-\r
-    printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);\r
-\r
-    xran_if_current_state = XRAN_INIT;\r
-\r
-    memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));\r
-\r
-    p_xran_dev_ctx->enableCP    = p_xran_fh_init->enableCP;\r
-    p_xran_dev_ctx->enablePrach = p_xran_fh_init->prachEnable;\r
-    p_xran_dev_ctx->enableSrs   = p_xran_fh_init->srsEnable;\r
-    p_xran_dev_ctx->DynamicSectionEna = p_xran_fh_init->DynamicSectionEna;\r
-\r
-    /* reset to NULL to make sure default send functions get installed */\r
-    p_xran_dev_ctx->send_upmbuf2ring    = NULL;\r
-    p_xran_dev_ctx->send_cpmbuf2ring    = NULL;\r
-\r
-    xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);\r
-    if (p_io_cfg->id == 0)  /* O-DU instance */\r
-        xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,\r
-                           p_io_cfg,\r
-                           &lcore_id,\r
-                           (struct ether_addr *)p_xran_fh_init->p_o_du_addr,\r
-                           (struct ether_addr *)p_xran_fh_init->p_o_ru_addr,\r
-                           p_xran_fh_init->cp_vlan_tag,\r
-                           p_xran_fh_init->up_vlan_tag);\r
-    else                    /* O-RU instance: MAC address order swapped */\r
-        xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,\r
-                           p_io_cfg,\r
-                           &lcore_id,\r
-                           (struct ether_addr *)p_xran_fh_init->p_o_ru_addr,\r
-                           (struct ether_addr *)p_xran_fh_init->p_o_du_addr,\r
-                           p_xran_fh_init->cp_vlan_tag,\r
-                           p_xran_fh_init->up_vlan_tag);\r
-\r
-    for(i = 0; i < 10; i++ )\r
-        rte_timer_init(&tti_to_phy_timer[i]);\r
-\r
-    rte_timer_init(&sym_timer);\r
-    for (i = 0; i< MAX_NUM_OF_DPDK_TIMERS; i++)\r
-        rte_timer_init(&dpdk_timer[i]);\r
-\r
-    p_xran_dev_ctx->direct_pool   = socket_direct_pool;\r
-    p_xran_dev_ctx->indirect_pool = socket_indirect_pool;\r
-\r
-    for (i = 0; i< XRAN_MAX_SECTOR_NR; i++){\r
-        for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){\r
-            LIST_INIT (&p_xran_dev_ctx->sym_cb_list_head[i][j]);\r
-        }\r
-    }\r
-\r
-    printf("Set debug stop %d, debug stop count %d\n", p_xran_fh_init->debugStop, p_xran_fh_init->debugStopCount);\r
-    timing_set_debug_stop(p_xran_fh_init->debugStop, p_xran_fh_init->debugStopCount);\r
-\r
-    for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++){\r
-        xran_fs_clear_slot_type(nCellIdx);\r
-    }\r
-\r
-    *pXranLayerHandle = p_xran_dev_ctx;\r
-\r
-    if(p_xran_fh_init->GPS_Alpha || p_xran_fh_init->GPS_Beta ){\r
-        offset_sec = p_xran_fh_init->GPS_Beta / 100;    // resolution of GPS_Beta is 10 ms\r
-        offset_nsec = (p_xran_fh_init->GPS_Beta - offset_sec * 100) * 1e7 + p_xran_fh_init->GPS_Alpha;\r
-        p_xran_dev_ctx->offset_sec = offset_sec;\r
-        p_xran_dev_ctx->offset_nsec = offset_nsec;\r
-    }else {\r
-        p_xran_dev_ctx->offset_sec  = 0;\r
-        p_xran_dev_ctx->offset_nsec = 0;\r
-    }\r
-\r
-    return 0;\r
-}\r
-\r
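-/* Worked example for the GPS offset conversion above: with GPS_Beta = 250\r
-   (units of 10 ms) and GPS_Alpha = 500 (ns), offset_sec = 250 / 100 = 2 and\r
-   offset_nsec = (250 - 2*100) * 1e7 + 500 = 500000500 ns, i.e. an overall\r
-   offset of 2.5000005 s. */\r
-\r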
-int32_t xran_sector_get_instances (void * pDevHandle, uint16_t nNumInstances,\r
-               xran_cc_handle_t * pSectorInstanceHandles)\r
-{\r
-    xran_status_t nStatus = XRAN_STATUS_FAIL;\r
-    struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;\r
-    XranSectorHandleInfo *pCcHandle = NULL;\r
-    int32_t i = 0;\r
-\r
-    /* Check for the Valid Parameters */\r
-    CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);\r
-\r
-    if (!nNumInstances) {\r
-        print_dbg("No instances requested for this function\n");\r
-        return XRAN_STATUS_INVALID_PARAM;\r
-    }\r
-\r
-    for (i = 0; i < nNumInstances; i++) {\r
-\r
-        /* Allocate Memory for CC handles */\r
-        pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);\r
-\r
-        if(pCcHandle == NULL)\r
-            return XRAN_STATUS_RESOURCE;\r
-\r
-        memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));\r
-\r
-        pCcHandle->nIndex    = i;\r
-        pCcHandle->nXranPort = pDev->xran_port_id;\r
-\r
-        printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);\r
-        pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;\r
-\r
-        printf("Handle: %p Instance: %p\n",\r
-            &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);\r
-    }\r
-\r
-    return XRAN_STATUS_SUCCESS;\r
-}\r
-\r
-int32_t xran_mm_init (void * pHandle, uint64_t nMemorySize,\r
-            uint32_t nMemorySegmentSize)\r
-{\r
-    /* we use mbufs from DPDK memory */\r
-    return 0;\r
-}\r
-\r
-int32_t xran_bm_init (void * pHandle, uint32_t * pPoolIndex, uint32_t nNumberOfBuffers, uint32_t nBufferSize)\r
-{\r
-    XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;\r
-    uint32_t nAllocBufferSize;\r
-\r
-    char pool_name[RTE_MEMPOOL_NAMESIZE];\r
-\r
-    snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "ru_%d_cc_%d_idx_%d",\r
-        pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex);\r
-\r
-    nAllocBufferSize = nBufferSize + sizeof(struct ether_hdr) +\r
-        sizeof (struct xran_ecpri_hdr) +\r
-        sizeof (struct radio_app_common_hdr) +\r
-        sizeof(struct data_section_hdr) + 256;\r
-\r
-    if(nAllocBufferSize >= UINT16_MAX) {\r
-        rte_panic("nAllocBufferSize exceeds UINT16_MAX [ handle %p %d %d ] [nPoolIndex %d] nNumberOfBuffers %d nBufferSize %d nAllocBufferSize %d\n",\r
-                    pXranCc, pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex, nNumberOfBuffers, nBufferSize, nAllocBufferSize);\r
-        return -1;\r
-    }\r
-\r
-    printf("%s: [ handle %p %d %d ] [nPoolIndex %d] nNumberOfBuffers %d nBufferSize %d\n", pool_name,\r
-                        pXranCc, pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex, nNumberOfBuffers, nBufferSize);\r
-\r
-    pXranCc->p_bufferPool[pXranCc->nBufferPoolIndex] = rte_pktmbuf_pool_create(pool_name, nNumberOfBuffers,\r
-                                                                               MBUF_CACHE, 0, nAllocBufferSize, rte_socket_id());\r
-\r
-    if(pXranCc->p_bufferPool[pXranCc->nBufferPoolIndex] == NULL){\r
-        rte_panic("rte_pktmbuf_pool_create failed [ handle %p %d %d ] [nPoolIndex %d] nNumberOfBuffers %d nBufferSize %d errno %s\n",\r
-                    pXranCc, pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex, nNumberOfBuffers, nBufferSize, rte_strerror(rte_errno));\r
-        return -1;\r
-    }\r
-\r
-    pXranCc->bufferPoolElmSz[pXranCc->nBufferPoolIndex]  = nBufferSize;\r
-    pXranCc->bufferPoolNumElm[pXranCc->nBufferPoolIndex] = nNumberOfBuffers;\r
-\r
-    printf("CC:[ handle %p ru %d cc_idx %d ] [nPoolIndex %d] mb pool %p \n",\r
-                pXranCc, pXranCc->nXranPort, pXranCc->nIndex,\r
-                    pXranCc->nBufferPoolIndex,  pXranCc->p_bufferPool[pXranCc->nBufferPoolIndex]);\r
-\r
-    *pPoolIndex = pXranCc->nBufferPoolIndex++;\r
-\r
-    return 0;\r
-}\r
-\r
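-/* Sizing note for xran_bm_init() above: each pool element must hold the IQ\r
-   payload plus every header prepended on the wire (Ethernet, eCPRI, radio\r
-   application, data section) plus 256 bytes of slack, and the total has to\r
-   stay below UINT16_MAX because an mbuf data room size is a 16-bit field. */\r
-\r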
-int32_t xran_bm_allocate_buffer(void * pHandle, uint32_t nPoolIndex, void **ppData,  void **ppCtrl)\r
-{\r
-    XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;\r
-    *ppData = NULL;\r
-    *ppCtrl = NULL;\r
-\r
-    struct rte_mbuf * mb =  rte_pktmbuf_alloc(pXranCc->p_bufferPool[nPoolIndex]);\r
-\r
-    if(mb){\r
-        char * start     = rte_pktmbuf_append(mb, pXranCc->bufferPoolElmSz[nPoolIndex]);\r
-        char * ethhdr    = rte_pktmbuf_prepend(mb, sizeof(struct ether_hdr));\r
-\r
-        if(start && ethhdr){\r
-            char * iq_offset = rte_pktmbuf_mtod(mb, char * );\r
-            /* skip headers */\r
-            iq_offset = iq_offset + sizeof(struct ether_hdr) +\r
-                                    sizeof (struct xran_ecpri_hdr) +\r
-                                    sizeof (struct radio_app_common_hdr) +\r
-                                    sizeof(struct data_section_hdr);\r
-\r
-            if (0) /* if compression */\r
-                iq_offset += sizeof (struct data_section_compression_hdr);\r
-\r
-            *ppData = (void *)iq_offset;\r
-            *ppCtrl  = (void *)mb;\r
-        } else {\r
-            print_err("[nPoolIndex %d] rte_pktmbuf_append/prepend failed\n", nPoolIndex);\r
-            return -1;\r
-        }\r
-    } else {\r
-        print_err("[nPoolIndex %d] mb alloc failed \n", nPoolIndex );\r
-        return -1;\r
-    }\r
-\r
-    if (*ppData ==  NULL){\r
-        print_err("[nPoolIndex %d] rte_pktmbuf_append for %d failed \n", nPoolIndex, pXranCc->bufferPoolElmSz[nPoolIndex]);\r
-        return -1;\r
-    }\r
-\r
-    return 0;\r
-}\r
-\r
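-/* Buffer layout produced by xran_bm_allocate_buffer() above, per mbuf:\r
-\r
-   [headroom][ether_hdr][ecpri_hdr][radio_app_common_hdr][data_section_hdr][IQ ...]\r
-                                                                            ^ *ppData\r
-   *ppCtrl points at the owning mbuf so it can be freed or transmitted later. */\r
-\r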
-int32_t xran_bm_free_buffer(void * pHandle, void *pData, void *pCtrl)\r
-{\r
-    XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;\r
-\r
-    if(pCtrl)\r
-        rte_pktmbuf_free(pCtrl);\r
-\r
-    return 0;\r
-}\r
-\r
-int32_t xran_5g_fronthault_config (void * pHandle,\r
-                struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],\r
-                struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],\r
-                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],\r
-                struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],\r
-                xran_transport_callback_fn pCallback,\r
-                void *pCallbackTag)\r
-{\r
-    XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;\r
-    xran_status_t nStatus = XRAN_STATUS_SUCCESS;\r
-    int j, i = 0, z;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    print_dbg("%s\n", __FUNCTION__);\r
-\r
-    if(NULL == pHandle)\r
-    {\r
-        printf("Handle is NULL!\n");\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-\r
-    if (pCallback == NULL)\r
-    {\r
-        printf ("no callback\n");\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-\r
-    i = pXranCc->nIndex;\r
-\r
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++)\r
-    {\r
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){\r
-            /* U-plane TX */\r
-\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];\r
-\r
-            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcBuffer[z][j];\r
-\r
-            /* C-plane TX */\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];\r
-\r
-            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcCpBuffer[z][j];\r
-\r
-            /* U-plane RX */\r
-\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];\r
-\r
-            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList =   *pDstBuffer[z][j];\r
-\r
-            /* C-plane RX */\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];\r
-\r
-            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];\r
-        }\r
-    }\r
-\r
-\r
-    p_xran_dev_ctx->pCallback[i]    = pCallback;\r
-    p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;\r
-\r
-    p_xran_dev_ctx->xran2phy_mem_ready = 1;\r
-\r
-    return nStatus;\r
-}\r
-\r
-int32_t xran_5g_prach_req (void *  pHandle,\r
-                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],\r
-                xran_transport_callback_fn pCallback,\r
-                void *pCallbackTag)\r
-{\r
-    XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;\r
-    xran_status_t nStatus = XRAN_STATUS_SUCCESS;\r
-    int j, i = 0, z;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    if(NULL == pHandle)\r
-    {\r
-        printf("Handle is NULL!\n");\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-    if (pCallback == NULL)\r
-    {\r
-        printf ("no callback\n");\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-\r
-    i = pXranCc->nIndex;\r
-\r
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++)\r
-    {\r
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number.\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];\r
-           p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList =   *pDstBuffer[z][j];\r
-        }\r
-    }\r
-\r
-    p_xran_dev_ctx->pPrachCallback[i]    = pCallback;\r
-    p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;\r
-\r
-    return 0;\r
-}\r
-\r
-\r
-int32_t xran_5g_srs_req (void *  pHandle,\r
-                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],\r
-                xran_transport_callback_fn pCallback,\r
-                void *pCallbackTag)\r
-{\r
-    XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;\r
-    xran_status_t nStatus = XRAN_STATUS_SUCCESS;\r
-    int j, i = 0, z;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    if(NULL == pHandle)\r
-    {\r
-        printf("Handle is NULL!\n");\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-    if (pCallback == NULL)\r
-    {\r
-        printf ("no callback\n");\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-\r
-    i = pXranCc->nIndex;\r
-\r
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++)\r
-    {\r
-        for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number.\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];\r
-           p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList =   *pDstBuffer[z][j];\r
-        }\r
-    }\r
-\r
-    p_xran_dev_ctx->pSrsCallback[i]    = pCallback;\r
-    p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;\r
-\r
-    return 0;\r
-}\r
-\r
-uint32_t xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *core_used, uint32_t clear)\r
-{\r
-    *total_time = xran_total_tick;\r
-    *used_time = xran_used_tick;\r
-    *core_used = xran_core_used;\r
-\r
-    if (clear)\r
-    {\r
-        xran_total_tick = 0;\r
-        xran_used_tick = 0;\r
-    }\r
-\r
-    return 0;\r
-}\r
-\r
-void * xran_malloc(size_t buf_len)\r
-{\r
-    return rte_malloc("External buffer", buf_len, RTE_CACHE_LINE_SIZE);\r
-}\r
-\r
-uint8_t  *xran_add_hdr_offset(uint8_t  *dst, int16_t compMethod)\r
-{\r
-    dst+= (RTE_PKTMBUF_HEADROOM +\r
-          sizeof (struct xran_ecpri_hdr) +\r
-          sizeof (struct radio_app_common_hdr) +\r
-          sizeof(struct data_section_hdr));\r
-\r
-    if(compMethod != XRAN_COMPMETHOD_NONE)\r
-          dst += sizeof (struct data_section_compression_hdr);\r
-\r
-    dst = RTE_PTR_ALIGN_CEIL(dst, 64);\r
-\r
-    return dst;\r
-}\r
-\r
-int32_t xran_open(void *pHandle, struct xran_fh_config* pConf)\r
-{\r
-    int32_t i;\r
-    uint8_t nNumerology = 0;\r
-    int32_t  lcore_id = 0;\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-    struct xran_fh_config *pFhCfg;\r
-    pFhCfg = &(p_xran_dev_ctx->fh_cfg);\r
-\r
-    memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));\r
-\r
-    if(pConf->log_level)\r
-        printf(" %s: O-RU Category %s\n", __FUNCTION__, (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");\r
-\r
-    nNumerology = xran_get_conf_numerology(pHandle);\r
-\r
-    if (pConf->nCC > XRAN_MAX_SECTOR_NR)\r
-    {\r
-        if(pConf->log_level)\r
-            printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);\r
-        pConf->nCC = XRAN_MAX_SECTOR_NR;\r
-\r
-    }\r
-    if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER\r
-        || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ){\r
-\r
-        print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);\r
-        return XRAN_STATUS_FAIL;\r
-    }\r
-\r
-    /* setup PRACH configuration for C-Plane */\r
-    xran_init_prach(pConf, p_xran_dev_ctx);\r
-    xran_init_srs(pConf, p_xran_dev_ctx);\r
-\r
-    xran_cp_init_sectiondb(pHandle);\r
-    xran_init_sectionid(pHandle);\r
-    xran_init_seqid(pHandle);\r
-\r
-    if(pConf->ru_conf.xran_max_frame) {\r
-       xran_max_frame = pConf->ru_conf.xran_max_frame;\r
-       printf("xran_max_frame %d\n", xran_max_frame);\r
-    }\r
-\r
-    interval_us = xran_fs_get_tti_interval(nNumerology);\r
-\r
-    if(pConf->log_level){\r
-        printf("%s: interval_us=%ld\n", __FUNCTION__, interval_us);\r
-    }\r
-    timing_set_numerology(nNumerology);\r
-\r
-    for(i = 0 ; i <pConf->nCC; i++){\r
-        xran_fs_set_slot_type(i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,\r
-            pConf->frame_conf.sSlotConfig);\r
-    }\r
-\r
-    xran_fs_slot_limit_init(xran_fs_get_tti_interval(nNumerology));\r
-\r
-    if(xran_ethdi_get_ctx()->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){\r
-        p_xran_dev_ctx->bbdev_dec = pConf->bbdev_dec;\r
-        p_xran_dev_ctx->bbdev_enc = pConf->bbdev_enc;\r
-    }\r
-\r
-    /* if send_xpmbuf2ring needs to be changed from default functions,\r
-     * then those should be set between xran_init and xran_open */\r
-    if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)\r
-        p_xran_dev_ctx->send_cpmbuf2ring    = xran_ethdi_mbuf_send_cp;\r
-    if(p_xran_dev_ctx->send_upmbuf2ring == NULL)\r
-        p_xran_dev_ctx->send_upmbuf2ring    = xran_ethdi_mbuf_send;\r
-\r
-    /* Start packet processing thread */\r
-    if((uint16_t)xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF] != 0xFFFF &&\r
-        (uint16_t)xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF] != 0xFFFF ){\r
-        if(pConf->log_level){\r
-            print_dbg("XRAN_UP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF]);\r
-            print_dbg("XRAN_CP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF]);\r
-        }\r
-        if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), xran_ethdi_get_ctx()->io_cfg.timing_core))\r
-            rte_panic("thread_run() failed to start\n");\r
-    } else if(pConf->log_level){\r
-            printf("Eth port was not open. Processing thread was not started\n");\r
-    }\r
-\r
-    return 0;\r
-}\r
-\r
-int32_t xran_start(void *pHandle)\r
-{\r
-    if(xran_get_if_state() == XRAN_RUNNING) {\r
-        print_err("Already STARTED!!");\r
-        return (-1);\r
-        }\r
-\r
-    xran_if_current_state = XRAN_RUNNING;\r
-    return 0;\r
-}\r
-\r
-int32_t xran_stop(void *pHandle)\r
-{\r
-    if(xran_get_if_state() == XRAN_STOPPED) {\r
-        print_err("Already STOPPED!!");\r
-        return (-1);\r
-        }\r
-\r
-    xran_if_current_state = XRAN_STOPPED;\r
-    return 0;\r
-}\r
-\r
-int32_t xran_close(void *pHandle)\r
-{\r
-    xran_if_current_state = XRAN_STOPPED;\r
-    //TODO: fix memory leak xran_cp_free_sectiondb(pHandle);\r
-    //rte_eal_mp_wait_lcore();\r
-    //xran_ethdi_ports_stats();\r
-\r
-#ifdef RTE_LIBRTE_PDUMP\r
-    /* uninitialize packet capture framework */\r
-    rte_pdump_uninit();\r
-#endif\r
-    return 0;\r
-}\r
-\r
-int32_t xran_mm_destroy (void * pHandle)\r
-{\r
-    if(xran_get_if_state() == XRAN_RUNNING) {\r
-        print_err("Please STOP first !!");\r
-        return (-1);\r
-        }\r
-\r
-    /* functionality is not yet implemented */\r
-    return -1;\r
-}\r
-\r
-int32_t xran_reg_sym_cb(void *pHandle, xran_callback_sym_fn symCb, void * symCbParam, uint8_t symb,  uint8_t ant)\r
-{\r
-    if(xran_get_if_state() == XRAN_RUNNING) {\r
-        print_err("Cannot register callback while running!!\n");\r
-        return (-1);\r
-        }\r
-\r
-    /* functionality is not yet implemented */\r
-    print_err("Functionality is not yet implemented !");\r
-    return -1;\r
-}\r
-\r
-int32_t xran_reg_physide_cb(void *pHandle, xran_fh_tti_callback_fn Cb, void *cbParam, int skipTtiNum, enum callback_to_phy_id id)\r
-{\r
-    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    if(xran_get_if_state() == XRAN_RUNNING) {\r
-        print_err("Cannot register callback while running!!\n");\r
-        return (-1);\r
-        }\r
-\r
-    p_xran_dev_ctx->ttiCb[id]      = Cb;\r
-    p_xran_dev_ctx->TtiCbParam[id] = cbParam;\r
-    p_xran_dev_ctx->SkipTti[id]    = skipTtiNum;\r
-\r
-    return 0;\r
-}\r
-\r
-/* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open\r
- * each cb will be set to its default during open if it is left NULL */\r
-int xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)\r
-{\r
-    struct xran_device_ctx *p_xran_dev_ctx;\r
-\r
-    if(xran_get_if_state() == XRAN_RUNNING) {\r
-        print_err("Cannot register callback while running!!\n");\r
-        return (-1);\r
-        }\r
-\r
-    p_xran_dev_ctx = xran_dev_get_ctx();\r
-\r
-    p_xran_dev_ctx->send_cpmbuf2ring    = mbuf_send_cp;\r
-    p_xran_dev_ctx->send_upmbuf2ring    = mbuf_send_up;\r
-\r
-    return (0);\r
-}\r
-\r
-\r
-int32_t xran_get_slot_idx (uint32_t *nFrameIdx, uint32_t *nSubframeIdx,  uint32_t *nSlotIdx, uint64_t *nSecond)\r
-{\r
-    int32_t tti = 0;\r
-\r
-    tti           = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);\r
-    *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);\r
-    *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);\r
-    *nFrameIdx    = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);\r
-    *nSecond      = timing_get_current_second();\r
-\r
-    return tti;\r
-}\r
-\r
-\r
-/**\r
- * @brief Get the configuration of eAxC ID\r
- *\r
- * @return the pointer of configuration\r
- */\r
-inline struct xran_eaxcid_config *xran_get_conf_eAxC(void *pHandle)\r
-{\r
-    return (&(xran_dev_get_ctx()->eAxc_id_cfg));\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of the total number of beamforming weights on RU\r
- *\r
- * @return Configured the number of beamforming weights\r
- */\r
-inline uint8_t xran_get_conf_num_bfweights(void *pHandle)\r
-{\r
-    return (xran_dev_get_ctx()->fh_init.totalBfWeights);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of subcarrier spacing for PRACH\r
- *\r
- * @return subcarrier spacing value for PRACH\r
- */\r
-inline uint8_t xran_get_conf_prach_scs(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->prach_conf.nPrachSubcSpacing);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of FFT size for RU\r
- *\r
- * @return FFT size value for RU\r
- */\r
-inline uint8_t xran_get_conf_fftsize(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->ru_conf.fftSize);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of numerology\r
- *\r
- * @return Configured numerology\r
- */\r
-inline uint8_t xran_get_conf_numerology(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->frame_conf.nNumerology);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of IQ bit width for RU\r
- *\r
- * @return IQ bit width for RU\r
- */\r
-inline uint8_t xran_get_conf_iqwidth(void *pHandle)\r
-{\r
-    struct xran_fh_config *pFhCfg;\r
-\r
-    pFhCfg = xran_lib_get_ctx_fhcfg();\r
-    return ((pFhCfg->ru_conf.iqWidth==16)?0:pFhCfg->ru_conf.iqWidth);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of compression method for RU\r
- *\r
- * @return Compression method for RU\r
- */\r
-inline uint8_t xran_get_conf_compmethod(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->ru_conf.compMeth);\r
-}\r
-\r
-\r
-/**\r
- * @brief Get the configuration of the number of component carriers\r
- *\r
- * @return Configured the number of component carriers\r
- */\r
-inline uint8_t xran_get_num_cc(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->nCC);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of the number of antenna for UL\r
- *\r
- * @return Configured the number of antenna\r
- */\r
-inline uint8_t xran_get_num_eAxc(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->neAxc);\r
-}\r
-\r
-/**\r
- * @brief Get configuration of O-RU (Cat A or Cat B)\r
- *\r
- * @return Configured O-RU category\r
- */\r
-inline enum xran_category xran_get_ru_category(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->ru_conf.xranCat);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of the number of antenna\r
- *\r
- * @return Configured the number of antenna\r
- */\r
-inline uint8_t xran_get_num_eAxcUl(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->neAxcUl);\r
-}\r
-\r
-/**\r
- * @brief Get the configuration of the number of antenna elements\r
- *\r
- * @return Configured the number of antenna\r
- */\r
-inline uint8_t xran_get_num_ant_elm(void *pHandle)\r
-{\r
-    return (xran_lib_get_ctx_fhcfg()->nAntElmTRx);\r
-}\r
-\r
-int32_t xran_get_common_counters(void *pXranLayerHandle, struct xran_common_counters *pStats)\r
-{\r
-    struct xran_device_ctx* pDev = (struct xran_device_ctx*)pXranLayerHandle;\r
-\r
-    if(pStats && pDev) {\r
-        *pStats  =  pDev->fh_counters;\r
-        return XRAN_STATUS_SUCCESS;\r
-    } else {\r
-        return XRAN_STATUS_INVALID_PARAM;\r
-    }\r
-}\r
-\r
+/******************************************************************************
+*
+*   Copyright (c) 2020 Intel.
+*
+*   Licensed under the Apache License, Version 2.0 (the "License");
+*   you may not use this file except in compliance with the License.
+*   You may obtain a copy of the License at
+*
+*       http://www.apache.org/licenses/LICENSE-2.0
+*
+*   Unless required by applicable law or agreed to in writing, software
+*   distributed under the License is distributed on an "AS IS" BASIS,
+*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+*   See the License for the specific language governing permissions and
+*   limitations under the License.
+*
+*******************************************************************************/
+
+/**
+ * @brief XRAN main functionality module
+ * @file xran_main.c
+ * @ingroup group_source_xran
+ * @author Intel Corporation
+ **/
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <assert.h>
+#include <err.h>
+#include <libgen.h>
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <malloc.h>
+#include <immintrin.h>
+#include <numa.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_version.h>
+#include <rte_flow.h>
+#if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */
+#include <rte_ecpri.h>
+#endif
+#include "xran_fh_o_du.h"
+#include "xran_fh_o_ru.h"
+#include "xran_main.h"
+
+#include "ethdi.h"
+#include "xran_mem_mgr.h"
+#include "xran_tx_proc.h"
+#include "xran_rx_proc.h"
+#include "xran_pkt.h"
+#include "xran_up_api.h"
+#include "xran_cp_api.h"
+#include "xran_sync_api.h"
+#include "xran_lib_mlog_tasks_id.h"
+#include "xran_timer.h"
+#include "xran_common.h"
+#include "xran_dev.h"
+#include "xran_frame_struct.h"
+#include "xran_printf.h"
+#include "xran_cp_proc.h"
+#include "xran_tx_proc.h"
+#include "xran_rx_proc.h"
+#include "xran_cb_proc.h"
+#include "xran_ecpri_owd_measurements.h"
+
+#include "xran_mlog_lnx.h"
+
+static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {{NULL}};
+
+uint64_t interval_us = 1000; /**< TTI interval in microseconds of the cell with the maximum numerology */
+
+uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */
+uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a slot [0:13] */
+uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1]
+                                                where TTI is TTI interval in microseconds */
+
+uint16_t xran_SFN_at_Sec_Start   = 0; /**< SFN at current second start */
+uint16_t xran_max_frame          = 1023; /**< maximum frame number in use; expected to be 99 (legacy compatibility mode) or 1023 per section 9.7.2, System Frame Number Calculation */
+
+static uint64_t xran_total_tick = 0, xran_used_tick = 0;
+static uint32_t xran_num_cores_used = 0;
+static uint32_t xran_core_used[64] = {0};
+int32_t first_call = 0;
+int32_t mlogxranenable = 0;
+
+struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void);
+int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc);
+
+void tti_ota_cb(struct rte_timer *tim, void *arg);
+void tti_to_phy_cb(struct rte_timer *tim, void *arg);
+
+int32_t xran_pkt_gen_process_ring(struct rte_ring *r);
+
+void
+xran_updateSfnSecStart(void)
+{
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+    struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
+    int32_t xran_ports  = p_xran_dev_ctx->fh_init.xran_ports;
+    int32_t o_xu_id = 0;
+    uint64_t currentSecond = timing_get_current_second();
+    // Assume always positive
+    uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
+    uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;
+    uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
+    xran_SFN_at_Sec_Start = sfn;
+
+    for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){
+        pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter;
+        pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter;
+        pCnt->tx_bytes_counter = 0;
+        pCnt->rx_bytes_counter = 0;
+        p_xran_dev_ctx++;
+        pCnt = &p_xran_dev_ctx->fh_counters;
+    }
+}
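+
+/* Worked example (illustrative only, not part of the build): with
+ * UNIX_TO_GPS_SECONDS_OFFSET = 315964782 and NUM_OF_FRAMES_PER_SECOND = 100,
+ * a current second of 1600000000 gives gpsSecond = 1284035218 and
+ * nFrames = 128403521800; with xran_max_frame = 1023 the SFN at the start of
+ * that second is 128403521800 % 1024 = 264. The SFN is thus derived purely
+ * from absolute time, so O-DU and O-RU stay frame-aligned without exchanging
+ * any state. */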
+
+#if 0
+static inline int32_t
+xran_getSlotIdxSecond(uint32_t interval)
+{
+    int32_t frameIdxSecond = xran_getSfnSecStart();
+    int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval);
+    return slotIndxSecond;
+}
+#endif
+
+enum xran_if_state
+xran_get_if_state(void)
+{
+    return xran_if_current_state;
+}
+
+int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id)
+{
+    int32_t is_prach_slot = 0;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
+    if (p_xran_dev_ctx == NULL)
+    {
+        print_err("PortId %d does not exist\n", PortId);
+        return is_prach_slot;
+    }
+    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+    uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
+
+    if (nNumerology < 2){
+        //for FR1, the index in 38.211 tables 6.3.3.2-2 and 6.3.3.2-3 is a subframe index
+        if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){
+            if (pPrachCPConfig->nrofPrachInSlot == 0){
+                if(slot_id == 0)
+                    is_prach_slot = 1;
+            }
+            else if (pPrachCPConfig->nrofPrachInSlot == 2)
+                is_prach_slot = 1;
+            else{
+                if (nNumerology == 0)
+                    is_prach_slot = 1;
+                else if (slot_id == 1)
+                    is_prach_slot = 1;
+            }
+        }
+    } else if (nNumerology == 3){
+        //for FR2, the index in 38.211 tab 6.3.3.4 is the slot index of a 60kHz slot
+        uint32_t slotidx;
+        slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id;
+        if (pPrachCPConfig->nrofPrachInSlot == 2){
+            if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)
+                is_prach_slot = 1;
+        } else {
+            if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){
+                is_prach_slot = 1;
+            }
+        }
+    } else
+        print_err("Numerology %d not supported", nNumerology);
+    return is_prach_slot;
+}
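+
+/* Usage sketch (hypothetical configuration, for illustration): with
+ * numerology 1 (FR1, two slots per subframe), isPRACHslot[4] == 1 and
+ * nrofPrachInSlot == 1, only the second slot of subframe 4 is a PRACH slot:
+ *     xran_is_prach_slot(0, 4, 1);    // returns 1
+ *     xran_is_prach_slot(0, 4, 0);    // returns 0
+ */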
+
+int32_t
+xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
+{
+    struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);
+
+    if(p_srs){
+        p_srs->symbMask = pConf->srs_conf.symbMask;     /* deprecated */
+        p_srs->slot             = pConf->srs_conf.slot;
+        p_srs->ndm_offset       = pConf->srs_conf.ndm_offset;
+        p_srs->ndm_txduration   = pConf->srs_conf.ndm_txduration;
+        p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset;
+
+        print_dbg("SRS sym         %d\n", p_srs->slot);
+        print_dbg("SRS NDM offset  %d\n", p_srs->ndm_offset);
+        print_dbg("SRS NDM Tx      %d\n", p_srs->ndm_txduration);
+        print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);
+    }
+    return (XRAN_STATUS_SUCCESS);
+}
+
+int32_t
+xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
+{
+    /* update Rach for LTE */
+    return xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_LTE);
+}
+
+int32_t
+xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx, enum xran_ran_tech xran_tech)
+{
+    int32_t i;
+    uint8_t slotNr;
+    struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);
+    const xRANPrachConfigTableStruct *pxRANPrachConfigTable;
+    uint8_t nNumerology = pConf->frame_conf.nNumerology;
+    uint8_t nPrachConfIdx = -1;// = pPRACHConfig->nPrachConfIdx;
+    struct xran_prach_cp_config *pPrachCPConfig = NULL;
+    if(pConf->dssEnable){
+        /* DSS enabled: check the RAN technology and select the matching PRACH configuration */
+        if(xran_tech == XRAN_RAN_5GNR){
+            pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+            nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
+        }
+        else{
+            pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+            nPrachConfIdx = pPRACHConfig->nPrachConfIdxLTE;
+        }
+    }
+    else{
+        pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+        nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
+    }
+    if (nNumerology > 2)
+        pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
+    else if (pConf->frame_conf.nFrameDuplexType == 1)
+        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];
+    else
+        pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];
+
+    uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
+    const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];
+    memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));
+    if(pConf->log_level)
+        printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);
+
+    if (preambleFmrt <= 2)
+    {
+        pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_012;         // 1 PRACH preamble format 0 1 2
+    }
+    else if (preambleFmrt == 3)
+    {
+        pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_3;         // 1 PRACH preamble format 3
+    }
+    else
+    {
+        pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC;       // 3, PRACH preamble format A1~3, B1~4, C0, C2
+    }
+    pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
+    pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
+    pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
+    pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
+    pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
+    pPrachCPConfig->x = pxRANPrachConfigTable->x;
+    pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;
+    pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
+    pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
+    if (preambleFmrt >= FORMAT_A1)
+    {
+        pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
+        pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
+    }
+    else
+    {
+        pPrachCPConfig->numSymbol = 1;
+        pPrachCPConfig->occassionsInPrachSlot = 1;
+    }
+
+    if(pConf->log_level)
+        printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);
+    pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
+    for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
+    {
+        slotNr = pxRANPrachConfigTable->slotNr[i];
+        if (slotNr > 0){
+            pPrachCPConfig->isPRACHslot[slotNr] = 1;
+            if(pConf->log_level)
+                printf(" %u ..", slotNr);
+        }
+    }
+    if(pConf->log_level)
+        printf("\n");
+    for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){
+        p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;
+        p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
+    }
+    if(pConf->log_level){
+        printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);
+    }
+
+    pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx);
+    print_dbg("PRACH eAxC_offset %d\n",  pPrachCPConfig->eAxC_offset);
+
+    /* Save some configs for app */
+    pPRACHConfig->startSymId    = pPrachCPConfig->startSymId;
+    pPRACHConfig->lastSymId     = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
+    pPRACHConfig->startPrbc     = pPrachCPConfig->startPrbc;
+    pPRACHConfig->numPrbc       = pPrachCPConfig->numPrbc;
+    pPRACHConfig->timeOffset    = pPrachCPConfig->timeOffset;
+    pPRACHConfig->freqOffset    = pPrachCPConfig->freqOffset;
+    pPRACHConfig->eAxC_offset   = pPrachCPConfig->eAxC_offset;
+
+    return (XRAN_STATUS_SUCCESS);
+}
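+
+/* Worked example (hypothetical table values, for illustration): for a
+ * long-format preamble such as format 0, numPrbc is 70 and both numSymbol and
+ * occassionsInPrachSlot are forced to 1, so with startSymId = 0 the last
+ * PRACH symbol is 0 + 1*1 - 1 = 0. For a short A1-style format with
+ * startSymId = 0, numSymbol = 2 and occassionsInPrachSlot = 6, the last
+ * symbol is 0 + 2*6 - 1 = 11. */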
+
+uint32_t
+xran_slotid_convert(uint16_t slot_id, uint16_t dir) /* dir = 0: from PHY slot id to xran spec slot id as defined in 5.3.2; dir = 1: from xran slot id to PHY slot id */
+{
+    return slot_id;
+#if 0
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+    uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
+    uint8_t FR = 1;
+    if (mu > 2)
+        FR=2;
+    if (dir == 0)
+    {
+        if (FR == 1)
+        {
+            return (slot_id << (2-mu));
+        }
+        else
+        {
+            return (slot_id << (3-mu));
+        }
+    }
+    else
+    {
+        if (FR == 1)
+        {
+            return (slot_id >> (2-mu));
+        }
+        else
+        {
+            return (slot_id >> (3-mu));
+        }
+    }
+#endif
+}
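+
+/* Note on the disabled conversion above (illustrative): the shifts appear to
+ * normalize the slot id to the largest numerology of the band (mu = 2 for
+ * FR1, mu = 3 for FR2). For example, for FR1 with mu = 1 a PHY slot id of 3
+ * would map to 3 << (2-1) = 6 for dir = 0 and back via 6 >> (2-1) = 3 for
+ * dir = 1. As committed, the function returns slot_id unchanged, i.e. PHY
+ * and xran slot ids are treated as identical. */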
+
+void
+sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)
+{
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    long t1 = MLogXRANTick(), t2;
+    long t3;
+
+    if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){
+        t3 = xran_tick();
+        tti_ota_cb(NULL, (void*)p_xran_dev_ctx);
+        *used_tick += get_ticks_diff(xran_tick(), t3);
+    }
+
+    t3 = xran_tick();
+    if (xran_process_tx_sym(p_xran_dev_ctx))
+    {
+        *used_tick += get_ticks_diff(xran_tick(), t3);
+    }
+
+    /* check if there is call back to do something else on this symbol */
+    struct cb_elem_entry *cb_elm;
+    LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){
+        if(cb_elm){
+            cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx);
+            p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx);
+        }
+    }
+
+    t2 = MLogXRANTick();
+    MLogXRANTask(PID_SYM_OTA_CB, t1, t2);
+}
+
+uint32_t
+xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx)
+{
+    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
+    uint32_t tim_lcore = 0; /* fall back to lcore 0 if the ethdi context is not available */
+
+    if(eth_ctx) {
+        tim_lcore = eth_ctx->io_cfg.timing_core; /* default to the timing core */
+        if(eth_ctx->num_workers == 0) { /* no workers */
+            tim_lcore = eth_ctx->io_cfg.timing_core;
+        } else if (eth_ctx->num_workers == 1) { /* one worker */
+            switch (job_type_id)
+            {
+                case XRAN_JOB_TYPE_OTA_CB:
+                    tim_lcore = eth_ctx->io_cfg.timing_core;
+                    break;
+                case XRAN_JOB_TYPE_CP_DL:
+                case XRAN_JOB_TYPE_CP_UL:
+                case XRAN_JOB_TYPE_DEADLINE:
+                case XRAN_JOB_TYPE_SYM_CB:
+                    tim_lcore = eth_ctx->worker_core[0];
+                    break;
+                default:
+                    print_err("incorrect job type id %d\n", job_type_id);
+                    tim_lcore = eth_ctx->io_cfg.timing_core;
+                    break;
+            }
+        } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) {
+            switch (job_type_id)
+            {
+                case XRAN_JOB_TYPE_OTA_CB:
+                    tim_lcore = eth_ctx->worker_core[0];
+                    break;
+                case XRAN_JOB_TYPE_CP_DL:
+                    tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]];
+                    break;
+                case XRAN_JOB_TYPE_CP_UL:
+                    tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]];
+                    break;
+                case XRAN_JOB_TYPE_DEADLINE:
+                case XRAN_JOB_TYPE_SYM_CB:
+                    tim_lcore = eth_ctx->worker_core[0];
+                    break;
+                default:
+                    print_err("incorrect job type id %d\n", job_type_id);
+                    tim_lcore = eth_ctx->io_cfg.timing_core;
+                    break;
+            }
+        } else {
+            print_err("incorrect eth_ctx->num_workers id %d\n", eth_ctx->num_workers);
+            tim_lcore = eth_ctx->io_cfg.timing_core;
+        }
+    }
+
+    return tim_lcore;
+}
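+
+/* Usage sketch (assumed call pattern, for illustration): a caller resolves
+ * the lcore for a job class and then arms its timer there, e.g.
+ *     unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_CP_DL, p_dev_ctx);
+ *     xran_timer_arm_ex(&p_dev_ctx->dpdk_timer[0], tx_cp_dl_cb, (void*)p_dev_ctx, tim_lcore);
+ * With 2..6 workers, CP DL/UL jobs follow the per-device job2wrk_id[] mapping
+ * while OTA, deadline and symbol callbacks stay on worker_core[0]. */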
+
+void
+tti_ota_cb(struct rte_timer *tim, void *arg)
+{
+    uint32_t    frame_id    = 0;
+    uint32_t    subframe_id = 0;
+    uint32_t    slot_id     = 0;
+    uint32_t    next_tti    = 0;
+
+    uint32_t mlogVar[10];
+    uint32_t mlogVarCnt = 0;
+    uint64_t t1 = MLogTick();
+    uint32_t reg_tti  = 0;
+    uint32_t reg_sfn  = 0;
+
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx;
+    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
+    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
+
+    unsigned tim_lcore =  xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx);
+
+    MLogTask(PID_TTI_TIMER, t1, MLogTick());
+
+    if(p_xran_dev_ctx->xran_port_id == 0){
+        /* To match TTbox */
+        if(xran_lib_ota_tti[0] == 0)
+            reg_tti = xran_fs_get_max_slot(PortId) - 1;
+        else
+            reg_tti = xran_lib_ota_tti[0] - 1;
+
+        MLogIncrementCounter();
+        reg_sfn = XranGetFrameNum(reg_tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us)) * 10 + XranGetSubFrameNum(reg_tti, SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);
+        /* subframe and slot */
+        MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us)));
+        MLogMark(1, t1);
+    }
+
+    slot_id     = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local));
+    subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
+    frame_id    = XranGetFrameNum(xran_lib_ota_tti[PortId],xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
+
+    pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId];
+
+    /** tti as seen from PHY */
+    int32_t nSfIdx = -1;
+    uint32_t nFrameIdx;
+    uint32_t nSubframeIdx;
+    uint32_t nSlotIdx;
+    uint64_t nSecond;
+    uint8_t Numerology = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
+    uint8_t nNrOfSlotInSf = 1 << Numerology;
+
+    xran_get_slot_idx(0, &nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+    nSfIdx = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*nNrOfSlotInSf
+             + nSubframeIdx*nNrOfSlotInSf
+             + nSlotIdx;
+
+    mlogVar[mlogVarCnt++] = 0x11111111;
+    mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId];
+    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
+    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId] / 14;
+    mlogVar[mlogVarCnt++] = frame_id;
+    mlogVar[mlogVarCnt++] = subframe_id;
+    mlogVar[mlogVarCnt++] = slot_id;
+    mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId] % XRAN_N_FE_BUF_LEN;
+    mlogVar[mlogVarCnt++] = nSfIdx;
+    mlogVar[mlogVarCnt++] = nSfIdx % XRAN_N_FE_BUF_LEN;
+    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
+
+
+    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU)
+        next_tti = xran_lib_ota_tti[PortId] + 1;
+    else{
+        next_tti = xran_lib_ota_tti[PortId];
+    }
+
+    if(next_tti>= xran_fs_get_max_slot(PortId)){
+        print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
+        next_tti=0;
+    }
+
+    slot_id     = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
+    subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
+    frame_id    = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
+
+    print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
+
+    if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){
+        pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti;
+    } else {
+        pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1)^1].tti_to_process;
+    }
+
+    if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) {
+    p_xran_dev_ctx->phy_tti_cb_done = 0;
+        xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore);
+    }
+    /* the slot index is advanced to the next slot at the beginning of the current OTA slot */
+    xran_lib_ota_tti[PortId]++;
+    if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) {
+        print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id);
+        xran_lib_ota_tti[PortId] = 0;
+    }
+    MLogXRANTask(PID_TTI_CB, t1, MLogTick());
+}
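+
+/* Worked example for the PHY slot index above (illustrative): with
+ * numerology 1, nNrOfSlotInSf = 1 << 1 = 2, so frame 5, subframe 3, slot 1
+ * yields nSfIdx = 5*10*2 + 3*2 + 1 = 107, and the PHY-side buffer logged is
+ * nSfIdx % XRAN_N_FE_BUF_LEN. */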
+
+
+int32_t
+xran_prepare_cp_dl_slot(uint16_t xran_port_id, uint32_t nSlotIdx,  uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
+                            uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
+{
+    long t1 = MLogXRANTick();
+    int32_t ret = XRAN_STATUS_SUCCESS;
+    int tti, buf_id;
+    uint32_t slot_id, subframe_id, frame_id;
+    int cc_id;
+    uint8_t ctx_id;
+    uint8_t ant_id, num_eAxc, num_CCPorts;
+    void *pHandle;
+    //int num_list;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
+    if(unlikely(!p_xran_dev_ctx))
+    {
+        print_err("Null xRAN context!!\n");
+        return ret;
+    }
+    //struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
+    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
+    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
+    pHandle     = p_xran_dev_ctx;
+
+    num_eAxc    = xran_get_num_eAxc(pHandle);
+    num_CCPorts = xran_get_num_cc(pHandle);
+
+    if(first_call && p_xran_dev_ctx->enableCP)
+    {
+        tti    = nSlotIdx; //pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
+        buf_id = tti % XRAN_N_FE_BUF_LEN;
+
+        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
+        subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
+        frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
+        if (tti == 0)
+        {
+            /* Wrap around to next second */
+            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
+        }
+
+        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
+
+        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
+#if defined(__INTEL_COMPILER)
+#pragma vector always
+#endif
+        for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum)  && ant_id < num_eAxc); ++ant_id) {
+            for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) {
+                /* start new section information list */
+                xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
+                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
+                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
+                        if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData) {
+                            /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
+                                (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
+                                &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]),
+                                p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+                        } else {
+                               print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
+                        }
+                    } else {
+                        print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers] \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
+                    }
+                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
+            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
+        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
+        MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick());
+    }
+    return ret;
+}
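+
+/* Note on the frame wrap above (illustrative): at tti == 0 the slot being
+ * prepared belongs to the next second, so NUM_OF_FRAMES_PER_SECOND (100) is
+ * added to frame_id and the result is masked to 10 bits, e.g. a frame_id of
+ * 980 becomes (980 + 100) & 0x3ff = 56. */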
+
+void
+tx_cp_dl_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    int tti, buf_id;
+    uint32_t slot_id, subframe_id, frame_id;
+    int cc_id;
+    uint8_t ctx_id;
+    uint8_t ant_id, num_eAxc, num_CCPorts;
+    void *pHandle;
+    //int num_list;
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+
+    if(unlikely(!p_xran_dev_ctx))
+    {
+        print_err("Null xRAN context!!\n");
+        return;
+    }
+
+    if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
+        return;
+
+    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0];
+    uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local;
+    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
+    pHandle     = p_xran_dev_ctx;
+
+    num_eAxc    = xran_get_num_eAxc(pHandle);
+    num_CCPorts = xran_get_num_cc(pHandle);
+
+    if(first_call && p_xran_dev_ctx->enableCP)
+    {
+        tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
+        buf_id = tti % XRAN_N_FE_BUF_LEN;
+
+        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local));
+        subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
+        frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local));
+        if (tti == 0)
+        {
+            /* Wrap around to next second */
+            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
+        }
+
+        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
+
+        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
+        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
+            for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {
+                if(0 == p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id].numSymsRemaining)
+                {   /* Start of a new slot - reset the section info */
+                    xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
+                }
+                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
+                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) {
+                        if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
+                            /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
+                                (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
+                                &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]),
+                                p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+                        }
+                        else
+                            print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
+                    }
+                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
+            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
+        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
+        MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick());
+    }
+}
+
+void
+rx_ul_static_srs_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    xran_status_t status = 0;
+    int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+    int32_t cc_id = 0;
+    //uint32_t nFrameIdx;
+    //uint32_t nSubframeIdx;
+    //uint32_t nSlotIdx;
+    //uint64_t nSecond;
+    struct xran_timer_ctx* p_timer_ctx = NULL;
+
+    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+
+    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
+
+    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
+        p_xran_dev_ctx->timer_put = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+
+    if(rx_tti == 0)
+       rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1);
+    else
+       rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
+
+    /* U-Plane */
+    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
+
+        if(0 == p_xran_dev_ctx->enableSrsCp)
+        {
+            if(p_xran_dev_ctx->pSrsCallback[cc_id]){
+                struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
+                if(pTag) {
+                    //pTag->cellId = cc_id;
+                    pTag->slotiId = rx_tti;
+                    pTag->symbol  = XRAN_FULL_CB_SYM; /* XRAN_FULL_CB_SYM indicates a full slot of symbols */
+                    p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
+                }
+            }
+        }
+    }
+    MLogXRANTask(PID_UP_STATIC_SRS_DEAD_LINE_CB, t1, MLogXRANTick());
+}
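+
+/* Note (illustrative): this static deadline path reports SRS only when
+ * enableSrsCp == 0; when C-Plane driven SRS is enabled the report is issued
+ * from rx_ul_deadline_full_cb() instead, so the two reporting paths are
+ * mutually exclusive. */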
+
+
+
+void
+rx_ul_deadline_one_fourths_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    xran_status_t status;
+    /* one quarter of the RX window for the current TTI as measured against current OTA time */
+    int32_t rx_tti;
+    int32_t cc_id;
+    //uint32_t nFrameIdx;
+    //uint32_t nSubframeIdx;
+    //uint32_t nSlotIdx;
+    //uint64_t nSecond;
+    struct xran_timer_ctx* p_timer_ctx = NULL;
+    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
+           + nSlotIdx;*/
+    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+
+    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
+    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
+        p_xran_dev_ctx->timer_put = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+
+    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
+        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
+            if(p_xran_dev_ctx->pCallback[cc_id]) {
+                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
+                if(pTag) {
+                    //pTag->cellId = cc_id;
+                    pTag->slotiId = rx_tti;
+                    pTag->symbol  = XRAN_ONE_FOURTHS_CB_SYM;
+                    status = XRAN_STATUS_SUCCESS;
+
+                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
+                }
+            }
+        } else {
+            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
+        }
+    }
+
+    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
+        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
+            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
+        }else{
+            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
+        }
+    }
+
+    MLogXRANTask(PID_UP_UL_ONE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick());
+}
+
+void
+rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    xran_status_t status;
+    /* half of RX for current TTI as measured against current OTA time */
+    int32_t rx_tti;
+    int32_t cc_id;
+    //uint32_t nFrameIdx;
+    //uint32_t nSubframeIdx;
+    //uint32_t nSlotIdx;
+    //uint64_t nSecond;
+    struct xran_timer_ctx* p_timer_ctx = NULL;
+    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
+           + nSlotIdx;*/
+    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+
+    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
+    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
+        p_xran_dev_ctx->timer_put = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+
+    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
+        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
+            if(p_xran_dev_ctx->pCallback[cc_id]) {
+                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
+                if(pTag) {
+                    //pTag->cellId = cc_id;
+                    pTag->slotiId = rx_tti;
+                    pTag->symbol  = XRAN_HALF_CB_SYM;
+                    status = XRAN_STATUS_SUCCESS;
+
+                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
+                }
+            }
+        } else {
+            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
+        }
+    }
+
+    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
+        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
+            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
+        }else{
+            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
+        }
+    }
+
+    MLogXRANTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogXRANTick());
+}
+
+void
+rx_ul_deadline_three_fourths_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    xran_status_t status;
+    /* three quarters of the RX window for the current TTI as measured against current OTA time */
+    int32_t rx_tti;
+    int32_t cc_id;
+    //uint32_t nFrameIdx;
+    //uint32_t nSubframeIdx;
+    //uint32_t nSlotIdx;
+    //uint64_t nSecond;
+    struct xran_timer_ctx* p_timer_ctx = NULL;
+    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+           + nSubframeIdx*SLOTNUM_PER_SUBFRAME
+           + nSlotIdx;*/
+    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+
+    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
+    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
+        p_xran_dev_ctx->timer_put = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+
+    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
+        if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
+            if(p_xran_dev_ctx->pCallback[cc_id]) {
+                struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
+                if(pTag) {
+                    //pTag->cellId = cc_id;
+                    pTag->slotiId = rx_tti;
+                    pTag->symbol  = XRAN_THREE_FOURTHS_CB_SYM;
+                    status = XRAN_STATUS_SUCCESS;
+
+                    p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
+                }
+            }
+        } else {
+            p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
+        }
+    }
+
+    if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){
+        if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){
+            p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]);
+        }else{
+            p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--;
+        }
+    }
+
+    MLogXRANTask(PID_UP_UL_THREE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick());
+}
+
+void
+rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    xran_status_t status = 0;
+    int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+    int32_t cc_id = 0;
+    //uint32_t nFrameIdx;
+    //uint32_t nSubframeIdx;
+    //uint32_t nSlotIdx;
+    //uint64_t nSecond;
+    struct xran_timer_ctx* p_timer_ctx = NULL;
+
+    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+
+    /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+    rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+        + nSubframeIdx*SLOTNUM_PER_SUBFRAME
+        + nSlotIdx;*/
+    p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX];
+
+    if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX)
+        p_xran_dev_ctx->timer_put = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+#if 1
+    if(rx_tti == 0)
+       rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1);
+    else
+       rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
+#endif
+    /* U-Plane */
+    for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
+        if(p_xran_dev_ctx->pCallback[cc_id]){
+            struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
+            if(pTag) {
+                //pTag->cellId = cc_id;
+                pTag->slotiId = rx_tti;
+                pTag->symbol  = XRAN_FULL_CB_SYM; /* XRAN_FULL_CB_SYM indicates a full slot of symbols */
+                status = XRAN_STATUS_SUCCESS;
+                p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
+            }
+        }
+
+        if(p_xran_dev_ctx->pPrachCallback[cc_id]){
+            struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
+            if(pTag) {
+                //pTag->cellId = cc_id;
+                pTag->slotiId = rx_tti;
+                pTag->symbol  = XRAN_FULL_CB_SYM; /* XRAN_FULL_CB_SYM indicates a full slot of symbols */
+                p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
+            }
+        }
+
+        if(p_xran_dev_ctx->enableSrsCp)
+        {
+            if(p_xran_dev_ctx->pSrsCallback[cc_id]){
+                struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
+                if(pTag) {
+                    //pTag->cellId = cc_id;
+                    pTag->slotiId = rx_tti;
+                    pTag->symbol  = XRAN_FULL_CB_SYM; /* XRAN_FULL_CB_SYM indicates a full slot of symbols */
+                    p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
+                }
+            }
+        }
+    }
+
+    /* user call backs if any */
+    if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){
+        if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){
+            p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]);
+        }else{
+            p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--;
+        }
+    }
+
+    MLogXRANTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogXRANTick());
+}
+
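+/*
+ * rx_ul_user_sym_cb: per-symbol user callback. The requested symbol may be
+ * offset from OTA time by sym_diff symbols: a positive sym_diff indicates
+ * an event ahead of OTA time (TX window advance), a negative one an event
+ * in the past (RX window delay); both wrap modulo xran_max_ota_sym_idx().
+ * For example, with sym_diff = -3 and ota_sym_idx = 1 the index wraps to
+ * max_ota_sym_idx - 2. If a sense-of-time structure was registered, it is
+ * filled with frame/subframe/slot/symbol derived from the adjusted index.
+ */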
+void
+rx_ul_user_sym_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    struct xran_device_ctx * p_dev_ctx = NULL;
+    struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg;
+    int32_t rx_tti = 0; //(int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+    uint32_t interval, ota_sym_idx = 0;
+    uint8_t nNumerology = 0;
+    struct xran_timer_ctx* p_timer_ctx =  NULL;
+
+    if(p_sym_cb_ctx->p_dev)
+        p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev;
+    else
+        rte_panic("p_sym_cb_ctx->p_dev == NULL");
+
+    if(p_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+    nNumerology = xran_get_conf_numerology(p_dev_ctx);
+    interval = p_dev_ctx->interval_us_local;
+
+    p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX];
+    if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX)
+        p_sym_cb_ctx->user_timer_get = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+
+    if(p_sym_cb_ctx->sym_diff > 0)
+        /* + advance TX window: at OTA time we indicate an event in the future */
+        ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology));
+    else if (p_sym_cb_ctx->sym_diff < 0) {
+        /* - delay RX window: at OTA time we indicate an event in the past */
+        if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) {
+            ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff;
+        } else {
+            ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology);
+        }
+    } else /* 0 - exact OTA time */
+        ota_sym_idx = p_timer_ctx->ota_sym_idx;
+
+    rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+
+    if(p_sym_cb_ctx->symCbTimeInfo) {
+        struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo;
+        p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id;
+        p_sense_time->nSymIdx       = p_sym_cb_ctx->symb_num_req;
+        p_sense_time->tti_counter   = rx_tti;
+        p_sense_time->nSlotIdx      = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval));
+        p_sense_time->nSubframeIdx  = (uint32_t)XranGetSubFrameNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
+        p_sense_time->nFrameIdx     = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start, SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+        p_sense_time->nSecond       = p_timer_ctx->current_second;
+    }
+
+    /* user call backs if any */
+    if(p_sym_cb_ctx->symCb){
+        p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo);
+    }
+
+    MLogXRANTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogXRANTick());
+}
+
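+/*
+ * xran_prepare_cp_ul_slot: generates and sends UL C-Plane messages for one
+ * slot, restricted to the requested CC/antenna ranges (clamped against the
+ * configured num_CCPorts/num_eAxc). It covers three message groups: general
+ * uplink sections built from the RX PRB map, PRACH occasions when the
+ * frame/slot matches the PRACH configuration, and SRS sections when SRS
+ * C-Plane is enabled. Note: the nSymMask/nSymStart/nSymNum arguments are
+ * accepted but not evaluated in this variant.
+ */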
+int32_t
+xran_prepare_cp_ul_slot(uint16_t xran_port_id, uint32_t nSlotIdx,  uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
+                            uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
+{
+    int32_t ret = XRAN_STATUS_SUCCESS;
+    long t1 = MLogXRANTick();
+    int tti, buf_id;
+    uint32_t slot_id, subframe_id, frame_id;
+    int32_t cc_id;
+    int ant_id, port_id;
+    uint16_t occasionid;
+    uint16_t beam_id;
+    uint8_t num_eAxc, num_CCPorts;
+    uint8_t ctx_id;
+
+    void *pHandle;
+    uint32_t interval;
+    uint8_t PortId;
+
+    //struct xran_timer_ctx *pTCtx;
+    struct xran_buffer_list *pBufList;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
+    if(unlikely(!p_xran_dev_ctx))
+    {
+        print_err("Null xRAN context!!\n");
+        return ret;
+    }
+
+    if(first_call && p_xran_dev_ctx->enableCP)
+    {
+        pHandle     = p_xran_dev_ctx;
+        //pTCtx       = &p_xran_dev_ctx->timer_ctx[0];
+        interval    = p_xran_dev_ctx->interval_us_local;
+        PortId      = p_xran_dev_ctx->xran_port_id;
+        tti         = nSlotIdx; //pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
+
+        buf_id      = tti % XRAN_N_FE_BUF_LEN;
+        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
+        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
+        subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
+        frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+
+        /* Wrap around to next second */
+        if(tti == 0)
+            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
+        if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
+            num_eAxc = xran_get_num_eAxc(pHandle);
+        else
+            num_eAxc = xran_get_num_eAxcUl(pHandle);
+        num_CCPorts = xran_get_num_cc(pHandle);
+
+        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
+
+        /* General Uplink */
+#if defined(__INTEL_COMPILER)
+#pragma vector always
+#endif
+        for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum)  && ant_id < num_eAxc); ++ant_id) {
+            for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) {
+                /* start new section information list */
+                xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
+                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1)
+                {
+                    pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
+                    if(pBufList->pBuffers && pBufList->pBuffers->pData)
+                    {
+                        ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
+                                        (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
+                                        p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+                    }
+                }
+            }
+        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
+
+        /* PRACH */
+        if(p_xran_dev_ctx->enablePrach)
+        {
+            struct xran_prach_cp_config *pPrachCPConfig = NULL;
+            /* If DSS is enabled, select the NR or LTE PRACH C-Plane config based on the technology of this TTI */
+            if(p_xran_dev_ctx->dssEnable) {
+                int i = tti % p_xran_dev_ctx->dssPeriod;
+                if(p_xran_dev_ctx->technology[i] == 1)
+                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+                else
+                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+            } else {
+                pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+            }
+            uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
+
+            if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
+                && (is_prach_slot==1))
+            {
+                for(ant_id = 0; ant_id < num_eAxc; ant_id++)
+                {
+                    port_id = ant_id + pPrachCPConfig->eAxC_offset;
+                    for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                    {
+                        /* start new section information list */
+                        xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+                        for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
+                        {
+                            struct xran_cp_gen_params params;
+                            struct xran_section_gen_info sect_geninfo[8];
+                            struct xran_section_info sectInfo[8];
+                            for(int secId=0;secId<8;secId++)
+                                sect_geninfo[secId].info = &sectInfo[secId];
+                            struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
+                            uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
+
+                            beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
+                            ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
+                                        frame_id, subframe_id, slot_id, tti,
+                                        beam_id, cc_id, port_id, occasionid, seqid);
+                            if(ret == XRAN_STATUS_SUCCESS)
+                                send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
+                                        cc_id, port_id, seqid);
+                        }
+                    }
+                }
+            }
+        } /* if(p_xran_dev_ctx->enablePrach) */
+
+        /* SRS */
+        if(p_xran_dev_ctx->enableSrsCp)
+        {
+            struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+
+            for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
+            {
+                port_id = ant_id + pSrsCfg->eAxC_offset;
+                for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                {
+                    /* start new section information list */
+                    xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+                    if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
+                    {
+                        pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
+                        if(pBufList->pBuffers && pBufList->pBuffers->pData)
+                        {
+                            ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
+                                            (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
+                                            p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+                        }
+                    }
+                }
+            }
+        } /* if(p_xran_dev_ctx->enableSrsCp) */
+
+        MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
+    } /* if(p_xran_dev_ctx->enableCP) */
+
+    return ret;
+}
+
+
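+/*
+ * tx_cp_ul_cb: timer-driven variant of the UL C-Plane preparation above.
+ * It derives the TTI to process from the port's timer context (the entry
+ * opposite to the one currently being written, selected by the parity of
+ * xran_lib_ota_tti), then emits general UL, PRACH and SRS C-Plane messages
+ * for all CCs and eAxCs. Skipped entirely when BBU offload owns C-Plane
+ * generation (io_cfg.bbu_offload).
+ */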
+void
+tx_cp_ul_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogXRANTick();
+    int tti, buf_id;
+    int ret;
+    uint32_t slot_id, subframe_id, frame_id;
+    int32_t cc_id;
+    int ant_id, port_id;
+    uint16_t occasionid = 0;
+    uint16_t beam_id;
+    uint8_t num_eAxc, num_CCPorts;
+    uint8_t ctx_id;
+
+    void *pHandle;
+    uint32_t interval;
+    uint8_t PortId;
+
+    struct xran_timer_ctx *pTCtx;
+    struct xran_buffer_list *pBufList;
+    struct xran_device_ctx *p_xran_dev_ctx;
+
+    if(unlikely(!arg))
+    {
+        print_err("Null xRAN context!!\n");
+        return;
+    }
+
+    p_xran_dev_ctx  = (struct xran_device_ctx *)arg;
+
+    if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
+        return;
+
+    if(first_call && p_xran_dev_ctx->enableCP)
+    {
+        pHandle     = p_xran_dev_ctx;
+        pTCtx       = &p_xran_dev_ctx->timer_ctx[0];
+        interval    = p_xran_dev_ctx->interval_us_local;
+        PortId      = p_xran_dev_ctx->xran_port_id;
+        tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process;
+
+        buf_id      = tti % XRAN_N_FE_BUF_LEN;
+        ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
+        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
+        subframe_id = XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
+        frame_id    = XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+
+        /* Wrap around to next second */
+        if(tti == 0)
+            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
+
+        if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
+            num_eAxc = xran_get_num_eAxc(pHandle);
+        else
+            num_eAxc = xran_get_num_eAxcUl(pHandle);
+        num_CCPorts = xran_get_num_cc(pHandle);
+
+        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
+
+        /* General Uplink */
+        for(ant_id = 0; ant_id < num_eAxc; ant_id++)
+        {
+            for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+            {
+                /* start new section information list */
+                xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
+                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1)
+                {
+                    pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
+                    if(pBufList->pBuffers && pBufList->pBuffers->pData)
+                    {
+                        ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
+                                        (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
+                                        p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+                    }
+                }
+            }
+        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
+
+        /* PRACH */
+        if(p_xran_dev_ctx->enablePrach)
+        {
+            struct xran_prach_cp_config *pPrachCPConfig = NULL;
+            /* If DSS is enabled, select the NR or LTE PRACH C-Plane config based on the technology of this TTI */
+            if(p_xran_dev_ctx->dssEnable) {
+                int i = tti % p_xran_dev_ctx->dssPeriod;
+                if(p_xran_dev_ctx->technology[i] == 1)
+                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+                else
+                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+            } else {
+                pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+            }
+
+            uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
+
+            if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
+                && (is_prach_slot == 1))
+            {
+                for(ant_id = 0; ant_id < num_eAxc; ant_id++)
+                {
+                    port_id = ant_id + pPrachCPConfig->eAxC_offset;
+                    for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                    {
+                        /* start new section information list */
+                        xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+#ifndef FCN_ADAPT
+                        /* when FCN_ADAPT is defined, C-Plane is sent for the first PRACH occasion only */
+                        for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
+#endif
+                        {
+                            struct xran_cp_gen_params params;
+                            struct xran_section_gen_info sect_geninfo[8];
+                            struct xran_section_info sectInfo[8];
+                            for(int secId = 0; secId < 8; secId++)
+                                sect_geninfo[secId].info = &sectInfo[secId];
+
+                            struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
+                            uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
+
+                            beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
+                            ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
+                                        frame_id, subframe_id, slot_id, tti,
+                                        beam_id, cc_id, port_id, occasionid, seqid);
+                            if (ret == XRAN_STATUS_SUCCESS)
+                                send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
+                                        cc_id, port_id, seqid);
+                        }
+                    }
+                }
+            }
+        } /* if(p_xran_dev_ctx->enablePrach) */
+
+        /* SRS */
+        if(p_xran_dev_ctx->enableSrsCp)
+        {
+            struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+
+            for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
+            {
+                port_id = ant_id + pSrsCfg->eAxC_offset;
+                for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                {
+                    /* start new section information list */
+                    xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+                    if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
+                    {
+                        pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
+                        if(pBufList->pBuffers && pBufList->pBuffers->pData)
+                        {
+                            ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
+                                            (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
+                                            p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+                        }
+                    }
+                }
+            }
+        } /* if(p_xran_dev_ctx->enableSrsCp) */
+
+        MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
+    } /* if(first_call && p_xran_dev_ctx->enableCP) */
+}
+
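+/*
+ * tti_to_phy_cb: per-TTI callback into L1. Before the first full radio
+ * frame has elapsed it only tracks OTA time; once frame/subframe/slot
+ * reach the last slot of the maximum frame number, first_call is latched
+ * and the registered XRAN_CB_TTI callback is delivered every TTI
+ * thereafter (honoring the SkipTti countdown).
+ */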
+void
+tti_to_phy_cb(struct rte_timer *tim, void *arg)
+{
+    long t1 = MLogTick();
+    struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+    uint32_t interval = p_xran_dev_ctx->interval_us_local;
+
+    p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */
+    if (first_call){
+        if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
+            if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){
+                p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);
+            }else{
+                p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;
+            }
+        }
+    } else {
+        if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
+            int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT);
+            uint32_t slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
+            uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
+            uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+            if((frame_id == xran_max_frame) && (subframe_id == 9) && (slot_id == SLOTNUM_PER_SUBFRAME(interval) - 1)) { /* i.e. tti == xran_fs_get_max_slot()-1 */
+                first_call = 1;
+            }
+        }
+    }
+
+    MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
+}
+
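+/*
+ * xran_timing_source_thread: main timing loop. It pins itself to the
+ * configured timing core with SCHED_FIFO priority, aligns startup to the
+ * top of a second (tv_nsec <= 1500 ns), then polls symbol boundaries with
+ * poll_next_tick(). On each symbol tick it runs sym_ota_cb() for every
+ * active port and accumulates used/total tick statistics. When one-way
+ * delay measurement is enabled, the timing callbacks are created only
+ * after the measurement completes and the windows have been adjusted.
+ */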
+int32_t
+xran_timing_source_thread(void *args)
+{
+    int res = 0;
+    cpu_set_t cpuset;
+    int32_t   result1;
+    uint32_t xran_port_id = 0;
+    static int owdm_init_done = 0;
+    struct sched_param sched_param;
+    struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args ;
+    uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;
+    struct xran_device_ctx * p_dev_ctx_run = NULL;
+    /* ToS = Top of Second start +- 1.5us */
+    struct timespec ts;
+    char thread_name[32];
+    char buff[100];
+
+    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());
+    memset(&sched_param, 0, sizeof(struct sched_param));
+    /* pin the timing thread to the configured timing core */
+    sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
+    CPU_ZERO(&cpuset);
+    CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);
+
+    if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)))
+    {
+        printf("pthread_setaffinity_np failed: coreId = %d, result1 = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
+    }
+    if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
+    {
+        printf("priority is not changed: coreId = %d, result1 = %d\n", p_dev_ctx->fh_init.io_cfg.timing_core, result1);
+    }
+
+    snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id());
+    if ((res = pthread_setname_np(pthread_self(), thread_name))) {
+        printf("[core %d] pthread_setname_np = %d\n", rte_lcore_id(), res);
+    }
+
+    printf("TTI interval %ld [us]\n", interval_us);
+
+    if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) {
+        if ((res = xran_timing_create_cbs(args)) < 0) {
+            return res;
+        }
+    }
+
+    do {
+        timespec_get(&ts, TIME_UTC);
+    } while (ts.tv_nsec > 1500);
+
+    struct tm * ptm = gmtime(&ts.tv_sec);
+    if(ptm) {
+        strftime(buff, sizeof buff, "%D %T", ptm);
+        printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n",
+            (p_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU" : "O-RU"), buff, ts.tv_nsec, interval_us);
+    }
+
+    do {
+       timespec_get(&ts, TIME_UTC);
+    }while (ts.tv_nsec == 0);
+
+    p_dev_ctx->timing_source_thread_running = 1;
+    while(1) {
+
+        /* Check whether owdm has finished, in order to create the timing cbs based on the measurement results */
+        if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) && (!owdm_init_done) && unlikely(XRAN_RUNNING == xran_if_current_state)) {
+            /* Adjust windows based on delay measurement results */
+            xran_adjust_timing_parameters(p_dev_ctx);
+            if ((res = xran_timing_create_cbs(args)) < 0) {
+                return res;
+            }
+            printf("TTI interval %ld [us]\n", interval_us);
+            owdm_init_done = 1;
+        }
+
+        /* Update Usage Stats */
+        tWake = xran_tick();
+        xran_used_tick += tUsed;
+        if (tWakePrev)
+        {
+            xran_total_tick += get_ticks_diff(tWake, tWakePrev);
+        }
+        tWakePrev = tWake;
+        tUsed = 0;
+
+        int64_t delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);
+        if (XRAN_STOPPED == xran_if_current_state)
+            break;
+
+        if (delta > 3E5 && tUsed > 0) /* 300 us, about 9 symbols */
+        {
+            print_err("poll_next_tick too long, delta:%ld(ns), tUsed:%ld(tick)", delta, tUsed);
+        }
+
+        if (likely(XRAN_RUNNING == xran_if_current_state)) {
+            for(xran_port_id =  0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) {
+                p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id);
+                if(p_dev_ctx_run) {
+                    if(p_dev_ctx_run->xran_port_id == xran_port_id) {
+                        if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id])
+                        {
+                            sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed);
+                            xran_lib_ota_sym[xran_port_id]++;
+                            if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT)
+                                xran_lib_ota_sym[xran_port_id] = 0;
+                        }
+                    } else {
+                        rte_panic("p_dev_ctx_run->xran_port_id != xran_port_id");
+                    }
+                }
+            }
+        }
+    }
+
+    xran_timing_destroy_cbs(args);
+    printf("Closing timing source thread...\n");
+    return res;
+}
+
+/* Handle ecpri format. */
+#define MBUFS_CNT 16
+
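+/*
+ * handle_ecpri_ethertype: classifies a burst of received mbufs by eCPRI
+ * message type (IQ data, real-time control, delay measurement) and
+ * dispatches each class. A full burst of MBUFS_CNT IQ-data packets on a
+ * Cat B RU takes the batched path (process_mbuf_batch); everything else is
+ * handled per packet. Each handler's return value decides whether the mbuf
+ * is freed here (MBUF_FREE) or retained downstream.
+ */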
+int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num)
+{
+    struct rte_mbuf *pkt;
+    uint16_t i;
+    struct rte_ether_hdr* eth_hdr;
+    struct xran_ecpri_hdr* ecpri_hdr;
+    unsigned long t1;
+    int32_t ret = MBUF_FREE;
+    uint32_t ret_data[MBUFS_CNT] = { MBUFS_CNT * MBUF_FREE }; /* note: aggregate init sets only element 0; all entries are reset before use in the batch path */
+    struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id);
+    uint16_t num_data = 0, num_control = 0, num_meas = 0;
+    struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT];
+    static uint32_t owdm_rx_first_pass = 1;
+
+    if (p_dev_ctx == NULL)
+        return ret;
+
+    for (i = 0; i < num; i++)
+    {
+        pkt = pkt_q[i];
+
+//        rte_prefetch0(rte_pktmbuf_mtod(pkt, void*));
+
+        rte_pktmbuf_adj(pkt, sizeof(*eth_hdr));
+        ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);
+
+        p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
+
+        pkt_adj[i] = pkt;
+        switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type)
+        {
+            case ECPRI_IQ_DATA:
+                pkt_data[num_data++] = pkt;
+                break;
+            /* For RU emulation */
+            case ECPRI_RT_CONTROL_DATA:
+                pkt_control[num_control++] = pkt;
+                break;
+            case ECPRI_DELAY_MEASUREMENT:
+                if (owdm_rx_first_pass != 0)
+                {
+                    /* Initialize and verify that Payload Length is in range */
+                    xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx);
+                    owdm_rx_first_pass = 0;
+                }
+                pkt_meas[num_meas++] = pkt;
+                break;
+            default:
+                if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
+                    print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
+                }
+                break;
+        }
+    }
+
+    if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* workaround for Cat A issue */
+    {
+        for (i = 0; i < MBUFS_CNT; i++)
+        {
+            ret_data[i] = MBUF_FREE;
+        }
+
+        if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
+        {
+            if (p_dev_ctx->xran2phy_mem_ready != 0)
+                ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data);
+            for (i = 0; i < MBUFS_CNT; i++)
+            {
+                if (ret_data[i] == MBUF_FREE)
+                    rte_pktmbuf_free(pkt_data[i]);
+            }
+        }
+        else
+        {
+            for (i = 0; i < MBUFS_CNT; i++)
+            {
+                if (ret_data[i] == MBUF_FREE)
+                    rte_pktmbuf_free(pkt_data[i]);
+            }
+            print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
+        }
+    }
+    else
+    {
+        for (i = 0; i < num_data; i++)
+        {
+            ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid);
+            if (ret == MBUF_FREE)
+                rte_pktmbuf_free(pkt_data[i]);
+        }
+
+        for (i = 0; i < num_control; i++)
+        {
+            t1 = MLogXRANTick();
+            if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
+            {
+                ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
+                p_dev_ctx->fh_counters.rx_counter++;
+                if (ret == MBUF_FREE)
+                    rte_pktmbuf_free(pkt_control[i]);
+            }
+            else
+            {
+                print_err("O-DU received C-Plane message!");
+            }
+            MLogXRANTask(PID_PROCESS_CP_PKT, t1, MLogXRANTick());
+        }
+
+        for (i = 0; i < num_meas; i++)
+        {
+            /*if(p_dev_ctx->fh_init.io_cfg.id == O_RU)
+                printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64" %d\n", xport_id, (int64_t*)p_dev_ctx, num_meas);*/
+            t1 = MLogXRANTick();
+            if(xran_if_current_state != XRAN_RUNNING)
+                ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id);
+            else
+                ret = MBUF_FREE;
+            if (ret == MBUF_FREE)
+                rte_pktmbuf_free(pkt_meas[i]);
+            MLogXRANTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogXRANTick());
+        }
+    }
+
+    return MBUF_FREE;
+}
+
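+/*
+ * xran_packet_and_dpdk_timer_thread: lightweight SCHED_FIFO loop that
+ * services the DPDK timer subsystem. rte_timer_manage() is invoked only
+ * when at least TIMER_RESOLUTION_CYCLES TSC cycles have elapsed since the
+ * previous pass, bounding the timer-management overhead per iteration.
+ */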
+int32_t
+xran_packet_and_dpdk_timer_thread(void *args)
+{
+    //struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+
+    uint64_t prev_tsc = 0;
+    uint64_t cur_tsc = rte_rdtsc();
+    uint64_t diff_tsc = cur_tsc - prev_tsc;
+    struct sched_param sched_param;
+    int res = 0;
+    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  rte_lcore_id(), getpid());
+
+    memset(&sched_param, 0, sizeof(struct sched_param));
+    sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
+
+    if ((res  = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
+    {
+        printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
+    }
+
+    while(1){
+
+        cur_tsc  = rte_rdtsc();
+        diff_tsc = cur_tsc - prev_tsc;
+        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
+            rte_timer_manage();
+            prev_tsc = cur_tsc;
+        }
+
+        if (XRAN_STOPPED == xran_if_current_state)
+            break;
+    }
+
+    printf("Closing pkts timer thread...\n");
+    return 0;
+}
+
+void xran_initialize_ecpri_owd_meas_cmn( struct xran_io_cfg *ptr)
+{
+//    ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0
+//    ptr->eowd_cmn.filterType = 0;  // 0 Simple average based on number of measurements
+    // Set default values if the Timeout and numberOfSamples are not set
+    if ( ptr->eowd_cmn[ptr->id].responseTo == 0)
+        ptr->eowd_cmn[ptr->id].responseTo = 10E6; // 10 ms timeout expressed in ns
+    if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0)
+        ptr->eowd_cmn[ptr->id].numberOfSamples = 8; // Number of samples to be averaged
+}
+void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr )
+{
+   /* This function initializes one_way delay measurements on a per port basis,
+      most variables default to zero    */
+   ptr->eowd_port[ptr->id][i].portid = (uint8_t)i;
+}
+
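+/*
+ * xran_init: one-time library initialization. It validates the number of
+ * O-xU ports, allocates a device context per port and copies the caller's
+ * xran_fh_init into each, applies I/O config defaults (Ethernet lines,
+ * line speed, RX queues), registers the eCPRI ethertype handler, brings up
+ * the DPDK I/O (with the O-DU/O-RU MAC addresses swapped according to
+ * io_cfg.id), initializes per-port DPDK timers and per-symbol callback
+ * lists, and finally seeds the per-VF one-way delay measurement state.
+ */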
+int32_t
+xran_init(int argc, char *argv[],
+           struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
+{
+    int32_t ret = XRAN_STATUS_SUCCESS;
+    int32_t i;
+    int32_t j;
+    int32_t o_xu_id = 0;
+    struct xran_io_cfg      *p_io_cfg       = NULL;
+    struct xran_device_ctx * p_xran_dev_ctx = NULL;
+    int32_t  lcore_id = 0;
+    const char *version = rte_version();
+
+    if (version == NULL)
+        rte_panic("version == NULL");
+
+    printf("'%s'\n", version);
+
+    if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) {
+        ret = XRAN_STATUS_INVALID_PARAM;
+        print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret);
+        return ret;
+    }
+    mlogxranenable = p_xran_fh_init->mlogxranenable;
+    p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
+
+    if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) {
+        print_err("context allocation error [%d]\n", ret);
+        return ret;
+    }
+
+    for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++) {
+        p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
+        memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
+        p_xran_dev_ctx->xran_port_id = o_xu_id;
+
+        /* copy init */
+        p_xran_dev_ctx->fh_init = *p_xran_fh_init;
+        printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
+
+        memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
+
+        /* Clear send functions so that defaults are installed later */
+        p_xran_dev_ctx->send_upmbuf2ring = NULL;
+        p_xran_dev_ctx->send_cpmbuf2ring = NULL;
+
+        /* Initialize eCPRI one-way delay measurement common variables to default values */
+        xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
+    }
+
+    /* default values if not set */
+    if(p_io_cfg->nEthLinePerPort == 0)
+        p_io_cfg->nEthLinePerPort = 1;
+
+    if(p_io_cfg->nEthLineSpeed == 0)
+        p_io_cfg->nEthLineSpeed = 25;
+
+    /** at least 1 RX Q */
+    if(p_io_cfg->num_rxq == 0)
+        p_io_cfg->num_rxq = 1;
+
+    if (p_io_cfg->id == 1) {
+        /* O-RU: single HW RX queue */
+        p_io_cfg->num_rxq = 1;
+    }
+
+#if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
+    if (p_io_cfg->num_rxq > 1) {
+        p_io_cfg->num_rxq = 1;
+        printf("%s does not support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
+    }
+#endif
+    printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
+    printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
+    printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
+
+    if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane)  != p_io_cfg->num_vfs) {
+        print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. [provided %d]\n",
+            p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort,
+            p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs);
+    }
+
+    xran_if_current_state = XRAN_INIT;
+    xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
+    if (p_io_cfg->id == 0)
+        xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
+                           p_io_cfg,
+                           &lcore_id,
+                           (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
+                           (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
+                           p_xran_dev_ctx->fh_init.mtu);
+    else
+        xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
+                           p_io_cfg,
+                           &lcore_id,
+                           (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
+                           (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
+                           p_xran_dev_ctx->fh_init.mtu);
+
+    for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
+        p_xran_dev_ctx  = xran_dev_get_ctx_by_id(o_xu_id);
+
+        for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ )
+            rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]);
+
+        rte_timer_init(&p_xran_dev_ctx->sym_timer);
+        for (i = 0; i < MAX_NUM_OF_DPDK_TIMERS; i++)
+            rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]);
+
+        p_xran_dev_ctx->direct_pool   = socket_direct_pool;
+        p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
+
+        for (j = 0; j < XRAN_NUM_OF_SYMBOL_PER_SLOT; j++) {
+            LIST_INIT(&p_xran_dev_ctx->sym_cb_list_head[j]);
+        }
+    }
+
+    for (i = 0; i < XRAN_PORTS_NUM; i++) {
+        for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++) {
+            xran_fs_clear_slot_type(i, nCellIdx);
+        }
+    }
+
+    *pXranLayerHandle = xran_dev_get_ctx();
+
+    /* The eCPRI OWD initialization needs to be done per PF and VF (outer loop PF, inner loop VF) */
+    for (i=0;  i< p_io_cfg->num_vfs; i++)
+    {
+        /* Initialize ecpri one-way delay measurement info on a per vf port basis */
+        xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg);
+    }
+
+    return ret;
+}
+
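+/*
+ * xran_sector_get_instances: allocates one 64-byte aligned
+ * XranSectorHandleInfo per requested CC instance (pDevHandle is treated as
+ * the base of the device-context array and indexed by xran_port), records
+ * the instance index and owning xRAN port, and publishes each handle both
+ * to the caller and to the library-global pLibInstanceHandles table.
+ */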
+int32_t
+xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances,
+               xran_cc_handle_t * pSectorInstanceHandles)
+{
+    struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
+    XranSectorHandleInfo *pCcHandle = NULL;
+    int32_t i = 0;
+
+    pDev += xran_port;
+
+    /* Check for the Valid Parameters */
+    CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
+
+    if (!nNumInstances) {
+        print_dbg("Instance is not assigned for this function !!! \n");
+        return XRAN_STATUS_INVALID_PARAM;
+    }
+
+    for (i = 0; i < nNumInstances; i++) {
+
+        /* Allocate Memory for CC handles */
+        pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);
+
+        if(pCcHandle == NULL)
+            return XRAN_STATUS_RESOURCE;
+
+        memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));
+
+        pCcHandle->nIndex    = i;
+        pCcHandle->nXranPort = pDev->xran_port_id;
+
+        printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);
+        pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;
+
+        printf("Handle: %p Instance: %p\n",
+            &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);
+    }
+
+    return XRAN_STATUS_SUCCESS;
+}
+
+
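+/*
+ * xran_5g_fronthault_config: wires the caller's per-antenna, per-TTI buffer
+ * lists (U-Plane TX/RX and C-Plane PRB maps) into the device context for
+ * one sector. Each list is first pointed at the library-owned buffer
+ * arrays, then overwritten with the caller's list when one is provided
+ * (or zeroed otherwise), and the U-Plane transport callback is registered.
+ * xran2phy_mem_ready is set last to signal that the buffers are usable.
+ */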
+int32_t
+xran_5g_fronthault_config (void * pHandle,
+                struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                xran_transport_callback_fn pCallback,
+                void *pCallbackTag)
+{
+    int j, i = 0, z;
+    XranSectorHandleInfo* pXranCc = NULL;
+    struct xran_device_ctx * p_xran_dev_ctx = NULL;
+
+    if(NULL == pHandle) {
+        printf("Handle is NULL!\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    pXranCc = (XranSectorHandleInfo*) pHandle;
+    p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
+    if (p_xran_dev_ctx == NULL) {
+        printf ("p_xran_dev_ctx is NULL\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    i = pXranCc->nIndex;
+
+    for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
+        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
+            /* U-plane TX */
+
+            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
+
+            if(pSrcBuffer[z][j])
+                p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j]));
+
+
+            /* C-plane TX */
+            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
+
+            if(pSrcCpBuffer[z][j])
+                p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcCpBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j]));
+            /* U-plane RX */
+
+            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
+
+            if(pDstBuffer[z][j])
+                p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList =   *pDstBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
+
+
+            /* C-plane RX */
+            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
+
+            if(pDstCpBuffer[z][j])
+                p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
+        }
+    }
+
+    p_xran_dev_ctx->pCallback[i]    = pCallback;
+    p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;
+    print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
+        p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]);
+
+    p_xran_dev_ctx->xran2phy_mem_ready = 1;
+
+    return XRAN_STATUS_SUCCESS;
+}
+
+int32_t xran_5g_bfw_config(void * pHandle,
+                    struct xran_buffer_list *pSrcRxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                    struct xran_buffer_list *pSrcTxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                    xran_transport_callback_fn pCallback,
+                    void *pCallbackTag){
+    int j, i = 0, z;
+    XranSectorHandleInfo* pXranCc = NULL;
+    struct xran_device_ctx * p_xran_dev_ctx = NULL;
+
+    if(NULL == pHandle) {
+        printf("Handle is NULL!\n");
+        return XRAN_STATUS_FAIL;
+    }
+    pXranCc = (XranSectorHandleInfo*) pHandle;
+    p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
+    if (p_xran_dev_ctx == NULL) {
+        printf ("p_xran_dev_ctx is NULL\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    i = pXranCc->nIndex;
+
+    for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
+        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
+            /* C-plane RX - RU */
+            p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
+
+            if(pSrcRxCpBuffer[z][j])
+                p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcRxCpBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcRxCpBuffer[z][j]));
+
+            /* C-plane TX - RU */
+            p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
+
+            if(pSrcTxCpBuffer[z][j])
+                p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pSrcTxCpBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcTxCpBuffer[z][j]));
+        }
+    }
+    return XRAN_STATUS_SUCCESS;
+}
+
+int32_t
+xran_5g_prach_req (void *  pHandle,
+                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
+                struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],                
+                xran_transport_callback_fn pCallback,
+                void *pCallbackTag)
+{
+    int j, i = 0, z;
+    XranSectorHandleInfo* pXranCc = NULL;
+    struct xran_device_ctx * p_xran_dev_ctx = NULL;
+
+    if(NULL == pHandle) {
+        printf("Handle is NULL!\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    pXranCc = (XranSectorHandleInfo*) pHandle;
+    p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
+    if (p_xran_dev_ctx == NULL) {
+        printf ("p_xran_dev_ctx is NULL\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    i = pXranCc->nIndex;
+
+    for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
+        for(z = 0; z < XRAN_MAX_PRACH_ANT_NUM; z++) {
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_PRACH_ANT_NUM; /* antenna number */
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
+            if(pDstBuffer[z][j])
+                p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
+
+            p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0];
+            if(pDstBufferDecomp[z][j])
+                p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j];
+        }
+    }
+
+    p_xran_dev_ctx->pPrachCallback[i]    = pCallback;
+    p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
+
+    print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
+        p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]);
+
+    return XRAN_STATUS_SUCCESS;
+}
+
+int32_t
+xran_5g_srs_req (void *  pHandle,
+                struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
+                struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
+                xran_transport_callback_fn pCallback,
+                void *pCallbackTag)
+{
+    int j, i = 0, z;
+    XranSectorHandleInfo* pXranCc = NULL;
+    struct xran_device_ctx * p_xran_dev_ctx = NULL;
+
+    if(NULL == pHandle) {
+        printf("Handle is NULL!\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    pXranCc = (XranSectorHandleInfo*) pHandle;
+    p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort);
+    if (p_xran_dev_ctx == NULL) {
+        printf ("p_xran_dev_ctx is NULL\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    i = pXranCc->nIndex;
+
+    for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) {
+        for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++) {
+            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; /* antenna number */
+            p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
+            if(pDstBuffer[z][j])
+                p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j]));
+
+            /* C-plane SRS */
+            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
+            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
+            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
+            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
+            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
+            p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z];
+
+            if(pDstCpBuffer[z][j])
+                p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList =   *pDstCpBuffer[z][j];
+            else
+                memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j]));
+
+        }
+    }
+
+    p_xran_dev_ctx->pSrsCallback[i]    = pCallback;
+    p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
+
+    print_dbg("%s: [p %d CC  %d] Cb %p cb %p\n",__FUNCTION__,
+        p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]);
+
+    return XRAN_STATUS_SUCCESS;
+}
+
+uint32_t
+xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear)
+{
+    uint32_t i;
+
+    *num_core_used = xran_num_cores_used;
+    for (i = 0; i < xran_num_cores_used; i++)
+    {
+        core_used[i] = xran_core_used[i];
+    }
+
+    *total_time = xran_total_tick;
+    *used_time = xran_used_tick;
+
+    if (clear)
+    {
+        xran_total_tick = 0;
+        xran_used_tick = 0;
+    }
+
+    return 0;
+}
+
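+/*
+ * xran_add_cp_hdr_offset / xran_add_hdr_offset: compute where the payload
+ * may start inside an mbuf by skipping the headroom plus the eCPRI, radio
+ * application and section headers (plus the compression header when a
+ * compression method is in use), then rounding the pointer up to a
+ * 64-byte boundary for aligned access.
+ */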
+uint8_t*
+xran_add_cp_hdr_offset(uint8_t  *dst)
+{
+    dst += (RTE_PKTMBUF_HEADROOM +
+            sizeof(struct xran_ecpri_hdr) +
+            sizeof(struct xran_cp_radioapp_section1_header) +
+            sizeof(struct xran_cp_radioapp_section1));
+
+    dst = RTE_PTR_ALIGN_CEIL(dst, 64);
+
+    return dst;
+}
+
+uint8_t*
+xran_add_hdr_offset(uint8_t  *dst, int16_t compMethod)
+{
+    dst+= (RTE_PKTMBUF_HEADROOM +
+          sizeof (struct xran_ecpri_hdr) +
+          sizeof (struct radio_app_common_hdr) +
+          sizeof(struct data_section_hdr));
+    if(compMethod != XRAN_COMPMETHOD_NONE)
+          dst += sizeof (struct data_section_compression_hdr);
+    dst = RTE_PTR_ALIGN_CEIL(dst, 64);
+
+    return dst;
+}
+
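+/*
+ * xran_pkt_gen_process_ring: drains up to 16 TX descriptors from the DL
+ * packet-generation ring. Each dequeued mbuf carries a cp_up_tx_desc whose
+ * fields parameterize xran_process_tx_sym_cp_on_opt() (port, TTI, CC and
+ * antenna ranges, symbol, compression and direction); the descriptor is
+ * returned to its pool afterwards. Returns the number of entries still
+ * queued so the caller can decide whether to poll again.
+ */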
+int32_t
+xran_pkt_gen_process_ring(struct rte_ring *r)
+{
+    assert(r);
+    struct rte_mbuf *mbufs[16];
+    int i;
+    uint32_t remaining;
+    uint64_t t1;
+    struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
+    const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
+        RTE_DIM(mbufs), &remaining);
+
+    if (!dequeued)
+        return 0;
+
+    t1 = MLogXRANTick();
+    for (i = 0; i < dequeued; ++i) {
+        struct cp_up_tx_desc * p_tx_desc =  (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i],  struct cp_up_tx_desc *);
+        xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
+                                        p_tx_desc->ctx_id,
+                                        p_tx_desc->tti,
+                                        p_tx_desc->start_cc,
+                                        p_tx_desc->cc_num,
+                                        p_tx_desc->start_ant,
+                                        p_tx_desc->ant_num,
+                                        p_tx_desc->frame_id,
+                                        p_tx_desc->subframe_id,
+                                        p_tx_desc->slot_id,
+                                        p_tx_desc->sym_id,
+                                        (enum xran_comp_hdr_type)p_tx_desc->compType,
+                                        (enum xran_pkt_dir) p_tx_desc->direction,
+                                        p_tx_desc->xran_port_id,
+                                        (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
+
+        xran_pkt_gen_desc_free(p_tx_desc);
+        if (XRAN_STOPPED == xran_if_current_state){
+            MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
+            return -1;
+        }
+    }
+
+    if(p_io_cfg->io_sleep)
+       nanosleep(&sleeptime,NULL);
+
+    MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
+
+    return remaining;
+}
+
+int32_t
+xran_dl_pkt_ring_processing_func(void* args)
+{
+    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+    uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
+    uint16_t current_port;
+
+    rte_timer_manage();
+
+    for (current_port = 0; current_port < XRAN_PORTS_NUM;  current_port++) {
+        if( xran_port_mask & (1<<current_port)) {
+            xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
+        }
+    }
+
+    if (XRAN_STOPPED == xran_if_current_state)
+        return -1;
+
+    return 0;
+}
+
+int32_t xran_fh_rx_and_up_tx_processing(void *port_mask)
+{
+    int32_t ret_val=0;
+
+    ret_val = ring_processing_func((void *)0);
+    if(ret_val != 0)
+       return ret_val;
+
+    ret_val = xran_dl_pkt_ring_processing_func(port_mask);
+    if(ret_val != 0)
+       return ret_val;
+
+    return 0;
+}
+
+/** Function that services DPDK timers */
+int32_t
+xran_processing_timer_only_func(void* args)
+{
+    rte_timer_manage();
+    if (XRAN_STOPPED == xran_if_current_state)
+        return -1;
+
+    return 0;
+}
+
+/** Function that parses RX packets on all ports and performs TX and RX on the ETH device */
+int32_t
+xran_all_tasks(void* arg)
+{
+
+    ring_processing_func(arg);
+    process_dpdk_io(arg);
+    return 0;
+}
+
+/** Function to perform TX and RX on the ETH device */
+int32_t
+xran_eth_trx_tasks(void* arg)
+{
+    process_dpdk_io(arg);
+    return 0;
+}
+
+/** Function to perform RX on the ETH device */
+int32_t
+xran_eth_rx_tasks(void* arg)
+{
+    process_dpdk_io_rx(arg);
+    return 0;
+}
+
+/** Function to process O-RAN FH packets per port */
+int32_t
+ring_processing_func_per_port(void* args)
+{
+    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+    int32_t i;
+    uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
+    queueid_t qi;
+
+    for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
+        if (ctx->vf2xran_port[i] == port_id) {
+            for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
+                if (process_ring(ctx->rx_ring[i][qi], i, qi))
+                    return 0;
+            }
+        }
+    }
+
+    if (XRAN_STOPPED == xran_if_current_state)
+        return -1;
+
+    return 0;
+}
+
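+/*
+ * Worker spawning below scans the pkt_proc_core / pkt_proc_core_64_127
+ * bitmasks to build core_map[] (the timing core is counted separately in
+ * total_num_cores); the resulting core count, together with the RU
+ * category, selects which task functions are bound to the timing thread
+ * and to each worker thread.
+ */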
+/** Function that generates the worker-thread configuration and creates the threads based on the scenario and the platform in use */
+int32_t
+xran_spawn_workers(void)
+{
+    uint64_t nWorkerCore = 1LL;
+    uint32_t coreNum     = sysconf(_SC_NPROCESSORS_CONF);
+    int32_t  i = 0;
+    uint32_t total_num_cores  = 1; /*start with timing core */
+    uint32_t worker_num_cores = 0;
+    uint32_t icx_cpu = 0;
+    int32_t core_map[2*sizeof(uint64_t)*8];
+    uint64_t xran_port_mask = 0;
+
+    struct xran_ethdi_ctx  *eth_ctx   = xran_ethdi_get_ctx();
+    struct xran_device_ctx *p_dev     = NULL;
+    struct xran_fh_init    *fh_init   = NULL;
+    struct xran_fh_config  *fh_cfg    = NULL;
+    struct xran_worker_th_ctx* pThCtx = NULL;
+    void *worker_ports=NULL;
+
+    p_dev =  xran_dev_get_ctx_by_id(0);
+    if(p_dev == NULL) {
+        print_err("p_dev\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    fh_init = &p_dev->fh_init;
+    if(fh_init == NULL) {
+        print_err("fh_init\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    fh_cfg = &p_dev->fh_cfg;
+    if(fh_cfg == NULL) {
+        print_err("fh_cfg\n");
+        return XRAN_STATUS_FAIL;
+    }
+
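+    /* Build the worker core map from the two 64-bit core affinity masks
+       (cores 0-63 and 64-127) provided in the I/O configuration */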
+    for (i = 0; i < coreNum && i < 64; i++) {
+        if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+            core_map[worker_num_cores++] = i;
+            total_num_cores++;
+        }
+        nWorkerCore = nWorkerCore << 1;
+    }
+
+    nWorkerCore = 1LL;
+    for (i = 64; i < coreNum && i < 128; i++) {
+        if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+            core_map[worker_num_cores++] = i;
+            total_num_cores++;
+        }
+        nWorkerCore = nWorkerCore << 1;
+    }
+
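+    /* Probe for AVX512-IFMA (ICX or newer CPU) via the Intel compiler
+       intrinsic; the flag is used below when selecting a worker layout */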
+    extern int _may_i_use_cpu_feature(unsigned __int64);
+    icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
+
+    printf("O-XU      %d\n", eth_ctx->io_cfg.id);
+    printf("HW        %d\n", icx_cpu);
+    printf("Num cores %d\n", total_num_cores);
+    printf("Num ports %d\n", fh_init->xran_ports);
+    printf("O-RU Cat  %d\n", fh_cfg->ru_conf.xranCat);
+    printf("O-RU CC   %d\n", fh_cfg->nCC);
+    printf("O-RU eAxC %d\n", fh_cfg->neAxc);
+
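+    /* Aggregate all configured xRAN ports into a single bit mask */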
+    for (i = 0; i < fh_init->xran_ports; i++){
+        xran_port_mask |= 1L<<i;
+    }
+
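+    /* Default job to worker mapping: C-plane DL/UL jobs go to worker 1;
+       the scenario-specific layouts below override this where needed */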
+    for (i = 0; i < fh_init->xran_ports; i++) {
+        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+        if(p_dev_update == NULL){
+            print_err("p_dev_update\n");
+            return XRAN_STATUS_FAIL;
+        }
+        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
+        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
+        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+    }
+
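+    /* Select the worker layout based on O-RU category, number of xRAN ports
+       and the total core budget (timing core plus packet workers) */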
+    if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+        switch(total_num_cores) {
+            case 1: /** only timing core */
+                eth_ctx->time_wrk_cfg.f = xran_all_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+            break;
+            case 2:
+                eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id    = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
+            break;
+            case 3:
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0 **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id      = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                for (i = 0; i < fh_init->xran_ports; i++) {
+                    struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                    if(p_dev_update == NULL) {
+                        print_err("p_dev_update\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                    printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                    printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                }
+
+                /** 1 - CP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id      = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg  = (void*)xran_port_mask;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+            break;
+            default:
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+        }
+    } else if ((fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1)  || fh_init->io_cfg.bbu_offload) {
+        switch(total_num_cores) {
+            case 1: /** only timing core */
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            break;
+            case 2:
+                eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                if (p_dev->fh_init.io_cfg.bbu_offload)
+                    p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
+                else
+                    p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id    = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
+            break;
+            case 3:
+                if(1) {
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 0; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)xran_port_mask;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+                } else {
+                    print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                    return XRAN_STATUS_FAIL;
+                }
+            break;
+            case 4:
+                if(1) {
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 1; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+                } else {
+                    print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                    return XRAN_STATUS_FAIL;
+                }
+                break;
+            case 5:
+                if(1) {
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_rx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 3 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 3;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 1; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+                } else {
+                    print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                    return XRAN_STATUS_FAIL;
+                }
+                break;
+            case 6:
+                if(eth_ctx->io_cfg.id == O_DU) {
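+                    /*** O_DU specific config */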
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_rx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1 Eth Tx **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = process_dpdk_io_tx;
+                    pThCtx->task_arg  = (void*)2;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 3 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 3;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 4 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 4;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 0; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+                } else if(eth_ctx->io_cfg.id == O_RU) {
+                    /*** O_RU specific config */
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = NULL;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0  Eth RX */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = process_dpdk_io_rx;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func_per_port;
+                    pThCtx->task_arg  = (void*)0;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func_per_port;
+                    pThCtx->task_arg  = (void*)1;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 3  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 3;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func_per_port;
+                    pThCtx->task_arg  = (void*)2;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /**  FH TX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 4;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = process_dpdk_io_tx;
+                    pThCtx->task_arg  = (void*)2;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+                } else {
+                    print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                    return XRAN_STATUS_FAIL;
+                }
+                break;
+            default:
+                print_err("unsupported configuration\n");
+                return XRAN_STATUS_FAIL;
+        }
+    } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
+        switch(total_num_cores) {
+            case 1:
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            break;
+
+            case 2:
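+            /* one worker services FH RX and U-plane TX for all configured
+               ports while the timing core handles ETH TRX */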
+            if(fh_init->xran_ports == 2)
+                worker_ports = (void *)((1L<<0 | 1L<<1) & xran_port_mask);
+            else if(fh_init->xran_ports == 3)
+                worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2) & xran_port_mask);
+            else if(fh_init->xran_ports == 4)
+                worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2 | 1L<<3) & xran_port_mask);
+            else
+            {
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            }
+
+            eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+            eth_ctx->time_wrk_cfg.arg   = NULL;
+            eth_ctx->time_wrk_cfg.state = 1;
+
+            /* p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; */
+
+            pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+            if(pThCtx == NULL){
+                print_err("pThCtx allocation error\n");
+                return XRAN_STATUS_FAIL;
+            }
+            memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+            pThCtx->worker_id    = 0;
+            pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+            snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+            pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
+            pThCtx->task_arg  = worker_ports;
+            eth_ctx->pkt_wrk_cfg[0].f     = xran_generic_worker_thread;
+            eth_ctx->pkt_wrk_cfg[0].arg   = pThCtx;
+
+            for (i = 1; i < fh_init->xran_ports; i++) {
+                struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                if(p_dev_update == NULL) {
+                    print_err("p_dev_update\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+            }
+            break;
+            case 3:
+                if(icx_cpu) {
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 1; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)xran_port_mask;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+                }
+                else /* csx cpu */
+                {
+                    if(fh_init->xran_ports == 3)
+                        worker_ports = (void *)(1L<<2 & xran_port_mask);
+                    else if(fh_init->xran_ports == 4)
+                        worker_ports = (void *)((1L<<2 | 1L<<3) & xran_port_mask);
+                    else{
+                        print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                        return XRAN_STATUS_FAIL;
+                    }
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void *)((1L<<0|1L<<1) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 1; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_fh_rx_and_up_tx_processing;
+                    pThCtx->task_arg  = worker_ports;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+                }
+
+            break;
+
+            case 4:
+                if(1) {
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)(((1L<<1) | (1L<<2)) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1L<<0) & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    for (i = 1; i < fh_init->xran_ports; i++) {
+                        struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                        if(p_dev_update == NULL) {
+                            print_err("p_dev_update\n");
+                            return XRAN_STATUS_FAIL;
+                        }
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                    }
+                }
+                else {
+                    print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                    return XRAN_STATUS_FAIL;
+                }
+            break;
+            case 5:
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1<<0)  & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1<<1)  & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 3 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 3;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1<<2)  & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+
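+                    /* with DL C-plane bursting disabled, assign port i's DL CP
+                       job to worker i+1, which generates that port's U-plane */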
+                    if(eth_ctx->io_cfg.id == O_DU && 0 == fh_init->dlCpProcBurst) {
+                        for (i = 1; i < fh_init->xran_ports; i++) {
+                            struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                            if(p_dev_update == NULL) {
+                                print_err("p_dev_update\n");
+                                return XRAN_STATUS_FAIL;
+                            }
+                            p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = i+1;
+                            printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                        }
+                    }
+
+            break;
+            case 6:
+                if(eth_ctx->io_cfg.id == O_DU){
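+                    /*** O_DU specific config */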
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0 **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1 - CP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id      = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_processing_timer_only_func;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1<<0)  & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 3 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 3;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1<<1)  & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 4 UP GEN **/
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id    = 4;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                    pThCtx->task_arg  = (void*)((1<<2)  & xran_port_mask);
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+                } else {
+                    /*** O_RU specific config */
+                    /* timing core */
+                    eth_ctx->time_wrk_cfg.f     = NULL;
+                    eth_ctx->time_wrk_cfg.arg   = NULL;
+                    eth_ctx->time_wrk_cfg.state = 1;
+
+                    /* workers */
+                    /** 0  Eth RX */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 0;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = process_dpdk_io_rx;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 1  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 1;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func_per_port;
+                    pThCtx->task_arg  = (void*)0;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 2  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 2;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func_per_port;
+                    pThCtx->task_arg  = (void*)1;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /** 3  FH RX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 3;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = ring_processing_func_per_port;
+                    pThCtx->task_arg  = (void*)2;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                    /**  FH TX and BBDEV */
+                    pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                    if(pThCtx == NULL){
+                        print_err("pThCtx allocation error\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                    pThCtx->worker_id = 4;
+                    pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                    snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+                    pThCtx->task_func = process_dpdk_io_tx;
+                    pThCtx->task_arg  = NULL;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                    eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+                }
+            break;
+            case 7:
+            /* O-RU with 4 xRAN ports, or O-DU with 3 or 4 ports */
+            if((fh_init->xran_ports == 4) && (eth_ctx->io_cfg.id == O_RU))
+            {
+                /*** O_RU specific config */
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f     = NULL;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0  Eth RX */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = process_dpdk_io_rx;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /** 1  FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg  = (void*)0;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /** 2  FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 2;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg  = (void*)1;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /** 3  FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 3;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg  = (void*)2;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /** 4  FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 4;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p3", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg  = (void*)3;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /**  FH TX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 5;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = process_dpdk_io_tx;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+            } /* -- if xran->ports == 4 -- */
+            else if(eth_ctx->io_cfg.id == O_DU){
+                if(fh_init->xran_ports == 3)
+                    worker_ports = (void *)((1<<2) & xran_port_mask);
+                else if(fh_init->xran_ports == 4)
+                    worker_ports = (void *)((1<<3) & xran_port_mask);
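+                /* worker_ports selects the xRAN port drained by the last UP GEN
+                 * worker (worker 5 below): bit 2 for a 3-port config, bit 3 for a
+                 * 4-port config, masked with xran_port_mask. */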
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0 **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id      = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
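+                /* Ports 2..xran_ports-1 get no dedicated CP worker in this layout,
+                 * so their UL C-Plane jobs are pinned to worker 0 alongside the
+                 * ring processing it already runs. */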
+                for (i = 2; i < fh_init->xran_ports; i++) {
+                    struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                    if(p_dev_update == NULL) {
+                        print_err("p_dev_update\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                    printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                }
+
+                /** 1 - CP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id      = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_processing_timer_only_func;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /** 2 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id    = 2;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg  = (void*)((1<<0) & xran_port_mask);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
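+                /* Route the DL C-Plane job of the last port to this UP GEN worker
+                 * (worker 2); the loop after worker 3 does the same for the
+                 * second-to-last port. */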
+                for (i = (fh_init->xran_ports-1); i < fh_init->xran_ports; i++) {
+                    struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                    if(p_dev_update == NULL) {
+                        print_err("p_dev_update\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                    printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                }
+
+                /** 3 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id    = 3;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg  = (void*)((1<<1) & xran_port_mask);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                for (i = (fh_init->xran_ports - 2); i < (fh_init->xran_ports - 1); i++) {
+                    struct xran_device_ctx * p_dev_update =  xran_dev_get_ctx_by_id(i);
+                    if(p_dev_update == NULL) {
+                        print_err("p_dev_update\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                    printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i,  p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                }
+
+                /** 4 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id    = 4;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg  = (void*)((1<<2) & xran_port_mask);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+
+                /** 5 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id    = 5;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg  = worker_ports;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f     = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg   = pThCtx;
+            }
+            else {
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            }
+            break;
+
+            default:
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+        }
+    } else {
+        print_err("unsupported configuration\n");
+        return XRAN_STATUS_FAIL;
+    }
+
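+    /* Spawn workers by scanning the core bitmasks: for every set bit, launch the
+     * next configured worker on that lcore via rte_eal_remote_launch(), so worker
+     * K lands on the K-th set bit of pkt_proc_core (cores 0..63) and then of
+     * pkt_proc_core_64_127 (cores 64..127). */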
+    nWorkerCore = 1LL;
+    if(eth_ctx->io_cfg.pkt_proc_core) {
+        for (i = 0; i < coreNum && i < 64; i++) {
+            if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+                xran_core_used[xran_num_cores_used++] = i;
+                if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
+                    rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
+                eth_ctx->pkt_wrk_cfg[i].state = 1;
+                if(eth_ctx->pkt_proc_core_id == 0)
+                    eth_ctx->pkt_proc_core_id = i;
+                printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
+                eth_ctx->worker_core[eth_ctx->num_workers++] = i;
+            }
+            nWorkerCore = nWorkerCore << 1;
+        }
+    }
+
+    nWorkerCore = 1LL;
+    if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
+        for (i = 64; i < coreNum && i < 128; i++) {
+            if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+                xran_core_used[xran_num_cores_used++] = i;
+                if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
+                    rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
+                eth_ctx->pkt_wrk_cfg[i].state = 1;
+                if(eth_ctx->pkt_proc_core_id == 0)
+                    eth_ctx->pkt_proc_core_id = i;
+                printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
+                eth_ctx->worker_core[eth_ctx->num_workers++] = i;
+            }
+            nWorkerCore = nWorkerCore << 1;
+        }
+    }
+
+    return XRAN_STATUS_SUCCESS;
+}
+
+int32_t
+xran_open(void *pHandle, struct xran_fh_config* pConf)
+{
+    int32_t ret = XRAN_STATUS_SUCCESS;
+    int32_t i;
+    uint8_t nNumerology = 0;
+    struct xran_device_ctx  *p_xran_dev_ctx = NULL;
+    struct xran_fh_config   *pFhCfg  = NULL;
+    struct xran_fh_init     *fh_init = NULL;
+    struct xran_ethdi_ctx   *eth_ctx = xran_ethdi_get_ctx();
+    int32_t wait_time = 10;
+    int64_t offset_sec, offset_nsec;
+
+    if(pConf->dpdk_port < XRAN_PORTS_NUM) {
+        p_xran_dev_ctx  = xran_dev_get_ctx_by_id(pConf->dpdk_port);
+    } else {
+        print_err("@0x%p [ru %d] pConf->dpdk_port >= XRAN_PORTS_NUM\n", pConf,  pConf->dpdk_port);
+        return XRAN_STATUS_FAIL;
+    }
+
+    if(p_xran_dev_ctx == NULL) {
+        print_err("[ru %d] p_xran_dev_ctx == NULL\n", pConf->dpdk_port);
+        return XRAN_STATUS_FAIL;
+    }
+
+    pFhCfg = &p_xran_dev_ctx->fh_cfg;
+    memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
+
+    fh_init = &p_xran_dev_ctx->fh_init;
+    if(fh_init == NULL)
+        return XRAN_STATUS_FAIL;
+
+    if(pConf->log_level) {
+        printf(" %s: %s Category %s\n", __FUNCTION__,
+               (pFhCfg->ru_conf.xranTech == XRAN_RAN_5GNR) ? "5G NR" : "LTE",
+               (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
+    }
+
+    p_xran_dev_ctx->enableCP          = pConf->enableCP;
+    p_xran_dev_ctx->enablePrach       = pConf->prachEnable;
+    p_xran_dev_ctx->enableSrs         = pConf->srsEnable;
+    p_xran_dev_ctx->enableSrsCp       = pConf->srsEnableCp;
+    p_xran_dev_ctx->nSrsDelaySym      = pConf->SrsDelaySym;
+    p_xran_dev_ctx->puschMaskEnable   = pConf->puschMaskEnable;
+    p_xran_dev_ctx->puschMaskSlot     = pConf->puschMaskSlot;
+    p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna;
+    p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable = pConf->RunSlotPrbMapBySymbolEnable;
+    p_xran_dev_ctx->dssEnable         = pConf->dssEnable;
+    p_xran_dev_ctx->dssPeriod         = pConf->dssPeriod;
+    for(i=0; i<pConf->dssPeriod; i++) {
+        p_xran_dev_ctx->technology[i] = pConf->technology[i];
+    }
+
+    if(pConf->GPS_Alpha || pConf->GPS_Beta) {
+        offset_sec  = pConf->GPS_Beta / 100;    /* resolution of beta is 10ms */
+        offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 1e7 + pConf->GPS_Alpha;
+        p_xran_dev_ctx->offset_sec  = offset_sec;
+        p_xran_dev_ctx->offset_nsec = offset_nsec;
+    } else {
+        p_xran_dev_ctx->offset_sec  = 0;
+        p_xran_dev_ctx->offset_nsec = 0;
+    }
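+    /* Worked example (illustrative values): GPS_Beta = 250 (units of 10 ms) and
+     * GPS_Alpha = 1234 ns give offset_sec = 250 / 100 = 2 and
+     * offset_nsec = (250 - 2 * 100) * 1e7 + 1234 = 500001234 ns, i.e. an overall
+     * timing offset of 2.500001234 s. */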
+
+
+    nNumerology = xran_get_conf_numerology(p_xran_dev_ctx);
+
+    if (pConf->nCC > XRAN_MAX_SECTOR_NR) {
+        if(pConf->log_level)
+            printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);
+        pConf->nCC = XRAN_MAX_SECTOR_NR;
+    }
+
+    if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER  || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) {
+        print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);
+        return XRAN_STATUS_FAIL;
+    }
+
+    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) {
+        if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) {
+            return ret;
+        }
+    }
+
+    /* setup PRACH configuration for C-Plane */
+    if(pConf->dssEnable) {
+        if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR)) < 0)
+            return ret;
+        if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx)) < 0)
+            return ret;
+    }
+    else {
+        if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) {
+            if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR)) < 0) {
+                return ret;
+            }
+        } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) {
+            if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx)) < 0) {
+                return ret;
+            }
+        }
+    }
+
+    if((ret  = xran_init_srs(pConf, p_xran_dev_ctx))< 0){
+        return ret;
+    }
+
+    if((ret  = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){
+        return ret;
+    }
+
+    if((ret  = xran_init_sectionid(p_xran_dev_ctx)) < 0){
+        return ret;
+    }
+
+    if((ret  = xran_init_seqid(p_xran_dev_ctx)) < 0){
+        return ret;
+    }
+
+    if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+        if((ret  = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) {
+            return ret;
+        }
+
+        if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) {
+            if((ret  = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) {
+                return ret;
+            }
+        }
+    }
+
+    if(pConf->ru_conf.xran_max_frame) {
+        xran_max_frame = pConf->ru_conf.xran_max_frame;
+        printf("xran_max_frame %d\n", xran_max_frame);
+    }
+
+    p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology);
+    if (interval_us > p_xran_dev_ctx->interval_us_local)
+    {
+        interval_us = xran_fs_get_tti_interval(nNumerology); //only update interval_us based on maximum numerology
+    }
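+    /* interval_us is shared across all ports: keep it at the smallest TTI seen so
+     * far (i.e. the highest numerology) so the common timing source can serve
+     * every port; interval_us_local stays per-device. */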
+
+    printf("%s: interval_us=%ld, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local);
+
+    if (nNumerology >= timing_get_numerology()) {
+        timing_set_numerology(nNumerology);
+    }
+
+    for(i = 0 ; i <pConf->nCC; i++){
+        xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
+            pConf->frame_conf.sSlotConfig);
+    }
+
+    xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology));
+
+    /* if send_xpmbuf2ring needs to be changed from default functions,
+     * then those should be set between xran_init and xran_open */
+    if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
+        p_xran_dev_ctx->send_cpmbuf2ring    = xran_ethdi_mbuf_send_cp;
+    if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
+        p_xran_dev_ctx->send_upmbuf2ring    = xran_ethdi_mbuf_send;
+
+    if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+        if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
+            p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+    } else {
+        if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
+            p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
+    }
+
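+    /* BBU offload overrides the category-based default above: TX symbols are then
+     * produced through the ring-based path. */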
+    if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
+        p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
+    printf("bbu_offload %d\n", p_xran_dev_ctx->fh_init.io_cfg.bbu_offload);
+    if(pConf->dpdk_port == 0) {
+        /* create all thread on open of port 0 */
+        xran_num_cores_used = 0;
+        if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
+            eth_ctx->bbdev_dec = pConf->bbdev_dec;
+            eth_ctx->bbdev_enc = pConf->bbdev_enc;
+        }
+
+        if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+            printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
+            p_xran_dev_ctx->timing_source_thread_running = 0;
+            xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
+            if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
+                rte_panic("thread_run() failed to start\n");
+        } else if(pConf->log_level) {
+            printf("Eth port is not open; processing thread was not started\n");
+        }
+    } else {
+        if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
+            if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
+                return ret;
+            }
+        }
+    }
+
+    if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+        if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
+            if((ret = xran_spawn_workers()) < 0) {
+                return ret;
+            }
+        }
+        printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__,  sched_getcpu(), getpid());
+        printf("Waiting on Timing thread...\n");
+        while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
+            usleep(100);
+        }
+    }
+
+    print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
+    return ret;
+}
+
+int32_t
+xran_start(void *pHandle)
+{
+    struct tm * ptm;
+    /* ToS = Top of Second start +- 1.5us */
+    struct timespec ts;
+    char buff[100];
+    int i;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+    struct xran_prb_map * prbMap0 = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[0][0][0].sBufferList.pBuffers->pData;
+    for(i = 0; i < XRAN_MAX_SECTIONS_PER_SLOT && i < prbMap0->nPrbElm; i++)
+    {
+        p_xran_dev_ctx->numSetBFWs_arr[i] = prbMap0->prbMap[i].bf_weight.numSetBFWs;
+    }
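+    /* The loop above caches the per-section beamforming-weight set counts from
+     * the first RX PRB map (port 0, CC 0, buffer 0), so they are available later
+     * without walking the buffer list (assumption: consumers read numSetBFWs_arr
+     * rather than the map itself). */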
+
+    if(xran_get_if_state() == XRAN_RUNNING) {
+        print_err("Already STARTED!!");
+        return (-1);
+    }
+    timespec_get(&ts, TIME_UTC);
+    ptm = gmtime(&ts.tv_sec);
+    if(ptm){
+        strftime(buff, sizeof(buff), "%D %T", ptm);
+        printf("%s: XRAN start time: %s.%09ld UTC [%ld]\n",
+            (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
+    }
+
+    if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable) {
+        xran_if_current_state = XRAN_OWDM;
+    } else {
+        xran_if_current_state = XRAN_RUNNING;
+    }
+    return 0;
+}
+
+int32_t
+xran_stop(void *pHandle)
+{
+    if(xran_get_if_state() == XRAN_STOPPED) {
+        print_err("Already STOPPED!!");
+        return (-1);
+    }
+
+    xran_if_current_state = XRAN_STOPPED;
+    return 0;
+}
+
+int32_t
+xran_close(void *pHandle)
+{
+    int32_t ret = XRAN_STATUS_SUCCESS;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+
+    xran_if_current_state = XRAN_STOPPED;
+    ret = xran_cp_free_sectiondb(p_xran_dev_ctx);
+
+    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)
+        xran_ruemul_release(p_xran_dev_ctx);
+
+#ifdef RTE_LIBRTE_PDUMP
+    /* uninitialize packet capture framework */
+    rte_pdump_uninit();
+#endif
+    return ret;
+}
+
+/* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and
+ * xran_open; any callback left NULL is set to its default during open */
+int32_t
+xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
+{
+    struct xran_device_ctx *p_xran_dev_ctx;
+
+    if(xran_get_if_state() == XRAN_RUNNING) {
+        print_err("Cannot register callback while running!!\n");
+        return (-1);
+    }
+
+    p_xran_dev_ctx = xran_dev_get_ctx();
+
+    p_xran_dev_ctx->send_cpmbuf2ring    = mbuf_send_cp;
+    p_xran_dev_ctx->send_upmbuf2ring    = mbuf_send_up;
+
+    p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+
+    return (0);
+}
+
+int32_t
+xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx,  uint32_t *nSlotIdx, uint64_t *nSecond)
+{
+    int32_t tti = 0;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
+    if (!p_xran_dev_ctx) {
+        print_err("Null xRAN context on port id %u!!\n", PortId);
+        return 0;
+    }
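+    /* Derive the TTI from the free-running OTA symbol counter (14 symbols per
+     * slot), then decompose it into slot / subframe / frame indices using the
+     * per-port TTI interval. */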
+
+    tti           = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT);
+    *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
+    *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local),  SUBFRAMES_PER_SYSTEMFRAME);
+    *nFrameIdx    = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
+    *nSecond      = timing_get_current_second();
+
+    return tti;
+}
+
+int32_t
+xran_set_debug_stop(int32_t value, int32_t count)
+{
+    return timing_set_debug_stop(value, count);
+}
+
+
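+/* Count how many PRB elements p_PrbMapIn expands to once every element larger
+ * than the per-packet RB budget (nmaxRB) is split into MTU-sized pieces; callers
+ * can use this to size the output map filled by xran_init_PrbMap_from_cfg()
+ * below. */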
+int32_t xran_get_num_prb_elm(struct xran_prb_map* p_PrbMapIn, uint32_t mtu)
+{
+    int32_t i,j = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src;
+    int32_t nRBremain;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+    uint32_t nRBSize=0;
+
+    if (mtu == 9600)
+        nmaxRB--;   /* empirically a 9600-byte MTU carries only 195 RBs, not the computed 196; keep one RB of margin */
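+    /* Sizing sketch (exact header sizes depend on the struct layouts in this
+     * build): with uncompressed iqWidth = 16, one RB of IQ payload is
+     * 12 REs * 2 * 16 bits = 48 bytes, so nmaxRB is roughly
+     * (mtu - headers - headroom) / (48 + sizeof(struct data_section_hdr)). */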
+
+    for (i = 0;i < p_PrbMapIn->nPrbElm; i++)
+    {
+        p_prb_elm_src = &p_PrbMapIn->prbMap[i];
+        if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
+        {
+            j++;
+        }
+        else
+        {
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            j++;
+            while (nRBremain > 0)
+            {
+                nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                nRBremain -= nRBSize;
+                j++;
+            }
+        }
+    }
+
+    return j;
+}
+
+
+int32_t xran_init_PrbMap_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
+{
+    int32_t i,j = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
+    int32_t nRBStart_tmp, nRBremain;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+
+    if (mtu == 9600)
+        nmaxRB--;   /* empirically a 9600-byte MTU carries only 195 RBs, not the computed 196; keep one RB of margin */
+
+    memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
+    for (i = 0;i < p_PrbMapIn->nPrbElm; i++)
+    {
+        p_prb_elm_src = &p_PrbMapIn->prbMap[i];
+        p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+        memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+
+        // int32_t nStartSymb, nEndSymb, numSymb, nRBStart, nRBEnd, nRBSize;
+        // nStartSymb = p_prb_elm_src->nStartSymb;
+        // nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
+        if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
+        {
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+        }
+        else
+        {
+            nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+            while (nRBremain > 0)
+            {
+                p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+                memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+                p_prb_elm_dst->IsNewSect = 0;
+                p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
+                nRBremain -= p_prb_elm_dst->UP_nRBSize;
+                nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
+                p_prb_elm_dst->nSectId = i;
+                j++;
+            }
+        }
+    }
+
+    p_PrbMapOut->nPrbElm = j;
+    return 0;
+}
+
+
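+/* RX flavor of the split above: the receive side can accept a section split
+ * across up to XRAN_MAX_FRAGMENT U-Plane fragments, so nmaxRB is scaled by that
+ * factor and every resulting element is emitted as a standalone section
+ * (IsNewSect = 1 with its own nSectId), unlike the TX split. */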
+int32_t xran_init_PrbMap_from_cfg_for_rx(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
+{
+    int32_t i,j = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
+    int32_t nRBStart_tmp, nRBremain;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+
+    if (mtu == 9600)
+        nmaxRB--;   /* empirically a 9600-byte MTU carries only 195 RBs, not the computed 196; keep one RB of margin */
+    nmaxRB *= XRAN_MAX_FRAGMENT;
+
+    memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
+    for (i = 0;i < p_PrbMapIn->nPrbElm; i++)
+    {
+        p_prb_elm_src = &p_PrbMapIn->prbMap[i];
+        p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+        memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+
+        if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
+        {
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = j;
+            j++;
+        }
+        else
+        {
+            nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = j;
+            j++;
+            while (nRBremain > 0)
+            {
+                p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+                memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+                p_prb_elm_dst->IsNewSect = 1;
+                p_prb_elm_dst->nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->nRBStart = nRBStart_tmp;
+                p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
+                nRBremain -= p_prb_elm_dst->UP_nRBSize;
+                nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
+                p_prb_elm_dst->nSectId = j;
+                j++;
+            }
+        }
+    }
+
+    p_PrbMapOut->nPrbElm = j;
+    return 0;
+}
+
+
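+/* Per-symbol re-map: for each of the 14 symbols in the slot, take the union
+ * [min nRBStart, max nRBStart + nRBSize) of all input elements covering that
+ * symbol, merge runs of consecutive symbols with identical spans into single
+ * elements, then apply the same MTU-driven fragmentation as above. */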
+int32_t xran_init_PrbMap_by_symbol_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu, uint32_t xran_max_prb)
+{
+    int32_t i = 0, j = 0, nPrbElm = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
+    struct xran_prb_elm prbMapTemp[XRAN_NUM_OF_SYMBOL_PER_SLOT];
+    int32_t nRBStart_tmp, nRBremain, nStartSymb, nEndSymb, nRBStart, nRBEnd, nRBSize;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+    if (mtu == 9600)
+        nmaxRB--;   /* empirically a 9600-byte MTU carries only 195 RBs, not the computed 196; keep one RB of margin */
+
+
+    memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
+    for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
+    {
+        p_prb_elm_dst = &prbMapTemp[i];
+        // nRBStart = 273;
+        nRBStart = xran_max_prb;
+        nRBEnd = 0;
+
+        for(j = 0; j < p_PrbMapIn->nPrbElm; j++)
+        {
+            p_prb_elm_src = &(p_PrbMapIn->prbMap[j]);
+            nStartSymb = p_prb_elm_src->nStartSymb;
+            nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
+
+            if((i >=  nStartSymb) && (i < nEndSymb))
+            {
+                if(nRBStart > p_prb_elm_src->nRBStart)
+                {
+                    nRBStart = p_prb_elm_src->nRBStart;
+                }
+                if(nRBEnd < (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize))
+                {
+                    nRBEnd = (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize);
+                }
+
+                p_prb_elm_dst->nBeamIndex = p_prb_elm_src->nBeamIndex;
+                p_prb_elm_dst->bf_weight_update = p_prb_elm_src->bf_weight_update;
+                p_prb_elm_dst->compMethod = p_prb_elm_src->compMethod;
+                p_prb_elm_dst->iqWidth = p_prb_elm_src->iqWidth;
+                p_prb_elm_dst->ScaleFactor = p_prb_elm_src->ScaleFactor;
+                p_prb_elm_dst->reMask = p_prb_elm_src->reMask;
+                p_prb_elm_dst->BeamFormingType = p_prb_elm_src->BeamFormingType;
+            }
+        }
+
+        if(nRBEnd < nRBStart)
+        {
+            p_prb_elm_dst->nRBStart = 0;
+            p_prb_elm_dst->nRBSize = 0;
+            p_prb_elm_dst->nStartSymb = i;
+            p_prb_elm_dst->numSymb = 1;
+        }
+        else
+        {
+            p_prb_elm_dst->nRBStart = nRBStart;
+            p_prb_elm_dst->nRBSize = nRBEnd - nRBStart;
+            p_prb_elm_dst->nStartSymb = i;
+            p_prb_elm_dst->numSymb = 1;
+        }
+    }
+
+    for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
+    {
+        if((prbMapTemp[i].nRBSize != 0))
+        {
+            nRBStart = prbMapTemp[i].nRBStart;
+            nRBSize = prbMapTemp[i].nRBSize;
+            prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
+            prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
+            prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
+            prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
+            prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
+            prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
+            prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
+            prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
+            prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
+            prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
+            i++;
+            break;
+        }
+    }
+
+    for(; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
+    {
+        if((nRBStart == prbMapTemp[i].nRBStart) && (nRBSize == prbMapTemp[i].nRBSize))
+        {
+                prbMapTemp[nPrbElm].numSymb++;
+        }
+        else
+        {
+            nPrbElm++;
+            prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
+            prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
+            prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
+            prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
+            prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
+            prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
+            prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
+            prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
+            prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
+            prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
+
+            nRBStart = prbMapTemp[i].nRBStart;
+            nRBSize = prbMapTemp[i].nRBSize;
+        }
+    }
+
+    for(i = 0; i < nPrbElm; i++)
+    {
+        if(prbMapTemp[i].nRBSize == 0)
+            prbMapTemp[i].nRBSize = 1;
+    }
+
+    if(prbMapTemp[nPrbElm].nRBSize != 0)
+        nPrbElm++;
+
+
+    j = 0;
+
+    for (i = 0;i < nPrbElm; i++)
+    {
+        p_prb_elm_src = &prbMapTemp[i];
+        p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+        memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+        if (p_prb_elm_src->nRBSize <= nmaxRB)    //no fragmentation needed
+        {
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+        }
+        else
+        {
+            nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+            while (nRBremain > 0)
+            {
+                p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+                memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+                p_prb_elm_dst->IsNewSect = 0;
+                p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
+                nRBremain -= p_prb_elm_dst->UP_nRBSize;
+                nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
+                p_prb_elm_dst->nSectId = i;
+                j++;
+            }
+        }
+    }
+
+    p_PrbMapOut->nPrbElm = j;
+
+    return 0;
+}
+
+inline void MLogXRANTask(uint32_t taskid, uint64_t ticksstart, uint64_t ticksstop)
+{
+    if (mlogxranenable)
+    {
+        MLogTask(taskid, ticksstart, ticksstop);
+    }
+    return;
+}
+
+inline uint64_t MLogXRANTick(void)
+{
+    if (mlogxranenable)
+        return MLogTick();
+    else
+        return 0;
+}
+
+