X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=blobdiff_plain;f=fhi_lib%2Flib%2Fsrc%2Fxran_main.c;h=89dcc1f60c432dda1001baf4314a717f522dbd8c;hb=2de97529a4c5a1922214ba0e6f0fb84cacbd0bc7;hp=94751f6bd0cb70efc4c3f1f93b7272bbe5e1746d;hpb=4745e5c88ba931c6d71cb6d8c681f76cf364eac5;p=o-du%2Fphy.git diff --git a/fhi_lib/lib/src/xran_main.c b/fhi_lib/lib/src/xran_main.c index 94751f6..89dcc1f 100644 --- a/fhi_lib/lib/src/xran_main.c +++ b/fhi_lib/lib/src/xran_main.c @@ -1,6 +1,6 @@ /****************************************************************************** * -* Copyright (c) 2019 Intel. +* Copyright (c) 2020 Intel. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ * *******************************************************************************/ - /** * @brief XRAN main functionality module * @file xran_main.c @@ -30,22 +29,35 @@ #include #include #include +#include #include #include #include #include +#include +#include +#include #include #include #include #include #include #include +#include #include - -#include "xran_fh_lls_cu.h" +#include +#include +#if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */ +#include +#endif +#include "xran_fh_o_du.h" +#include "xran_main.h" #include "ethdi.h" +#include "xran_mem_mgr.h" +#include "xran_tx_proc.h" +#include "xran_rx_proc.h" #include "xran_pkt.h" #include "xran_up_api.h" #include "xran_cp_api.h" @@ -53,208 +65,353 @@ #include "xran_lib_mlog_tasks_id.h" #include "xran_timer.h" #include "xran_common.h" +#include "xran_dev.h" +#include "xran_frame_struct.h" #include "xran_printf.h" +#include "xran_app_frag.h" +#include "xran_cp_proc.h" +#include "xran_tx_proc.h" +#include "xran_rx_proc.h" +#include "xran_cb_proc.h" +#include "xran_ecpri_owd_measurements.h" -#ifndef MLOG_ENABLED -#include "mlog_lnx_xRAN.h" -#else -#include "mlog_lnx.h" -#endif - -#define DIV_ROUND_OFFSET(X,Y) ( X/Y + ((X%Y)?1:0) ) - -#define XranOffsetSym(offSym, otaSym, numSymTotal) (((int32_t)offSym > (int32_t)otaSym) ? \ - ((int32_t)otaSym + ((int32_t)numSymTotal) - (uint32_t)offSym) : \ - (((int32_t)otaSym - (int32_t)offSym) >= numSymTotal) ? \ - (((int32_t)otaSym - (int32_t)offSym) - numSymTotal) : \ - ((int32_t)otaSym - (int32_t)offSym)) - -#define MAX_NUM_OF_XRAN_CTX (2) -#define XranIncrementCtx(ctx) ((ctx >= (MAX_NUM_OF_XRAN_CTX-1)) ? 0 : (ctx+1)) -#define XranDecrementCtx(ctx) ((ctx == 0) ? 
(MAX_NUM_OF_XRAN_CTX-1) : (ctx-1)) - -struct xran_timer_ctx { - uint32_t tti_to_process; -}; +#include "xran_mlog_lnx.h" -static XranLibHandleInfoStruct DevHandle; -static struct xran_lib_ctx g_xran_lib_ctx = { 0 }; +static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {NULL}; -struct xran_timer_ctx timer_ctx[MAX_NUM_OF_XRAN_CTX]; +uint64_t interval_us = 1000; //the TTI interval of the cell with maximum numerology -static struct rte_timer tti_to_phy_timer[10]; -static struct rte_timer tti_timer; -static struct rte_timer sym_timer; -static struct rte_timer tx_cp_dl_timer; -static struct rte_timer tx_cp_ul_timer; -static struct rte_timer tx_up_timer; +uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */ +uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Symbol index in a slot [0:13] */ +uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1] + where TTI is TTI interval in microseconds */ -static long interval_us = 125; +uint16_t xran_SFN_at_Sec_Start = 0; /**< SFN at current second start */ +uint16_t xran_max_frame = 1023; /**< value of max frame used. expected to be 99 (old compatibility mode) and 1023 as per section 9.7.2 System Frame Number Calculation */ -uint32_t xran_lib_ota_tti = 0; /* [0:7999] */ -uint32_t xran_lib_ota_sym = 0; /* [0:7] */ -uint32_t xran_lib_ota_sym_idx = 0; /* [0 : 14*8*1000-1] */ +static uint64_t xran_total_tick = 0, xran_used_tick = 0; +static uint32_t xran_num_cores_used = 0; +static uint32_t xran_core_used[64] = {0}; +static int32_t first_call = 0; -uint64_t xran_lib_gps_second = 0; - -static uint8_t xran_cp_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_DIR_MAX][XRAN_MAX_ANTENNA_NR]; -static uint8_t xran_section_id_curslot[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR]; -static uint16_t xran_section_id[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR]; - -void xran_timer_arm(struct rte_timer *tim, void* arg); -int xran_process_tx_sym(void *arg); - -int xran_process_rx_sym(void *arg, - void *iq_data_start, - uint16_t size, - uint8_t CC_ID, - uint8_t Ant_ID, - uint8_t frame_id, - uint8_t subframe_id, - uint8_t slot_id, - uint8_t symb_id); +struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void); +int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc); void tti_ota_cb(struct rte_timer *tim, void *arg); void tti_to_phy_cb(struct rte_timer *tim, void *arg); -void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore); -struct xran_lib_ctx *xran_lib_get_ctx(void) +int32_t xran_pkt_gen_process_ring(struct rte_ring *r); + +void +xran_updateSfnSecStart(void) { - return &g_xran_lib_ctx; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx(); + struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters; + int32_t xran_ports = p_xran_dev_ctx->fh_init.xran_ports; + int32_t o_xu_id = 0; + uint64_t currentSecond = timing_get_current_second(); + // Assume always positive + uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET; + uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND; + uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1)); + xran_SFN_at_Sec_Start = sfn; + + for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){ + pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter; + pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter; + pCnt->tx_bytes_counter = 0; + pCnt->rx_bytes_counter = 0; + p_xran_dev_ctx++; + pCnt = &p_xran_dev_ctx->fh_counters; + } } -static inline XRANFHCONFIG 
*xran_lib_get_ctx_fhcfg(void) +static inline int32_t +xran_getSlotIdxSecond(uint32_t interval) { - return (&(xran_lib_get_ctx()->xran_fh_cfg)); + int32_t frameIdxSecond = xran_getSfnSecStart(); + int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval); + return slotIndxSecond; } -inline uint16_t xran_get_beamid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id) -{ - return (0); // NO BEAMFORMING +enum xran_if_state +xran_get_if_state(void) + { + return xran_if_current_state; } -int xran_init_sectionid(void *pHandle) +int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id) { - int cell, dir, ant; - - for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) { - for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) { - xran_section_id[cell][ant] = 0; - xran_section_id_curslot[cell][ant] = 255; + int32_t is_prach_slot = 0; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId); + if (p_xran_dev_ctx == NULL) +{ + print_err("PortId %d not exist\n", PortId); + return is_prach_slot; +} + struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig); + uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx); + + if (nNumerology < 2){ + //for FR1, in 38.211 tab 6.3.3.2-2&3 it is subframe index + if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){ + if (pPrachCPConfig->nrofPrachInSlot == 0){ + if(slot_id == 0) + is_prach_slot = 1; + } + else if (pPrachCPConfig->nrofPrachInSlot == 2) + is_prach_slot = 1; + else{ + if (nNumerology == 0) + is_prach_slot = 1; + else if (slot_id == 1) + is_prach_slot = 1; } } - - return (0); + } else if (nNumerology == 3){ + //for FR2, 38.211 tab 6.3.3.4 it is slot index of 60kHz slot + uint32_t slotidx; + slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id; + if (pPrachCPConfig->nrofPrachInSlot == 2){ + if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) + is_prach_slot = 1; + } else { + if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){ + is_prach_slot = 1; + } + } + } else + print_err("Numerology %d not supported", nNumerology); + return is_prach_slot; } -int xran_init_seqid(void *pHandle) +int32_t +xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx) { - int cell, dir, ant; + struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg); - for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) { - for(dir=0; dir < XRAN_DIR_MAX; dir++) { - for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) { - xran_cp_seq_id_num[cell][dir][ant] = 0; - } - } - } + if(p_srs){ + p_srs->symbMask = pConf->srs_conf.symbMask; + p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset; + print_dbg("SRS sym %d\n", p_srs->symbMask ); + print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset); + } + return (XRAN_STATUS_SUCCESS); +} - return (0); +int32_t +xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx) +{ + /* update Rach for LTE */ + return xran_init_prach(pConf, p_xran_dev_ctx); } -inline uint16_t xran_alloc_sectionid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id) +int32_t +xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx) { - if(cc_id >= XRAN_MAX_CELLS_PER_PORT) { - print_err("Invalid CC ID - %d", cc_id); - return (0); - } - if(ant_id >= XRAN_MAX_ANTENNA_NR) { - print_err("Invalid antenna ID - %d", ant_id); - return (0); + int32_t i; + uint8_t slotNr; + struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf); + 
const xRANPrachConfigTableStruct *pxRANPrachConfigTable; + uint8_t nNumerology = pConf->frame_conf.nNumerology; + uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx; + struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig); + + if (nNumerology > 2) + pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx]; + else if (pConf->frame_conf.nFrameDuplexType == 1) + pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx]; + else + pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx]; + + uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0]; + const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt]; + memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config)); + if(pConf->log_level) + printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot); + + pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC; // 3, PRACH preamble format A1~3, B1~4, C0, C2 + pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym; + pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart; + pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70; + pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp; + pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing); + pPrachCPConfig->x = pxRANPrachConfigTable->x; + pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot; + pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0]; + pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1]; + if (preambleFmrt >= FORMAT_A1) + { + pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration; + pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot; + } + else + { + pPrachCPConfig->numSymbol = 1; + pPrachCPConfig->occassionsInPrachSlot = 1; + } + + if(pConf->log_level) + printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]); + pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1; + for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++) + { + slotNr = pxRANPrachConfigTable->slotNr[i]; + if (slotNr > 0){ + pPrachCPConfig->isPRACHslot[slotNr] = 1; + if(pConf->log_level) + printf(" %u ..", slotNr); } + } + printf("\n"); + for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){ + p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId; + p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1; + } + if(pConf->log_level){ + printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]); + } + + pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx); + print_dbg("PRACH eAxC_offset %d\n", pPrachCPConfig->eAxC_offset); + + /* Save some configs for app */ + pPRACHConfig->startSymId = pPrachCPConfig->startSymId; + pPRACHConfig->lastSymId = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1; + pPRACHConfig->startPrbc = pPrachCPConfig->startPrbc; + pPRACHConfig->numPrbc = pPrachCPConfig->numPrbc; + pPRACHConfig->timeOffset = pPrachCPConfig->timeOffset; + pPRACHConfig->freqOffset = pPrachCPConfig->freqOffset; 
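+    /* Note: pPrachCPConfig->eAxC_offset was set above to xran_get_num_eAxc(),
+     * i.e. PRACH eAxC IDs start right after the U-plane eAxC IDs; this matches
+     * prach_port_id = ant_id + num_eAxc used by tx_cp_ul_cb() when the PRACH
+     * C-Plane messages are generated. */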
+ pPRACHConfig->eAxC_offset = pPrachCPConfig->eAxC_offset; - /* if new slot has been started, - * then initializes section id again for new start */ - if(xran_section_id_curslot[cc_id][ant_id] != slot_id) { - xran_section_id[cc_id][ant_id] = 0; - xran_section_id_curslot[cc_id][ant_id] = slot_id; + return (XRAN_STATUS_SUCCESS); } - - return(xran_section_id[cc_id][ant_id]++); -} -inline uint8_t xran_get_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id) +uint32_t +xran_slotid_convert(uint16_t slot_id, uint16_t dir) //dir = 0, from PHY slotid to xran spec slotid as defined in 5.3.2, dir=1, from xran slotid to phy slotid { - if(dir >= XRAN_DIR_MAX) { - print_err("Invalid direction - %d", dir); - return (0); + return slot_id; +#if 0 + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx(); + uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology; + uint8_t FR = 1; + if (mu > 2) + FR=2; + if (dir == 0) + { + if (FR == 1) + { + return (slot_id << (2-mu)); } - if(cc_id >= XRAN_MAX_CELLS_PER_PORT) { - print_err("Invalid CC ID - %d", cc_id); - return (0); + else + { + return (slot_id << (3-mu)); + } + } + else + { + if (FR == 1) + { + return (slot_id >> (2-mu)); } - if(ant_id >= XRAN_MAX_ANTENNA_NR) { - print_err("Invalid antenna ID - %d", ant_id); - return (0); + else + { + return (slot_id >> (3-mu)); } - - return(xran_cp_seq_id_num[cc_id][dir][ant_id]++); + } +#endif } -inline int xran_update_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id, uint8_t seq_id) +void +sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick) { - return (0); -} + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + long t1 = MLogTick(), t2; + long t3; + + if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){ + t3 = xran_tick(); + tti_ota_cb(NULL, (void*)p_xran_dev_ctx); + *used_tick += get_ticks_diff(xran_tick(), t3); + } -////////////////////////////////////////// -// For RU emulation -static struct xran_section_gen_info cpSections[255]; -static struct xran_cp_gen_params cpInfo; -int process_cplane(struct rte_mbuf *pkt) -{ - int xran_parse_cp_pkt(struct rte_mbuf *mbuf, struct xran_cp_gen_params *result); + t3 = xran_tick(); + if (xran_process_tx_sym(p_xran_dev_ctx)) + { + *used_tick += get_ticks_diff(xran_tick(), t3); + } - cpInfo.sections = cpSections; - xran_parse_cp_pkt(pkt, &cpInfo); + /* check if there is call back to do something else on this symbol */ + struct cb_elem_entry *cb_elm; + LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){ + if(cb_elm){ + cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx); + p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx); + } + } - return (0); + t2 = MLogTick(); + MLogTask(PID_SYM_OTA_CB, t1, t2); } -////////////////////////////////////////// -void sym_ota_cb(struct rte_timer *tim, void *arg) +uint32_t +xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx) { - uint8_t offset = 0; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; - long t1 = MLogTick(); - - if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){ - tti_ota_cb(NULL, arg); - } - - if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 1){ - 
if(p_xran_lib_ctx->phy_tti_cb_done == 0){ - uint64_t t3 = MLogTick(); - /* rearm timer to deliver TTI event to PHY */ - p_xran_lib_ctx->phy_tti_cb_done = 0; - xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_lib_ctx->xran_init_cfg.io_cfg.pkt_proc_core); - MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick()); + struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx(); + uint32_t tim_lcore = eth_ctx->io_cfg.timing_core; /* default to timing core */ + + if(eth_ctx) { + if(eth_ctx->num_workers == 0) { /* no workers */ + tim_lcore = eth_ctx->io_cfg.timing_core; + } else if (eth_ctx->num_workers == 1) { /* one worker */ + switch (job_type_id) + { + case XRAN_JOB_TYPE_OTA_CB: + tim_lcore = eth_ctx->io_cfg.timing_core; + break; + case XRAN_JOB_TYPE_CP_DL: + case XRAN_JOB_TYPE_CP_UL: + case XRAN_JOB_TYPE_DEADLINE: + case XRAN_JOB_TYPE_SYM_CB: + tim_lcore = eth_ctx->worker_core[0]; + break; + default: + print_err("incorrect job type id %d\n", job_type_id); + tim_lcore = eth_ctx->io_cfg.timing_core; + break; + } + } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) { + switch (job_type_id) + { + case XRAN_JOB_TYPE_OTA_CB: + tim_lcore = eth_ctx->worker_core[0]; + break; + case XRAN_JOB_TYPE_CP_DL: + tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]]; + break; + case XRAN_JOB_TYPE_CP_UL: + tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]]; + break; + case XRAN_JOB_TYPE_DEADLINE: + case XRAN_JOB_TYPE_SYM_CB: + tim_lcore = eth_ctx->worker_core[0]; + break; + default: + print_err("incorrect job type id %d\n", job_type_id); + tim_lcore = eth_ctx->io_cfg.timing_core; + break; + } + } else { + print_err("incorrect eth_ctx->num_workers id %d\n", eth_ctx->num_workers); + tim_lcore = eth_ctx->io_cfg.timing_core; } } - xran_process_tx_sym(timer_ctx); - /* check if there is call back to do something else on this symbol */ - if(p_xran_lib_ctx->pSymCallback[0][xran_lib_ota_sym]) - p_xran_lib_ctx->pSymCallback[0][xran_lib_ota_sym](&tx_cp_dl_timer, p_xran_lib_ctx->pSymCallbackTag[0][xran_lib_ota_sym]); - - xran_lib_ota_sym++; - if(xran_lib_ota_sym >= N_SYM_PER_SLOT){ - xran_lib_ota_sym=0; - } - MLogTask(PID_SYM_OTA_CB, t1, MLogTick()); + return tim_lcore; } -void tti_ota_cb(struct rte_timer *tim, void *arg) +void +tti_ota_cb(struct rte_timer *tim, void *arg) { uint32_t frame_id = 0; uint32_t subframe_id = 0; @@ -266,650 +423,734 @@ void tti_ota_cb(struct rte_timer *tim, void *arg) uint64_t t1 = MLogTick(); uint64_t t3 = 0; uint32_t reg_tti = 0; - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + uint32_t reg_sfn = 0; + uint32_t i; + + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx; + uint8_t PortId = p_xran_dev_ctx->xran_port_id; + uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local; + + unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx); MLogTask(PID_TTI_TIMER, t1, MLogTick()); + if(p_xran_dev_ctx->xran_port_id == 0){ /* To match TTbox */ - if(xran_lib_ota_tti == 0) - reg_tti = 8000-1; + if(xran_lib_ota_tti[0] == 0) + reg_tti = xran_fs_get_max_slot(PortId) - 1; else - reg_tti = xran_lib_ota_tti -1; + reg_tti = xran_lib_ota_tti[0] -1; + MLogIncrementCounter(); + reg_sfn = XranGetFrameNum(reg_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us))*10 + 
XranGetSubFrameNum(reg_tti,SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);; /* subframe and slot */ - MLogRegisterFrameSubframe(((reg_tti/SLOTNUM_PER_SUBFRAME) % SUBFRAMES_PER_SYSTEMFRAME), - reg_tti % (SLOTNUM_PER_SUBFRAME)); + MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us))); MLogMark(1, t1); + } - slot_id = XranGetSlotNum(xran_lib_ota_tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(xran_lib_ota_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = XranGetFrameNum(xran_lib_ota_tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); + slot_id = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(xran_lib_ota_tti[PortId],xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); - pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process = xran_lib_ota_tti; + pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId]; mlogVar[mlogVarCnt++] = 0x11111111; - mlogVar[mlogVarCnt++] = xran_lib_ota_tti; - mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx; - mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx / 14; + mlogVar[mlogVarCnt++] = xran_lib_ota_tti[PortId]; + mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId]; + mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId] / 14; mlogVar[mlogVarCnt++] = frame_id; mlogVar[mlogVarCnt++] = subframe_id; mlogVar[mlogVarCnt++] = slot_id; mlogVar[mlogVarCnt++] = 0; MLogAddVariables(mlogVarCnt, mlogVar, MLogTick()); - if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == ID_LLS_CU) - next_tti = xran_lib_ota_tti + 1; - else - next_tti = xran_lib_ota_tti; - if(next_tti>= SLOTNUM_PER_SUBFRAME*1000){ + if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU) + next_tti = xran_lib_ota_tti[PortId] + 1; + else{ + next_tti = xran_lib_ota_tti[PortId]; + } + + if(next_tti>= xran_fs_get_max_slot(PortId)){ print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id); next_tti=0; } - /* [0 - 7] */ - slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME); - /* sf [0 - 9] */ - subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - /* frame [0 - 99] for now */ - frame_id = XranGetFrameNum(next_tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); + + slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id); - if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == ID_LLS_CU){ - pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = next_tti; + if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){ + pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti; } else { - pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti & 1)^1].tti_to_process; + pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1)^1].tti_to_process; } - t3 = MLogTick(); - p_xran_lib_ctx->phy_tti_cb_done = 0; - xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_lib_ctx->xran_init_cfg.io_cfg.pkt_proc_core); - MLogTask(PID_TIME_ARM_TIMER, t3, 
MLogTick()); - - xran_lib_ota_tti++; - /* within 1 sec we have 8000 TTIs as 1000ms/0.125ms where TTI is 125us*/ - if(xran_lib_ota_tti >= SLOTNUM_PER_SUBFRAME*1000){ - print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti, frame_id, subframe_id, slot_id); - xran_lib_ota_tti=0; + if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) { + p_xran_dev_ctx->phy_tti_cb_done = 0; + xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore); } - MLogTask(PID_TTI_CB, t1, MLogTick()); -} - -void xran_timer_arm(struct rte_timer *tim, void* arg) -{ - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - if (xran_if_current_state == XRAN_RUNNING){ - rte_timer_cb_t fct = (rte_timer_cb_t)arg; - rte_timer_reset_sync(tim, 0, SINGLE, p_xran_lib_ctx->xran_init_cfg.io_cfg.pkt_proc_core, fct, timer_ctx); + //slot index is increased to next slot at the beginning of current OTA slot + xran_lib_ota_tti[PortId]++; + if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) { + print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id); + xran_lib_ota_tti[PortId] = 0; } + MLogTask(PID_TTI_CB, t1, MLogTick()); } -void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore) +void +tx_cp_dl_cb(struct rte_timer *tim, void *arg) { - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - if (xran_if_current_state == XRAN_RUNNING){ - rte_timer_cb_t fct = (rte_timer_cb_t)CbFct; - rte_timer_init(tim); - rte_timer_reset_sync(tim, 0, SINGLE, tim_lcore, fct, CbArg); + long t1 = MLogTick(); + int tti, buf_id; + uint32_t slot_id, subframe_id, frame_id; + int cc_id; + uint8_t ctx_id; + uint8_t ant_id, num_eAxc, num_CCPorts; + void *pHandle; + int num_list; + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + if(!p_xran_dev_ctx) + { + print_err("Null xRAN context!!\n"); + return; } -} - -void tx_cp_dl_cb(struct rte_timer *tim, void *arg) -{ - long t1 = MLogTick(); - int tti, sym; - uint32_t slot_id, subframe_id, frame_id; - int ant_id; - int32_t cc_id = 0; - uint16_t beam_id; - uint8_t num_eAxc, num_CCPorts; - void *pHandle; - - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; + struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0]; + uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local; + uint8_t PortId = p_xran_dev_ctx->xran_port_id; + pHandle = p_xran_dev_ctx; - - pHandle = NULL; // TODO: temp implemantation num_eAxc = xran_get_num_eAxc(pHandle); num_CCPorts = xran_get_num_cc(pHandle); - if(p_xran_lib_ctx->enableCP) { + if(first_call && p_xran_dev_ctx->enableCP) { + + tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process; + buf_id = tti % XRAN_N_FE_BUF_LEN; - tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process; + slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); + if (tti == 0){ + /* Wrap around to next second */ + frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff; + } - slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = 
XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); + ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval_us_local)) % XRAN_MAX_SECTIONDB_CTX; print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); - for(ant_id = 0; ant_id < num_eAxc; ++ant_id) { for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) { - // start new section information list - xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id); - - beam_id = xran_get_beamid(pHandle, XRAN_DIR_DL, cc_id, ant_id, slot_id); - - send_cpmsg_dlul(pHandle, XRAN_DIR_DL, - frame_id, subframe_id, slot_id, - 0, N_SYM_PER_SLOT, NUM_OF_PRB_IN_FULL_BAND, - beam_id, cc_id, ant_id, - xran_get_seqid(pHandle, XRAN_DIR_DL, cc_id, ant_id, slot_id)); - } - } - } - MLogTask(PID_CP_DL_CB, t1, MLogTick()); + /* start new section information list */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id); + if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) { + if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) { + if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){ + num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id, + (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData, + p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id); + } else { + print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id); + } + } else { + print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers] \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id); + } + } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */ + } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */ + } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */ + MLogTask(PID_CP_DL_CB, t1, MLogTick()); + } } -void rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg) +void +rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg) { long t1 = MLogTick(); - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - XranStatusInt32 status; + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status; /* half of RX for current TTI as measured against current OTA time */ - int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); - - if(p_xran_lib_ctx->xran2phy_mem_ready == 0) + int32_t rx_tti; + int32_t cc_id; + uint32_t nFrameIdx; + uint32_t nSubframeIdx; + uint32_t nSlotIdx; + uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond); + rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME + + nSubframeIdx*SLOTNUM_PER_SUBFRAME + + nSlotIdx;*/ + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) return; - if(p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] == 0){ - status = (rx_tti << 16) | 0; /* base on PHY side implementation first 7 sym of slot */ - if(p_xran_lib_ctx->pCallback[0]) - p_xran_lib_ctx->pCallback[0](p_xran_lib_ctx->pCallbackTag[0], status); - } else { - p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] = 0; + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; + + rx_tti = p_timer_ctx->tti_to_process; + + for(cc_id = 0; cc_id < 
xran_get_num_cc(p_xran_dev_ctx); cc_id++) { + if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){ + if(p_xran_dev_ctx->pCallback[cc_id]) { + struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = 0; /* last 7 sym means full slot of Symb */ + status = XRAN_STATUS_SUCCESS; + + p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status); + } + } + } else { + p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0; + } + } + + if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]); + }else{ + p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--; + } } + MLogTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogTick()); } -void rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg) +void +rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg) { long t1 = MLogTick(); - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - XranStatusInt32 status; - int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status = 0; + int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + int32_t cc_id = 0; + uint32_t nFrameIdx; + uint32_t nSubframeIdx; + uint32_t nSlotIdx; + uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) + return; + + /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond); + rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME + + nSubframeIdx*SLOTNUM_PER_SUBFRAME + + nSlotIdx;*/ + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; - if(rx_tti >= 8000-1) - rx_tti = 0; + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; + + rx_tti = p_timer_ctx->tti_to_process; +#if 1 + if(rx_tti == 0) + rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1); else rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */ +#endif + /* U-Plane */ + for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) { + if(p_xran_dev_ctx->pCallback[cc_id]){ + struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = 7; /* last 7 sym means full slot of Symb */ + status = XRAN_STATUS_SUCCESS; + p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status); + } + } - if(p_xran_lib_ctx->xran2phy_mem_ready == 0) - return; + if(p_xran_dev_ctx->pPrachCallback[cc_id]){ + struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = 7; /* last 7 sym means full slot of Symb */ + p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status); + } + } - if(p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] == 0){ - status = (rx_tti << 16) | 7; /* last 7 sym means full slot of Symb */ - if(p_xran_lib_ctx->pCallback[0]) - p_xran_lib_ctx->pCallback[0](p_xran_lib_ctx->pCallbackTag[0], status); - } else { - p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] = 0; + if(p_xran_dev_ctx->pSrsCallback[cc_id]){ 
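+            /* SRS indication: reuse the full-slot tag (slotiId = rx_tti,
+             * symbol = 7) for the SRS callback, mirroring the U-plane and
+             * PRACH deliveries above. */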
+            struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
+            if(pTag) {
+                //pTag->cellId = cc_id;
+                pTag->slotiId = rx_tti;
+                pTag->symbol = 7; /* last 7 sym means full slot of Symb */
+                p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
+            }
+        }
+    }
+
+    /* user callbacks if any */
+    if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){
+        if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){
+            p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]);
+        }else{
+            p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--;
+        }
     }

     MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick());
 }
-
-void tx_cp_ul_cb(struct rte_timer *tim, void *arg)
+void
+rx_ul_user_sym_cb(struct rte_timer *tim, void *arg)
 {
     long t1 = MLogTick();
+    struct xran_device_ctx * p_dev_ctx = NULL;
+    struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg;
+    xran_status_t status = 0;
+    int32_t rx_tti = 0; //(int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+    int32_t cc_id = 0;
+    uint32_t nFrameIdx;
+    uint32_t nSubframeIdx;
+    uint32_t nSlotIdx;
+    uint64_t nSecond;
+    uint32_t interval, ota_sym_idx = 0;
+    uint8_t nNumerology = 0;
+    struct xran_timer_ctx* p_timer_ctx = NULL;
+
+    if(p_sym_cb_ctx->p_dev)
+        p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev;
+    else
+        rte_panic("p_sym_cb_ctx->p_dev == NULL");
+
+    if(p_dev_ctx->xran2phy_mem_ready == 0)
+        return;
+    nNumerology = xran_get_conf_numerology(p_dev_ctx);
+    interval = p_dev_ctx->interval_us_local;
+
+    p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX];
+    if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX)
+        p_sym_cb_ctx->user_timer_get = 0;
+
+    rx_tti = p_timer_ctx->tti_to_process;
+
+    if( p_sym_cb_ctx->sym_diff > 0)
+        /* + advance TX win: at OTA time we indicate an event in the future */
+        ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology));
+    else if (p_sym_cb_ctx->sym_diff < 0) {
+        /* - delay RX win: at OTA time we indicate an event in the past */
+        if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) {
+            ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff;
+        } else {
+            ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology);
+        }
+    } else /* 0 - OTA exact time */
+        ota_sym_idx = p_timer_ctx->ota_sym_idx;
+
+    rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+
+    if(p_sym_cb_ctx->symCbTimeInfo) {
+        struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo;
+        p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id;
+        p_sense_time->nSymIdx = p_sym_cb_ctx->symb_num_req;
+        p_sense_time->tti_counter = rx_tti;
+        p_sense_time->nSlotIdx = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval));
+        p_sense_time->nSubframeIdx = (uint32_t)XranGetSubFrameNum(rx_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
+        p_sense_time->nFrameIdx = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+        p_sense_time->nSecond = p_timer_ctx->current_second;
+    }
+
+    /* user callbacks if any */
+    if(p_sym_cb_ctx->symCb){
+        p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo);
+    }
+
+    MLogTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogTick());
+}
+void
+tx_cp_ul_cb(struct rte_timer *tim, void *arg) +{ + long t1 = MLogTick(); + int tti, buf_id; + int ret; + uint32_t slot_id, subframe_id, frame_id; int32_t cc_id; int ant_id, prach_port_id; + uint16_t occasionid; uint16_t beam_id; uint8_t num_eAxc, num_CCPorts; + uint8_t ctx_id; void *pHandle; + int num_list; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - xRANPrachCPConfigStruct *pPrachCPConfig = &(p_xran_lib_ctx->PrachCPConfig); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + if(!p_xran_dev_ctx) + { + print_err("Null xRAN context!!\n"); + return; + } + struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig); + struct xran_timer_ctx *pTCtx = &p_xran_dev_ctx->timer_ctx[0]; + uint32_t interval = p_xran_dev_ctx->interval_us_local; + uint8_t PortId = p_xran_dev_ctx->xran_port_id; + + tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process; + buf_id = tti % XRAN_N_FE_BUF_LEN; + slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval)); + subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval)); + if (tti == 0) { + //Wrap around to next second + frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff; + } + ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval)) % XRAN_MAX_SECTIONDB_CTX; - pHandle = NULL; // TODO: temp implemantation - num_eAxc = xran_get_num_eAxc(pHandle); + pHandle = p_xran_dev_ctx; + if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A) + num_eAxc = xran_get_num_eAxc(pHandle); + else + num_eAxc = xran_get_num_eAxcUl(pHandle); num_CCPorts = xran_get_num_cc(pHandle); - if (p_xran_lib_ctx->enableCP){ - tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process; - slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); - print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); + if(first_call && p_xran_dev_ctx->enableCP) { + print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); for(ant_id = 0; ant_id < num_eAxc; ++ant_id) { - for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) { - // start new section information list - xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id); - - beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, ant_id, slot_id); - send_cpmsg_dlul(pHandle, XRAN_DIR_UL, - frame_id, subframe_id, slot_id, - 0, N_SYM_PER_SLOT, NUM_OF_PRB_IN_FULL_BAND, - beam_id, cc_id, ant_id, - xran_get_seqid(pHandle, XRAN_DIR_UL, cc_id, ant_id, slot_id)); + for(cc_id = 0; cc_id < num_CCPorts; cc_id++) { + if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1 + /* || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) == 1*/ ) { + /* start new section information list */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id); + if(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers){ + if(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){ + num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id, + (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData, + 
p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id); + } + } + } } } - if ((frame_id % pPrachCPConfig->x == pPrachCPConfig->y[0]) && (pPrachCPConfig->isPRACHslot[slot_id]==1)) //is prach slot - { - for(ant_id = 0; ant_id < num_eAxc; ant_id++) { - for(cc_id = 0; cc_id < num_CCPorts; cc_id++) { -#if !defined(PRACH_USES_SHARED_PORT) - prach_port_id = ant_id + num_eAxc; - // start new section information list - xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id); -#else - prach_port_id = ant_id; -#endif - beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id); - send_cpmsg_prach(pHandle, + if(p_xran_dev_ctx->enablePrach) { + uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id); + if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0]) && (is_prach_slot==1)) { //is prach slot + for(ant_id = 0; ant_id < num_eAxc; ++ant_id) { + for(cc_id = 0; cc_id < num_CCPorts; cc_id++) { + for (occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++) { + struct xran_cp_gen_params params; + struct xran_section_gen_info sect_geninfo[8]; + struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc(); + prach_port_id = ant_id + num_eAxc; + /* start new section information list */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, ctx_id); + + beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id); + ret = generate_cpmsg_prach(pHandle, ¶ms, sect_geninfo, mbuf, p_xran_dev_ctx, frame_id, subframe_id, slot_id, - beam_id, cc_id, prach_port_id, - xran_get_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id)); + beam_id, cc_id, prach_port_id, occasionid, + xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id)); + if (ret == XRAN_STATUS_SUCCESS) + send_cpmsg(pHandle, mbuf, ¶ms, sect_geninfo, + cc_id, prach_port_id, xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id)); + } } } } + } + } /* if(p_xran_dev_ctx->enableCP) */ - } MLogTask(PID_CP_UL_CB, t1, MLogTick()); } -void ul_up_full_slot_cb(struct rte_timer *tim, void *arg) -{ - long t1 = MLogTick(); - rte_pause(); - MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick()); -} - -void tti_to_phy_cb(struct rte_timer *tim, void *arg) +void +tti_to_phy_cb(struct rte_timer *tim, void *arg) { long t1 = MLogTick(); - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + uint32_t interval = p_xran_dev_ctx->interval_us_local; - static int first_call = 0; - p_xran_lib_ctx->phy_tti_cb_done = 1; /* DPDK called CB */ + p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */ if (first_call){ - if(p_xran_lib_ctx->ttiCb[XRAN_CB_TTI]){ - if(p_xran_lib_ctx->SkipTti[XRAN_CB_TTI] <= 0){ - p_xran_lib_ctx->ttiCb[XRAN_CB_TTI](p_xran_lib_ctx->TtiCbParam[XRAN_CB_TTI]); + if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]); }else{ - p_xran_lib_ctx->SkipTti[XRAN_CB_TTI]--; + p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--; } } } else { - if(p_xran_lib_ctx->ttiCb[XRAN_CB_TTI]){ - int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); - if(tti == 8000-1) + if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){ + int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT); + uint32_t slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval)); + uint32_t subframe_id = 
XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME); + uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval)); + if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME(interval)-1)) { //(tti == xran_fs_get_max_slot()-1) first_call = 1; + } } } - MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick()); } -int xran_timing_source_thread(void *args) +int32_t +xran_timing_source_thread(void *args) { + int res = 0; cpu_set_t cpuset; int32_t do_reset = 0; uint64_t t1 = 0; uint64_t delta; - int32_t result1; - uint32_t delay_cp_dl; - uint32_t delay_cp_ul; - uint32_t delay_up; - uint32_t delay_up_ul; - uint32_t delay_cp2up; - uint32_t sym_cp_dl; - uint32_t sym_cp_ul; - uint32_t sym_up_ul; - int32_t sym_up; + int32_t result1,i,j; + + uint32_t xran_port_id = 0; + static int owdm_init_done = 0; + struct sched_param sched_param; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args ; + uint64_t tWake = 0, tWakePrev = 0, tUsed = 0; + struct cb_elem_entry * cb_elm = NULL; + struct xran_device_ctx * p_dev_ctx_run = NULL; /* ToS = Top of Second start +- 1.5us */ struct timespec ts; - + char thread_name[32]; char buff[100]; printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid()); - + memset(&sched_param, 0, sizeof(struct sched_param)); /* set main thread affinity mask to CPU2 */ - sched_param.sched_priority = 98; - + sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO; CPU_ZERO(&cpuset); - CPU_SET(p_xran_lib_ctx->xran_init_cfg.io_cfg.timing_core, &cpuset); + CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset); + if (result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)) { printf("pthread_setaffinity_np failed: coreId = 2, result1 = %d\n",result1); } - if ((result1 = pthread_setschedparam(pthread_self(), 1, &sched_param))) + if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))) { printf("priority is not changed: coreId = 2, result1 = %d\n",result1); } - if (p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU) { - do { - timespec_get(&ts, TIME_UTC); - }while (ts.tv_nsec >1500); - struct tm * ptm = gmtime(&ts.tv_sec); - if(ptm){ - strftime(buff, sizeof buff, "%D %T", ptm); - printf("lls-CU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us); + snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id()); + if ((res = pthread_setname_np(pthread_self(), thread_name))) { + printf("[core %d] pthread_setname_np = %d\n",rte_lcore_id(), res); } - delay_cp_dl = p_xran_lib_ctx->xran_init_cfg.ttiPeriod - p_xran_lib_ctx->xran_init_cfg.T1a_max_cp_dl; - delay_cp_ul = p_xran_lib_ctx->xran_init_cfg.ttiPeriod - p_xran_lib_ctx->xran_init_cfg.T1a_max_cp_ul; - delay_up = p_xran_lib_ctx->xran_init_cfg.T1a_max_up; - delay_up_ul = p_xran_lib_ctx->xran_init_cfg.Ta4_max; + printf("TTI interval %ld [us]\n", interval_us); - delay_cp2up = delay_up-delay_cp_dl; - - sym_cp_dl = delay_cp_dl*1000/(interval_us*1000/N_SYM_PER_SLOT)+1; - sym_cp_ul = delay_cp_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1; - sym_up_ul = delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT); - p_xran_lib_ctx->sym_up = sym_up = -(delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT)+1); - p_xran_lib_ctx->sym_up_ul = sym_up_ul = (delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1); - - printf("Start C-plane DL %d us after TTI [trigger on sym %d]\n", delay_cp_dl, 
sym_cp_dl); - printf("Start C-plane UL %d us after TTI [trigger on sym %d]\n", delay_cp_ul, sym_cp_ul); - printf("Start U-plane DL %d us before OTA [offset in sym %d]\n", delay_up, sym_up); - printf("Start U-plane UL %d us OTA [offset in sym %d]\n", delay_up_ul, sym_up_ul); - - printf("C-plane to U-plane delay %d us after TTI\n", delay_cp2up); - printf("Start Sym timer %ld ns\n", TX_TIMER_INTERVAL/N_SYM_PER_SLOT); - - p_xran_lib_ctx->pSymCallback[0][sym_cp_dl] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_cp_dl] = tx_cp_dl_cb; - - p_xran_lib_ctx->pSymCallback[0][sym_cp_ul] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_cp_ul] = tx_cp_ul_cb; - - /* Full slot UL OTA + delay_up_ul */ - p_xran_lib_ctx->pSymCallback[0][sym_up_ul] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_up_ul] = rx_ul_deadline_full_cb; - - /* Half slot UL OTA + delay_up_ul*/ - p_xran_lib_ctx->pSymCallback[0][sym_up_ul + N_SYM_PER_SLOT/2] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_up_ul + N_SYM_PER_SLOT/2] = rx_ul_deadline_half_cb; + if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) { + if ((res = xran_timing_create_cbs(args)) < 0){ + return res; + } + } - } else { // APP_RU - /* calcualte when to send UL U-plane */ - delay_up = p_xran_lib_ctx->xran_init_cfg.Ta3_min; - p_xran_lib_ctx->sym_up = sym_up = delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT)+1; - printf("Start UL U-plane %d us after OTA [offset in sym %d]\n", delay_up, sym_up); do { timespec_get(&ts, TIME_UTC); }while (ts.tv_nsec >1500); + struct tm * ptm = gmtime(&ts.tv_sec); if(ptm){ strftime(buff, sizeof buff, "%D %T", ptm); - printf("RU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us); - } + printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n", + (p_dev_ctx->fh_init.io_cfg.id == O_DU ? 
"O-DU": "O-RU"), buff, ts.tv_nsec, interval_us); } do { timespec_get(&ts, TIME_UTC); }while (ts.tv_nsec == 0); + p_dev_ctx->timing_source_thread_running = 1; while(1) { - delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT); + + /* Check if owdm finished to create the timing cbs based on measurement results */ + if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable)&&(!owdm_init_done)&&unlikely(XRAN_RUNNING == xran_if_current_state)) { + // Adjust Windows based on Delay Measurement results + xran_adjust_timing_parameters(p_dev_ctx); + if ((res = xran_timing_create_cbs(args)) < 0){ + return res; + } + printf("TTI interval %ld [us]\n", interval_us); + owdm_init_done = 1; + + } + + + + /* Update Usage Stats */ + tWake = xran_tick(); + xran_used_tick += tUsed; + if (tWakePrev) + { + xran_total_tick += get_ticks_diff(tWake, tWakePrev); + } + tWakePrev = tWake; + tUsed = 0; + + delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed); if (XRAN_STOPPED == xran_if_current_state) break; - sym_ota_cb(&sym_timer, timer_ctx); + + if (likely(XRAN_RUNNING == xran_if_current_state)) { + for(xran_port_id = 0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) { + p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id); + if(p_dev_ctx_run) { + if(p_dev_ctx_run->xran_port_id == xran_port_id) { + if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id]) + { + sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed); + xran_lib_ota_sym[xran_port_id]++; + if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT) + xran_lib_ota_sym[xran_port_id]=0; + } + } + else { + rte_panic("p_dev_ctx_run == xran_port_id"); + } + } + } + } } - printf("Closing timing source thread...\n"); - return 0; + xran_timing_destroy_cbs(args); + printf("Closing timing source thread...\n"); + return res; } /* Handle ecpri format. */ -int handle_ecpri_ethertype(struct rte_mbuf *pkt, uint64_t rx_time) +#define MBUFS_CNT 16 + +int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num) { - const struct xran_ecpri_hdr *ecpri_hdr; + struct rte_mbuf* pkt, * pkt0; + uint16_t i; + struct rte_ether_hdr* eth_hdr; + struct xran_ecpri_hdr* ecpri_hdr; + union xran_ecpri_cmn_hdr* ecpri_cmn; unsigned long t1; + int32_t ret = MBUF_FREE; + uint32_t ret_data[MBUFS_CNT] = { MBUFS_CNT * MBUF_FREE }; + struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id); + uint16_t num_data = 0, num_control = 0, num_meas = 0; + struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT]; + static uint32_t owdm_rx_first_pass = 1; - if (rte_pktmbuf_data_len(pkt) < sizeof(struct xran_ecpri_hdr)) { - wlog("Packet too short - %d bytes", rte_pktmbuf_data_len(pkt)); - return 0; - } + if (p_dev_ctx == NULL) + return ret; + + for (i = 0; i < num; i++) + { + pkt = pkt_q[i]; - /* check eCPRI header. 
*/
+// rte_prefetch0(rte_pktmbuf_mtod(pkt, void*));
+
+        rte_pktmbuf_adj(pkt, sizeof(*eth_hdr));
         ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);
-    if(ecpri_hdr == NULL)
-        return MBUF_FREE;

-    switch(ecpri_hdr->ecpri_mesg_type) {
+        p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
+
+        pkt_adj[i] = pkt;
+        switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type)
+        {
     case ECPRI_IQ_DATA:
-        t1 = MLogTick();
-        process_mbuf(pkt);
-        MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());
+            pkt_data[num_data++] = pkt;
         break;
     // For RU emulation
     case ECPRI_RT_CONTROL_DATA:
-        t1 = MLogTick();
-        if(xran_lib_get_ctx()->xran_init_cfg.io_cfg.id == APP_RU) {
-            process_cplane(pkt);
-        } else {
-            print_err("LLS-CU recevied CP message!");
-        }
-        MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick());
+            pkt_control[num_control++] = pkt;
         break;
-    default:
-        wlog("Invalid eCPRI message type - %d", ecpri_hdr->ecpri_mesg_type);
+        case ECPRI_DELAY_MEASUREMENT:
+            if (owdm_rx_first_pass != 0)
+            {
+                /* Initialize and verify that Payload Length is in range */
+                xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx);
+                owdm_rx_first_pass = 0;
+
+            }
+            pkt_meas[num_meas++] = pkt;
+            break;
+        default:
+            if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
+                print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
     }
-#if 0
-//def DEBUG
-    return MBUF_KEEP;
-#else
-    return MBUF_FREE;
-#endif
+            break;
+    }
 }

-int xran_process_rx_sym(void *arg,
-                void *iq_data_start,
-                uint16_t size,
-                uint8_t CC_ID,
-                uint8_t Ant_ID,
-                uint8_t frame_id,
-                uint8_t subframe_id,
-                uint8_t slot_id,
-                uint8_t symb_id)
+    if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* w/a for Cat A issue */
 {
-    char *pos = NULL;
-    struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
-    uint32_t tti=0;
-    XranStatusInt32 status;
-    void *pHandle = NULL;
-
-    if(p_xran_lib_ctx->xran2phy_mem_ready == 0)
-        return 0;
-
-    tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;
-
-    status = tti << 16 | symb_id;
+        for (i = 0; i < MBUFS_CNT; i++)
+        {
+            ret_data[i] = MBUF_FREE;
+        }

-    if(tti < 8000 && CC_ID < XRAN_MAX_SECTOR_NR && CC_ID == 0 && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
-        pos = (char*) p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
-        if(pos && iq_data_start && size){
-#ifdef XRAN_BYTE_ORDER_SWAP
-            int idx = 0;
-            uint16_t *restrict psrc = (uint16_t *)iq_data_start;
-            uint16_t *restrict pdst = (uint16_t *)pos;
-            /* network byte (be) order of IQ to CPU byte order (le) */
-            for (idx = 0; idx < size/sizeof(int16_t); idx++){
-                pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
+        if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
+        {
+            if (p_dev_ctx->xran2phy_mem_ready != 0)
+                ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data );
+            for (i = 0; i < MBUFS_CNT; i++)
+            {
+                if (ret_data[i] == MBUF_FREE)
+                    rte_pktmbuf_free(pkt_data[i]);
+            }
             }
-#else
-#error xran spec is network byte order
-            /* for debug */
-            rte_memcpy(pdst, psrc, size);
-#endif
-#ifdef DEBUG_XRAN_BUFFERS
-            if (pos[0] != tti % XRAN_N_FE_BUF_LEN ||
-                pos[1] != CC_ID ||
-                pos[2] != Ant_ID ||
-                pos[3] != symb_id){
-                printf("%d %d %d %d\n", pos[0], pos[1], pos[2], pos[3]);
+        else
+        {
+            for (i = 0; i < MBUFS_CNT; i++)
+            {
+                if (ret_data[i] == MBUF_FREE)
+                    rte_pktmbuf_free(pkt_data[i]);
             }
-#endif
-        } else {
-            print_err("pos %p iq_data_start %p 
size %d\n",pos, iq_data_start, size); + print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id); } - } else { - print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id); - } - if (symb_id == 7 || symb_id == 13){ - p_xran_lib_ctx->rx_packet_symb_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id]++; - - if(p_xran_lib_ctx->rx_packet_symb_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] >= xran_get_num_eAxc(pHandle)){ - if(p_xran_lib_ctx->pCallback[0]) - p_xran_lib_ctx->pCallback[0](p_xran_lib_ctx->pCallbackTag[0], status); - p_xran_lib_ctx->rx_packet_callback_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID] = 1; - p_xran_lib_ctx->rx_packet_symb_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] = 0; } - } - return size; -} - - -int xran_process_tx_sym(void *arg) + else { - uint32_t tti=0; - uint32_t mlogVar[10]; - uint32_t mlogVarCnt = 0; - unsigned long t1 = MLogTick(); - - void *pHandle = NULL; - int32_t ant_id; - int32_t cc_id = 0; - uint8_t num_eAxc = 0; - uint8_t num_CCPorts = 0; - - uint32_t frame_id = 0; - uint32_t subframe_id = 0; - uint32_t slot_id = 0; - uint32_t sym_id = 0; - uint32_t sym_idx = 0; - - char *pos = NULL; - int prb_num = 0; - - struct xran_section_info *sectinfo; - uint32_t next; - - enum xran_pkt_dir direction; - - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; - - - if(p_xran_lib_ctx->xran2phy_mem_ready == 0) - return 0; + for (i = 0; i < num_data; i++) + { + ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid); + if (ret == MBUF_FREE) + rte_pktmbuf_free(pkt_data[i]); + } - if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU) { - direction = XRAN_DIR_DL; /* lls-CU */ - prb_num = NUM_OF_PRB_IN_FULL_BAND; - } - else { - direction = XRAN_DIR_UL; /* RU */ - prb_num = NUM_OF_PRB_IN_FULL_BAND; /*TODO: simulation on D-1541 @ 2.10GHz has issue with performace. 
reduce copy size */
+    for (i = 0; i < num_control; i++)
+    {
+        t1 = MLogTick();
+        if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
+        {
+            ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
+            p_dev_ctx->fh_counters.rx_counter++;
+            if (ret == MBUF_FREE)
+                rte_pktmbuf_free(pkt_control[i]);
     }
-
-    /* RU: send symb after OTA time with delay (UL) */
-    /* lls-CU:send symb in advance of OTA time (DL) */
-    sym_idx     = XranOffsetSym(p_xran_lib_ctx->sym_up, xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME*1000);
-
-    tti         = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
-    slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
-    subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);
-    frame_id    = XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
-    sym_id      = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
-
-    print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
-
-    mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
-    mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx;
-    mlogVar[mlogVarCnt++] = sym_idx;
-    mlogVar[mlogVarCnt++] = abs(p_xran_lib_ctx->sym_up);
-    mlogVar[mlogVarCnt++] = tti;
-    mlogVar[mlogVarCnt++] = frame_id;
-    mlogVar[mlogVarCnt++] = subframe_id;
-    mlogVar[mlogVarCnt++] = slot_id;
-    mlogVar[mlogVarCnt++] = sym_id;
-    MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
-
-    if(frame_id > 99) {
-        print_err("OTA %d: TX:[sym_idx %d: TTI %d] fr %d sf %d slot %d sym %d\n",xran_lib_ota_sym_idx, sym_idx, tti, frame_id, subframe_id, slot_id, sym_id);
-        xran_if_current_state =XRAN_STOPPED;
+        else
+        {
+            print_err("O-DU received C-Plane message!");
         }
+        MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick());
+    }

-    num_eAxc    = xran_get_num_eAxc(pHandle);
-    num_CCPorts = xran_get_num_cc(pHandle);
-
-    /* U-Plane */
-    for(ant_id = 0; ant_id < num_eAxc; ant_id++) {
-        for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
-            if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU && p_xran_lib_ctx->enableCP) {
-                next = 0;
-                while(next < xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id)) {
-                    sectinfo = xran_cp_iterate_section_info(pHandle, direction,
-                            cc_id, ant_id, subframe_id, slot_id, &next);
-                    if(sectinfo == NULL)
-                        break;
-
-                    /* pointer to IQs input */
-                    /* TODO: need to implement the case of partial RB assignment */
-                    pos = (char*) p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
-                    print_dbg(">>> [%d] type%d, id %d, startPrbc=%d, numPrbc=%d, numSymbol=%d\n", next,
-                            sectinfo->type, sectinfo->id, sectinfo->startPrbc,
-                            sectinfo->numPrbc, sectinfo->numSymbol);
-
-                    if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) {
-                        print_err("Invalid section type in section DB - %d", sectinfo->type);
-                        continue;
-                    }
-
-                    send_symbol_ex(direction, sectinfo->id,
-                            (struct rb_map *)pos,
-                            frame_id, subframe_id, slot_id, sym_id,
-                            sectinfo->startPrbc, sectinfo->numPrbc,
-                            cc_id, ant_id,
-                            xran_get_seqid(pHandle, direction, cc_id, ant_id, slot_id));
-                }
-            }
-
-            else { /* if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU && p_xran_lib_ctx->enableCP) */
-                /* pointer to IQs input */
-                pos = (char*) p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
-#ifdef DEBUG_XRAN_BUFFERS
-                if (pos[0] != tti % XRAN_N_FE_BUF_LEN ||
-                    pos[1] != cc_id ||
-                    pos[2] != ant_id ||
-                    pos[3] != sym_id)
-                        printf("%d %d %d %d\n", pos[0], pos[1], pos[2], pos[3]);
-#endif
-                send_symbol_ex(direction,
-                        xran_alloc_sectionid(pHandle, direction, cc_id, 
ant_id, slot_id), - (struct rb_map *)pos, - frame_id, subframe_id, slot_id, sym_id, - 0, prb_num, - cc_id, ant_id, - xran_get_seqid(pHandle, direction, cc_id, ant_id, slot_id)); - } + for (i = 0; i < num_meas; i++) + { + t1 = MLogTick(); + ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id); + // printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64"\n", xport_id,(int64_t*)p_dev_ctx) ; + if (ret == MBUF_FREE) + rte_pktmbuf_free(pkt_meas[i]); + MLogTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogTick()); + } } - } - MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick()); - return 0; + return MBUF_FREE; } -int xran_packet_and_dpdk_timer_thread(void *args) +int32_t +xran_packet_and_dpdk_timer_thread(void *args) { struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx(); @@ -921,9 +1162,10 @@ int xran_packet_and_dpdk_timer_thread(void *args) int res = 0; printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid()); + memset(&sched_param, 0, sizeof(struct sched_param)); sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO; - if ((res = pthread_setschedparam(pthread_self(), 1, &sched_param))) + if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))) { printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res); } @@ -945,501 +1187,1895 @@ int xran_packet_and_dpdk_timer_thread(void *args) return 0; } +void xran_initialize_ecpri_owd_meas_cmn( struct xran_io_cfg *ptr) +{ +// ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0 +// ptr->eowd_cmn.filterType = 0; // 0 Simple average based on number of measurements + // Set default values if the Timeout and numberOfSamples are not set + if ( ptr->eowd_cmn[ptr->id].responseTo == 0) + ptr->eowd_cmn[ptr->id].responseTo = 10E6; // 10 ms timeout expressed in ns + if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0) + ptr->eowd_cmn[ptr->id].numberOfSamples = 8; // Number of samples to be averaged +} +void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr ) +{ + /* This function initializes one_way delay measurements on a per port basis, + most variables default to zero */ + ptr->eowd_port[ptr->id][i].portid = (uint8_t)i; +} -int32_t xran_init(int argc, char *argv[], PXRANFHINIT p_xran_fh_init, char *appName, void ** pHandle) +int32_t +xran_init(int argc, char *argv[], + struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle) { - int i; - int j; + int32_t ret = XRAN_STATUS_SUCCESS; + int32_t i; + int32_t j; + int32_t o_xu_id = 0; - struct xran_io_loop_cfg *p_io_cfg = (struct xran_io_loop_cfg *)&p_xran_fh_init->io_cfg; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + struct xran_io_cfg *p_io_cfg = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; - int lcore_id = 0; + int32_t lcore_id = 0; char filename[64]; - memset(p_xran_lib_ctx, 0, sizeof(struct xran_lib_ctx)); + const char *version = rte_version(); + + if (version == NULL) + rte_panic("version == NULL"); + + printf("'%s'\n", version); + + if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) { + ret = XRAN_STATUS_INVALID_PARAM; + print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret); + return ret; + } + + p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg; + + if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) { + print_err("context allocation error [%d]\n", ret); + return ret; + } + + for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){ + p_xran_dev_ctx = 
xran_dev_get_ctx_by_id(o_xu_id);
+        memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
+        p_xran_dev_ctx->xran_port_id = o_xu_id;
+
     /* copy init */
-    p_xran_lib_ctx->xran_init_cfg = *p_xran_fh_init;
+        p_xran_dev_ctx->fh_init = *p_xran_fh_init;
+        printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
+
+        memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
+
+        /* To make sure to set default functions */
+        p_xran_dev_ctx->send_upmbuf2ring    = NULL;
+        p_xran_dev_ctx->send_cpmbuf2ring    = NULL;
+
+        // Ecpri initialization for One Way delay measurements common variables to default values
+        xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
+    }

-    xran_if_current_state = XRAN_RUNNING;
-
-    interval_us = p_xran_fh_init->ttiPeriod;
+    /* default values if not set */
+    if(p_io_cfg->nEthLinePerPort == 0)
+        p_io_cfg->nEthLinePerPort = 1;

-    p_xran_lib_ctx->llscu_id = p_xran_fh_init->llscuId;
-    memcpy(&(p_xran_lib_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(XRANEAXCIDCONFIG));
+    if(p_io_cfg->nEthLineSpeed == 0)
+        p_io_cfg->nEthLineSpeed = 25;

-    p_xran_lib_ctx->enableCP = p_xran_fh_init->enableCP;
+    /** at least 1 RX Q */
+    if(p_io_cfg->num_rxq == 0)
+        p_io_cfg->num_rxq = 1;

+    if (p_io_cfg->id == 1) {
+        /* 1 HW for O-RU */
+        p_io_cfg->num_rxq = 1;
+    }
+
+#if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
+    if (p_io_cfg->num_rxq > 1){
+        p_io_cfg->num_rxq = 1;
+        printf("%s does not support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
+    }
+#endif
+    printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
+    printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
+    printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
+
+    if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane) != p_io_cfg->num_vfs) {
+        print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. 
[provided %d]\n", + p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort, + p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs); + } + + xran_if_current_state = XRAN_INIT; xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype); if (p_io_cfg->id == 0) - xran_ethdi_init_dpdk_io(basename(appName), + xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix, p_io_cfg, &lcore_id, - (struct ether_addr *)p_xran_fh_init->p_lls_cu_addr, - (struct ether_addr *)p_xran_fh_init->p_ru_addr, - p_xran_fh_init->cp_vlan_tag, - p_xran_fh_init->up_vlan_tag); + (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr, + (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr, + p_xran_dev_ctx->fh_init.mtu); else - xran_ethdi_init_dpdk_io(basename(appName), + xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix, p_io_cfg, &lcore_id, - (struct ether_addr *)p_xran_fh_init->p_ru_addr, - (struct ether_addr *)p_xran_fh_init->p_lls_cu_addr, - p_xran_fh_init->cp_vlan_tag, - p_xran_fh_init->up_vlan_tag); - - for(i = 0; i < 10; i++ ) - rte_timer_init(&tti_to_phy_timer[i]); + (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr, + (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr, + p_xran_dev_ctx->fh_init.mtu); - rte_timer_init(&tti_timer); - rte_timer_init(&sym_timer); - rte_timer_init(&tx_cp_dl_timer); - rte_timer_init(&tx_cp_ul_timer); - rte_timer_init(&tx_up_timer); + for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){ + p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id); - for(i = 0; i < XRAN_MAX_SECTOR_NR; i++ ){ - unsigned n = snprintf(&p_xran_lib_ctx->ring_name[0][i][0], RTE_RING_NAMESIZE, "dl_sym_ring_%u", i); - p_xran_lib_ctx->dl_sym_idx_ring[i] = rte_ring_create(&p_xran_lib_ctx->ring_name[0][i][0], XRAN_RING_SIZE, - rte_lcore_to_socket_id(lcore_id), RING_F_SP_ENQ | RING_F_SC_DEQ); - } + for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ ) + rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]); + rte_timer_init(&p_xran_dev_ctx->sym_timer); + for (i = 0; i< MAX_NUM_OF_DPDK_TIMERS; i++) + rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]); - lcore_id = rte_get_next_lcore(lcore_id, 0, 0); - PANIC_ON(lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()"); + p_xran_dev_ctx->direct_pool = socket_direct_pool; + p_xran_dev_ctx->indirect_pool = socket_indirect_pool; - /* Start packet processing thread */ - if (rte_eal_remote_launch(ring_processing_thread, NULL, lcore_id)) - rte_panic("ring_processing_thread() failed to start\n"); - if(p_io_cfg->pkt_aux_core > 0){ - lcore_id = rte_get_next_lcore(lcore_id, 0, 0); - PANIC_ON(lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()"); - - /* Start packet processing thread */ - if (rte_eal_remote_launch(xran_packet_and_dpdk_timer_thread, NULL, lcore_id)) - rte_panic("ring_processing_thread() failed to start\n"); + for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){ + LIST_INIT (&p_xran_dev_ctx->sym_cb_list_head[j]); } - lcore_id = rte_get_next_lcore(lcore_id, 0, 0); - PANIC_ON(lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()"); + } - /* Start packet processing thread */ - if (rte_eal_remote_launch(xran_timing_source_thread, xran_lib_get_ctx(), lcore_id)) - rte_panic("thread_run() failed to start\n"); + for (i=0; idebugStop); - timing_set_debug_stop(p_xran_fh_init->debugStop); + *pXranLayerHandle = xran_dev_get_ctx(); - memset(&DevHandle, 0, sizeof(XranLibHandleInfoStruct)); - *pHandle = &DevHandle; + // The ecpri initialization loop needs to be done per pf and vf (Outer loop pf and inner loop vf) + for 
(i=0; i< p_io_cfg->num_vfs; i++) + { + /* Initialize ecpri one-way delay measurement info on a per vf port basis */ + xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg); + } - return 0; + return ret; } -int32_t xran_sector_get_instances (void * pHandle, uint16_t nNumInstances, - XranCcInstanceHandleVoidP * pSectorInstanceHandles) +int32_t +xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances, + xran_cc_handle_t * pSectorInstanceHandles) { - int i; + xran_status_t nStatus = XRAN_STATUS_FAIL; + struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle; + XranSectorHandleInfo *pCcHandle = NULL; + int32_t i = 0; - /* only one handle as only one CC is currently supported */ - for(i = 0; i < nNumInstances; i++ ) - pSectorInstanceHandles[i] = pHandle; + pDev += xran_port; - return 0; -} + /* Check for the Valid Parameters */ + CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM); -int32_t xran_mm_init (void * pHandle, uint64_t nMemorySize, - uint32_t nMemorySegmentSize) -{ - /* we use mbuf from dpdk memory */ - return 0; -} + if (!nNumInstances) { + print_dbg("Instance is not assigned for this function !!! \n"); + return XRAN_STATUS_INVALID_PARAM; + } -int32_t xran_bm_init (void * pHandle, uint32_t * pPoolIndex, uint32_t nNumberOfBuffers, uint32_t nBufferSize) -{ - XranLibHandleInfoStruct* pXran = (XranLibHandleInfoStruct*) pHandle; + for (i = 0; i < nNumInstances; i++) { - char pool_name[RTE_MEMPOOL_NAMESIZE]; + /* Allocate Memory for CC handles */ + pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64); - snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "bm_mempool_%ld", pPoolIndex); + if(pCcHandle == NULL) + return XRAN_STATUS_RESOURCE; - pXran->p_bufferPool[pXran->nBufferPoolIndex] = rte_pktmbuf_pool_create(pool_name, nNumberOfBuffers, - MBUF_CACHE, 0, XRAN_MAX_MBUF_LEN, rte_socket_id()); + memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo))); - pXran->bufferPoolElmSz[pXran->nBufferPoolIndex] = nBufferSize; - pXran->bufferPoolNumElm[pXran->nBufferPoolIndex] = nNumberOfBuffers; + pCcHandle->nIndex = i; + pCcHandle->nXranPort = pDev->xran_port_id; - print_dbg("[nPoolIndex %d] mb pool %p \n", pXran->nBufferPoolIndex, pXran->p_bufferPool[pXran->nBufferPoolIndex]); + printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle); + pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle; - *pPoolIndex = pXran->nBufferPoolIndex++; + printf("Handle: %p Instance: %p\n", + &pSectorInstanceHandles[i], pSectorInstanceHandles[i]); + } - return 0; + return XRAN_STATUS_SUCCESS; } -int32_t xran_bm_allocate_buffer(void * pHandle, uint32_t nPoolIndex, void **ppVirtAddr) -{ - XranLibHandleInfoStruct* pXran = (XranLibHandleInfoStruct*) pHandle; - *ppVirtAddr = NULL; - struct rte_mbuf * mb = rte_pktmbuf_alloc(pXran->p_bufferPool[nPoolIndex]); +int32_t +xran_5g_fronthault_config (void * pHandle, + struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, + void *pCallbackTag) +{ + int j, i = 0, z, k; + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; - if(mb){ - *ppVirtAddr = rte_pktmbuf_append(mb, 
pXran->bufferPoolElmSz[nPoolIndex]); + if(NULL == pHandle) { + printf("Handle is NULL!\n"); + return XRAN_STATUS_FAIL; + } - }else { - print_err("[nPoolIndex %d] mb alloc failed \n", nPoolIndex ); - return -1; + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf ("p_xran_dev_ctx is NULL\n"); + return XRAN_STATUS_FAIL; } - if (*ppVirtAddr == NULL){ - print_err("[nPoolIndex %d] rte_pktmbuf_append for %d failed \n", nPoolIndex, pXran->bufferPoolElmSz[nPoolIndex]); - return -1; + i = pXranCc->nIndex; + + for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) { + for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){ + /* U-plane TX */ + + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0]; + + if(pSrcBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j])); + + + /* C-plane TX */ + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0]; + + if(pSrcCpBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j])); + /* U-plane RX */ + + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0]; + + if(pDstBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j])); + + + /* C-plane RX */ + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = 
&p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0]; + + if(pDstCpBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j])); + + } } - return 0; -} + p_xran_dev_ctx->pCallback[i] = pCallback; + p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag; + print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__, + p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]); -int32_t xran_bm_free_buffer(void * pHandle, void *pVirtAddr) -{ - XranLibHandleInfoStruct* pXran = (XranLibHandleInfoStruct*) pHandle; - rte_pktmbuf_free(pVirtAddr); + p_xran_dev_ctx->xran2phy_mem_ready = 1; - return 0; + return XRAN_STATUS_SUCCESS; } -int32_t xran_5g_fronthault_config (void * pHandle, - XRANBufferListStruct *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], - XRANBufferListStruct *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], - XranTransportBlockCallbackFn pCallback, +int32_t +xran_5g_prach_req (void * pHandle, + struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, void *pCallbackTag) { - XranLibHandleInfoStruct *pInfo = (XranLibHandleInfoStruct *) pHandle; - XranStatusInt32 nStatus = XRAN_STATUS_SUCCESS; - int j, i = 0, z, k; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - - print_dbg("%s\n", __FUNCTION__); + int j, i = 0, z; + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; - if(NULL == pHandle) - { + if(NULL == pHandle) { printf("Handle is NULL!\n"); return XRAN_STATUS_FAIL; } - if (pCallback == NULL) - { - printf ("no callback\n"); - return XRAN_STATUS_FAIL; - } - for(j=0; jsFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0; - p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; - p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; - p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; - p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; - p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_lib_ctx->sFrontHaulTxBuffers[j][i][z][0]; - - p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j]; - - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0; - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_lib_ctx->sFrontHaulRxBuffers[j][i][z][0]; - p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; - } + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf ("p_xran_dev_ctx is NULL\n"); + return XRAN_STATUS_FAIL; } -#if 0 - for(j=0; jsFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers[k].pData; - printf(" sym: %2d %p 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", k, ptr, ptr[0],ptr[1], ptr[2], ptr[3], ptr[4]); - } - } + i = pXranCc->nIndex; - for(j=0; 
jsFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers[k].pData; - printf(" sym: %2d %p 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", k, ptr, ptr[0],ptr[1], ptr[2], ptr[3], ptr[4]); - } + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number. + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0]; + if(pDstBuffer[z][j]) + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j])); + + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0]; + if(pDstBufferDecomp[z][j]) + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j]; + } -#endif + } - p_xran_lib_ctx->pCallback[i] = pCallback; - p_xran_lib_ctx->pCallbackTag[i] = pCallbackTag; + p_xran_dev_ctx->pPrachCallback[i] = pCallback; + p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag; - p_xran_lib_ctx->xran2phy_mem_ready = 1; + print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__, + p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]); - return nStatus; + return XRAN_STATUS_SUCCESS; } -int32_t xran_5g_prach_req (void * pHandle, - XRANBufferListStruct *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], - XranTransportBlockCallbackFn pCallback, +int32_t +xran_5g_srs_req (void * pHandle, + struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, void *pCallbackTag) { - XranLibHandleInfoStruct *pInfo = (XranLibHandleInfoStruct *) pHandle; - XranStatusInt32 nStatus = XRAN_STATUS_SUCCESS; int j, i = 0, z; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; - if(NULL == pHandle) - { + if(NULL == pHandle) { printf("Handle is NULL!\n"); return XRAN_STATUS_FAIL; } - if (pCallback == NULL) - { - printf ("no callback\n"); + + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf ("p_xran_dev_ctx is NULL\n"); return XRAN_STATUS_FAIL; } - for(j=0; jsFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0; - p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; - p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; - p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; - p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number. 
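/* [editor's note] The buffer-registration calls in this hunk (xran_5g_fronthault_config,
 * xran_5g_prach_req, xran_5g_srs_req) all follow one pattern: the application owns the
 * per-symbol IQ buffers and hands in an array of xran_buffer_list pointers per antenna
 * and per TTI slot; the library copies each list header by value into its BbuIoBufCtrl
 * bookkeeping. A minimal caller-side sketch for the PRACH case, where my_prach_bufs,
 * pLists, pListsDecomp, ccHandle, prach_cb and prach_cb_tag are hypothetical application
 * names, not part of this patch:
 *
 *   struct xran_buffer_list *pLists[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN];
 *   for (int ant = 0; ant < XRAN_MAX_ANTENNA_NR; ant++)
 *       for (int tti = 0; tti < XRAN_N_FE_BUF_LEN; tti++)
 *           pLists[ant][tti] = &my_prach_bufs[ant][tti];  // each list carries its per-symbol pBuffers
 *   xran_5g_prach_req(ccHandle, pLists, pListsDecomp, prach_cb, prach_cb_tag);
 */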
- p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_lib_ctx->sFHPrachRxBuffers[j][i][z][0]; - p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + i = pXranCc->nIndex; + + for(j=0; jsFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number. + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0]; + if(pDstBuffer[z][j]) + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j])); + + /* C-plane SRS */ + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z]; + + if(pDstCpBuffer[z][j]) + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j])); + } } - p_xran_lib_ctx->pPrachCallback[i] = pCallback; - p_xran_lib_ctx->pPrachCallbackTag[i] = pCallbackTag; + p_xran_dev_ctx->pSrsCallback[i] = pCallback; + p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag; - return 0; + print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__, + p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]); + + return XRAN_STATUS_SUCCESS; } -int32_t xran_5g_pre_compenstor_cfg(void* pHandle, - uint32_t nTxPhaseCps, - uint32_t nRxPhaseCps, - uint8_t nSectorId) +uint32_t +xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear) { - /* functionality is not yet implemented */ + uint32_t i; + + *num_core_used = xran_num_cores_used; + for (i = 0; i < xran_num_cores_used; i++) + { + core_used[i] = xran_core_used[i]; + } + + *total_time = xran_total_tick; + *used_time = xran_used_tick; + + if (clear) + { + xran_total_tick = 0; + xran_used_tick = 0; + } + return 0; } -int32_t xran_open(void *pHandle, PXRANFHCONFIG pConf) +uint8_t* +xran_add_cp_hdr_offset(uint8_t *dst) { + dst += (RTE_PKTMBUF_HEADROOM + + sizeof(struct xran_ecpri_hdr) + + sizeof(struct xran_cp_radioapp_section1_header) + + sizeof(struct xran_cp_radioapp_section1)); + + dst = RTE_PTR_ALIGN_CEIL(dst, 64); + + return dst; +} + +uint8_t* +xran_add_hdr_offset(uint8_t *dst, int16_t compMethod) +{ + dst+= (RTE_PKTMBUF_HEADROOM + + sizeof (struct xran_ecpri_hdr) + + sizeof (struct radio_app_common_hdr) + + sizeof(struct data_section_hdr)); + if(compMethod != XRAN_COMPMETHOD_NONE) + dst += sizeof (struct data_section_compression_hdr); + dst = RTE_PTR_ALIGN_CEIL(dst, 64); + + return dst; +} + +int32_t +xran_pkt_gen_process_ring(struct rte_ring *r) +{ + assert(r); + int32_t retval = 0; + struct rte_mbuf *mbufs[16]; int i; - 
uint8_t slotNr;
-    XRANFHCONFIG *pFhCfg;
-    xRANPrachCPConfigStruct *pPrachCPConfig = &(xran_lib_get_ctx()->PrachCPConfig);
-    pFhCfg = &(xran_lib_get_ctx()->xran_fh_cfg);
-    memcpy(pFhCfg, pConf, sizeof(XRANFHCONFIG));
-    PXRANPRACHCONFIG pPRACHConfig = &pFhCfg->prach_conf;
-    uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
-    const xRANPrachConfigTableStruct *pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
-    uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
-    const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt - FORMAT_A1];
-    memset(pPrachCPConfig, 0, sizeof(xRANPrachCPConfigStruct));
+    uint32_t remaining;
+    uint64_t t1;
+    struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
+    const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
+        RTE_DIM(mbufs), &remaining);

-    //setup PRACH configuration for C-Plane
-    pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC;         // 3, PRACH preamble format A1~3, B1~4, C0, C2
-    pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
-    pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
-    pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
-    pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
-    pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
-    pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
-    pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
-    pPrachCPConfig->x = pxRANPrachConfigTable->x;
-    pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
-    pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
+    if (!dequeued)
+        return 0;

-    pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
-    for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
-    {
-        slotNr = pxRANPrachConfigTable->slotNr[i];
-        if (slotNr > 0)
-            pPrachCPConfig->isPRACHslot[slotNr] = 1;
+    t1 = MLogTick();
+    for (i = 0; i < dequeued; ++i) {
+        struct cp_up_tx_desc * p_tx_desc = (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i], struct cp_up_tx_desc *);
+        retval = xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
+                                        p_tx_desc->ctx_id,
+                                        p_tx_desc->tti,
+                                        p_tx_desc->cc_id,
+                                        p_tx_desc->ant_id,
+                                        p_tx_desc->frame_id,
+                                        p_tx_desc->subframe_id,
+                                        p_tx_desc->slot_id,
+                                        p_tx_desc->sym_id,
+                                        (enum xran_comp_hdr_type)p_tx_desc->compType,
+                                        (enum xran_pkt_dir) p_tx_desc->direction,
+                                        p_tx_desc->xran_port_id,
+                                        (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
+
+        xran_pkt_gen_desc_free(p_tx_desc);
+        if (XRAN_STOPPED == xran_if_current_state){
+            MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
+            return -1;
+        }
     }

-    xran_cp_init_sectiondb(pHandle);
-    xran_init_sectionid(pHandle);
-    xran_init_seqid(pHandle);
+    if(p_io_cfg->io_sleep)
+        nanosleep(&sleeptime,NULL);

-    return 0;
+    MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
+
+    return remaining;
 }

-int32_t xran_start(void *pHandle)
+int32_t
+xran_dl_pkt_ring_processing_func(void* args)
 {
-    xran_if_current_state = XRAN_RUNNING;
+    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+    uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
+    uint16_t current_port;
+
+    rte_timer_manage();
+
+    for (current_port = 0; current_port < XRAN_PORTS_NUM; current_port++) {
+        if( xran_port_mask & (1<<current_port)) {
+            xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
+        }
+    }
+
+    if (XRAN_STOPPED == xran_if_current_state)
+        return -1;
+
     return 0;
 }

-int32_t xran_stop(void *pHandle)
+/** Function that performs servicing of DPDK timers */
+int32_t
+xran_processing_timer_only_func(void* args)
 {
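/* [editor's note] Two conventions shared by the task functions in this hunk:
 * (1) each is invoked repeatedly by a polling worker and returns 0 to keep
 * running, or -1 once xran_if_current_state reaches XRAN_STOPPED, which ends
 * the worker; (2) when used as a worker argument, task_arg is not a real
 * pointer: the low 16 bits carry a bitmask of the O-xU ports to serve, as
 * decoded above in xran_dl_pkt_ring_processing_func. A minimal sketch of the
 * presumed polling loop; the actual xran_generic_worker_thread lives outside
 * this file, so its exact shape is an assumption:
 *
 *   while (worker_enabled) {
 *       if (pThCtx->task_func(pThCtx->task_arg) != 0)
 *           break;   // task observed XRAN_STOPPED
 *   }
 */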
-    xran_if_current_state = XRAN_STOPPED;
+    rte_timer_manage();
+    if (XRAN_STOPPED == xran_if_current_state)
+        return -1;
+
     return 0;
 }

-int32_t xran_close(void *pHandle)
+/** Function that performs parsing of RX packets on all ports and does TX and RX on the ETH device */
+int32_t
+xran_all_tasks(void* arg)
 {
-    xran_if_current_state = XRAN_STOPPED;
-    xran_cp_free_sectiondb(pHandle);
-    rte_eal_mp_wait_lcore();
+
+    ring_processing_func(arg);
+    process_dpdk_io(arg);
     return 0;
 }

-int32_t xran_mm_destroy (void * pHandle)
+/** Function that performs TX and RX on the ETH device */
+int32_t
+xran_eth_trx_tasks(void* arg)
 {
-    /* functionality is not yet implemented */
-    return -1;
+    process_dpdk_io(arg);
+    return 0;
 }

-int32_t xran_reg_sym_cb(void *pHandle, XRANFHSYMPROCCB symCb, void * symCbParam, uint8_t symb, uint8_t ant)
+/** Function that performs RX on the ETH device */
+int32_t
+xran_eth_rx_tasks(void* arg)
 {
-    /* functionality is not yet implemented */
-    return -1;
+    process_dpdk_io_rx(arg);
+    return 0;
 }

-int32_t xran_reg_physide_cb(void *pHandle, XRANFHTTIPROCCB Cb, void *cbParam, int skipTtiNum, enum callback_to_phy_id id)
+/** Function to process ORAN FH packets per port */
+int32_t
+ring_processing_func_per_port(void* args)
 {
-    struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
+    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+    int16_t retPoll = 0;
+    int32_t i;
+    uint64_t t1, t2;
+    uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
+    queueid_t qi;
+
+    for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
+        if (ctx->vf2xran_port[i] == port_id) {
+            for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
+                if (process_ring(ctx->rx_ring[i][qi], i, qi))
+                    return 0;
+            }
+        }
+    }

-    p_xran_lib_ctx->ttiCb[id]      = Cb;
-    p_xran_lib_ctx->TtiCbParam[id] = cbParam;
-    p_xran_lib_ctx->SkipTti[id]    = skipTtiNum;
+    if (XRAN_STOPPED == xran_if_current_state)
+        return -1;

     return 0;
 }

-int32_t xran_get_slot_idx (uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond)
+/** Function that generates the configuration of worker threads and creates them based on the scenario and the platform used */
+int32_t
+xran_spawn_workers(void)
 {
-    int32_t tti = 0;
+    uint64_t nWorkerCore = 1LL;
+    uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
+    int32_t i = 0;
+    uint32_t total_num_cores = 1; /*start with timing core */
+    uint32_t worker_num_cores = 0;
+    uint32_t icx_cpu = 0;
+    int32_t core_map[2*sizeof(uint64_t)*8];
+    uint32_t xran_port_mask = 0;
+
+    struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
+    struct xran_device_ctx *p_dev = NULL;
+    struct xran_fh_init *fh_init = NULL;
+    struct xran_fh_config *fh_cfg = NULL;
+    struct xran_worker_th_ctx* pThCtx = NULL;
+
+    p_dev = xran_dev_get_ctx_by_id(0);
+    if(p_dev == NULL) {
+        print_err("p_dev\n");
+        return XRAN_STATUS_FAIL;
+    }

-    tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
-    *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
-    *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
-    *nFrameIdx    = (uint32_t)XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
-    *nSecond      = timing_get_current_second();
+    fh_init = &p_dev->fh_init;
+    if(fh_init == NULL) {
+        print_err("fh_init\n");
+        return XRAN_STATUS_FAIL;
+    }

-    return tti;
-}
+    fh_cfg = &p_dev->fh_cfg;
+    if(fh_cfg == NULL) {
+        print_err("fh_cfg\n");
+        return XRAN_STATUS_FAIL;
+    }

-/**
- * @brief Get supported maximum number of sections
- *
- * @return maximum number of sections
- */
-inline uint8_t xran_get_max_sections(void *pHandle)
-{
-    return (XRAN_MAX_NUM_SECTIONS);
-}
+    for (i = 0; i < coreNum && i < 64; i++) {
+        if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+            core_map[worker_num_cores++] = i;
+            total_num_cores++;
+        }
+        nWorkerCore = nWorkerCore << 1;
+    }

-/**
- * @brief Get the configuration of eAxC ID
- *
- * @return the pointer of configuration
- */
-inline XRANEAXCIDCONFIG *xran_get_conf_eAxC(void *pHandle)
-{
-    return (&(xran_lib_get_ctx()->eAxc_id_cfg));
-}
+    nWorkerCore = 1LL;
+    for (i = 64; i < coreNum && i < 128; i++) {
+        if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+            core_map[worker_num_cores++] = i;
+            total_num_cores++;
+        }
+        nWorkerCore = nWorkerCore << 1;
+    }

-/**
- * @brief Get the configuration of subcarrier spacing for PRACH
- *
- * @return subcarrier spacing value for PRACH
- */
-inline uint8_t xran_get_conf_prach_scs(void *pHandle)
-{
-    return (xran_lib_get_ctx_fhcfg()->prach_conf.nPrachSubcSpacing);
-}
+    extern int _may_i_use_cpu_feature(unsigned __int64);
+    icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);

-/**
- * @brief Get the configuration of FFT size for RU
- *
- * @return FFT size value for RU
- */
-inline uint8_t xran_get_conf_fftsize(void *pHandle)
-{
-    return (xran_lib_get_ctx_fhcfg()->ru_conf.fftSize);
-}
+    printf("O-XU      %d\n", eth_ctx->io_cfg.id);
+    printf("HW        %d\n", icx_cpu);
+    printf("Num cores %d\n", total_num_cores);
+    printf("Num ports %d\n", fh_init->xran_ports);
+    printf("O-RU Cat  %d\n", fh_cfg->ru_conf.xranCat);
+    printf("O-RU CC   %d\n", fh_cfg->nCC);
+    printf("O-RU eAxC %d\n", fh_cfg->neAxc);

-/**
- * @brief Get the configuration of nummerology
- *
- * @return subcarrier spacing value for PRACH
- */
-inline uint8_t xran_get_conf_numerology(void *pHandle)
-{
-    return (xran_lib_get_ctx_fhcfg()->frame_conf.nNumerology);
-}
+    for (i = 0; i < fh_init->xran_ports; i++){
+        xran_port_mask |= 1<<i;
+    }
+
+    for (i = 0; i < fh_init->xran_ports; i++) {
+        struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+        if(p_dev_update == NULL){
+            print_err("p_dev_update\n");
+            return XRAN_STATUS_FAIL;
+        }
+        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
+        p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
+        printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+        printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+    }
+
+    if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+        switch(total_num_cores) {
+            case 1: /** only timing core */
+                eth_ctx->time_wrk_cfg.f     = xran_all_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+            break;
+            case 2:
+                eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id      = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg  = NULL;
+                eth_ctx->pkt_wrk_cfg[0].f   = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[0].arg = pThCtx;
+            break;
+            case 3:
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f     = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg   = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /*
workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 0; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)xran_port_mask; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + break; + default: + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) { + switch(total_num_cores) { + case 1: /** only timing core */ + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + break; + case 2: + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; + + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[0].arg = pThCtx; + break; + case 3: + if(icx_cpu) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ 
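/* [editor's note] The malloc/NULL-check/memset/snprintf/task-assignment sequence
 * used here repeats verbatim for every worker in this switch. A hypothetical
 * helper (not part of this patch) that would factor it out, shown only as a
 * sketch under that assumption:
 *
 *   static struct xran_worker_th_ctx *
 *   xran_alloc_worker(int id, int core, const char *name,
 *                     int32_t (*fn)(void *), void *arg)
 *   {
 *       struct xran_worker_th_ctx *p = _mm_malloc(sizeof(*p), 64);
 *       if (p == NULL)
 *           return NULL;
 *       memset(p, 0, sizeof(*p));
 *       p->worker_id      = id;
 *       p->worker_core_id = core;
 *       snprintf(p->worker_name, RTE_DIM(p->worker_name), "%s-%d", name, core);
 *       p->task_func = fn;
 *       p->task_arg  = arg;
 *       return p;
 *   }
 */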
+ print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 0; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)xran_port_mask; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 4: + if(icx_cpu) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + 
pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 5: + if(icx_cpu) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + 
eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 6: + if(eth_ctx->io_cfg.id == O_DU) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 Eth Tx **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_tx; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", 
core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1<<1) | (1<<2) |(1<<0)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 4 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 0; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } else if(eth_ctx->io_cfg.id == O_RU) { + /*** O_RU specific config */ + /* timing core */ + eth_ctx->time_wrk_cfg.f = NULL; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 Eth RX */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_rx; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", 
"fh_rx_p0", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)0; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)1; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** FH TX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_tx; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + default: + print_err("unsupported configuration\n"); + return XRAN_STATUS_FAIL; + } + } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) { + switch(total_num_cores) { + case 1: + case 2: + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + break; + case 3: + if(icx_cpu) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = 
+        default:
+            print_err("unsupported configuration\n");
+            return XRAN_STATUS_FAIL;
+        }
+    } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) {
+        switch(total_num_cores) {
+        case 1:
+        case 2:
+            print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+            return XRAN_STATUS_FAIL;
+            break;
+        case 3:
+            if(icx_cpu) {
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0 **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                for (i = 1; i < fh_init->xran_ports; i++) {
+                    struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+                    if(p_dev_update == NULL) {
+                        print_err("p_dev_update\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                    printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                    printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                }
+
+                /** 1 - CP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)xran_port_mask;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+            } else {
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            }
+            break;
+        case 4:
+            if(icx_cpu) {
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0 **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 1 - CP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)(((1<<1) | (1<<2)) & xran_port_mask);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 2 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 2;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)((1<<0) & xran_port_mask);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                for (i = 1; i < fh_init->xran_ports; i++) {
+                    struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+                    if(p_dev_update == NULL) {
+                        print_err("p_dev_update\n");
+                        return XRAN_STATUS_FAIL;
+                    }
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id;
+                    p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id;
+                    printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                    printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+                }
+            } else {
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            }
+            break;
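+        /* Editor's note (assumption inferred from the code above, not
+         * upstream text): job2wrk_id[] routes a device's C-Plane DL/UL
+         * section-generation jobs onto a given worker's ring. In the 4-core
+         * layout above, ports 1..xran_ports-1 are pinned to the worker
+         * configured last (worker id 2, "fh_tx_gen"), while the task_arg
+         * bitmasks decide which ports' packet rings each worker drains.
+         */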
+        case 5:
+            /* timing core */
+            eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+            eth_ctx->time_wrk_cfg.arg = NULL;
+            eth_ctx->time_wrk_cfg.state = 1;
+
+            /* workers */
+            /** 0 FH RX and BBDEV */
+            pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+            if(pThCtx == NULL){
+                print_err("pThCtx allocation error\n");
+                return XRAN_STATUS_FAIL;
+            }
+            memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+            pThCtx->worker_id = 0;
+            pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+            snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+            pThCtx->task_func = ring_processing_func;
+            pThCtx->task_arg = NULL;
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+            /** 1 - CP GEN **/
+            pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+            if(pThCtx == NULL){
+                print_err("pThCtx allocation error\n");
+                return XRAN_STATUS_FAIL;
+            }
+            memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+            pThCtx->worker_id = 1;
+            pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+            snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+            pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+            pThCtx->task_arg = (void*)(1<<0);
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+            /** 2 UP GEN **/
+            pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+            if(pThCtx == NULL){
+                print_err("pThCtx allocation error\n");
+                return XRAN_STATUS_FAIL;
+            }
+            memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+            pThCtx->worker_id = 2;
+            pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+            snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+            pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+            pThCtx->task_arg = (void*)(1<<1);
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+            /** 3 UP GEN **/
+            pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+            if(pThCtx == NULL){
+                print_err("pThCtx allocation error\n");
+                return XRAN_STATUS_FAIL;
+            }
+            memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+            pThCtx->worker_id = 3;
+            pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+            snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]);
+            pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+            pThCtx->task_arg = (void*)(1<<2);
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+            eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+            break;
+        case 6:
+            if(eth_ctx->io_cfg.id == O_DU) {
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+                eth_ctx->time_wrk_cfg.arg = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0 **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func;
+                pThCtx->task_arg = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 1 - CP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_processing_timer_only_func;
+                pThCtx->task_arg = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 2 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 2;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)(1<<0);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 3 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 3;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)(1<<1);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 4 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 4;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)(1<<2);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+            } else {
+                /*** O_RU specific config */
+                /* timing core */
+                eth_ctx->time_wrk_cfg.f = NULL;
+                eth_ctx->time_wrk_cfg.arg = NULL;
+                eth_ctx->time_wrk_cfg.state = 1;
+
+                /* workers */
+                /** 0 Eth RX */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 0;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = process_dpdk_io_rx;
+                pThCtx->task_arg = NULL;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 1 FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 1;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg = (void*)0;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 2 FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 2;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg = (void*)1;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 3 FH RX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 3;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = ring_processing_func_per_port;
+                pThCtx->task_arg = (void*)2;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** FH TX and BBDEV */
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 4;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = process_dpdk_io_tx;
+                pThCtx->task_arg = (void*)2;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+            }
+            break;
+        default:
+            print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+            return XRAN_STATUS_FAIL;
+        }
+    } else {
+        print_err("unsupported configuration\n");
+        return XRAN_STATUS_FAIL;
+    }
+
+    nWorkerCore = 1LL;
+    if(eth_ctx->io_cfg.pkt_proc_core) {
+        for (i = 0; i < coreNum && i < 64; i++) {
+            if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+                xran_core_used[xran_num_cores_used++] = i;
+                if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
+                    rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
+                eth_ctx->pkt_wrk_cfg[i].state = 1;
+                if(eth_ctx->pkt_proc_core_id == 0)
+                    eth_ctx->pkt_proc_core_id = i;
+                printf("spawn worker %d core %d\n", eth_ctx->num_workers, i);
+                eth_ctx->worker_core[eth_ctx->num_workers++] = i;
+            }
+            nWorkerCore = nWorkerCore << 1;
+        }
+    }
+
+    nWorkerCore = 1LL;
+    if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
+        for (i = 64; i < coreNum && i < 128; i++) {
+            if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+                xran_core_used[xran_num_cores_used++] = i;
+                if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
+                    rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
+                eth_ctx->pkt_wrk_cfg[i].state = 1;
+                if(eth_ctx->pkt_proc_core_id == 0)
+                    eth_ctx->pkt_proc_core_id = i;
+                printf("spawn worker %d core %d\n", eth_ctx->num_workers, i);
+                eth_ctx->worker_core[eth_ctx->num_workers++] = i;
+            }
+            nWorkerCore = nWorkerCore << 1;
+        }
+    }
+
+    return XRAN_STATUS_SUCCESS;
+}
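+/* Editor's sketch (illustrative, not part of the patch): how the
+ * pkt_proc_core bitmask above expands to lcore ids. Bit i set means "launch
+ * the next configured worker on lcore i"; pkt_proc_core_64_127 covers lcores
+ * 64..127 the same way. For example, pkt_proc_core = 0x0C places workers on
+ * lcores 2 and 3:
+ *
+ *     uint64_t mask = 0x0C;
+ *     for (int lcore = 0; lcore < 64; lcore++)
+ *         if (mask & (1ULL << lcore))
+ *             printf("worker on lcore %d\n", lcore);   // prints 2, then 3
+ */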
"A" : "B"); + } + + p_xran_dev_ctx->enableCP = pConf->enableCP; + p_xran_dev_ctx->enablePrach = pConf->prachEnable; + p_xran_dev_ctx->enableSrs = pConf->srsEnable; + p_xran_dev_ctx->puschMaskEnable = pConf->puschMaskEnable; + p_xran_dev_ctx->puschMaskSlot = pConf->puschMaskSlot; + p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna; + + if(pConf->GPS_Alpha || pConf->GPS_Beta ){ + offset_sec = pConf->GPS_Beta / 100; /* resolution of beta is 10ms */ + offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 1e7 + pConf->GPS_Alpha; + p_xran_dev_ctx->offset_sec = offset_sec; + p_xran_dev_ctx->offset_nsec = offset_nsec; + }else { + p_xran_dev_ctx->offset_sec = 0; + p_xran_dev_ctx->offset_nsec = 0; + } + + + nNumerology = xran_get_conf_numerology(p_xran_dev_ctx); - pFhCfg = xran_lib_get_ctx_fhcfg(); - return ((pFhCfg->ru_conf.iqWidth==16)?0:pFhCfg->ru_conf.iqWidth); + if (pConf->nCC > XRAN_MAX_SECTOR_NR) { + if(pConf->log_level) + printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR); + pConf->nCC = XRAN_MAX_SECTOR_NR; + } + + if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) { + print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder); + return XRAN_STATUS_FAIL; + } + + if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) { + if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) { + return ret; + } + } + + /* setup PRACH configuration for C-Plane */ + if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) { + if((ret = xran_init_prach(pConf, p_xran_dev_ctx))< 0){ + return ret; + } + } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) { + if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0){ + return ret; + } + } + + if((ret = xran_init_srs(pConf, p_xran_dev_ctx))< 0){ + return ret; + } + + if((ret = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){ + return ret; + } + + if((ret = xran_init_sectionid(p_xran_dev_ctx)) < 0){ + return ret; + } + + if((ret = xran_init_seqid(p_xran_dev_ctx)) < 0){ + return ret; + } + + if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){ + if((ret = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) { + return ret; + } + + if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) { + if((ret = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) { + return ret; + } + } + } + + if(pConf->ru_conf.xran_max_frame) { + xran_max_frame = pConf->ru_conf.xran_max_frame; + printf("xran_max_frame %d\n", xran_max_frame); + } + + p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology); + if (interval_us > p_xran_dev_ctx->interval_us_local) + { + interval_us = xran_fs_get_tti_interval(nNumerology); //only update interval_us based on maximum numerology + } + +// if(pConf->log_level){ + printf("%s: interval_us=%ld, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local); +// } + if (nNumerology >= timing_get_numerology()) + { + timing_set_numerology(nNumerology); + } + + for(i = 0 ; i nCC; i++){ + xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod, + pConf->frame_conf.sSlotConfig); + } + + xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology)); + + /* if send_xpmbuf2ring needs to be changed from default functions, + * then those should be set between xran_init and xran_open */ + if(p_xran_dev_ctx->send_cpmbuf2ring == NULL) + p_xran_dev_ctx->send_cpmbuf2ring = 
+
+    /* if send_xpmbuf2ring needs to be changed from default functions,
+     * then those should be set between xran_init and xran_open */
+    if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
+        p_xran_dev_ctx->send_cpmbuf2ring = xran_ethdi_mbuf_send_cp;
+    if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
+        p_xran_dev_ctx->send_upmbuf2ring = xran_ethdi_mbuf_send;
+
+    if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+        if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
+            p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+    } else {
+        if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
+            p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
+    }
+
+    if(pConf->dpdk_port == 0) {
+        /* create all threads on open of port 0 */
+        xran_num_cores_used = 0;
+        if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
+            eth_ctx->bbdev_dec = pConf->bbdev_dec;
+            eth_ctx->bbdev_enc = pConf->bbdev_enc;
+        }
+
+        if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+            printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
+            p_xran_dev_ctx->timing_source_thread_running = 0;
+            xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
+            if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
+                rte_panic("thread_run() failed to start\n");
+        } else if(pConf->log_level) {
+            printf("Eth port was not open. Processing thread was not started\n");
+        }
+    } else {
+        if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
+            if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
+                return ret;
+            }
+        }
+    }
+
+    if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+        if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
+            if((ret = xran_spawn_workers()) < 0) {
+                return ret;
+            }
+        }
+        printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, sched_getcpu(), getpid());
+        printf("Waiting on Timing thread...\n");
+        while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
+            usleep(100);
+        }
+    }
+
+    print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
+    return ret;
 }
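+#if 0
+/* Editor's sketch (hypothetical, not part of this patch): the call order the
+ * code above implies. Custom mbuf-send callbacks must be registered after
+ * xran_init() and before xran_open(); workers are spawned when the last
+ * dpdk_port is opened; xran_start() then releases the timing loop.
+ * my_send_cp/my_send_up are placeholder callbacks and the xran_init()
+ * arguments are elided here.
+ */
+static int32_t fh_bringup_example(struct xran_fh_config *cfg, int32_t num_ports)
+{
+    int32_t p;
+    xran_register_cb_mbuf2ring(my_send_cp, my_send_up);    /* optional override */
+    for (p = 0; p < num_ports; p++) {
+        cfg[p].dpdk_port = p;              /* workers spawn on the last port */
+        if (xran_open(NULL, &cfg[p]) != XRAN_STATUS_SUCCESS)
+            return XRAN_STATUS_FAIL;
+    }
+    return xran_start(NULL);
+}
+#endif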
"O-DU": "O-RU"), buff, ts.tv_nsec, interval_us); + } + + if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable) + { + xran_if_current_state = XRAN_OWDM; + } + else + { + xran_if_current_state = XRAN_RUNNING; + } + return 0; } -/** - * @brief Get the configuration of lls-cu ID - * - * @return Configured lls-cu ID - */ -inline uint8_t xran_get_llscuid(void *pHandle) +int32_t +xran_stop(void *pHandle) { - return (xran_lib_get_ctx()->llscu_id); + if(xran_get_if_state() == XRAN_STOPPED) { + print_err("Already STOPPED!!"); + return (-1); + } + + xran_if_current_state = XRAN_STOPPED; + return 0; } -/** - * @brief Get the configuration of lls-cu ID - * - * @return Configured lls-cu ID - */ -inline uint8_t xran_get_sectorid(void *pHandle) +int32_t +xran_close(void *pHandle) { - return (xran_lib_get_ctx()->sector_id); + int32_t ret = XRAN_STATUS_SUCCESS; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx(); + + xran_if_current_state = XRAN_STOPPED; + ret = xran_cp_free_sectiondb(p_xran_dev_ctx); + + if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) + xran_ruemul_release(p_xran_dev_ctx); + +#ifdef RTE_LIBRTE_PDUMP + /* uninitialize packet capture framework */ + rte_pdump_uninit(); +#endif + return ret; } -/** - * @brief Get the configuration of the number of component carriers - * - * @return Configured the number of componen carriers - */ -inline uint8_t xran_get_num_cc(void *pHandle) +/* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open + * each cb will be set by default duing open if it is set by NULL */ +int32_t +xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up) { - return (xran_lib_get_ctx_fhcfg()->nCC); + struct xran_device_ctx *p_xran_dev_ctx; + + if(xran_get_if_state() == XRAN_RUNNING) { + print_err("Cannot register callback while running!!\n"); + return (-1); + } + + p_xran_dev_ctx = xran_dev_get_ctx(); + + p_xran_dev_ctx->send_cpmbuf2ring = mbuf_send_cp; + p_xran_dev_ctx->send_upmbuf2ring = mbuf_send_up; + + p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; + + return (0); } -/** - * @brief Get the configuration of the number of antenna - * - * @return Configured the number of antenna - */ -inline uint8_t xran_get_num_eAxc(void *pHandle) +int32_t +xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond) +{ + int32_t tti = 0; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId); + if (!p_xran_dev_ctx) { - return (xran_lib_get_ctx_fhcfg()->neAxc); + print_err("Null xRAN context on port id %u!!\n", PortId); + return 0; } + tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT); + *nSlotIdx = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local)); + *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + *nFrameIdx = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local)); + *nSecond = timing_get_current_second(); + return tti; +} + +int32_t +xran_set_debug_stop(int32_t value, int32_t count) +{ + return timing_set_debug_stop(value, count); + }