X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=blobdiff_plain;f=fhi_lib%2Flib%2Fsrc%2Fxran_main.c;h=7c472d7e5d88b94a0aff3dea5b0e9fa4c80c03bf;hb=892daba4c616407f16506415d5a69549519ef11d;hp=94751f6bd0cb70efc4c3f1f93b7272bbe5e1746d;hpb=4745e5c88ba931c6d71cb6d8c681f76cf364eac5;p=o-du%2Fphy.git diff --git a/fhi_lib/lib/src/xran_main.c b/fhi_lib/lib/src/xran_main.c index 94751f6..7c472d7 100644 --- a/fhi_lib/lib/src/xran_main.c +++ b/fhi_lib/lib/src/xran_main.c @@ -1,6 +1,6 @@ /****************************************************************************** * -* Copyright (c) 2019 Intel. +* Copyright (c) 2020 Intel. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ * *******************************************************************************/ - /** * @brief XRAN main functionality module * @file xran_main.c @@ -30,22 +29,36 @@ #include #include #include +#include #include #include #include #include - +#include +#include +#include +#include #include #include #include #include #include #include +#include #include - -#include "xran_fh_lls_cu.h" +#include +#include +#if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */ +#include +#endif +#include "xran_fh_o_du.h" +#include "xran_fh_o_ru.h" +#include "xran_main.h" #include "ethdi.h" +#include "xran_mem_mgr.h" +#include "xran_tx_proc.h" +#include "xran_rx_proc.h" #include "xran_pkt.h" #include "xran_up_api.h" #include "xran_cp_api.h" @@ -53,208 +66,386 @@ #include "xran_lib_mlog_tasks_id.h" #include "xran_timer.h" #include "xran_common.h" +#include "xran_dev.h" +#include "xran_frame_struct.h" #include "xran_printf.h" +#include "xran_cp_proc.h" +#include "xran_tx_proc.h" +#include "xran_rx_proc.h" +#include "xran_cb_proc.h" +#include "xran_ecpri_owd_measurements.h" -#ifndef MLOG_ENABLED -#include "mlog_lnx_xRAN.h" -#else -#include "mlog_lnx.h" -#endif - -#define DIV_ROUND_OFFSET(X,Y) ( X/Y + ((X%Y)?1:0) ) - -#define XranOffsetSym(offSym, otaSym, numSymTotal) (((int32_t)offSym > (int32_t)otaSym) ? \ - ((int32_t)otaSym + ((int32_t)numSymTotal) - (uint32_t)offSym) : \ - (((int32_t)otaSym - (int32_t)offSym) >= numSymTotal) ? \ - (((int32_t)otaSym - (int32_t)offSym) - numSymTotal) : \ - ((int32_t)otaSym - (int32_t)offSym)) - -#define MAX_NUM_OF_XRAN_CTX (2) -#define XranIncrementCtx(ctx) ((ctx >= (MAX_NUM_OF_XRAN_CTX-1)) ? 0 : (ctx+1)) -#define XranDecrementCtx(ctx) ((ctx == 0) ? 
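// The XranOffsetSym() macro removed just above is a wrap-around subtraction
// modulo the total symbol count; a plain-function sketch of the same logic
// (hypothetical helper name, assumes <stdint.h>):
static inline int32_t xran_offset_sym_sketch(int32_t off_sym, int32_t ota_sym, int32_t num_sym_total)
{
    int32_t d = ota_sym - off_sym;   // signed distance from offset to OTA symbol
    if (d < 0)
        d += num_sym_total;          // offset is ahead of OTA: wrap up
    else if (d >= num_sym_total)
        d -= num_sym_total;          // distance overflowed a full slot: wrap down
    return d;
}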
(MAX_NUM_OF_XRAN_CTX-1) : (ctx-1)) +#include "xran_mlog_lnx.h" -struct xran_timer_ctx { - uint32_t tti_to_process; -}; +static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {{NULL}}; -static XranLibHandleInfoStruct DevHandle; -static struct xran_lib_ctx g_xran_lib_ctx = { 0 }; +uint64_t interval_us = 1000; //the TTI interval of the cell with maximum numerology -struct xran_timer_ctx timer_ctx[MAX_NUM_OF_XRAN_CTX]; +uint32_t xran_lib_ota_tti[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Slot index in a second [0:(1000000/TTI-1)] */ +uint32_t xran_lib_ota_sym[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a slot [0:13] */ +uint32_t xran_lib_ota_sym_idx[XRAN_PORTS_NUM] = {0,0,0,0,0,0,0,0}; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1] + where TTI is TTI interval in microseconds */ -static struct rte_timer tti_to_phy_timer[10]; -static struct rte_timer tti_timer; -static struct rte_timer sym_timer; -static struct rte_timer tx_cp_dl_timer; -static struct rte_timer tx_cp_ul_timer; -static struct rte_timer tx_up_timer; +uint16_t xran_SFN_at_Sec_Start = 0; /**< SFN at current second start */ +uint16_t xran_max_frame = 1023; /**< value of max frame used. expected to be 99 (old compatibility mode) and 1023 as per section 9.7.2 System Frame Number Calculation */ -static long interval_us = 125; +static uint64_t xran_total_tick = 0, xran_used_tick = 0; +static uint32_t xran_num_cores_used = 0; +static uint32_t xran_core_used[64] = {0}; +int32_t first_call = 0; +int32_t mlogxranenable = 0; -uint32_t xran_lib_ota_tti = 0; /* [0:7999] */ -uint32_t xran_lib_ota_sym = 0; /* [0:7] */ -uint32_t xran_lib_ota_sym_idx = 0; /* [0 : 14*8*1000-1] */ - -uint64_t xran_lib_gps_second = 0; - -static uint8_t xran_cp_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_DIR_MAX][XRAN_MAX_ANTENNA_NR]; -static uint8_t xran_section_id_curslot[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR]; -static uint16_t xran_section_id[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR]; - -void xran_timer_arm(struct rte_timer *tim, void* arg); -int xran_process_tx_sym(void *arg); - -int xran_process_rx_sym(void *arg, - void *iq_data_start, - uint16_t size, - uint8_t CC_ID, - uint8_t Ant_ID, - uint8_t frame_id, - uint8_t subframe_id, - uint8_t slot_id, - uint8_t symb_id); +struct cp_up_tx_desc * xran_pkt_gen_desc_alloc(void); +int32_t xran_pkt_gen_desc_free(struct cp_up_tx_desc *p_desc); void tti_ota_cb(struct rte_timer *tim, void *arg); void tti_to_phy_cb(struct rte_timer *tim, void *arg); -void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore); -struct xran_lib_ctx *xran_lib_get_ctx(void) +int32_t xran_pkt_gen_process_ring(struct rte_ring *r); + +void +xran_updateSfnSecStart(void) { - return &g_xran_lib_ctx; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx(); + struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters; + int32_t xran_ports = p_xran_dev_ctx->fh_init.xran_ports; + int32_t o_xu_id = 0; + uint64_t currentSecond = timing_get_current_second(); + // Assume always positive + uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET; + uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND; + uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1)); + xran_SFN_at_Sec_Start = sfn; + + for(o_xu_id = 0; o_xu_id < xran_ports; o_xu_id++){ + pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter; + pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter; + pCnt->tx_bytes_counter = 0; + pCnt->rx_bytes_counter = 0; + p_xran_dev_ctx++; + pCnt = 
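// xran_updateSfnSecStart() derives the SFN at the top of the current second
// purely from wall-clock time; a standalone sketch of that derivation, where
// GPS_OFFSET stands in for UNIX_TO_GPS_SECONDS_OFFSET and 100 frames/second
// assumes NUM_OF_FRAMES_PER_SECOND == 100 (10 ms radio frames):
static uint16_t sfn_at_second_start_sketch(uint64_t unix_second, uint16_t max_frame)
{
    uint64_t gps_second = unix_second - GPS_OFFSET;   // align to the GPS epoch
    uint64_t n_frames   = gps_second * 100ULL;        // frames elapsed since epoch
    return (uint16_t)(n_frames % ((uint64_t)max_frame + 1U)); // wrap at 1024 (or 100)
}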
&p_xran_dev_ctx->fh_counters; + } } -static inline XRANFHCONFIG *xran_lib_get_ctx_fhcfg(void) +#if 0 +static inline int32_t +xran_getSlotIdxSecond(uint32_t interval) { - return (&(xran_lib_get_ctx()->xran_fh_cfg)); + int32_t frameIdxSecond = xran_getSfnSecStart(); + int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME(interval); + return slotIndxSecond; } +#endif -inline uint16_t xran_get_beamid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id) -{ - return (0); // NO BEAMFORMING +enum xran_if_state +xran_get_if_state(void) + { + return xran_if_current_state; } -int xran_init_sectionid(void *pHandle) +int32_t xran_is_prach_slot(uint8_t PortId, uint32_t subframe_id, uint32_t slot_id) { - int cell, dir, ant; - - for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) { - for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) { - xran_section_id[cell][ant] = 0; - xran_section_id_curslot[cell][ant] = 255; + int32_t is_prach_slot = 0; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId); + if (p_xran_dev_ctx == NULL) +{ + print_err("PortId %d not exist\n", PortId); + return is_prach_slot; +} + struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig); + uint8_t nNumerology = xran_get_conf_numerology(p_xran_dev_ctx); + + if (nNumerology < 2){ + //for FR1, in 38.211 tab 6.3.3.2-2&3 it is subframe index + if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){ + if (pPrachCPConfig->nrofPrachInSlot == 0){ + if(slot_id == 0) + is_prach_slot = 1; + } + else if (pPrachCPConfig->nrofPrachInSlot == 2) + is_prach_slot = 1; + else{ + if (nNumerology == 0) + is_prach_slot = 1; + else if (slot_id == 1) + is_prach_slot = 1; } } - - return (0); + } else if (nNumerology == 3){ + //for FR2, 38.211 tab 6.3.3.4 it is slot index of 60kHz slot + uint32_t slotidx; + slotidx = subframe_id * SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local) + slot_id; + if (pPrachCPConfig->nrofPrachInSlot == 2){ + if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) + is_prach_slot = 1; + } else { + if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){ + is_prach_slot = 1; + } + } + } else + print_err("Numerology %d not supported", nNumerology); + return is_prach_slot; } -int xran_init_seqid(void *pHandle) +int32_t +xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx) { - int cell, dir, ant; - - for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) { - for(dir=0; dir < XRAN_DIR_MAX; dir++) { - for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) { - xran_cp_seq_id_num[cell][dir][ant] = 0; - } - } - } + struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg); + + if(p_srs){ + p_srs->symbMask = pConf->srs_conf.symbMask; /* deprecated */ + p_srs->slot = pConf->srs_conf.slot; + p_srs->ndm_offset = pConf->srs_conf.ndm_offset; + p_srs->ndm_txduration = pConf->srs_conf.ndm_txduration; + p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset; + + print_dbg("SRS sym %d\n", p_srs->slot); + print_dbg("SRS NDM offset %d\n", p_srs->ndm_offset); + print_dbg("SRS NDM Tx %d\n", p_srs->ndm_txduration); + print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset); + } + return (XRAN_STATUS_SUCCESS); +} - return (0); +int32_t +xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx) +{ + /* update Rach for LTE */ + return xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_LTE); } -inline uint16_t xran_alloc_sectionid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id) +int32_t +xran_init_prach(struct 
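// xran_is_prach_slot() above condenses to the following decision, using the
// same xran_prach_cp_config fields (local names mu/subframe/slot/cfg are
// shorthand for this sketch):
//     if (mu < 2 && cfg->isPRACHslot[subframe]) {            // FR1: indexed by subframe
//         if (cfg->nrofPrachInSlot == 0)      prach = (slot == 0);
//         else if (cfg->nrofPrachInSlot == 2) prach = 1;
//         else                                prach = (mu == 0) || (slot == 1);
//     } else if (mu == 3) {                                  // FR2: indexed by 60 kHz slot
//         uint32_t idx = subframe * slots_per_sf + slot;
//         prach = cfg->isPRACHslot[idx >> 1] &&
//                 (cfg->nrofPrachInSlot == 2 || (idx & 1));
//     }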
xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx, enum xran_ran_tech xran_tech) { - if(cc_id >= XRAN_MAX_CELLS_PER_PORT) { - print_err("Invalid CC ID - %d", cc_id); - return (0); + int32_t i; + uint8_t slotNr; + struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf); + const xRANPrachConfigTableStruct *pxRANPrachConfigTable; + uint8_t nNumerology = pConf->frame_conf.nNumerology; + uint8_t nPrachConfIdx = -1;// = pPRACHConfig->nPrachConfIdx; + struct xran_prach_cp_config *pPrachCPConfig = NULL; + if(pConf->dssEnable){ + /*Check Slot type and */ + if(xran_tech == XRAN_RAN_5GNR){ + pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig); + nPrachConfIdx = pPRACHConfig->nPrachConfIdx; } - if(ant_id >= XRAN_MAX_ANTENNA_NR) { - print_err("Invalid antenna ID - %d", ant_id); - return (0); + else{ + pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE); + nPrachConfIdx = pPRACHConfig->nPrachConfIdxLTE; + } + } + else{ + pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig); + nPrachConfIdx = pPRACHConfig->nPrachConfIdx; + } + if (nNumerology > 2) + pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx]; + else if (pConf->frame_conf.nFrameDuplexType == 1) + pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx]; + else + pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx]; + + uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0]; + const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt]; + memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config)); + if(pConf->log_level) + printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot); + + if (preambleFmrt <= 2) + { + pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_012; // 1 PRACH preamble format 0 1 2 + } + else if (preambleFmrt == 3) + { + pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_3; // 1 PRACH preamble format 3 + } + else + { + pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC; // 3, PRACH preamble format A1~3, B1~4, C0, C2 + } + pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym; + pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart; + pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 
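// The filter-index selection above and the numPrbc ternary that this note
// interrupts both key off the preamble format: long formats 0..3 carry the
// long (839-sample) sequence and get the wide 70-PRB allocation, while the
// short formats A1..C2 (>= FORMAT_A1) fit in 12 PRBs. Restated as a helper
// (hypothetical name; XRAN_FILTERINDEX_* constants as used in this file):
static uint8_t prach_filter_index_sketch(uint8_t fmt)
{
    if (fmt <= 2) return XRAN_FILTERINDEX_PRACH_012; // preamble formats 0, 1, 2
    if (fmt == 3) return XRAN_FILTERINDEX_PRACH_3;   // preamble format 3
    return XRAN_FILTERINDEX_PRACH_ABC;               // formats A1..A3, B1..B4, C0, C2
}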
12 : 70; + pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp; + pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing); + pPrachCPConfig->x = pxRANPrachConfigTable->x; + pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot; + pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0]; + pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1]; + if (preambleFmrt >= FORMAT_A1) + { + pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration; + pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot; + } + else + { + pPrachCPConfig->numSymbol = 1; + pPrachCPConfig->occassionsInPrachSlot = 1; + } + + if(pConf->log_level) + printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]); + pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1; + for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++) + { + slotNr = pxRANPrachConfigTable->slotNr[i]; + if (slotNr > 0){ + pPrachCPConfig->isPRACHslot[slotNr] = 1; + if(pConf->log_level) + printf(" %u ..", slotNr); } + } + printf("\n"); + for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){ + p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId; + p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1; + } + if(pConf->log_level){ + printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]); + } + + pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(p_xran_dev_ctx); + print_dbg("PRACH eAxC_offset %d\n", pPrachCPConfig->eAxC_offset); - /* if new slot has been started, - * then initializes section id again for new start */ - if(xran_section_id_curslot[cc_id][ant_id] != slot_id) { - xran_section_id[cc_id][ant_id] = 0; - xran_section_id_curslot[cc_id][ant_id] = slot_id; + /* Save some configs for app */ + pPRACHConfig->startSymId = pPrachCPConfig->startSymId; + pPRACHConfig->lastSymId = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1; + pPRACHConfig->startPrbc = pPrachCPConfig->startPrbc; + pPRACHConfig->numPrbc = pPrachCPConfig->numPrbc; + pPRACHConfig->timeOffset = pPrachCPConfig->timeOffset; + pPRACHConfig->freqOffset = pPrachCPConfig->freqOffset; + pPRACHConfig->eAxC_offset = pPrachCPConfig->eAxC_offset; + + return (XRAN_STATUS_SUCCESS); } - - return(xran_section_id[cc_id][ant_id]++); -} -inline uint8_t xran_get_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id) +uint32_t +xran_slotid_convert(uint16_t slot_id, uint16_t dir) //dir = 0, from PHY slotid to xran spec slotid as defined in 5.3.2, dir=1, from xran slotid to phy slotid { - if(dir >= XRAN_DIR_MAX) { - print_err("Invalid direction - %d", dir); - return (0); + return slot_id; +#if 0 + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx(); + uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology; + uint8_t FR = 1; + if (mu > 2) + FR=2; + if (dir == 0) + { + if (FR == 1) + { + return (slot_id << (2-mu)); + } + else + { + return (slot_id << (3-mu)); } - if(cc_id >= XRAN_MAX_CELLS_PER_PORT) { - print_err("Invalid CC ID - %d", cc_id); - return (0); + } + else + { + if (FR == 1) + { + return (slot_id >> (2-mu)); } - if(ant_id >= XRAN_MAX_ANTENNA_NR) { - print_err("Invalid antenna ID - %d", ant_id); - return (0); + else + { + return (slot_id >> (3-mu)); } - - 
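// The x and y[] values captured above gate PRACH at frame granularity:
// a frame carries PRACH occasions when (frame_id % x) == y[0], which is how
// the UL C-plane path below tests it, and the last PRACH symbol follows from
// the occasion count (field spellings as in this file):
//     prach_in_frame = ((frame_id % cfg->x) == cfg->y[0]);
//     last_symbol    = cfg->startSymId + cfg->numSymbol * cfg->occassionsInPrachSlot - 1;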
return(xran_cp_seq_id_num[cc_id][dir][ant_id]++); + } +#endif } -inline int xran_update_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id, uint8_t seq_id) +void +sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick) { - return (0); -} + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + long t1 = MLogXRANTick(), t2; + long t3; + + if(XranGetSymNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){ + t3 = xran_tick(); + tti_ota_cb(NULL, (void*)p_xran_dev_ctx); + *used_tick += get_ticks_diff(xran_tick(), t3); + } -////////////////////////////////////////// -// For RU emulation -static struct xran_section_gen_info cpSections[255]; -static struct xran_cp_gen_params cpInfo; -int process_cplane(struct rte_mbuf *pkt) -{ - int xran_parse_cp_pkt(struct rte_mbuf *mbuf, struct xran_cp_gen_params *result); + t3 = xran_tick(); + if (xran_process_tx_sym(p_xran_dev_ctx)) + { + *used_tick += get_ticks_diff(xran_tick(), t3); + } - cpInfo.sections = cpSections; - xran_parse_cp_pkt(pkt, &cpInfo); + /* check if there is call back to do something else on this symbol */ + struct cb_elem_entry *cb_elm; + LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[xran_lib_ota_sym[p_xran_dev_ctx->xran_port_id]], pointers){ + if(cb_elm){ + cb_elm->pSymCallback(&p_xran_dev_ctx->dpdk_timer[p_xran_dev_ctx->ctx % MAX_NUM_OF_DPDK_TIMERS], cb_elm->pSymCallbackTag, cb_elm->p_dev_ctx); + p_xran_dev_ctx->ctx = DpdkTimerIncrementCtx(p_xran_dev_ctx->ctx); + } + } - return (0); + t2 = MLogXRANTick(); + MLogXRANTask(PID_SYM_OTA_CB, t1, t2); } -////////////////////////////////////////// -void sym_ota_cb(struct rte_timer *tim, void *arg) +uint32_t +xran_schedule_to_worker(enum xran_job_type_id job_type_id, struct xran_device_ctx * p_xran_dev_ctx) { - uint8_t offset = 0; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; - long t1 = MLogTick(); - - if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){ - tti_ota_cb(NULL, arg); - } - - if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 1){ - if(p_xran_lib_ctx->phy_tti_cb_done == 0){ - uint64_t t3 = MLogTick(); - /* rearm timer to deliver TTI event to PHY */ - p_xran_lib_ctx->phy_tti_cb_done = 0; - xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_lib_ctx->xran_init_cfg.io_cfg.pkt_proc_core); - MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick()); + struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx(); + uint32_t tim_lcore = eth_ctx->io_cfg.timing_core; /* default to timing core */ + + if(eth_ctx) { + if(eth_ctx->num_workers == 0) { /* no workers */ + tim_lcore = eth_ctx->io_cfg.timing_core; + } else if (eth_ctx->num_workers == 1) { /* one worker */ + switch (job_type_id) + { + case XRAN_JOB_TYPE_OTA_CB: + tim_lcore = eth_ctx->io_cfg.timing_core; + break; + case XRAN_JOB_TYPE_CP_DL: + case XRAN_JOB_TYPE_CP_UL: + case XRAN_JOB_TYPE_DEADLINE: + case XRAN_JOB_TYPE_SYM_CB: + tim_lcore = eth_ctx->worker_core[0]; + break; + default: + print_err("incorrect job type id %d\n", job_type_id); + tim_lcore = eth_ctx->io_cfg.timing_core; + break; + } + } else if (eth_ctx->num_workers >= 2 && eth_ctx->num_workers <= 6) { + switch (job_type_id) + { + case XRAN_JOB_TYPE_OTA_CB: + tim_lcore = eth_ctx->worker_core[0]; + break; + case XRAN_JOB_TYPE_CP_DL: + tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_DL]]; + break; + 
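// xran_schedule_to_worker() maps a job to an lcore based on the size of the
// worker pool: with no workers everything stays on the timing core; with one
// worker the OTA tick stays on the timing core and CP/deadline/symbol jobs go
// to worker 0; with 2..6 workers the OTA tick moves to worker 0 and the CP DL
// and CP UL jobs (the case above and the case below) are steered through the
// per-job job2wrk_id[] table, with deadline/symbol callbacks pinned to worker 0.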
case XRAN_JOB_TYPE_CP_UL: + tim_lcore = eth_ctx->worker_core[p_xran_dev_ctx->job2wrk_id[XRAN_JOB_TYPE_CP_UL]]; + break; + case XRAN_JOB_TYPE_DEADLINE: + case XRAN_JOB_TYPE_SYM_CB: + tim_lcore = eth_ctx->worker_core[0]; + break; + default: + print_err("incorrect job type id %d\n", job_type_id); + tim_lcore = eth_ctx->io_cfg.timing_core; + break; + } + } else { + print_err("incorrect eth_ctx->num_workers id %d\n", eth_ctx->num_workers); + tim_lcore = eth_ctx->io_cfg.timing_core; } } - xran_process_tx_sym(timer_ctx); - /* check if there is call back to do something else on this symbol */ - if(p_xran_lib_ctx->pSymCallback[0][xran_lib_ota_sym]) - p_xran_lib_ctx->pSymCallback[0][xran_lib_ota_sym](&tx_cp_dl_timer, p_xran_lib_ctx->pSymCallbackTag[0][xran_lib_ota_sym]); - - xran_lib_ota_sym++; - if(xran_lib_ota_sym >= N_SYM_PER_SLOT){ - xran_lib_ota_sym=0; - } - MLogTask(PID_SYM_OTA_CB, t1, MLogTick()); + return tim_lcore; } -void tti_ota_cb(struct rte_timer *tim, void *arg) +void +tti_ota_cb(struct rte_timer *tim, void *arg) { uint32_t frame_id = 0; uint32_t subframe_id = 0; @@ -264,666 +455,1232 @@ void tti_ota_cb(struct rte_timer *tim, void *arg) uint32_t mlogVar[10]; uint32_t mlogVarCnt = 0; uint64_t t1 = MLogTick(); - uint64_t t3 = 0; uint32_t reg_tti = 0; - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + uint32_t reg_sfn = 0; + + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)p_xran_dev_ctx->timer_ctx; + uint8_t PortId = p_xran_dev_ctx->xran_port_id; + uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local; + + unsigned tim_lcore = xran_schedule_to_worker(XRAN_JOB_TYPE_OTA_CB, p_xran_dev_ctx); MLogTask(PID_TTI_TIMER, t1, MLogTick()); + if(p_xran_dev_ctx->xran_port_id == 0){ /* To match TTbox */ - if(xran_lib_ota_tti == 0) - reg_tti = 8000-1; + if(xran_lib_ota_tti[0] == 0) + reg_tti = xran_fs_get_max_slot(PortId) - 1; else - reg_tti = xran_lib_ota_tti -1; + reg_tti = xran_lib_ota_tti[0] -1; + MLogIncrementCounter(); + reg_sfn = XranGetFrameNum(reg_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us))*10 + XranGetSubFrameNum(reg_tti,SLOTNUM_PER_SUBFRAME(interval_us), SUBFRAMES_PER_SYSTEMFRAME);; /* subframe and slot */ - MLogRegisterFrameSubframe(((reg_tti/SLOTNUM_PER_SUBFRAME) % SUBFRAMES_PER_SYSTEMFRAME), - reg_tti % (SLOTNUM_PER_SUBFRAME)); + MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME(interval_us))); MLogMark(1, t1); + } + + slot_id = XranGetSlotNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(xran_lib_ota_tti[PortId], SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(xran_lib_ota_tti[PortId],xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); - slot_id = XranGetSlotNum(xran_lib_ota_tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(xran_lib_ota_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = XranGetFrameNum(xran_lib_ota_tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); + pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process = xran_lib_ota_tti[PortId]; - pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process = xran_lib_ota_tti; + /** tti as seen from PHY */ + int32_t nSfIdx = -1; + uint32_t nFrameIdx; + uint32_t nSubframeIdx; + uint32_t nSlotIdx; + uint64_t nSecond; + uint8_t Numerlogy = 
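// tti_ota_cb() publishes slot numbers through a two-entry ping-pong indexed by
// the low bit of the OTA TTI: the current slot is written to
// pTCtx[(tti & 1) ^ 1] and the upcoming slot to pTCtx[tti & 1], and consumers
// such as tx_cp_dl_cb()/tx_cp_ul_cb() later read
// pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process, so the timing
// thread can stage the next TTI while callbacks still process the current one.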
p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology; + uint8_t nNrOfSlotInSf = 1<xran_init_cfg.io_cfg.id == ID_LLS_CU) - next_tti = xran_lib_ota_tti + 1; - else - next_tti = xran_lib_ota_tti; - if(next_tti>= SLOTNUM_PER_SUBFRAME*1000){ + if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU) + next_tti = xran_lib_ota_tti[PortId] + 1; + else{ + next_tti = xran_lib_ota_tti[PortId]; + } + + if(next_tti>= xran_fs_get_max_slot(PortId)){ print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id); next_tti=0; } - /* [0 - 7] */ - slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME); - /* sf [0 - 9] */ - subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - /* frame [0 - 99] for now */ - frame_id = XranGetFrameNum(next_tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); + + slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id); - if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == ID_LLS_CU){ - pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = next_tti; + if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){ + pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = next_tti; } else { - pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti & 1)^1].tti_to_process; + pTCtx[(xran_lib_ota_tti[PortId] & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti[PortId] & 1)^1].tti_to_process; } - t3 = MLogTick(); - p_xran_lib_ctx->phy_tti_cb_done = 0; - xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_lib_ctx->xran_init_cfg.io_cfg.pkt_proc_core); - MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick()); - - xran_lib_ota_tti++; - /* within 1 sec we have 8000 TTIs as 1000ms/0.125ms where TTI is 125us*/ - if(xran_lib_ota_tti >= SLOTNUM_PER_SUBFRAME*1000){ - print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti, frame_id, subframe_id, slot_id); - xran_lib_ota_tti=0; + if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]) { + p_xran_dev_ctx->phy_tti_cb_done = 0; + xran_timer_arm_ex(&p_xran_dev_ctx->tti_to_phy_timer[xran_lib_ota_tti[PortId] % MAX_TTI_TO_PHY_TIMER], tti_to_phy_cb, (void*)p_xran_dev_ctx, tim_lcore); } - MLogTask(PID_TTI_CB, t1, MLogTick()); + //slot index is increased to next slot at the beginning of current OTA slot + xran_lib_ota_tti[PortId]++; + if(xran_lib_ota_tti[PortId] >= xran_fs_get_max_slot(PortId)) { + print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti[PortId], frame_id, subframe_id, slot_id); + xran_lib_ota_tti[PortId] = 0; + } + MLogXRANTask(PID_TTI_CB, t1, MLogTick()); } -void xran_timer_arm(struct rte_timer *tim, void* arg) + +int32_t +xran_prepare_cp_dl_slot(uint16_t xran_port_id, uint32_t nSlotIdx, uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart, + uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum) { - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - if (xran_if_current_state == XRAN_RUNNING){ - rte_timer_cb_t fct = (rte_timer_cb_t)arg; - rte_timer_reset_sync(tim, 0, SINGLE, p_xran_lib_ctx->xran_init_cfg.io_cfg.pkt_proc_core, fct, timer_ctx); + long t1 = MLogXRANTick(); + int32_t ret = XRAN_STATUS_SUCCESS; + int tti, buf_id; + uint32_t slot_id, subframe_id, frame_id; + int cc_id; + uint8_t ctx_id; + uint8_t ant_id, num_eAxc, 
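// In tti_ota_cb() above, the O-DU (ID_O_DU) prepares one slot ahead
// (next_tti = tti + 1, wrapping at xran_fs_get_max_slot()), while the O-RU
// reuses the slot being received; slot/subframe/frame are then rederived from
// next_tti. As an assumption about the helpers not shown here, XranGetSlotNum
// and XranGetSubFrameNum behave as modulo arithmetic, roughly:
//     slot = tti % SLOTNUM_PER_SUBFRAME(interval);
//     sf   = (tti / SLOTNUM_PER_SUBFRAME(interval)) % SUBFRAMES_PER_SYSTEMFRAME;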
num_CCPorts; + void *pHandle; + //int num_list; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id); + if(unlikely(!p_xran_dev_ctx)) + { + print_err("Null xRAN context!!\n"); + return ret; } -} + //struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0]; + uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local; + uint8_t PortId = p_xran_dev_ctx->xran_port_id; + pHandle = p_xran_dev_ctx; -void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore) -{ - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - if (xran_if_current_state == XRAN_RUNNING){ - rte_timer_cb_t fct = (rte_timer_cb_t)CbFct; - rte_timer_init(tim); - rte_timer_reset_sync(tim, 0, SINGLE, tim_lcore, fct, CbArg); + num_eAxc = xran_get_num_eAxc(pHandle); + num_CCPorts = xran_get_num_cc(pHandle); + + if(first_call && p_xran_dev_ctx->enableCP) + { + tti = nSlotIdx ;//pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process; + buf_id = tti % XRAN_N_FE_BUF_LEN; + + slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); + if (tti == 0) + { + /* Wrap around to next second */ + frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff; + } + + ctx_id = tti % XRAN_MAX_SECTIONDB_CTX; + + print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); +#if defined(__INTEL_COMPILER) +#pragma vector always +#endif + for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum) && ant_id < num_eAxc); ++ant_id) { + for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) { + /* start new section information list */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id); + if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) { + if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) { + if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData) { + /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id, + (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData, + &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]), + p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id); + } else { + print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id); + } + } else { + print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pBuffers] \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id); + } + } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */ + } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */ + } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */ + MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick()); } + return ret; } -void tx_cp_dl_cb(struct rte_timer *tim, void *arg) +void +tx_cp_dl_cb(struct rte_timer *tim, void *arg) { - long t1 = MLogTick(); - int tti, sym; - uint32_t slot_id, subframe_id, frame_id; - int ant_id; - int32_t cc_id = 0; - uint16_t beam_id; - uint8_t num_eAxc, num_CCPorts; - void *pHandle; + long t1 = MLogXRANTick(); + int tti, buf_id; + uint32_t slot_id, subframe_id, frame_id; + int cc_id; + uint8_t ctx_id; + uint8_t ant_id, num_eAxc, num_CCPorts; + void 
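// Both xran_prepare_cp_dl_slot() above and tx_cp_dl_cb() here address
// per-slot state through two independent modulo rings:
//     buf_id = tti % XRAN_N_FE_BUF_LEN;       // front-haul buffer ring (PRB maps)
//     ctx_id = tti % XRAN_MAX_SECTIONDB_CTX;  // C-plane section-DB context
// and at tti == 0 the frame number is advanced across the second boundary with
// frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff, i.e. modulo-1024 SFN.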
*pHandle; + //int num_list; + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + + if(unlikely(!p_xran_dev_ctx)) + { + print_err("Null xRAN context!!\n"); + return; + } - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; + if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload) + return; + struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)&p_xran_dev_ctx->timer_ctx[0]; + uint32_t interval_us_local = p_xran_dev_ctx->interval_us_local; + uint8_t PortId = p_xran_dev_ctx->xran_port_id; + pHandle = p_xran_dev_ctx; - pHandle = NULL; // TODO: temp implemantation num_eAxc = xran_get_num_eAxc(pHandle); num_CCPorts = xran_get_num_cc(pHandle); - if(p_xran_lib_ctx->enableCP) { + if(first_call && p_xran_dev_ctx->enableCP) + { + tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process; + buf_id = tti % XRAN_N_FE_BUF_LEN; - tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process; + slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval_us_local)); + subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval_us_local), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval_us_local)); + if (tti == 0) + { + /* Wrap around to next second */ + frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff; + } - slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); + ctx_id = tti % XRAN_MAX_SECTIONDB_CTX; print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); - for(ant_id = 0; ant_id < num_eAxc; ++ant_id) { for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) { - // start new section information list - xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id); - - beam_id = xran_get_beamid(pHandle, XRAN_DIR_DL, cc_id, ant_id, slot_id); - - send_cpmsg_dlul(pHandle, XRAN_DIR_DL, - frame_id, subframe_id, slot_id, - 0, N_SYM_PER_SLOT, NUM_OF_PRB_IN_FULL_BAND, - beam_id, cc_id, ant_id, - xran_get_seqid(pHandle, XRAN_DIR_DL, cc_id, ant_id, slot_id)); + if(0== p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id].numSymsRemaining) + {/* Start of new slot - reset the section info */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id); } - } - } - MLogTask(PID_CP_DL_CB, t1, MLogTick()); + if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) { + if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers) { + if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){ + /*num_list = */xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id, + (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData, + &(p_xran_dev_ctx->prbElmProcInfo[buf_id][cc_id][ant_id]), + p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id); + } + else + print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d [pData]\n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id); + } + } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */ + } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */ + } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */ + MLogXRANTask(PID_CP_DL_CB, t1, MLogXRANTick()); + } } -void rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg) +void 
+rx_ul_static_srs_cb(struct rte_timer *tim, void *arg) { - long t1 = MLogTick(); - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - XranStatusInt32 status; - /* half of RX for current TTI as measured against current OTA time */ - int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); - - if(p_xran_lib_ctx->xran2phy_mem_ready == 0) + long t1 = MLogXRANTick(); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status = 0; + int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + int32_t cc_id = 0; + //uint32_t nFrameIdx; + //uint32_t nSubframeIdx; + //uint32_t nSlotIdx; + //uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) return; - if(p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] == 0){ - status = (rx_tti << 16) | 0; /* base on PHY side implementation first 7 sym of slot */ - if(p_xran_lib_ctx->pCallback[0]) - p_xran_lib_ctx->pCallback[0](p_xran_lib_ctx->pCallbackTag[0], status); - } else { - p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] = 0; - } - MLogTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogTick()); -} + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; -void rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg) -{ - long t1 = MLogTick(); - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - XranStatusInt32 status; - int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; - if(rx_tti >= 8000-1) - rx_tti = 0; + rx_tti = p_timer_ctx->tti_to_process; + + if(rx_tti == 0) + rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1); else rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */ - if(p_xran_lib_ctx->xran2phy_mem_ready == 0) - return; + /* U-Plane */ + for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) { - if(p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] == 0){ - status = (rx_tti << 16) | 7; /* last 7 sym means full slot of Symb */ - if(p_xran_lib_ctx->pCallback[0]) - p_xran_lib_ctx->pCallback[0](p_xran_lib_ctx->pCallbackTag[0], status); - } else { - p_xran_lib_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][0] = 0; + if(0 == p_xran_dev_ctx->enableSrsCp) + { + if(p_xran_dev_ctx->pSrsCallback[cc_id]){ + struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */ + p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status); + } + } + } } - - MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick()); + MLogXRANTask(PID_UP_STATIC_SRS_DEAD_LINE_CB, t1, MLogXRANTick()); } -void tx_cp_ul_cb(struct rte_timer *tim, void *arg) -{ - long t1 = MLogTick(); - int sym, tti; - uint32_t frame_id = 0; - uint32_t subframe_id = 0; - uint32_t slot_id = 0; +void +rx_ul_deadline_one_fourths_cb(struct rte_timer *tim, void *arg) +{ + long t1 = MLogXRANTick(); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status; + /* half of RX for current TTI as measured against current OTA time */ + int32_t rx_tti; int32_t cc_id; - int ant_id, prach_port_id; - uint16_t beam_id; - uint8_t num_eAxc, 
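// The deadline callbacks added in this hunk (static SRS, one-fourths, half,
// three-fourths, full) all take their slot number from a small ring that the
// timing thread fills ahead of time:
//     p_timer_ctx = &ctx->cb_timer_ctx[ctx->timer_put++ % MAX_CB_TIMER_CTX];
//     if (ctx->timer_put >= MAX_CB_TIMER_CTX)
//         ctx->timer_put = 0;
//     rx_tti = p_timer_ctx->tti_to_process;
// The explicit reset keeps timer_put inside [0, MAX_CB_TIMER_CTX), so the
// modulo acts as a guard rather than the wrap itself.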
num_CCPorts; - - void *pHandle; - - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - xRANPrachCPConfigStruct *pPrachCPConfig = &(p_xran_lib_ctx->PrachCPConfig); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; - - pHandle = NULL; // TODO: temp implemantation - num_eAxc = xran_get_num_eAxc(pHandle); - num_CCPorts = xran_get_num_cc(pHandle); + //uint32_t nFrameIdx; + //uint32_t nSubframeIdx; + //uint32_t nSlotIdx; + //uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond); + rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME + + nSubframeIdx*SLOTNUM_PER_SUBFRAME + + nSlotIdx;*/ + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) + return; - if (p_xran_lib_ctx->enableCP){ - tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process; - slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); - print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; + rx_tti = p_timer_ctx->tti_to_process; - for(ant_id = 0; ant_id < num_eAxc; ++ant_id) { - for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) { - // start new section information list - xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id); - - beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, ant_id, slot_id); - send_cpmsg_dlul(pHandle, XRAN_DIR_UL, - frame_id, subframe_id, slot_id, - 0, N_SYM_PER_SLOT, NUM_OF_PRB_IN_FULL_BAND, - beam_id, cc_id, ant_id, - xran_get_seqid(pHandle, XRAN_DIR_UL, cc_id, ant_id, slot_id)); - } - } + for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) { + if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){ + if(p_xran_dev_ctx->pCallback[cc_id]) { + struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_ONE_FOURTHS_CB_SYM; + status = XRAN_STATUS_SUCCESS; - if ((frame_id % pPrachCPConfig->x == pPrachCPConfig->y[0]) && (pPrachCPConfig->isPRACHslot[slot_id]==1)) //is prach slot - { - for(ant_id = 0; ant_id < num_eAxc; ant_id++) { - for(cc_id = 0; cc_id < num_CCPorts; cc_id++) { -#if !defined(PRACH_USES_SHARED_PORT) - prach_port_id = ant_id + num_eAxc; - // start new section information list - xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id); -#else - prach_port_id = ant_id; -#endif - beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id); - send_cpmsg_prach(pHandle, - frame_id, subframe_id, slot_id, - beam_id, cc_id, prach_port_id, - xran_get_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id)); + p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status); } } + } else { + p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0; } + } + if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]); + }else{ + p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--; + } } - MLogTask(PID_CP_UL_CB, t1, MLogTick()); -} -void ul_up_full_slot_cb(struct rte_timer *tim, void *arg) -{ 
- long t1 = MLogTick(); - rte_pause(); - MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick()); + MLogXRANTask(PID_UP_UL_ONE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick()); } -void tti_to_phy_cb(struct rte_timer *tim, void *arg) +void +rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg) { - long t1 = MLogTick(); - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); + long t1 = MLogXRANTick(); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status; + /* half of RX for current TTI as measured against current OTA time */ + int32_t rx_tti; + int32_t cc_id; + //uint32_t nFrameIdx; + //uint32_t nSubframeIdx; + //uint32_t nSlotIdx; + //uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond); + rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME + + nSubframeIdx*SLOTNUM_PER_SUBFRAME + + nSlotIdx;*/ + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) + return; - static int first_call = 0; - p_xran_lib_ctx->phy_tti_cb_done = 1; /* DPDK called CB */ - if (first_call){ - if(p_xran_lib_ctx->ttiCb[XRAN_CB_TTI]){ - if(p_xran_lib_ctx->SkipTti[XRAN_CB_TTI] <= 0){ - p_xran_lib_ctx->ttiCb[XRAN_CB_TTI](p_xran_lib_ctx->TtiCbParam[XRAN_CB_TTI]); - }else{ - p_xran_lib_ctx->SkipTti[XRAN_CB_TTI]--; + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; + + rx_tti = p_timer_ctx->tti_to_process; + + for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) { + if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){ + if(p_xran_dev_ctx->pCallback[cc_id]) { + struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_HALF_CB_SYM; + status = XRAN_STATUS_SUCCESS; + + p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status); + } } - } - } else { - if(p_xran_lib_ctx->ttiCb[XRAN_CB_TTI]){ - int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); - if(tti == 8000-1) - first_call = 1; + } else { + p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0; } } + if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]); + }else{ + p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--; + } + } - MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick()); + MLogXRANTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogXRANTick()); } -int xran_timing_source_thread(void *args) +void +rx_ul_deadline_three_fourths_cb(struct rte_timer *tim, void *arg) { - cpu_set_t cpuset; - int32_t do_reset = 0; - uint64_t t1 = 0; - uint64_t delta; - int32_t result1; - uint32_t delay_cp_dl; - uint32_t delay_cp_ul; - uint32_t delay_up; - uint32_t delay_up_ul; - uint32_t delay_cp2up; - uint32_t sym_cp_dl; - uint32_t sym_cp_ul; - uint32_t sym_up_ul; - int32_t sym_up; - struct sched_param sched_param; - struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - - /* ToS = Top of Second start +- 1.5us */ - struct timespec ts; + long t1 = MLogXRANTick(); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status; + /* half of RX for current TTI as measured against current OTA time */ + int32_t rx_tti; + int32_t cc_id; + //uint32_t nFrameIdx; + 
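// The XRAN_CB_HALF_SLOT_RX user callback in these deadline handlers honors a
// per-callback skip counter, letting the application suppress the next N
// indications; the pattern, with id standing for the callback slot:
//     if (ctx->ttiCb[id]) {
//         if (ctx->SkipTti[id] <= 0) ctx->ttiCb[id](ctx->TtiCbParam[id]);
//         else                       ctx->SkipTti[id]--;
//     }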
//uint32_t nSubframeIdx; + //uint32_t nSlotIdx; + //uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond); + rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME + + nSubframeIdx*SLOTNUM_PER_SUBFRAME + + nSlotIdx;*/ + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) + return; - char buff[100]; + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; - printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid()); + rx_tti = p_timer_ctx->tti_to_process; - /* set main thread affinity mask to CPU2 */ - sched_param.sched_priority = 98; + for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) { + if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){ + if(p_xran_dev_ctx->pCallback[cc_id]) { + struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_THREE_FOURTHS_CB_SYM; + status = XRAN_STATUS_SUCCESS; - CPU_ZERO(&cpuset); - CPU_SET(p_xran_lib_ctx->xran_init_cfg.io_cfg.timing_core, &cpuset); - if (result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset)) - { - printf("pthread_setaffinity_np failed: coreId = 2, result1 = %d\n",result1); - } - if ((result1 = pthread_setschedparam(pthread_self(), 1, &sched_param))) - { - printf("priority is not changed: coreId = 2, result1 = %d\n",result1); + p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status); + } + } + } else { + p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0; + } } - if (p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU) { - do { - timespec_get(&ts, TIME_UTC); - }while (ts.tv_nsec >1500); - struct tm * ptm = gmtime(&ts.tv_sec); - if(ptm){ - strftime(buff, sizeof buff, "%D %T", ptm); - printf("lls-CU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us); + if(p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_HALF_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_HALF_SLOT_RX]); + }else{ + p_xran_dev_ctx->SkipTti[XRAN_CB_HALF_SLOT_RX]--; } + } - delay_cp_dl = p_xran_lib_ctx->xran_init_cfg.ttiPeriod - p_xran_lib_ctx->xran_init_cfg.T1a_max_cp_dl; - delay_cp_ul = p_xran_lib_ctx->xran_init_cfg.ttiPeriod - p_xran_lib_ctx->xran_init_cfg.T1a_max_cp_ul; - delay_up = p_xran_lib_ctx->xran_init_cfg.T1a_max_up; - delay_up_ul = p_xran_lib_ctx->xran_init_cfg.Ta4_max; - - delay_cp2up = delay_up-delay_cp_dl; - - sym_cp_dl = delay_cp_dl*1000/(interval_us*1000/N_SYM_PER_SLOT)+1; - sym_cp_ul = delay_cp_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1; - sym_up_ul = delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT); - p_xran_lib_ctx->sym_up = sym_up = -(delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT)+1); - p_xran_lib_ctx->sym_up_ul = sym_up_ul = (delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1); - - printf("Start C-plane DL %d us after TTI [trigger on sym %d]\n", delay_cp_dl, sym_cp_dl); - printf("Start C-plane UL %d us after TTI [trigger on sym %d]\n", delay_cp_ul, sym_cp_ul); - printf("Start U-plane DL %d us before OTA [offset in sym %d]\n", delay_up, sym_up); - printf("Start U-plane UL %d us OTA [offset in sym %d]\n", delay_up_ul, sym_up_ul); - - printf("C-plane to U-plane delay %d us after TTI\n", delay_cp2up); - 
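// The timing-thread setup removed around this point converted the O-RAN
// timing windows (T1a/Ta3/Ta4, in microseconds) into symbol triggers with a
// ceiling-style division; up to integer truncation,
//     delay_us * 1000 / (interval_us * 1000 / N_SYM_PER_SLOT) + 1
// is
//     (delay_us * N_SYM_PER_SLOT) / interval_us + 1
// i.e. the first symbol boundary at or after the deadline.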
printf("Start Sym timer %ld ns\n", TX_TIMER_INTERVAL/N_SYM_PER_SLOT); + MLogXRANTask(PID_UP_UL_THREE_FOURTHS_DEAD_LINE_CB, t1, MLogXRANTick()); +} - p_xran_lib_ctx->pSymCallback[0][sym_cp_dl] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_cp_dl] = tx_cp_dl_cb; +void +rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg) +{ + long t1 = MLogXRANTick(); + struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg; + xran_status_t status = 0; + int32_t rx_tti = 0;// = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + int32_t cc_id = 0; + //uint32_t nFrameIdx; + //uint32_t nSubframeIdx; + //uint32_t nSlotIdx; + //uint64_t nSecond; + struct xran_timer_ctx* p_timer_ctx = NULL; + + if(p_xran_dev_ctx->xran2phy_mem_ready == 0) + return; - p_xran_lib_ctx->pSymCallback[0][sym_cp_ul] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_cp_ul] = tx_cp_ul_cb; + /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond); + rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME + + nSubframeIdx*SLOTNUM_PER_SUBFRAME + + nSlotIdx;*/ + p_timer_ctx = &p_xran_dev_ctx->cb_timer_ctx[p_xran_dev_ctx->timer_put++ % MAX_CB_TIMER_CTX]; - /* Full slot UL OTA + delay_up_ul */ - p_xran_lib_ctx->pSymCallback[0][sym_up_ul] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_up_ul] = rx_ul_deadline_full_cb; + if (p_xran_dev_ctx->timer_put >= MAX_CB_TIMER_CTX) + p_xran_dev_ctx->timer_put = 0; - /* Half slot UL OTA + delay_up_ul*/ - p_xran_lib_ctx->pSymCallback[0][sym_up_ul + N_SYM_PER_SLOT/2] = xran_timer_arm; - p_xran_lib_ctx->pSymCallbackTag[0][sym_up_ul + N_SYM_PER_SLOT/2] = rx_ul_deadline_half_cb; + rx_tti = p_timer_ctx->tti_to_process; +#if 1 + if(rx_tti == 0) + rx_tti = (xran_fs_get_max_slot_SFN(p_xran_dev_ctx->xran_port_id)-1); + else + rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */ +#endif + /* U-Plane */ + for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) { + if(p_xran_dev_ctx->pCallback[cc_id]){ + struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */ + status = XRAN_STATUS_SUCCESS; + p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status); + } + } - } else { // APP_RU - /* calcualte when to send UL U-plane */ - delay_up = p_xran_lib_ctx->xran_init_cfg.Ta3_min; - p_xran_lib_ctx->sym_up = sym_up = delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT)+1; - printf("Start UL U-plane %d us after OTA [offset in sym %d]\n", delay_up, sym_up); - do { - timespec_get(&ts, TIME_UTC); - }while (ts.tv_nsec >1500); - struct tm * ptm = gmtime(&ts.tv_sec); - if(ptm){ - strftime(buff, sizeof buff, "%D %T", ptm); - printf("RU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us); + if(p_xran_dev_ctx->pPrachCallback[cc_id]){ + struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb */ + p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status); + } + } + + if(p_xran_dev_ctx->enableSrsCp) + { + if(p_xran_dev_ctx->pSrsCallback[cc_id]){ + struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id]; + if(pTag) { + //pTag->cellId = cc_id; + pTag->slotiId = rx_tti; + pTag->symbol = XRAN_FULL_CB_SYM; /* last 7 sym means full slot of Symb 
*/ + p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status); } } + } + } - do { - timespec_get(&ts, TIME_UTC); - }while (ts.tv_nsec == 0); + /* user call backs if any */ + if(p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_FULL_SLOT_RX](p_xran_dev_ctx->TtiCbParam[XRAN_CB_FULL_SLOT_RX]); + }else{ + p_xran_dev_ctx->SkipTti[XRAN_CB_FULL_SLOT_RX]--; + } + } - while(1) { - delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT); - if (XRAN_STOPPED == xran_if_current_state) - break; - sym_ota_cb(&sym_timer, timer_ctx); + MLogXRANTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogXRANTick()); +} + +void +rx_ul_user_sym_cb(struct rte_timer *tim, void *arg) +{ + long t1 = MLogXRANTick(); + struct xran_device_ctx * p_dev_ctx = NULL; + struct cb_user_per_sym_ctx *p_sym_cb_ctx = (struct cb_user_per_sym_ctx *)arg; + int32_t rx_tti = 0; //(int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + uint32_t interval, ota_sym_idx = 0; + uint8_t nNumerology = 0; + struct xran_timer_ctx* p_timer_ctx = NULL; + + if(p_sym_cb_ctx->p_dev) + p_dev_ctx = (struct xran_device_ctx *)p_sym_cb_ctx->p_dev; + else + rte_panic("p_sym_cb_ctx->p_dev == NULL"); + + if(p_dev_ctx->xran2phy_mem_ready == 0) + return; + nNumerology = xran_get_conf_numerology(p_dev_ctx); + interval = p_dev_ctx->interval_us_local; + + p_timer_ctx = &p_sym_cb_ctx->user_cb_timer_ctx[p_sym_cb_ctx->user_timer_get++ % MAX_CB_TIMER_CTX]; + if (p_sym_cb_ctx->user_timer_get >= MAX_CB_TIMER_CTX) + p_sym_cb_ctx->user_timer_get = 0; + + rx_tti = p_timer_ctx->tti_to_process; + + if( p_sym_cb_ctx->sym_diff > 0) + /* + advacne TX Wind: at OTA Time we indicating event in future */ + ota_sym_idx = ((p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology)); + else if (p_sym_cb_ctx->sym_diff < 0) { + /* - dealy RX Win: at OTA Time we indicate event in the past */ + if(p_timer_ctx->ota_sym_idx >= abs(p_sym_cb_ctx->sym_diff)) { + ota_sym_idx = p_timer_ctx->ota_sym_idx + p_sym_cb_ctx->sym_diff; + } else { + ota_sym_idx = ((xran_max_ota_sym_idx(nNumerology) + p_timer_ctx->ota_sym_idx) + p_sym_cb_ctx->sym_diff) % xran_max_ota_sym_idx(nNumerology); + } + } else /* 0 - OTA exact time */ + ota_sym_idx = p_timer_ctx->ota_sym_idx; + + rx_tti = (int32_t)XranGetTtiNum(ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); + + if(p_sym_cb_ctx->symCbTimeInfo) { + struct xran_sense_of_time *p_sense_time = p_sym_cb_ctx->symCbTimeInfo; + p_sense_time->type_of_event = p_sym_cb_ctx->cb_type_id; + p_sense_time->nSymIdx = p_sym_cb_ctx->symb_num_req; + p_sense_time->tti_counter = rx_tti; + p_sense_time->nSlotIdx = (uint32_t)XranGetSlotNum(rx_tti, SLOTNUM_PER_SUBFRAME(interval)); + p_sense_time->nSubframeIdx = (uint32_t)XranGetSubFrameNum(rx_tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME); + p_sense_time->nFrameIdx = (uint32_t)XranGetFrameNum(rx_tti, p_timer_ctx->xran_sfn_at_sec_start,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval)); + p_sense_time->nSecond = p_timer_ctx->current_second; } - printf("Closing timing source thread...\n"); - return 0; + /* user call backs if any */ + if(p_sym_cb_ctx->symCb){ + p_sym_cb_ctx->symCb(p_sym_cb_ctx->symCbParam, p_sym_cb_ctx->symCbTimeInfo); + } + + MLogXRANTask(PID_UP_UL_USER_DEAD_LINE_CB, t1, MLogXRANTick()); } -/* Handle ecpri format. 
*/ -int handle_ecpri_ethertype(struct rte_mbuf *pkt, uint64_t rx_time) +int32_t +xran_prepare_cp_ul_slot(uint16_t xran_port_id, uint32_t nSlotIdx, uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart, + uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum) { - const struct xran_ecpri_hdr *ecpri_hdr; - unsigned long t1; + int32_t ret = XRAN_STATUS_SUCCESS; + long t1 = MLogXRANTick(); + int tti, buf_id; + uint32_t slot_id, subframe_id, frame_id; + int32_t cc_id; + int ant_id, port_id; + uint16_t occasionid; + uint16_t beam_id; + uint8_t num_eAxc, num_CCPorts; + uint8_t ctx_id; - if (rte_pktmbuf_data_len(pkt) < sizeof(struct xran_ecpri_hdr)) { - wlog("Packet too short - %d bytes", rte_pktmbuf_data_len(pkt)); - return 0; + void *pHandle; + uint32_t interval; + uint8_t PortId; + + //struct xran_timer_ctx *pTCtx; + struct xran_buffer_list *pBufList; + struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id); + if(unlikely(!p_xran_dev_ctx)) + { + print_err("Null xRAN context!!\n"); + return ret; } - /* check eCPRI header. */ - ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *); - if(ecpri_hdr == NULL) - return MBUF_FREE; + if(first_call && p_xran_dev_ctx->enableCP) + { + pHandle = p_xran_dev_ctx; + //pTCtx = &p_xran_dev_ctx->timer_ctx[0]; + interval = p_xran_dev_ctx->interval_us_local; + PortId = p_xran_dev_ctx->xran_port_id; + tti = nSlotIdx; //pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process; + + buf_id = tti % XRAN_N_FE_BUF_LEN; + ctx_id = tti % XRAN_MAX_SECTIONDB_CTX; + slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval)); + subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval)); + + /* Wrap around to next second */ + if(tti == 0) + frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff; + if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A) + num_eAxc = xran_get_num_eAxc(pHandle); + else + num_eAxc = xran_get_num_eAxcUl(pHandle); + num_CCPorts = xran_get_num_cc(pHandle); - switch(ecpri_hdr->ecpri_mesg_type) { - case ECPRI_IQ_DATA: - t1 = MLogTick(); - process_mbuf(pkt); - MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick()); - break; - // For RU emulation - case ECPRI_RT_CONTROL_DATA: - t1 = MLogTick(); - if(xran_lib_get_ctx()->xran_init_cfg.io_cfg.id == APP_RU) { - process_cplane(pkt); - } else { - print_err("LLS-CU recevied CP message!"); - } - MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick()); - break; - default: - wlog("Invalid eCPRI message type - %d", ecpri_hdr->ecpri_mesg_type); - } -#if 0 -//def DEBUG - return MBUF_KEEP; -#else - return MBUF_FREE; + print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); + + /* General Uplink */ +#if defined(__INTEL_COMPILER) +#pragma vector always #endif + for(ant_id = nAntStart; (ant_id < (nAntStart + nAntNum) && ant_id < num_eAxc); ++ant_id) { + for(cc_id = nCcStart; (cc_id < (nCcStart + nCcNum) && cc_id < num_CCPorts); cc_id++) { + /* start new section information list */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id); + if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1) + { + pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */ + if(pBufList->pBuffers && pBufList->pBuffers->pData) + { + ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id, + (struct xran_prb_map 
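// rx_ul_user_sym_cb() above turns a registered per-symbol callback into an
// OTA symbol index with signed wrap-around over the symbols-per-second range;
// condensed (max = xran_max_ota_sym_idx(mu), diff = p_sym_cb_ctx->sym_diff):
//     idx = (ota + diff >= 0) ? (uint32_t)(ota + diff) % max
//                             : (max + ota + diff) % max;
// A positive diff advances into a future TX window, a negative diff falls back
// into a past RX window, and zero reports the exact OTA time.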
*)(pBufList->pBuffers->pData), NULL,
+ p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+ }
+ }
+ }
+ } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
+
+ /* PRACH */
+ if(p_xran_dev_ctx->enablePrach)
+ {
+ struct xran_prach_cp_config *pPrachCPConfig = NULL;
+ /* if DSS is enabled, select the NR or LTE PRACH C-Plane configuration based on the technology of this TTI */
+ if(p_xran_dev_ctx->dssEnable){
+ int i = tti % p_xran_dev_ctx->dssPeriod;
+ if(p_xran_dev_ctx->technology[i]==1) {
+ pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+ }
+ else{
+ pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+ }
+ }
+ else{
+ pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+ }
+ uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
+
+ if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
+ && (is_prach_slot==1))
+ {
+ for(ant_id = 0; ant_id < num_eAxc; ant_id++)
+ {
+ port_id = ant_id + pPrachCPConfig->eAxC_offset;
+ for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+ {
+ /* start new section information list */
+ xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+ for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
+ {
+ struct xran_cp_gen_params params;
+ struct xran_section_gen_info sect_geninfo[8];
+ struct xran_section_info sectInfo[8];
+ for(int secId=0;secId<8;secId++)
+ sect_geninfo[secId].info = &sectInfo[secId];
+ struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
+ uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
+
+ beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
+ ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
+ frame_id, subframe_id, slot_id, tti,
+ beam_id, cc_id, port_id, occasionid, seqid);
+ if(ret == XRAN_STATUS_SUCCESS)
+ send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
+ cc_id, port_id, seqid);
+ }
+ }
+ }
+ }
+ } /* if(p_xran_dev_ctx->enablePrach) */
+
+ /* SRS */
+ if(p_xran_dev_ctx->enableSrsCp)
+ {
+ struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+
+ for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
+ {
+ port_id = ant_id + pSrsCfg->eAxC_offset;
+ for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+ {
+ /* start new section information list */
+ xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+ if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
+ {
+ pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
+ if(pBufList->pBuffers && pBufList->pBuffers->pData)
+ {
+ ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
+ (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
+ p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
+ }
+ }
+ }
+ }
+ } /* if(p_xran_dev_ctx->enableSrsCp) */
+
+ MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
+ } /* if(p_xran_dev_ctx->enableCP) */
+
+ return ret;
+}
 
-int xran_process_rx_sym(void *arg,
- void *iq_data_start,
- uint16_t size,
- uint8_t CC_ID,
- uint8_t Ant_ID,
- uint8_t frame_id,
- uint8_t subframe_id,
- uint8_t slot_id,
- uint8_t symb_id)
-{
- char *pos = NULL;
- struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
- uint32_t tti=0;
- XranStatusInt32 status;
- void *pHandle = NULL;
-
- if(p_xran_lib_ctx->xran2phy_mem_ready == 0)
- return 0;
 
- tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;
+void
+tx_cp_ul_cb(struct rte_timer *tim, void *arg)
+{
+ long t1 = MLogXRANTick();
+ int tti, 
buf_id; + int ret; + uint32_t slot_id, subframe_id, frame_id; + int32_t cc_id; + int ant_id, port_id; + uint16_t occasionid = 0; + uint16_t beam_id; + uint8_t num_eAxc, num_CCPorts; + uint8_t ctx_id; + + void *pHandle; + uint32_t interval; + uint8_t PortId; + + struct xran_timer_ctx *pTCtx; + struct xran_buffer_list *pBufList; + struct xran_device_ctx *p_xran_dev_ctx; + + if(unlikely(!arg)) + { + print_err("Null xRAN context!!\n"); + return; + } + + p_xran_dev_ctx = (struct xran_device_ctx *)arg; + + if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload) + return; + + /* */ + if(first_call && p_xran_dev_ctx->enableCP) + { + pHandle = p_xran_dev_ctx; + pTCtx = &p_xran_dev_ctx->timer_ctx[0]; + interval = p_xran_dev_ctx->interval_us_local; + PortId = p_xran_dev_ctx->xran_port_id; + tti = pTCtx[(xran_lib_ota_tti[PortId] & 1) ^ 1].tti_to_process; + + buf_id = tti % XRAN_N_FE_BUF_LEN; + ctx_id = tti % XRAN_MAX_SECTIONDB_CTX; + slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval)); + subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME); + frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval)); + + /* Wrap around to next second */ + if(tti == 0) + frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff; + if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A) + num_eAxc = xran_get_num_eAxc(pHandle); + else + num_eAxc = xran_get_num_eAxcUl(pHandle); + num_CCPorts = xran_get_num_cc(pHandle); + + print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); - status = tti << 16 | symb_id; + /* General Uplink */ + for(ant_id = 0; ant_id < num_eAxc; ant_id++) + { + for(cc_id = 0; cc_id < num_CCPorts; cc_id++) + { + /* start new section information list */ + xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id); + if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) == 1) + { + pBufList = &(p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */ + if(pBufList->pBuffers && pBufList->pBuffers->pData) + { + ret = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id, + (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL, + p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id); + } + } + } + } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */ - if(tti < 8000 && CC_ID < XRAN_MAX_SECTOR_NR && CC_ID == 0 && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){ - pos = (char*) p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData; - if(pos && iq_data_start && size){ -#ifdef XRAN_BYTE_ORDER_SWAP - int idx = 0; - uint16_t *restrict psrc = (uint16_t *)iq_data_start; - uint16_t *restrict pdst = (uint16_t *)pos; - /* network byte (be) order of IQ to CPU byte order (le) */ - for (idx = 0; idx < size/sizeof(int16_t); idx++){ - pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]); + /* PRACH */ + if(p_xran_dev_ctx->enablePrach) + { + struct xran_prach_cp_config *pPrachCPConfig = NULL; + //check for dss enable and fill based on technology select the p_xran_dev_ctx->PrachCPConfig NR/LTE. 
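+ /* Illustrative example (assumed values, not taken from this patch): with
+  * dssPeriod = 5 and technology[] = {1,1,1,0,0}, slots with tti % 5 in
+  * 0..2 take the NR PrachCPConfig below and slots 3..4 take PrachCPConfigLTE. */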
+ if(p_xran_dev_ctx->dssEnable){
+ int i = tti % p_xran_dev_ctx->dssPeriod;
+ if(p_xran_dev_ctx->technology[i]==1) {
+ pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+ }
+ else{
+ pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+ }
 }
-#else
-#error xran spec is network byte order
- /* for debug */
- rte_memcpy(pdst, psrc, size);
+ else{
+ pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+ }
+
+ uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
+
+ if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
+ && (is_prach_slot==1))
+ {
+ for(ant_id = 0; ant_id < num_eAxc; ant_id++)
+ {
+ port_id = ant_id + pPrachCPConfig->eAxC_offset;
+ for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+ {
+ /* start new section information list */
+ xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+#ifndef FCN_ADAPT
+/* for FCN, send C-Plane only for the first occasion */
+ for(occasionid = 0; occasionid < pPrachCPConfig->occassionsInPrachSlot; occasionid++)
 #endif
-#ifdef DEBUG_XRAN_BUFFERS
- if (pos[0] != tti % XRAN_N_FE_BUF_LEN ||
- pos[1] != CC_ID ||
- pos[2] != Ant_ID ||
- pos[3] != symb_id){
- printf("%d %d %d %d\n", pos[0], pos[1], pos[2], pos[3]);
+ {
+ struct xran_cp_gen_params params;
+ struct xran_section_gen_info sect_geninfo[8];
+ struct xran_section_info sectInfo[8];
+ for(int secId=0;secId<8;secId++)
+ sect_geninfo[secId].info = &sectInfo[secId];
+
+ struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
+ uint8_t seqid = xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, port_id);
+
+ beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, port_id, slot_id);
+ ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
+ frame_id, subframe_id, slot_id, tti,
+ beam_id, cc_id, port_id, occasionid, seqid);
+ if (ret == XRAN_STATUS_SUCCESS)
+ send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
+ cc_id, port_id, seqid);
+ }
+ }
+ }
+ }
+ } /* if(p_xran_dev_ctx->enablePrach) */
+
+ /* SRS */
+ if(p_xran_dev_ctx->enableSrsCp)
+ {
+ struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+
+ for(ant_id = 0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
+ {
+ port_id = ant_id + pSrsCfg->eAxC_offset;
+ for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+ {
+ /* start new section information list */
+ xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, port_id, ctx_id);
+ if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1)
+ {
+ pBufList = &(p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList); /* To shorten reference */
+ if(pBufList->pBuffers && pBufList->pBuffers->pData)
+ {
+ ret = xran_cp_create_and_send_section(pHandle, port_id, XRAN_DIR_UL, tti, cc_id,
+ (struct xran_prb_map *)(pBufList->pBuffers->pData), NULL,
+ p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
 }
-#endif
- } else {
- print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
 }
- } else {
- print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
- }
 
- if (symb_id == 7 || symb_id == 13){
- p_xran_lib_ctx->rx_packet_symb_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id]++;
+ }
+ }
+ } /* if(p_xran_dev_ctx->enableSrsCp) */
+
+ MLogXRANTask(PID_CP_UL_CB, t1, MLogXRANTick());
+ } /* if(p_xran_dev_ctx->enableCP) */
+}
+
+void
+tti_to_phy_cb(struct rte_timer *tim, void *arg)
+{
+ long t1 = MLogTick();
+ struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
+ uint32_t interval = p_xran_dev_ctx->interval_us_local;
 
- 
if(p_xran_lib_ctx->rx_packet_symb_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] >= xran_get_num_eAxc(pHandle)){ - if(p_xran_lib_ctx->pCallback[0]) - p_xran_lib_ctx->pCallback[0](p_xran_lib_ctx->pCallbackTag[0], status); - p_xran_lib_ctx->rx_packet_callback_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID] = 1; - p_xran_lib_ctx->rx_packet_symb_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] = 0; + p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */ + if (first_call){ + if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){ + if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){ + p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]); + }else{ + p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--; + } + } + } else { + if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){ + int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[p_xran_dev_ctx->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT); + uint32_t slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval)); + uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME); + uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval)); + if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME(interval)-1)) { //(tti == xran_fs_get_max_slot()-1) + first_call = 1; + } } } - return size; -} + MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick()); +} -int xran_process_tx_sym(void *arg) +int32_t +xran_timing_source_thread(void *args) { - uint32_t tti=0; - uint32_t mlogVar[10]; - uint32_t mlogVarCnt = 0; - unsigned long t1 = MLogTick(); + int res = 0; + cpu_set_t cpuset; + int32_t result1; + uint32_t xran_port_id = 0; + static int owdm_init_done = 0; + struct sched_param sched_param; + struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *) args ; + uint64_t tWake = 0, tWakePrev = 0, tUsed = 0; + struct xran_device_ctx * p_dev_ctx_run = NULL; + /* ToS = Top of Second start +- 1.5us */ + struct timespec ts; + char thread_name[32]; + char buff[100]; - void *pHandle = NULL; - int32_t ant_id; - int32_t cc_id = 0; - uint8_t num_eAxc = 0; - uint8_t num_CCPorts = 0; + printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid()); + memset(&sched_param, 0, sizeof(struct sched_param)); + /* set main thread affinity mask to CPU2 */ + sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO; + CPU_ZERO(&cpuset); + CPU_SET(p_dev_ctx->fh_init.io_cfg.timing_core, &cpuset); - uint32_t frame_id = 0; - uint32_t subframe_id = 0; - uint32_t slot_id = 0; - uint32_t sym_id = 0; - uint32_t sym_idx = 0; + if ((result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset))) + { + printf("pthread_setaffinity_np failed: coreId = 2, result1 = %d\n",result1); + } + if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))) + { + printf("priority is not changed: coreId = 2, result1 = %d\n",result1); + } + + snprintf(thread_name, RTE_DIM(thread_name), "%s-%d", "fh_main_poll", rte_lcore_id()); + if ((res = pthread_setname_np(pthread_self(), thread_name))) { + printf("[core %d] pthread_setname_np = %d\n",rte_lcore_id(), res); + } - char *pos = NULL; - int prb_num = 0; + printf("TTI interval %ld [us]\n", interval_us); - struct xran_section_info *sectinfo; - uint32_t next; + if (!p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable) { + if ((res = xran_timing_create_cbs(args)) < 0){ + return res; + } + } - enum xran_pkt_dir direction; + do { + timespec_get(&ts, TIME_UTC); + }while (ts.tv_nsec >1500); - 
struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx(); - struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg; + struct tm * ptm = gmtime(&ts.tv_sec); + if(ptm){ + strftime(buff, sizeof buff, "%D %T", ptm); + printf("%s: thread_run start time: %s.%09ld UTC [%ld]\n", + (p_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us); + } + do { + timespec_get(&ts, TIME_UTC); + }while (ts.tv_nsec == 0); - if(p_xran_lib_ctx->xran2phy_mem_ready == 0) - return 0; + p_dev_ctx->timing_source_thread_running = 1; + while(1) { + + /* Check if owdm finished to create the timing cbs based on measurement results */ + if ((p_dev_ctx->fh_init.io_cfg.eowd_cmn[p_dev_ctx->fh_init.io_cfg.id].owdm_enable)&&(!owdm_init_done)&&unlikely(XRAN_RUNNING == xran_if_current_state)) { + // Adjust Windows based on Delay Measurement results + xran_adjust_timing_parameters(p_dev_ctx); + if ((res = xran_timing_create_cbs(args)) < 0){ + return res; + } + printf("TTI interval %ld [us]\n", interval_us); + owdm_init_done = 1; - if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU) { - direction = XRAN_DIR_DL; /* lls-CU */ - prb_num = NUM_OF_PRB_IN_FULL_BAND; - } - else { - direction = XRAN_DIR_UL; /* RU */ - prb_num = NUM_OF_PRB_IN_FULL_BAND; /*TODO: simulation on D-1541 @ 2.10GHz has issue with performace. reduce copy size */ } - /* RU: send symb after OTA time with delay (UL) */ - /* lls-CU:send symb in advance of OTA time (DL) */ - sym_idx = XranOffsetSym(p_xran_lib_ctx->sym_up, xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME*1000); - tti = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); - slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME); - subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME); - frame_id = XranGetFrameNum(tti,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME); - sym_id = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT); - print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id); + /* Update Usage Stats */ + tWake = xran_tick(); + xran_used_tick += tUsed; + if (tWakePrev) + { + xran_total_tick += get_ticks_diff(tWake, tWakePrev); + } + tWakePrev = tWake; + tUsed = 0; - mlogVar[mlogVarCnt++] = 0xAAAAAAAA; - mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx; - mlogVar[mlogVarCnt++] = sym_idx; - mlogVar[mlogVarCnt++] = abs(p_xran_lib_ctx->sym_up); - mlogVar[mlogVarCnt++] = tti; - mlogVar[mlogVarCnt++] = frame_id; - mlogVar[mlogVarCnt++] = subframe_id; - mlogVar[mlogVarCnt++] = slot_id; - mlogVar[mlogVarCnt++] = sym_id; - MLogAddVariables(mlogVarCnt, mlogVar, MLogTick()); + int64_t delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed); + if (XRAN_STOPPED == xran_if_current_state) + break; - if(frame_id > 99) { - print_err("OTA %d: TX:[sym_idx %d: TTI %d] fr %d sf %d slot %d sym %d\n",xran_lib_ota_sym_idx, sym_idx, tti, frame_id, subframe_id, slot_id, sym_id); - xran_if_current_state =XRAN_STOPPED; + if (delta > 3E5 && tUsed > 0)//300us about 9 symbols + { + print_err("poll_next_tick too long, delta:%ld(ns), tUsed:%ld(tick)", delta, tUsed); } - num_eAxc = xran_get_num_eAxc(pHandle); - num_CCPorts = xran_get_num_cc(pHandle); - - /* U-Plane */ - for(ant_id = 0; ant_id < num_eAxc; ant_id++) { - for(cc_id = 0; cc_id < num_CCPorts; cc_id++) { - if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU && p_xran_lib_ctx->enableCP) { - next = 0; - while(next < xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id)) { - sectinfo = xran_cp_iterate_section_info(pHandle, direction, - 
cc_id, ant_id, subframe_id, slot_id, &next); - if(sectinfo == NULL) - break; - - /* pointer to IQs input */ - /* TODO: need to implement the case of partial RB assignment */ - pos = (char*) p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData; - print_dbg(">>> [%d] type%d, id %d, startPrbc=%d, numPrbc=%d, numSymbol=%d\n", next, - sectinfo->type, sectinfo->id, sectinfo->startPrbc, - sectinfo->numPrbc, sectinfo->numSymbol); - - if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) { - print_err("Invalid section type in section DB - %d", sectinfo->type); - continue; + if (likely(XRAN_RUNNING == xran_if_current_state)) { + for(xran_port_id = 0; xran_port_id < XRAN_PORTS_NUM; xran_port_id++ ) { + p_dev_ctx_run = xran_dev_get_ctx_by_id(xran_port_id); + if(p_dev_ctx_run) { + if(p_dev_ctx_run->xran_port_id == xran_port_id) { + if(XranGetSymNum(xran_lib_ota_sym_idx[p_dev_ctx_run->xran_port_id], XRAN_NUM_OF_SYMBOL_PER_SLOT) == xran_lib_ota_sym[xran_port_id]) + { + sym_ota_cb(&p_dev_ctx_run->sym_timer, p_dev_ctx_run, &tUsed); + xran_lib_ota_sym[xran_port_id]++; + if(xran_lib_ota_sym[xran_port_id] >= N_SYM_PER_SLOT) + xran_lib_ota_sym[xran_port_id]=0; } - - send_symbol_ex(direction, sectinfo->id, - (struct rb_map *)pos, - frame_id, subframe_id, slot_id, sym_id, - sectinfo->startPrbc, sectinfo->numPrbc, - cc_id, ant_id, - xran_get_seqid(pHandle, direction, cc_id, ant_id, slot_id)); } + else { + rte_panic("p_dev_ctx_run == xran_port_id"); + } } + } + } + } + + xran_timing_destroy_cbs(args); + printf("Closing timing source thread...\n"); + return res; +} + +/* Handle ecpri format. */ +#define MBUFS_CNT 16 + +int32_t handle_ecpri_ethertype(struct rte_mbuf* pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num) +{ + struct rte_mbuf *pkt; + uint16_t i; + struct rte_ether_hdr* eth_hdr; + struct xran_ecpri_hdr* ecpri_hdr; + unsigned long t1; + int32_t ret = MBUF_FREE; + uint32_t ret_data[MBUFS_CNT] = { MBUFS_CNT * MBUF_FREE }; + struct xran_device_ctx* p_dev_ctx = xran_dev_get_ctx_by_id(xport_id); + uint16_t num_data = 0, num_control = 0, num_meas = 0; + struct rte_mbuf* pkt_data[MBUFS_CNT], * pkt_control[MBUFS_CNT], * pkt_meas[MBUFS_CNT], *pkt_adj[MBUFS_CNT]; + static uint32_t owdm_rx_first_pass = 1; + + if (p_dev_ctx == NULL) + return ret; + + for (i = 0; i < num; i++) + { + pkt = pkt_q[i]; + +// rte_prefetch0(rte_pktmbuf_mtod(pkt, void*)); + + rte_pktmbuf_adj(pkt, sizeof(*eth_hdr)); + ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *); + + p_dev_ctx->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt); + + pkt_adj[i] = pkt; + switch (ecpri_hdr->cmnhdr.bits.ecpri_mesg_type) + { + case ECPRI_IQ_DATA: + pkt_data[num_data++] = pkt; + break; + // For RU emulation + case ECPRI_RT_CONTROL_DATA: + pkt_control[num_control++] = pkt; + break; + case ECPRI_DELAY_MEASUREMENT: + if (owdm_rx_first_pass != 0) +{ + // Initialize and verify that Payload Length is in range */ + xran_initialize_and_verify_owd_pl_length((void*)p_dev_ctx); + owdm_rx_first_pass = 0; - else { /* if(p_xran_lib_ctx->xran_init_cfg.io_cfg.id == APP_LLS_CU && p_xran_lib_ctx->enableCP) */ - /* pointer to IQs input */ - pos = (char*) p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData; -#ifdef DEBUG_XRAN_BUFFERS - if (pos[0] != tti % XRAN_N_FE_BUF_LEN || - pos[1] != cc_id || - pos[2] != ant_id || - pos[3] != sym_id) - printf("%d %d %d %d\n", pos[0], pos[1], pos[2], pos[3]); -#endif - send_symbol_ex(direction, 
- xran_alloc_sectionid(pHandle, direction, cc_id, ant_id, slot_id),
- (struct rb_map *)pos,
- frame_id, subframe_id, slot_id, sym_id,
- 0, prb_num,
- cc_id, ant_id,
- xran_get_seqid(pHandle, direction, cc_id, ant_id, slot_id));
 }
+ pkt_meas[num_meas++] = pkt;
+ break;
+ default:
+ if (p_dev_ctx->fh_init.io_cfg.id == O_DU) {
+ print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.bits.ecpri_mesg_type);
+ }
+ break;
+ }
+}
+
+ if(num_data == MBUFS_CNT && p_dev_ctx->fh_cfg.ru_conf.xranCat == XRAN_CATEGORY_B) /* w/a for Cat A issue */
+{
+ for (i = 0; i < MBUFS_CNT; i++)
+{
+ ret_data[i] = MBUF_FREE;
+}
+
+ if (p_dev_ctx->fh_init.io_cfg.id == O_DU || p_dev_ctx->fh_init.io_cfg.id == O_RU)
+{
+ if (p_dev_ctx->xran2phy_mem_ready != 0)
+ ret = process_mbuf_batch(pkt_data, (void*)p_dev_ctx, MBUFS_CNT, p_cid, ret_data );
+ for (i = 0; i < MBUFS_CNT; i++)
+ {
+ if (ret_data[i] == MBUF_FREE)
+ rte_pktmbuf_free(pkt_data[i]);
+ }
+ }
+ else
+{
+ for (i = 0; i < MBUFS_CNT; i++)
+{
+ if (ret_data[i] == MBUF_FREE)
+ rte_pktmbuf_free(pkt_data[i]);
 }
+ print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
+ }
 }
+ else
+{
+ for (i = 0; i < num_data; i++)
+ {
+ ret = process_mbuf(pkt_data[i], (void*)p_dev_ctx, p_cid);
+ if (ret == MBUF_FREE)
+ rte_pktmbuf_free(pkt_data[i]);
+ }
 
- MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
- return 0;
+ for (i = 0; i < num_control; i++)
+ {
+ t1 = MLogXRANTick();
+ if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
+ {
+ ret = process_cplane(pkt_control[i], (void*)p_dev_ctx);
+ p_dev_ctx->fh_counters.rx_counter++;
+ if (ret == MBUF_FREE)
+ rte_pktmbuf_free(pkt_control[i]);
+ }
+ else
+ {
+ print_err("O-DU received C-Plane message!");
+ }
+ MLogXRANTask(PID_PROCESS_CP_PKT, t1, MLogXRANTick());
+ }
+
+ for (i = 0; i < num_meas; i++)
+ {
+
+ /*if(p_dev_ctx->fh_init.io_cfg.id == O_RU)
+ printf("Got delay_meas_pkt xport_id %d p_dev_ctx %08"PRIx64" %d\n", xport_id,(int64_t*)p_dev_ctx, num_meas) ;*/
+ t1 = MLogXRANTick();
+ if(xran_if_current_state != XRAN_RUNNING)
+ ret = process_delay_meas(pkt_meas[i], (void*)p_dev_ctx, xport_id);
+ else
+ ret = MBUF_FREE;
+ if (ret == MBUF_FREE)
+ rte_pktmbuf_free(pkt_meas[i]);
+ MLogXRANTask(PID_PROCESS_DELAY_MEAS_PKT, t1, MLogXRANTick());
+ }
+ }
+
+ return MBUF_FREE;
 }
 
-int xran_packet_and_dpdk_timer_thread(void *args)
+int32_t
+xran_packet_and_dpdk_timer_thread(void *args)
 {
- struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+ //struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
 
 uint64_t prev_tsc = 0;
 uint64_t cur_tsc = rte_rdtsc();
 uint64_t diff_tsc = cur_tsc - prev_tsc;
- cpu_set_t cpuset;
 struct sched_param sched_param;
 int res = 0;
 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
+ memset(&sched_param, 0, sizeof(struct sched_param));
 
 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
- if ((res = pthread_setschedparam(pthread_self(), 1, &sched_param)))
+ if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
 {
 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
 }
@@ -945,501 +1702,2688 @@ int xran_packet_and_dpdk_timer_thread(void *args)
 return 0;
 }
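The receive path above first sorts a burst of up to MBUFS_CNT mbufs by eCPRI message type and only then hands each class (IQ data, real-time control, delay measurement) to its bulk handler. A minimal standalone sketch of that classify-then-dispatch pattern; the pkt type and accessor below are placeholders for struct rte_mbuf and the common-header read, and the message-type values follow the eCPRI specification:

    #include <stdint.h>

    #define MBUFS_CNT 16

    enum ecpri_msg_type {              /* values per the eCPRI common header */
        ECPRI_IQ_DATA           = 0x00,
        ECPRI_RT_CONTROL_DATA   = 0x02,
        ECPRI_DELAY_MEASUREMENT = 0x05
    };

    struct pkt;                                  /* placeholder for struct rte_mbuf */
    uint8_t pkt_ecpri_type(const struct pkt *p); /* placeholder for the header read */

    static void classify_burst(struct pkt *in[], uint16_t num,
                               struct pkt *data[], uint16_t *n_data,
                               struct pkt *ctrl[], uint16_t *n_ctrl,
                               struct pkt *meas[], uint16_t *n_meas)
    {
        for (uint16_t i = 0; i < num && i < MBUFS_CNT; i++) {
            switch (pkt_ecpri_type(in[i])) {
            case ECPRI_IQ_DATA:           data[(*n_data)++] = in[i]; break;
            case ECPRI_RT_CONTROL_DATA:   ctrl[(*n_ctrl)++] = in[i]; break;
            case ECPRI_DELAY_MEASUREMENT: meas[(*n_meas)++] = in[i]; break;
            default:                      /* caller logs and frees unknown types */ break;
            }
        }
    }

The batching pays off on the U-Plane path: a full batch of MBUFS_CNT IQ packets goes through process_mbuf_batch() in a single call instead of sixteen per-packet calls.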
-
-int32_t xran_init(int argc, char *argv[], PXRANFHINIT p_xran_fh_init, char *appName, void ** pHandle)
+void xran_initialize_ecpri_owd_meas_cmn( struct xran_io_cfg *ptr)
 {
- int i;
- int j;
+// ptr->eowd_cmn.initiator_en = 0; // Initiator 1, Recipient 0
+// ptr->eowd_cmn.filterType = 0; // 0 Simple average based on number of measurements
+ // Set default values if the Timeout and numberOfSamples are not set
+ if ( ptr->eowd_cmn[ptr->id].responseTo == 0)
+ ptr->eowd_cmn[ptr->id].responseTo = 10E6; // 10 ms timeout expressed in ns
+ if ( ptr->eowd_cmn[ptr->id].numberOfSamples == 0)
+ ptr->eowd_cmn[ptr->id].numberOfSamples = 8; // Number of samples to be averaged
+}
+void xran_initialize_ecpri_owd_meas_per_port (int i, struct xran_io_cfg *ptr )
+{
+ /* This function initializes one-way delay measurements on a per port basis,
+ most variables default to zero */
+ ptr->eowd_port[ptr->id][i].portid = (uint8_t)i;
+}
 
- struct xran_io_loop_cfg *p_io_cfg = (struct xran_io_loop_cfg *)&p_xran_fh_init->io_cfg;
- struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
+int32_t
+xran_init(int argc, char *argv[],
+ struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
+{
+ int32_t ret = XRAN_STATUS_SUCCESS;
+ int32_t i;
+ int32_t j;
+ int32_t o_xu_id = 0;
+ struct xran_io_cfg *p_io_cfg = NULL;
+ struct xran_device_ctx * p_xran_dev_ctx = NULL;
+ int32_t lcore_id = 0;
+ const char *version = rte_version();
+
+ if (version == NULL)
+ rte_panic("version == NULL");
+
+ printf("'%s'\n", version);
+
+ if (p_xran_fh_init->xran_ports < 1 || p_xran_fh_init->xran_ports > XRAN_PORTS_NUM) {
+ ret = XRAN_STATUS_INVALID_PARAM;
+ print_err("fh_init xran_ports= %d is wrong [%d]\n", p_xran_fh_init->xran_ports, ret);
+ return ret;
+ }
+ mlogxranenable = p_xran_fh_init->mlogxranenable;
+ p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
 
- int lcore_id = 0;
- char filename[64];
+ if ((ret = xran_dev_create_ctx(p_xran_fh_init->xran_ports)) < 0) {
+ print_err("context allocation error [%d]\n", ret);
+ return ret;
+ }
+
+ for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
+ p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
+ memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
+ p_xran_dev_ctx->xran_port_id = o_xu_id;
 
- memset(p_xran_lib_ctx, 0, sizeof(struct xran_lib_ctx)); /* copy init */
- p_xran_lib_ctx->xran_init_cfg = *p_xran_fh_init;
+ p_xran_dev_ctx->fh_init = *p_xran_fh_init;
+ printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
+
+ memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
+ /* To make sure to set default functions */
+ p_xran_dev_ctx->send_upmbuf2ring = NULL;
+ p_xran_dev_ctx->send_cpmbuf2ring = NULL;
+ /* eCPRI initialization: set one-way delay measurement common variables to default values */
+ xran_initialize_ecpri_owd_meas_cmn(&p_xran_dev_ctx->fh_init.io_cfg);
+ }
 
- xran_if_current_state = XRAN_RUNNING;
- interval_us = p_xran_fh_init->ttiPeriod;
+ /* default values if not set */
+ if(p_io_cfg->nEthLinePerPort == 0)
+ p_io_cfg->nEthLinePerPort = 1;
 
- p_xran_lib_ctx->llscu_id = p_xran_fh_init->llscuId;
- memcpy(&(p_xran_lib_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(XRANEAXCIDCONFIG));
+ if(p_io_cfg->nEthLineSpeed == 0)
+ p_io_cfg->nEthLineSpeed = 25;
 
- p_xran_lib_ctx->enableCP = p_xran_fh_init->enableCP;
+ /** at least 1 RX Q */
+ if(p_io_cfg->num_rxq == 0)
+ p_io_cfg->num_rxq = 1;
+
+ if (p_io_cfg->id == 1) {
+ /* 1 HW for O-RU */
+ p_io_cfg->num_rxq = 1;
+ }
+#if (RTE_VER_YEAR < 21) /* eCPRI flow supported with DPDK 21.02 or later */
+ if (p_io_cfg->num_rxq > 1){
+ p_io_cfg->num_rxq = 1;
+ printf("%s does not support eCPRI flows. Set rxq to %d\n", version, p_io_cfg->num_rxq);
+ }
+#endif
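The VF sanity check just below encodes the I/O model printed above: every Ethernet line of every O-xU port needs two VFs (one for C-Plane, one for U-Plane), collapsing to one VF per line when one_vf_cu_plane is set. A small illustration of that arithmetic, as a hypothetical helper mirroring the expression used in the check:

    static inline int32_t expected_num_vfs(int32_t xran_ports,
                                           int32_t eth_lines_per_port,
                                           int32_t one_vf_cu_plane)
    {
        /* (2 - one_vf_cu_plane): two VFs per line, or one when C/U share a VF */
        return xran_ports * eth_lines_per_port * (2 - one_vf_cu_plane);
    }
    /* e.g. 2 O-xU ports x 1 line each, separate C/U VFs: 2 * 1 * 2 == 4 VFs */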
+ printf("PF Eth line speed %dG\n",p_io_cfg->nEthLineSpeed);
+ printf("PF Eth lines per O-xU port %d\n",p_io_cfg->nEthLinePerPort);
+ printf("RX HW queues per O-xU Eth line %d \n",p_io_cfg->num_rxq);
+
+ if(p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane) != p_io_cfg->num_vfs) {
+ print_err("Incorrect VFs configurations: For %d O-xUs with %d Ethernet ports expected number of VFs is %d. [provided %d]\n",
+ p_xran_fh_init->xran_ports, p_io_cfg->nEthLinePerPort,
+ p_xran_fh_init->xran_ports * p_io_cfg->nEthLinePerPort *(2 - 1* p_io_cfg->one_vf_cu_plane), p_io_cfg->num_vfs);
+ }
+
+ xran_if_current_state = XRAN_INIT;
 
 xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
 
 if (p_io_cfg->id == 0)
- xran_ethdi_init_dpdk_io(basename(appName),
+ xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
 p_io_cfg,
 &lcore_id,
- (struct ether_addr *)p_xran_fh_init->p_lls_cu_addr,
- (struct ether_addr *)p_xran_fh_init->p_ru_addr,
- p_xran_fh_init->cp_vlan_tag,
- p_xran_fh_init->up_vlan_tag);
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
+ p_xran_dev_ctx->fh_init.mtu);
 else
- xran_ethdi_init_dpdk_io(basename(appName),
+ xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
 p_io_cfg,
 &lcore_id,
- (struct ether_addr *)p_xran_fh_init->p_ru_addr,
- (struct ether_addr *)p_xran_fh_init->p_lls_cu_addr,
- p_xran_fh_init->cp_vlan_tag,
- p_xran_fh_init->up_vlan_tag);
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
+ p_xran_dev_ctx->fh_init.mtu);
+
+ for(o_xu_id = 0; o_xu_id < p_xran_fh_init->xran_ports; o_xu_id++){
+ p_xran_dev_ctx = xran_dev_get_ctx_by_id(o_xu_id);
+
+ for(i = 0; i < MAX_TTI_TO_PHY_TIMER; i++ )
+ rte_timer_init(&p_xran_dev_ctx->tti_to_phy_timer[i]);
+
+ rte_timer_init(&p_xran_dev_ctx->sym_timer);
+ for (i = 0; i< MAX_NUM_OF_DPDK_TIMERS; i++)
+ rte_timer_init(&p_xran_dev_ctx->dpdk_timer[i]);
+
+ p_xran_dev_ctx->direct_pool = socket_direct_pool;
+ p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
+
+
+ for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){
+ LIST_INIT (&p_xran_dev_ctx->sym_cb_list_head[j]);
+ }
+
+ }
+
+ for (i=0; i < p_io_cfg->num_vfs; i++)
+ {
+ /* Initialize ecpri one-way delay measurement info on a per vf port basis */
+ xran_initialize_ecpri_owd_meas_per_port (i, p_io_cfg);
+ }
+
+ return ret;
+}
+
+int32_t
+xran_sector_get_instances (uint32_t xran_port, void * pDevHandle, uint16_t nNumInstances,
+ xran_cc_handle_t * pSectorInstanceHandles)
+{
+ struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
+ XranSectorHandleInfo *pCcHandle = NULL;
+ int32_t i = 0;
+
+ pDev += xran_port;
+
+ /* Check for the Valid Parameters */
+ CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
+
+ if (!nNumInstances) {
+ print_dbg("Instance is not assigned for this function !!! 
\n"); + return XRAN_STATUS_INVALID_PARAM; + } + + for (i = 0; i < nNumInstances; i++) { + + /* Allocate Memory for CC handles */ + pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64); + + if(pCcHandle == NULL) + return XRAN_STATUS_RESOURCE; + + memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo))); + + pCcHandle->nIndex = i; + pCcHandle->nXranPort = pDev->xran_port_id; + + printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle); + pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle; + + printf("Handle: %p Instance: %p\n", + &pSectorInstanceHandles[i], pSectorInstanceHandles[i]); + } + + return XRAN_STATUS_SUCCESS; +} + + +int32_t +xran_5g_fronthault_config (void * pHandle, + struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, + void *pCallbackTag) +{ + int j, i = 0, z; + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; + + if(NULL == pHandle) { + printf("Handle is NULL!\n"); + return XRAN_STATUS_FAIL; + } + + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf ("p_xran_dev_ctx is NULL\n"); + return XRAN_STATUS_FAIL; + } + + i = pXranCc->nIndex; + + for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) { + for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){ + /* U-plane TX */ + + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0]; + + if(pSrcBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcBuffer[z][j])); + + + /* C-plane TX */ + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0]; + + if(pSrcCpBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcCpBuffer[z][j])); + /* U-plane RX */ + + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + 
p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0]; + + if(pDstBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j])); + + + /* C-plane RX */ + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0]; + + if(pDstCpBuffer[z][j]) + p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j])); + } + } + + p_xran_dev_ctx->pCallback[i] = pCallback; + p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag; + print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__, + p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pCallback[i], p_xran_dev_ctx->pCallbackTag[i]); + + p_xran_dev_ctx->xran2phy_mem_ready = 1; + + return XRAN_STATUS_SUCCESS; +} + +int32_t xran_5g_bfw_config(void * pHandle, + struct xran_buffer_list *pSrcRxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pSrcTxCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, + void *pCallbackTag){ + int j, i = 0, z; + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; + + if(NULL == pHandle) { + printf("Handle is NULL!\n"); + return XRAN_STATUS_FAIL; + } + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf ("p_xran_dev_ctx is NULL\n"); + return XRAN_STATUS_FAIL; + } + + i = pXranCc->nIndex; + + for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) { + for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){ + /* C-plane RX - RU */ + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0]; + + if(pSrcRxCpBuffer[z][j]) + p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcRxCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHCpRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcRxCpBuffer[z][j])); + + /* C-plane TX - RU */ + p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + 
p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0]; + + if(pSrcTxCpBuffer[z][j]) + p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcTxCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHCpTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pSrcTxCpBuffer[z][j])); + } + } + return XRAN_STATUS_SUCCESS; +} + +int32_t +xran_5g_prach_req (void * pHandle, + struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstBufferDecomp[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, + void *pCallbackTag) +{ + int j, i = 0, z; + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; + + if(NULL == pHandle) { + printf("Handle is NULL!\n"); + return XRAN_STATUS_FAIL; + } + + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf ("p_xran_dev_ctx is NULL\n"); + return XRAN_STATUS_FAIL; + } + + i = pXranCc->nIndex; + + for(j = 0; j < XRAN_N_FE_BUF_LEN; j++) { + for(z = 0; z < XRAN_MAX_PRACH_ANT_NUM; z++){ + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_PRACH_ANT_NUM; // ant number. + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0]; + if(pDstBuffer[z][j]) + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j])); + + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffersDecomp[j][i][z][0]; + if(pDstBufferDecomp[z][j]) + p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[j][i][z].sBufferList = *pDstBufferDecomp[z][j]; + } + } + + p_xran_dev_ctx->pPrachCallback[i] = pCallback; + p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag; + + print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__, + p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pPrachCallback[i], p_xran_dev_ctx->pPrachCallbackTag[i]); + + return XRAN_STATUS_SUCCESS; +} + +int32_t +xran_5g_srs_req (void * pHandle, + struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN], + struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN], + xran_transport_callback_fn pCallback, + void *pCallbackTag) +{ + int j, i = 0, z; + XranSectorHandleInfo* pXranCc = NULL; + struct xran_device_ctx * p_xran_dev_ctx = NULL; - for(i = 0; i < 10; i++ ) - rte_timer_init(&tti_to_phy_timer[i]); + if(NULL == pHandle) { + printf("Handle is NULL!\n"); + return XRAN_STATUS_FAIL; + } - rte_timer_init(&tti_timer); - rte_timer_init(&sym_timer); - rte_timer_init(&tx_cp_dl_timer); - rte_timer_init(&tx_cp_ul_timer); - rte_timer_init(&tx_up_timer); + pXranCc = (XranSectorHandleInfo*) pHandle; + p_xran_dev_ctx = xran_dev_get_ctx_by_id(pXranCc->nXranPort); + if (p_xran_dev_ctx == NULL) { + printf 
("p_xran_dev_ctx is NULL\n"); + return XRAN_STATUS_FAIL; + } + + i = pXranCc->nIndex; + + for(j=0; jsFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number. + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0]; + if(pDstBuffer[z][j]) + p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstBuffer[z][j])); + + /* C-plane SRS */ + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT; + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxPrbMapBuffers[j][i][z]; + + if(pDstCpBuffer[z][j]) + p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j]; + else + memset(&p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList, 0, sizeof(*pDstCpBuffer[z][j])); - for(i = 0; i < XRAN_MAX_SECTOR_NR; i++ ){ - unsigned n = snprintf(&p_xran_lib_ctx->ring_name[0][i][0], RTE_RING_NAMESIZE, "dl_sym_ring_%u", i); - p_xran_lib_ctx->dl_sym_idx_ring[i] = rte_ring_create(&p_xran_lib_ctx->ring_name[0][i][0], XRAN_RING_SIZE, - rte_lcore_to_socket_id(lcore_id), RING_F_SP_ENQ | RING_F_SC_DEQ); + } } + p_xran_dev_ctx->pSrsCallback[i] = pCallback; + p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag; - lcore_id = rte_get_next_lcore(lcore_id, 0, 0); - PANIC_ON(lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()"); + print_dbg("%s: [p %d CC %d] Cb %p cb %p\n",__FUNCTION__, + p_xran_dev_ctx->xran_port_id, i, p_xran_dev_ctx->pSrsCallback[i], p_xran_dev_ctx->pSrsCallbackTag[i]); - /* Start packet processing thread */ - if (rte_eal_remote_launch(ring_processing_thread, NULL, lcore_id)) - rte_panic("ring_processing_thread() failed to start\n"); + return XRAN_STATUS_SUCCESS; +} - if(p_io_cfg->pkt_aux_core > 0){ - lcore_id = rte_get_next_lcore(lcore_id, 0, 0); - PANIC_ON(lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()"); +uint32_t +xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *num_core_used, uint32_t *core_used, uint32_t clear) +{ + uint32_t i; + + *num_core_used = xran_num_cores_used; + for (i = 0; i < xran_num_cores_used; i++) + { + core_used[i] = xran_core_used[i]; + } + + *total_time = xran_total_tick; + *used_time = xran_used_tick; - /* Start packet processing thread */ - if (rte_eal_remote_launch(xran_packet_and_dpdk_timer_thread, NULL, lcore_id)) - rte_panic("ring_processing_thread() failed to start\n"); + if (clear) + { + xran_total_tick = 0; + xran_used_tick = 0; } - lcore_id = rte_get_next_lcore(lcore_id, 0, 0); - PANIC_ON(lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()"); + return 0; +} + +uint8_t* +xran_add_cp_hdr_offset(uint8_t *dst) +{ + dst += (RTE_PKTMBUF_HEADROOM + + sizeof(struct xran_ecpri_hdr) + + sizeof(struct xran_cp_radioapp_section1_header) + + sizeof(struct 
+
+uint8_t*
+xran_add_hdr_offset(uint8_t *dst, int16_t compMethod)
+{
+ dst+= (RTE_PKTMBUF_HEADROOM +
+ sizeof (struct xran_ecpri_hdr) +
+ sizeof (struct radio_app_common_hdr) +
+ sizeof(struct data_section_hdr));
+ if(compMethod != XRAN_COMPMETHOD_NONE)
+ dst += sizeof (struct data_section_compression_hdr);
+ dst = RTE_PTR_ALIGN_CEIL(dst, 64);
+
+ return dst;
+}
+
+int32_t
+xran_pkt_gen_process_ring(struct rte_ring *r)
+{
+ assert(r);
+ struct rte_mbuf *mbufs[16];
+ int i;
+ uint32_t remaining;
+ uint64_t t1;
+ struct xran_io_cfg *p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
+ const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
+ RTE_DIM(mbufs), &remaining);
+
+
+ if (!dequeued)
+ return 0;
+
+ t1 = MLogXRANTick();
+ for (i = 0; i < dequeued; ++i) {
+ struct cp_up_tx_desc * p_tx_desc = (struct cp_up_tx_desc *)rte_pktmbuf_mtod(mbufs[i], struct cp_up_tx_desc *);
+ xran_process_tx_sym_cp_on_opt(p_tx_desc->pHandle,
+ p_tx_desc->ctx_id,
+ p_tx_desc->tti,
+ p_tx_desc->start_cc,
+ p_tx_desc->cc_num,
+ p_tx_desc->start_ant,
+ p_tx_desc->ant_num,
+ p_tx_desc->frame_id,
+ p_tx_desc->subframe_id,
+ p_tx_desc->slot_id,
+ p_tx_desc->sym_id,
+ (enum xran_comp_hdr_type)p_tx_desc->compType,
+ (enum xran_pkt_dir) p_tx_desc->direction,
+ p_tx_desc->xran_port_id,
+ (PSECTION_DB_TYPE)p_tx_desc->p_sec_db);
+
+ xran_pkt_gen_desc_free(p_tx_desc);
+ if (XRAN_STOPPED == xran_if_current_state){
+ MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
+ return -1;
+ }
+ }
+
+ if(p_io_cfg->io_sleep)
+ nanosleep(&sleeptime,NULL);
+
+ MLogXRANTask(PID_PROCESS_TX_SYM, t1, MLogXRANTick());
+
+ return remaining;
+}
+
+int32_t
+xran_dl_pkt_ring_processing_func(void* args)
+{
+ struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+ uint16_t xran_port_mask = (uint16_t)((uint64_t)args & 0xFFFF);
+ uint16_t current_port;
+
+ rte_timer_manage();
+
+ for (current_port = 0; current_port < XRAN_PORTS_NUM; current_port++) {
+ if( xran_port_mask & (1<<current_port)) {
+ xran_pkt_gen_process_ring(ctx->up_dl_pkt_gen_ring[current_port]);
+ }
+ }
+
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
+int32_t xran_fh_rx_and_up_tx_processing(void *port_mask)
+{
+ int32_t ret_val=0;
+
+ ret_val = ring_processing_func((void *)0);
+ if(ret_val != 0)
+ return ret_val;
+
+ ret_val = xran_dl_pkt_ring_processing_func(port_mask);
+ if(ret_val != 0)
+ return ret_val;
+
+ return 0;
+}
+/** Function that services DPDK timers */
+int32_t
+xran_processing_timer_only_func(void* args)
+{
+ rte_timer_manage();
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
+/** Function that parses RX packets on all ports and performs TX and RX on the Ethernet device */
+int32_t
+xran_all_tasks(void* arg)
+{
+
+ ring_processing_func(arg);
+ process_dpdk_io(arg);
+ return 0;
+}
+
+/** Function that performs TX and RX on the Ethernet device */
+int32_t
+xran_eth_trx_tasks(void* arg)
+{
+ process_dpdk_io(arg);
+ return 0;
+}
+
+/** Function that performs RX on the Ethernet device */
+int32_t
+xran_eth_rx_tasks(void* arg)
+{
+ process_dpdk_io_rx(arg);
+ return 0;
+}
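The task functions above share one convention: they return 0 to request another polling iteration and a negative value once xran_if_current_state reaches XRAN_STOPPED. Any of them can therefore be driven by the same worker loop. A minimal sketch of that loop; the actual body of xran_generic_worker_thread is not shown in this diff, so this is only an assumption about its shape:

    #include <stdint.h>

    typedef int32_t (*xran_task_fn)(void *arg);

    static int32_t run_task_until_stopped(xran_task_fn task, void *arg)
    {
        int32_t rc;
        while ((rc = task(arg)) >= 0)
            ;                        /* 0 means "keep polling" */
        return rc;                   /* negative: XRAN_STOPPED observed */
    }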
+/** Function that processes O-RAN FH packets per port */
+int32_t
+ring_processing_func_per_port(void* args)
+{
+ struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
+ int32_t i;
+ uint16_t port_id = (uint16_t)((uint64_t)args & 0xFFFF);
+ queueid_t qi;
+
+ for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i = i+1) {
+ if (ctx->vf2xran_port[i] == port_id) {
+ for(qi = 0; qi < ctx->rxq_per_port[port_id]; qi++){
+ if (process_ring(ctx->rx_ring[i][qi], i, qi))
+ return 0;
+ }
+ }
+ }
+
+ if (XRAN_STOPPED == xran_if_current_state)
+ return -1;
+
+ return 0;
+}
+
+/** Function that generates the worker-thread configuration and creates the threads based on the scenario and the platform in use */
+int32_t
+xran_spawn_workers(void)
+{
+ uint64_t nWorkerCore = 1LL;
+ uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
+ int32_t i = 0;
+ uint32_t total_num_cores = 1; /*start with timing core */
+ uint32_t worker_num_cores = 0;
+ uint32_t icx_cpu = 0;
+ int32_t core_map[2*sizeof(uint64_t)*8];
+ uint64_t xran_port_mask = 0;
+
+ struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
+ struct xran_device_ctx *p_dev = NULL;
+ struct xran_fh_init *fh_init = NULL;
+ struct xran_fh_config *fh_cfg = NULL;
+ struct xran_worker_th_ctx* pThCtx = NULL;
+ void *worker_ports=NULL;
+
+ p_dev = xran_dev_get_ctx_by_id(0);
+ if(p_dev == NULL) {
+ print_err("p_dev\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ fh_init = &p_dev->fh_init;
+ if(fh_init == NULL) {
+ print_err("fh_init\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ fh_cfg = &p_dev->fh_cfg;
+ if(fh_cfg == NULL) {
+ print_err("fh_cfg\n");
+ return XRAN_STATUS_FAIL;
+ }
+
+ for (i = 0; i < coreNum && i < 64; i++) {
+ if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+ core_map[worker_num_cores++] = i;
+ total_num_cores++;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+
+ nWorkerCore = 1LL;
+ for (i = 64; i < coreNum && i < 128; i++) {
+ if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+ core_map[worker_num_cores++] = i;
+ total_num_cores++;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+
+ extern int _may_i_use_cpu_feature(unsigned __int64);
+ icx_cpu = _may_i_use_cpu_feature(_FEATURE_AVX512IFMA52);
+
+ printf("O-XU %d\n", eth_ctx->io_cfg.id);
+ printf("HW %d\n", icx_cpu);
+ printf("Num cores %d\n", total_num_cores);
+ printf("Num ports %d\n", fh_init->xran_ports);
+ printf("O-RU Cat %d\n", fh_cfg->ru_conf.xranCat);
+ printf("O-RU CC %d\n", fh_cfg->nCC);
+ printf("O-RU eAxC %d\n", fh_cfg->neAxc);
+
+ for (i = 0; i < fh_init->xran_ports; i++){
+ xran_port_mask |= 1L<<i;
+ }
+
+ for (i = 0; i < fh_init->xran_ports; i++) {
+ struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i);
+ if(p_dev_update == NULL){
+ print_err("p_dev_update\n");
+ return XRAN_STATUS_FAIL;
+ }
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 1;
+ p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 1;
+ printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+ printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]);
+ }
+
+ if(fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+ switch(total_num_cores) {
+ case 1: /** only timing core */
+ eth_ctx->time_wrk_cfg.f = xran_all_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+ break;
+ case 2:
+ eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks;
+ eth_ctx->time_wrk_cfg.arg = NULL;
+ eth_ctx->time_wrk_cfg.state = 1;
+
+ pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+ if(pThCtx == NULL){
+ print_err("pThCtx allocation error\n");
+ return XRAN_STATUS_FAIL;
+ }
+ memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+ pThCtx->worker_id = 0;
+ pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+ snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]);
+ pThCtx->task_func = ring_processing_func;
+ pThCtx->task_arg = NULL;
+ eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread;
+ 
eth_ctx->pkt_wrk_cfg[0].arg = pThCtx; + break; + case 3: + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 0; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)xran_port_mask; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + break; + default: + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + } else if ((fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports == 1) || fh_init->io_cfg.bbu_offload) { + switch(total_num_cores) { + case 1: /** only timing core */ + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + break; + case 2: + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + if (p_dev->fh_init.io_cfg.bbu_offload) + p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring; + else + p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; + + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread; + 
eth_ctx->pkt_wrk_cfg[0].arg = pThCtx; + break; + case 3: + if(1) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 0; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)xran_port_mask; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 4: + if(1) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, 
RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 5: + if(1) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + 
} + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 6: + if(eth_ctx->io_cfg.id == O_DU) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_rx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 Eth Tx **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_tx; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct 
xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2) |(1L<<0)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 4 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 0; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = 0; //pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = 0; //pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } else if(eth_ctx->io_cfg.id == O_RU) { + /*** O_RU specific config */ + /* timing core */ + eth_ctx->time_wrk_cfg.f = NULL; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 Eth RX */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_rx; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) 
_mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)0; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)1; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** FH TX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_tx; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + default: + print_err("unsupported configuration\n"); + return XRAN_STATUS_FAIL; + } + } else if (fh_cfg->ru_conf.xranCat == XRAN_CATEGORY_B && fh_init->xran_ports > 1) { + switch(total_num_cores) { + case 1: + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + break; + + case 2: + if(fh_init->xran_ports == 2) + worker_ports = (void *)((1L<<0 | 1L<<1) & xran_port_mask); + else if(fh_init->xran_ports == 3) + worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2) & xran_port_mask); + else if(fh_init->xran_ports == 4) + worker_ports = (void *)((1L<<0 | 1L<<1 | 1L<<2 | 1L<<3) & xran_port_mask); + else 
+ { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* p_dev->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt; */ + + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_fh_rx_and_up_tx_processing; + pThCtx->task_arg = worker_ports; + eth_ctx->pkt_wrk_cfg[0].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[0].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + break; + case 3: + if(icx_cpu) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)xran_port_mask; + 
eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } + else /* csx cpu */ + { + if(fh_init->xran_ports == 3) + worker_ports = (void *)(1L<<2 & xran_port_mask); + else if(fh_init->xran_ports == 4) + worker_ports = (void *)((1L<<2 | 1L<<3) & xran_port_mask); + else{ + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void *)((1L<<0|1L<<1) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_fh_rx_and_up_tx_processing; + pThCtx->task_arg = worker_ports; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } + + break; + + case 4: + if(1) { + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct 
xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)(((1L<<1) | (1L<<2)) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1L<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + } + else { + print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores); + return XRAN_STATUS_FAIL; + } + break; + case 5: + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = 
xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<1) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_up_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<2) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + + if(eth_ctx->io_cfg.id == O_DU && 0 == fh_init->dlCpProcBurst) { + for (i = 1; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = i+1; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + } + } + + break; + case 6: + if(eth_ctx->io_cfg.id == O_DU){ + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_processing_timer_only_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct 
xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<1) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 4 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<2) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } else { + /*** O_RU specific config */ + /* timing core */ + eth_ctx->time_wrk_cfg.f = NULL; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 Eth RX */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_rx; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)0; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 FH RX and BBDEV 
*/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)1; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** FH TX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_tx; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + } + break; + case 7: + /*** O_RU specific config */ + if((fh_init->xran_ports == 4) && (eth_ctx->io_cfg.id == O_RU)) + { + /*** O_RU specific config */ + /* timing core */ + eth_ctx->time_wrk_cfg.f = NULL; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 Eth RX */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_rx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_rx; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 1 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p0", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)0; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = 
xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p1", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)1; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 3 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p2", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)2; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 4 FH RX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 4; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_p3", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func_per_port; + pThCtx->task_arg = (void*)3; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** FH TX and BBDEV */ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 5; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_eth_tx", core_map[pThCtx->worker_id]); + pThCtx->task_func = process_dpdk_io_tx; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + } /* -- if xran->ports == 4 -- */ + else if(eth_ctx->io_cfg.id == O_DU){ + if(fh_init->xran_ports == 3) + worker_ports = (void *)((1<<2) & xran_port_mask); + else if(fh_init->xran_ports == 4) + worker_ports = (void *)((1<<3) & xran_port_mask); + /* timing core */ + eth_ctx->time_wrk_cfg.f = xran_eth_trx_tasks; + eth_ctx->time_wrk_cfg.arg = NULL; + eth_ctx->time_wrk_cfg.state = 1; + + /* workers */ + /** 0 **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 0; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + 
snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_rx_bbdev", core_map[pThCtx->worker_id]); + pThCtx->task_func = ring_processing_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = 2; i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_UL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_UL]); + } + + /** 1 - CP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 1; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_cp_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_processing_timer_only_func; + pThCtx->task_arg = NULL; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + /** 2 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 2; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<0) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = (fh_init->xran_ports-1); i < fh_init->xran_ports; i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]); + } + + /** 3 UP GEN **/ + pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64); + if(pThCtx == NULL){ + print_err("pThCtx allocation error\n"); + return XRAN_STATUS_FAIL; + } + memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx)); + pThCtx->worker_id = 3; + pThCtx->worker_core_id = core_map[pThCtx->worker_id]; + snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]); + pThCtx->task_func = xran_dl_pkt_ring_processing_func; + pThCtx->task_arg = (void*)((1<<1) & xran_port_mask); + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread; + eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx; + + for (i = (fh_init->xran_ports - 2); i < (fh_init->xran_ports - 1); i++) { + struct xran_device_ctx * p_dev_update = xran_dev_get_ctx_by_id(i); + if(p_dev_update == NULL) { + print_err("p_dev_update\n"); + return XRAN_STATUS_FAIL; + } + p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL] = pThCtx->worker_id; + printf("p:%d XRAN_JOB_TYPE_CP_DL worker id %d\n", i, 
p_dev_update->job2wrk_id[XRAN_JOB_TYPE_CP_DL]);
+                }
-    /* Start packet processing thread */
-    if (rte_eal_remote_launch(xran_timing_source_thread, xran_lib_get_ctx(), lcore_id))
-        rte_panic("thread_run() failed to start\n");
+                /** 4 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 4;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = (void*)((1<<2) & xran_port_mask);
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+
+                /** 5 UP GEN **/
+                pThCtx = (struct xran_worker_th_ctx*) _mm_malloc(sizeof(struct xran_worker_th_ctx), 64);
+                if(pThCtx == NULL){
+                    print_err("pThCtx allocation error\n");
+                    return XRAN_STATUS_FAIL;
+                }
+                memset(pThCtx, 0, sizeof(struct xran_worker_th_ctx));
+                pThCtx->worker_id = 5;
+                pThCtx->worker_core_id = core_map[pThCtx->worker_id];
+                snprintf(pThCtx->worker_name, RTE_DIM(pThCtx->worker_name), "%s-%d", "fh_tx_gen", core_map[pThCtx->worker_id]);
+                pThCtx->task_func = xran_dl_pkt_ring_processing_func;
+                pThCtx->task_arg = worker_ports;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].f = xran_generic_worker_thread;
+                eth_ctx->pkt_wrk_cfg[pThCtx->worker_id].arg = pThCtx;
+            }
+            else{
+                print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+                return XRAN_STATUS_FAIL;
+            }
+            break;
-    printf("Set debug stop %d\n", p_xran_fh_init->debugStop);
-    timing_set_debug_stop(p_xran_fh_init->debugStop);
+        default:
+            print_err("unsupported configuration Cat %d numports %d total_num_cores = %d\n", fh_cfg->ru_conf.xranCat, fh_init->xran_ports, total_num_cores);
+            return XRAN_STATUS_FAIL;
+        }
+    } else {
+        print_err("unsupported configuration\n");
+        return XRAN_STATUS_FAIL;
+    }
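Every branch of the switch above repeats the same allocate/zero/initialize sequence for struct xran_worker_th_ctx before hooking it into eth_ctx->pkt_wrk_cfg[]. The standalone sketch below models that repeated pattern; worker_ctx and worker_setup are illustrative names, not part of the library, and the real code uses 64-byte-aligned _mm_malloc plus the library's own task-function type rather than calloc and int (*)(void *).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of the per-worker setup repeated in each case above. */
struct worker_ctx {
    unsigned id;
    unsigned core;
    char     name[32];
    int    (*task)(void *);
    void    *arg;
};

static struct worker_ctx *
worker_setup(const unsigned *core_map, unsigned id, const char *base,
             int (*task)(void *), void *arg)
{
    struct worker_ctx *p = calloc(1, sizeof(*p)); /* allocate + zero in one step */
    if (p == NULL)
        return NULL;                              /* caller maps this to XRAN_STATUS_FAIL */
    p->id   = id;
    p->core = core_map[id];                       /* worker id indexes the core map */
    snprintf(p->name, sizeof(p->name), "%s-%u", base, p->core);
    p->task = task;
    p->arg  = arg;
    return p;
}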
-    memset(&DevHandle, 0, sizeof(XranLibHandleInfoStruct));
+    nWorkerCore = 1LL;
+    if(eth_ctx->io_cfg.pkt_proc_core) {
+        for (i = 0; i < coreNum && i < 64; i++) {
+            if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core) {
+                xran_core_used[xran_num_cores_used++] = i;
+                if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
+                    rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
+                eth_ctx->pkt_wrk_cfg[i].state = 1;
+                if(eth_ctx->pkt_proc_core_id == 0)
+                    eth_ctx->pkt_proc_core_id = i;
+                printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
+                eth_ctx->worker_core[eth_ctx->num_workers++] = i;
+            }
+            nWorkerCore = nWorkerCore << 1;
+        }
+    }
-    *pHandle = &DevHandle;
+    nWorkerCore = 1LL;
+    if(eth_ctx->io_cfg.pkt_proc_core_64_127) {
+        for (i = 64; i < coreNum && i < 128; i++) {
+            if (nWorkerCore & (uint64_t)eth_ctx->io_cfg.pkt_proc_core_64_127) {
+                xran_core_used[xran_num_cores_used++] = i;
+                if (rte_eal_remote_launch(eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f, eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].arg, i))
+                    rte_panic("eth_ctx->pkt_wrk_cfg[eth_ctx->num_workers].f() failed to start\n");
+                eth_ctx->pkt_wrk_cfg[i].state = 1;
+                if(eth_ctx->pkt_proc_core_id == 0)
+                    eth_ctx->pkt_proc_core_id = i;
+                printf("spawn worker %d core %d\n",eth_ctx->num_workers, i);
+                eth_ctx->worker_core[eth_ctx->num_workers++] = i;
+            }
+            nWorkerCore = nWorkerCore << 1;
+        }
+    }
-    return 0;
+    return XRAN_STATUS_SUCCESS;
 }
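The two loops above walk pkt_proc_core (cores 0-63) and pkt_proc_core_64_127 (cores 64-127) one bit at a time and launch one pre-configured worker per set bit. A minimal sketch of that bit-walk, with launch() standing in for rte_eal_remote_launch() and its surrounding bookkeeping:

#include <stdint.h>

/* Visit the set bits of a 64-bit core mask in ascending core order and
 * launch one worker on each, as the spawn loops above do. */
static void
launch_on_mask(uint64_t core_mask, unsigned first_core, void (*launch)(unsigned))
{
    uint64_t probe = 1ULL;
    for (unsigned core = first_core; core < first_core + 64U; core++) {
        if (core_mask & probe)
            launch(core);   /* one worker per set bit */
        probe <<= 1;        /* next candidate core */
    }
}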
"A" : "B"); + } - pXran->p_bufferPool[pXran->nBufferPoolIndex] = rte_pktmbuf_pool_create(pool_name, nNumberOfBuffers, - MBUF_CACHE, 0, XRAN_MAX_MBUF_LEN, rte_socket_id()); + p_xran_dev_ctx->enableCP = pConf->enableCP; + p_xran_dev_ctx->enablePrach = pConf->prachEnable; + p_xran_dev_ctx->enableSrs = pConf->srsEnable; + p_xran_dev_ctx->enableSrsCp = pConf->srsEnableCp; + p_xran_dev_ctx->nSrsDelaySym = pConf->SrsDelaySym; + p_xran_dev_ctx->puschMaskEnable = pConf->puschMaskEnable; + p_xran_dev_ctx->puschMaskSlot = pConf->puschMaskSlot; + p_xran_dev_ctx->DynamicSectionEna = pConf->DynamicSectionEna; + p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable = pConf->RunSlotPrbMapBySymbolEnable; + p_xran_dev_ctx->dssEnable = pConf->dssEnable; + p_xran_dev_ctx->dssPeriod = pConf->dssPeriod; + for(i=0; idssPeriod; i++) { + p_xran_dev_ctx->technology[i] = pConf->technology[i]; + } - pXran->bufferPoolElmSz[pXran->nBufferPoolIndex] = nBufferSize; - pXran->bufferPoolNumElm[pXran->nBufferPoolIndex] = nNumberOfBuffers; + if(pConf->GPS_Alpha || pConf->GPS_Beta ){ + offset_sec = pConf->GPS_Beta / 100; /* resolution of beta is 10ms */ + offset_nsec = (pConf->GPS_Beta - offset_sec * 100) * 1e7 + pConf->GPS_Alpha; + p_xran_dev_ctx->offset_sec = offset_sec; + p_xran_dev_ctx->offset_nsec = offset_nsec; + }else { + p_xran_dev_ctx->offset_sec = 0; + p_xran_dev_ctx->offset_nsec = 0; + } - print_dbg("[nPoolIndex %d] mb pool %p \n", pXran->nBufferPoolIndex, pXran->p_bufferPool[pXran->nBufferPoolIndex]); - *pPoolIndex = pXran->nBufferPoolIndex++; + nNumerology = xran_get_conf_numerology(p_xran_dev_ctx); - return 0; -} + if (pConf->nCC > XRAN_MAX_SECTOR_NR) { + if(pConf->log_level) + printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR); + pConf->nCC = XRAN_MAX_SECTOR_NR; + } -int32_t xran_bm_allocate_buffer(void * pHandle, uint32_t nPoolIndex, void **ppVirtAddr) -{ - XranLibHandleInfoStruct* pXran = (XranLibHandleInfoStruct*) pHandle; - *ppVirtAddr = NULL; + if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ) { + print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder); + return XRAN_STATUS_FAIL; + } - struct rte_mbuf * mb = rte_pktmbuf_alloc(pXran->p_bufferPool[nPoolIndex]); + if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) { + if((ret = xran_ruemul_init(p_xran_dev_ctx)) < 0) { + return ret; + } + } - if(mb){ - *ppVirtAddr = rte_pktmbuf_append(mb, pXran->bufferPoolElmSz[nPoolIndex]); + /* setup PRACH configuration for C-Plane */ + if(pConf->dssEnable){ + if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR))< 0) + return ret; + if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0) + return ret; + } + else{ + if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR) { + if((ret = xran_init_prach(pConf, p_xran_dev_ctx, XRAN_RAN_5GNR))< 0){ + return ret; + } + } else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE) { + if((ret = xran_init_prach_lte(pConf, p_xran_dev_ctx))< 0){ + return ret; + } + } + } - }else { - print_err("[nPoolIndex %d] mb alloc failed \n", nPoolIndex ); - return -1; + if((ret = xran_init_srs(pConf, p_xran_dev_ctx))< 0){ + return ret; } - if (*ppVirtAddr == NULL){ - print_err("[nPoolIndex %d] rte_pktmbuf_append for %d failed \n", nPoolIndex, pXran->bufferPoolElmSz[nPoolIndex]); - return -1; + if((ret = xran_cp_init_sectiondb(p_xran_dev_ctx)) < 0){ + return ret; } - return 0; -} + if((ret = xran_init_sectionid(p_xran_dev_ctx)) < 0){ + return ret; + } -int32_t 
-int32_t xran_bm_free_buffer(void * pHandle, void *pVirtAddr)
-{
-    XranLibHandleInfoStruct* pXran = (XranLibHandleInfoStruct*) pHandle;
-    rte_pktmbuf_free(pVirtAddr);
+    if((ret = xran_init_seqid(p_xran_dev_ctx)) < 0){
+        return ret;
-    return 0;
-}
+    }
+
+    if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+        if((ret = xran_init_vfs_mapping(p_xran_dev_ctx)) < 0) {
+            return ret;
+        }
-int32_t xran_5g_fronthault_config (void * pHandle,
-    XRANBufferListStruct *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
-    XRANBufferListStruct *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
-    XranTransportBlockCallbackFn pCallback,
-    void *pCallbackTag)
-{
-    XranLibHandleInfoStruct *pInfo = (XranLibHandleInfoStruct *) pHandle;
-    XranStatusInt32 nStatus = XRAN_STATUS_SUCCESS;
-    int j, i = 0, z, k;
-    struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
+        if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->fh_init.io_cfg.num_rxq > 1) {
+            if((ret = xran_init_vf_rxq_to_pcid_mapping(p_xran_dev_ctx)) < 0) {
+                return ret;
+            }
+        }
+    }
-    print_dbg("%s\n", __FUNCTION__);
+    if(pConf->ru_conf.xran_max_frame) {
+        xran_max_frame = pConf->ru_conf.xran_max_frame;
+        printf("xran_max_frame %d\n", xran_max_frame);
+    }
-    if(NULL == pHandle)
+    p_xran_dev_ctx->interval_us_local = xran_fs_get_tti_interval(nNumerology);
+    if (interval_us > p_xran_dev_ctx->interval_us_local)
     {
-        printf("Handle is NULL!\n");
-        return XRAN_STATUS_FAIL;
+        interval_us = xran_fs_get_tti_interval(nNumerology); //only update interval_us based on maximum numerology
     }
-    if (pCallback == NULL)
+
+//    if(pConf->log_level){
+        printf("%s: interval_us=%ld, interval_us_local=%d\n", __FUNCTION__, interval_us, p_xran_dev_ctx->interval_us_local);
+//    }
+    if (nNumerology >= timing_get_numerology())
     {
-        printf ("no callback\n");
-        return XRAN_STATUS_FAIL;
+        timing_set_numerology(nNumerology);
     }
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_lib_ctx->sFrontHaulTxBuffers[j][i][z][0];
+    for(i = 0 ; i < pConf->nCC; i++){
+        xran_fs_set_slot_type(pConf->dpdk_port, i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
+            pConf->frame_conf.sSlotConfig);
+    }
-            p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j];
+    xran_fs_slot_limit_init(pConf->dpdk_port, xran_fs_get_tti_interval(nNumerology));
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_lib_ctx->sFrontHaulRxBuffers[j][i][z][0];
-            p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
-        }
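xran_fs_get_tti_interval(nNumerology) above yields the slot (TTI) duration in microseconds that interval_us is scaled against. Assuming it follows the standard 3GPP numerology scaling, 1000 us at mu=0 and halved for each increment, a minimal model is:

#include <stdint.h>
#include <stdio.h>

/* 1000 us at mu=0, 500 at mu=1, 250 at mu=2, 125 at mu=3; shown only to
 * illustrate what xran_fs_get_tti_interval() is expected to return. */
static uint32_t tti_interval_us(uint8_t mu)
{
    return 1000U >> mu;
}

int main(void)
{
    for (uint8_t mu = 0; mu <= 3; mu++)
        printf("mu=%u tti=%u us\n", mu, tti_interval_us(mu));
    return 0;
}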
+    /* if send_xpmbuf2ring needs to be changed from default functions,
+     * then those should be set between xran_init and xran_open */
+    if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
+        p_xran_dev_ctx->send_cpmbuf2ring = xran_ethdi_mbuf_send_cp;
+    if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
+        p_xran_dev_ctx->send_upmbuf2ring = xran_ethdi_mbuf_send;
+
+    if(pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) {
+        if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
+            p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+    } else {
+        if(p_xran_dev_ctx->tx_sym_gen_func == NULL )
+            p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_dispatch_opt;
+    }
-#if 0
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
-            for(k = 0; k < XRAN_NUM_OF_SYMBOL_PER_SLOT; k++){
-                uint8_t *ptr = p_xran_lib_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers[k].pData;
-                printf(" sym: %2d %p 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", k, ptr, ptr[0],ptr[1], ptr[2], ptr[3], ptr[4]);
-            }
+    if (p_xran_dev_ctx->fh_init.io_cfg.bbu_offload)
+        p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_ring;
+    printf("bbu_offload %d\n", p_xran_dev_ctx->fh_init.io_cfg.bbu_offload);
+    if(pConf->dpdk_port == 0) {
+        /* create all thread on open of port 0 */
+        xran_num_cores_used = 0;
+        if(eth_ctx->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
+            eth_ctx->bbdev_dec = pConf->bbdev_dec;
+            eth_ctx->bbdev_enc = pConf->bbdev_enc;
+        }
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
-            for(k = 0; k < XRAN_NUM_OF_SYMBOL_PER_SLOT; k++){
-                uint8_t *ptr = p_xran_lib_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers[k].pData;
-                printf(" sym: %2d %p 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", k, ptr, ptr[0],ptr[1], ptr[2], ptr[3], ptr[4]);
+        if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+            printf("XRAN_UP_VF: 0x%04x\n", eth_ctx->io_cfg.port[XRAN_UP_VF]);
+            p_xran_dev_ctx->timing_source_thread_running = 0;
+            xran_core_used[xran_num_cores_used++] = eth_ctx->io_cfg.timing_core;
+            if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), eth_ctx->io_cfg.timing_core))
+                rte_panic("thread_run() failed to start\n");
+        } else if(pConf->log_level) {
+            printf("Eth port was not open. Processing thread was not started\n");
+        }
+    } else {
+        if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF) {
+            if ((ret = xran_timing_create_cbs(p_xran_dev_ctx)) < 0) {
+                return ret;
+            }
+        }
-#endif
-
-    p_xran_lib_ctx->pCallback[i] = pCallback;
-    p_xran_lib_ctx->pCallbackTag[i] = pCallbackTag;
-
-    p_xran_lib_ctx->xran2phy_mem_ready = 1;
-
-    return nStatus;
-}
-
-int32_t xran_5g_prach_req (void * pHandle,
-    XRANBufferListStruct *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
-    XranTransportBlockCallbackFn pCallback,
-    void *pCallbackTag)
-{
-    XranLibHandleInfoStruct *pInfo = (XranLibHandleInfoStruct *) pHandle;
-    XranStatusInt32 nStatus = XRAN_STATUS_SUCCESS;
-    int j, i = 0, z;
-    struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
-
-    if(NULL == pHandle)
-    {
-        printf("Handle is NULL!\n");
-        return XRAN_STATUS_FAIL;
-    }
-    if (pCallback == NULL)
-    {
-        printf ("no callback\n");
-        return XRAN_STATUS_FAIL;
-    }
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++) {
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number.
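tx_sym_gen_func above is selected once at open time (Category A vs. Category B dispatch, or the ring-based path when bbu_offload is set) and then invoked through the stored pointer on the per-symbol hot path. A generic sketch of this select-once, call-many pattern; types and names here are illustrative, not the library's:

#include <stdio.h>

typedef int (*tx_sym_fn)(int sym_id);

static int tx_cat_a(int sym_id) { printf("Cat A path, sym %d\n", sym_id); return 0; }
static int tx_cat_b(int sym_id) { printf("Cat B path, sym %d\n", sym_id); return 0; }

int main(void)
{
    int is_cat_a = 1;                      /* decided once from the FH config */
    tx_sym_fn tx = is_cat_a ? tx_cat_a : tx_cat_b;
    for (int sym = 0; sym < 3; sym++)
        tx(sym);                           /* hot path: a single indirect call */
    return 0;
}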
-#endif
-
-    p_xran_lib_ctx->pCallback[i]    = pCallback;
-    p_xran_lib_ctx->pCallbackTag[i] = pCallbackTag;
-
-    p_xran_lib_ctx->xran2phy_mem_ready = 1;
-
-    return nStatus;
-}
-
-int32_t xran_5g_prach_req (void * pHandle,
-                XRANBufferListStruct *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
-                XranTransportBlockCallbackFn pCallback,
-                void *pCallbackTag)
-{
-    XranLibHandleInfoStruct *pInfo = (XranLibHandleInfoStruct *) pHandle;
-    XranStatusInt32 nStatus = XRAN_STATUS_SUCCESS;
-    int j, i = 0, z;
-    struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
-
-    if(NULL == pHandle)
-    {
-        printf("Handle is NULL!\n");
-        return XRAN_STATUS_FAIL;
-    }
-    if (pCallback == NULL)
-    {
-        printf ("no callback\n");
-        return XRAN_STATUS_FAIL;
-    }
-    for(j=0; j<XRAN_N_FE_BUF_LEN; j++)
-        for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number.
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_lib_ctx->sFHPrachRxBuffers[j][i][z][0];
-            p_xran_lib_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
+    if((uint16_t)eth_ctx->io_cfg.port[XRAN_UP_VF] != 0xFFFF){
+        if(pConf->dpdk_port == (fh_init->xran_ports - 1)) {
+            if((ret = xran_spawn_workers()) < 0) {
+                return ret;
+            }
+        }
+        printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, sched_getcpu(), getpid());
+        printf("Waiting on Timing thread...\n");
+        while (p_xran_dev_ctx->timing_source_thread_running == 0 && wait_time--) {
+            usleep(100);
         }
     }
-    p_xran_lib_ctx->pPrachCallback[i]    = pCallback;
-    p_xran_lib_ctx->pPrachCallbackTag[i] = pCallbackTag;
-
-    return 0;
-}
-
-int32_t xran_5g_pre_compenstor_cfg(void* pHandle,
-                uint32_t nTxPhaseCps,
-                uint32_t nRxPhaseCps,
-                uint8_t nSectorId)
-{
-    /* functionality is not yet implemented */
-    return 0;
+    print_dbg("%s : %d", __FUNCTION__, pConf->dpdk_port);
+    return ret;
 }
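With `xran_open()` now also responsible for launching the timing thread (port 0) and spawning workers (last port), the call order the checks above imply looks roughly like this. A hedged sketch only: error handling and the exact `xran_init()` arguments are elided, and the real prototypes live in `xran_fh_o_du.h`:

    /* assumed bring-up/tear-down order; return codes ignored for brevity */
    void lifecycle_sketch(void *handle, struct xran_fh_config *cfg)
    {
        /* 1. xran_init(...)                  - EAL setup + device contexts      */
        /* 2. xran_register_cb_mbuf2ring(...) - optional, only between init/open */
        xran_open(handle, cfg);   /* per-port config; threads come up here       */
        xran_start(handle);       /* flips state to XRAN_RUNNING (or XRAN_OWDM)  */
        /* ... run traffic ... */
        xran_stop(handle);        /* XRAN_STOPPED; start/stop check current state */
        xran_close(handle);       /* frees section DB, uninits pdump if enabled  */
    }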
-int32_t xran_open(void *pHandle, PXRANFHCONFIG pConf)
+int32_t
+xran_start(void *pHandle)
 {
+    struct tm * ptm;
+    /* ToS = Top of Second start +- 1.5us */
+    struct timespec ts;
+    char buff[100];
     int i;
-    uint8_t slotNr;
-    XRANFHCONFIG *pFhCfg;
-    xRANPrachCPConfigStruct *pPrachCPConfig = &(xran_lib_get_ctx()->PrachCPConfig);
-    pFhCfg = &(xran_lib_get_ctx()->xran_fh_cfg);
-    memcpy(pFhCfg, pConf, sizeof(XRANFHCONFIG));
-    PXRANPRACHCONFIG pPRACHConfig = &pFhCfg->prach_conf;
-    uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
-    const xRANPrachConfigTableStruct *pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
-    uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
-    const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt - FORMAT_A1];
-    memset(pPrachCPConfig, 0, sizeof(xRANPrachCPConfigStruct));
-
-    //setup PRACH configuration for C-Plane
-    pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC;  // 3, PRACH preamble format A1~3, B1~4, C0, C2
-    pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
-    pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
-    pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
-    pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
-    pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
-    pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
-    pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
-    pPrachCPConfig->x = pxRANPrachConfigTable->x;
-    pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
-    pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
-
-    pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
-    for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+    struct xran_prb_map * prbMap0 = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[0][0][0].sBufferList.pBuffers->pData;
+    for(i = 0; i < XRAN_MAX_SECTIONS_PER_SLOT && i < prbMap0->nPrbElm; i++)
     {
-        slotNr = pxRANPrachConfigTable->slotNr[i];
-        if (slotNr > 0)
-            pPrachCPConfig->isPRACHslot[slotNr] = 1;
+        p_xran_dev_ctx->numSetBFWs_arr[i] = prbMap0->prbMap[i].bf_weight.numSetBFWs;
     }
-    xran_cp_init_sectiondb(pHandle);
-    xran_init_sectionid(pHandle);
-    xran_init_seqid(pHandle);
-
-    return 0;
-}
+    if(xran_get_if_state() == XRAN_RUNNING) {
+        print_err("Already STARTED!!");
+        return (-1);
+    }
+    timespec_get(&ts, TIME_UTC);
+    ptm = gmtime(&ts.tv_sec);
+    if(ptm){
+        strftime(buff, sizeof(buff), "%D %T", ptm);
+        printf("%s: XRAN start time: %s.%09ld UTC [%ld]\n",
+            (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU ? "O-DU": "O-RU"), buff, ts.tv_nsec, interval_us);
+    }

-int32_t xran_start(void *pHandle)
-{
+    if (p_xran_dev_ctx->fh_init.io_cfg.eowd_cmn[p_xran_dev_ctx->fh_init.io_cfg.id].owdm_enable)
+    {
+        xran_if_current_state = XRAN_OWDM;
+    }
+    else
+    {
     xran_if_current_state = XRAN_RUNNING;
+    }
     return 0;
 }

-int32_t xran_stop(void *pHandle)
+int32_t
+xran_stop(void *pHandle)
 {
+    if(xran_get_if_state() == XRAN_STOPPED) {
+        print_err("Already STOPPED!!");
+        return (-1);
+    }
+    xran_if_current_state = XRAN_STOPPED;
     return 0;
 }

-int32_t xran_close(void *pHandle)
+int32_t
+xran_close(void *pHandle)
 {
+    int32_t ret = XRAN_STATUS_SUCCESS;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
     xran_if_current_state = XRAN_STOPPED;
-    xran_cp_free_sectiondb(pHandle);
-    rte_eal_mp_wait_lcore();
-    return 0;
-}
+    ret = xran_cp_free_sectiondb(p_xran_dev_ctx);

-int32_t xran_mm_destroy (void * pHandle)
-{
-    /* functionality is not yet implemented */
-    return -1;
-}
+    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)
+        xran_ruemul_release(p_xran_dev_ctx);

-int32_t xran_reg_sym_cb(void *pHandle, XRANFHSYMPROCCB symCb, void * symCbParam, uint8_t symb, uint8_t ant)
-{
-    /* functionality is not yet implemented */
-    return -1;
+#ifdef RTE_LIBRTE_PDUMP
+    /* uninitialize packet capture framework */
+    rte_pdump_uninit();
+#endif
+    return ret;
 }
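The start-time banner in the new `xran_start()` uses only plain C11 time facilities, so the same pattern is reproducible standalone:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;
        char buff[100];
        timespec_get(&ts, TIME_UTC);          /* C11 wall-clock read, ns resolution */
        struct tm *ptm = gmtime(&ts.tv_sec);  /* split seconds into calendar fields */
        if (ptm) {
            strftime(buff, sizeof(buff), "%D %T", ptm);
            printf("start time: %s.%09ld UTC\n", buff, ts.tv_nsec);
        }
        return 0;
    }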
-int32_t xran_reg_physide_cb(void *pHandle, XRANFHTTIPROCCB Cb, void *cbParam, int skipTtiNum, enum callback_to_phy_id id)
+/* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open;
+ * each cb will be set to its default during open if it is left NULL */
+int32_t
+xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
 {
-    struct xran_lib_ctx * p_xran_lib_ctx = xran_lib_get_ctx();
+    struct xran_device_ctx *p_xran_dev_ctx;

-    p_xran_lib_ctx->ttiCb[id]      = Cb;
-    p_xran_lib_ctx->TtiCbParam[id] = cbParam;
-    p_xran_lib_ctx->SkipTti[id]    = skipTtiNum;
+    if(xran_get_if_state() == XRAN_RUNNING) {
+        print_err("Cannot register callback while running!!\n");
+        return (-1);
+    }

-    return 0;
+    p_xran_dev_ctx = xran_dev_get_ctx();
+
+    p_xran_dev_ctx->send_cpmbuf2ring = mbuf_send_cp;
+    p_xran_dev_ctx->send_upmbuf2ring = mbuf_send_up;
+
+    p_xran_dev_ctx->tx_sym_gen_func = xran_process_tx_sym_cp_on_opt;
+
+    return (0);
 }

-int32_t xran_get_slot_idx (uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond)
+int32_t
+xran_get_slot_idx (uint32_t PortId, uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond)
 {
     int32_t tti = 0;
+    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx_by_id(PortId);
+    if (!p_xran_dev_ctx) {
+        print_err("Null xRAN context on port id %u!!\n", PortId);
+        return 0;
+    }

-    tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
-    *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
-    *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
-    *nFrameIdx    = (uint32_t)XranGetFrameNum(tti, SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
+    tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx[PortId], XRAN_NUM_OF_SYMBOL_PER_SLOT);
+    *nSlotIdx     = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
+    *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local), SUBFRAMES_PER_SYSTEMFRAME);
+    *nFrameIdx    = (uint32_t)XranGetFrameNum(tti, xran_getSfnSecStart(), SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(p_xran_dev_ctx->interval_us_local));
     *nSecond      = timing_get_current_second();

     return tti;
 }

-/**
- * @brief Get supported maximum number of sections
- *
- * @return maximum number of sections
- */
-inline uint8_t xran_get_max_sections(void *pHandle)
+int32_t
+xran_set_debug_stop(int32_t value, int32_t count)
 {
-    return (XRAN_MAX_NUM_SECTIONS);
-}
+    return timing_set_debug_stop(value, count);
 }

-/**
- * @brief Get the configuration of eAxC ID
- *
- * @return the pointer of configuration
- */
-inline XRANEAXCIDCONFIG *xran_get_conf_eAxC(void *pHandle)
-{
-    return (&(xran_lib_get_ctx()->eAxc_id_cfg));
-}

-/**
- * @brief Get the configuration of subcarrier spacing for PRACH
- *
- * @return subcarrier spacing value for PRACH
- */
-inline uint8_t xran_get_conf_prach_scs(void *pHandle)
+int32_t xran_get_num_prb_elm(struct xran_prb_map* p_PrbMapIn, uint32_t mtu)
 {
-    return (xran_lib_get_ctx_fhcfg()->prach_conf.nPrachSubcSpacing);
-}
+    int32_t i, j = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src;
+    int32_t nRBremain;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+    uint32_t nRBSize = 0;
+
+    if (mtu==9600)
+        nmaxRB--; //for some reason when mtu is 9600, only 195 RBs can be sent, not 196
+
+    for (i = 0; i < p_PrbMapIn->nPrbElm; i++)
+    {
+        p_prb_elm_src = &p_PrbMapIn->prbMap[i];
+        if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
+        {
+            j++;
+        }
+        else
+        {
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            j++;
+            while (nRBremain > 0)
+            {
+                nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                nRBremain -= nRBSize;
+                j++;
+            }
+        }
+    }

-/**
- * @brief Get the configuration of FFT size for RU
- *
- * @return FFT size value for RU
- */
-inline uint8_t xran_get_conf_fftsize(void *pHandle)
-{
-    return (xran_lib_get_ctx_fhcfg()->ru_conf.fftSize);
+    return j;
 }
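`xran_get_num_prb_elm()` is a dry run of the splitters that follow: it only counts how many U-plane sections the map will need once every PRB element is capped at `nmaxRB` RBs per packet, which is simply a ceiling division. A self-contained sketch of the same count, where the 183-RB budget is a made-up stand-in for the MTU/header arithmetic above:

    #include <stdint.h>
    #include <stdio.h>

    /* sections needed for one PRB element; equivalent to ceil(nRBSize/nmaxRB) */
    static int32_t num_sections(int32_t nRBSize, int32_t nmaxRB)
    {
        int32_t n = 1;                        /* first packet always counts */
        int32_t remain = nRBSize - nmaxRB;
        while (remain > 0) {
            remain -= (remain < nmaxRB) ? remain : nmaxRB;
            n++;
        }
        return n;
    }

    int main(void)
    {
        /* e.g. 273 RBs against a hypothetical 183-RB per-packet budget */
        printf("%d\n", num_sections(273, 183));   /* -> 2 */
        return 0;
    }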
-/**
- * @brief Get the configuration of numerology
- *
- * @return subcarrier spacing value for PRACH
- */
-inline uint8_t xran_get_conf_numerology(void *pHandle)
-{
-    return (xran_lib_get_ctx_fhcfg()->frame_conf.nNumerology);
-}

-/**
- * @brief Get the configuration of IQ bit width for RU
- *
- * @return IQ bit width for RU
- */
-inline uint8_t xran_get_conf_iqwidth(void *pHandle)
+int32_t xran_init_PrbMap_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
 {
-    XRANFHCONFIG *pFhCfg;
+    int32_t i, j = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
+    int32_t nRBStart_tmp, nRBremain;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+
+    if (mtu==9600)
+        nmaxRB--; //for some reason when mtu is 9600, only 195 RBs can be sent, not 196
+
+    memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
+    for (i = 0; i < p_PrbMapIn->nPrbElm; i++)
+    {
+        p_prb_elm_src = &p_PrbMapIn->prbMap[i];
+        p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+        memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+
+        // int32_t nStartSymb, nEndSymb, numSymb, nRBStart, nRBEnd, nRBSize;
+        // nStartSymb = p_prb_elm_src->nStartSymb;
+        // nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
+        if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
+        {
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+        }
+        else
+        {
+            nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+            while (nRBremain > 0)
+            {
+                p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+                memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+                p_prb_elm_dst->IsNewSect = 0;
+                p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
+                nRBremain -= p_prb_elm_dst->UP_nRBSize;
+                nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
+                p_prb_elm_dst->nSectId = i;
+                j++;
+            }
+        }
+    }

-    pFhCfg = xran_lib_get_ctx_fhcfg();
-    return ((pFhCfg->ru_conf.iqWidth==16)?0:pFhCfg->ru_conf.iqWidth);
+    p_PrbMapOut->nPrbElm = j;
+    return 0;
 }
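The splitting rule in `xran_init_PrbMap_from_cfg()`, where the first fragment keeps `IsNewSect = 1` while continuations clear it and advance `UP_nRBStart`, reduces to this self-contained sketch (field names shortened; `nmaxRB = 100` is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    struct chunk { int32_t start, size, is_new_sect; };

    /* carve {nRBStart, nRBSize} into chunks of at most nmaxRB RBs */
    static int split_prb_elm(int32_t nRBStart, int32_t nRBSize, int32_t nmaxRB,
                             struct chunk *out)
    {
        int n = 0;
        int32_t remain = nRBSize, start = nRBStart;
        while (remain > 0) {
            int32_t sz = (remain < nmaxRB) ? remain : nmaxRB;
            out[n].start = start;
            out[n].size = sz;
            out[n].is_new_sect = (n == 0);  /* only the first fragment opens a section on TX */
            start += sz;
            remain -= sz;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct chunk c[8];
        int n = split_prb_elm(0, 273, 100, c);   /* -> 100 + 100 + 73 */
        for (int i = 0; i < n; i++)
            printf("sect? %d start %d size %d\n", c[i].is_new_sect, c[i].start, c[i].size);
        return 0;
    }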
-/**
- * @brief Get the configuration of compression method for RU
- *
- * @return Compression method for RU
- */
-inline uint8_t xran_get_conf_compmethod(void *pHandle)
-{
-    return (xran_lib_get_ctx_fhcfg()->ru_conf.compMeth);
-}

-/**
- * @brief Get the configuration of lls-cu ID
- *
- * @return Configured lls-cu ID
- */
-inline uint8_t xran_get_llscuid(void *pHandle)
+int32_t xran_init_PrbMap_from_cfg_for_rx(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu)
 {
-    return (xran_lib_get_ctx()->llscu_id);
+    int32_t i, j = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
+    int32_t nRBStart_tmp, nRBremain;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+
+    if (mtu==9600)
+        nmaxRB--; //for some reason when mtu is 9600, only 195 RBs can be sent, not 196
+    nmaxRB *= XRAN_MAX_FRAGMENT;
+
+    memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
+    for (i = 0; i < p_PrbMapIn->nPrbElm; i++)
+    {
+        p_prb_elm_src = &p_PrbMapIn->prbMap[i];
+        p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+        memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+
+        if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
+        {
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = j;
+            j++;
+        }
+        else
+        {
+            nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = j;
+            j++;
+            while (nRBremain > 0)
+            {
+                p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+                memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+                p_prb_elm_dst->IsNewSect = 1;
+                p_prb_elm_dst->nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->nRBStart = nRBStart_tmp;
+                p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
+                nRBremain -= p_prb_elm_dst->UP_nRBSize;
+                nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
+                p_prb_elm_dst->nSectId = j;
+                j++;
+            }
+        }
+    }
+
+    p_PrbMapOut->nPrbElm = j;
+    return 0;
 }
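The RX variant differs from the TX one in two ways visible above: the per-section cap is multiplied by `XRAN_MAX_FRAGMENT` because the receiver may reassemble that many packets into one section, and every resulting element keeps `IsNewSect = 1` with its own `nSectId`. The budget arithmetic, with assumed stand-in numbers:

    #include <stdio.h>

    int main(void)
    {
        int nmaxRB_pkt = 183;   /* hypothetical per-packet RB budget from the MTU math */
        int max_frag   = 4;     /* stand-in for XRAN_MAX_FRAGMENT */
        int rx_cap     = nmaxRB_pkt * max_frag;
        /* ceil-divide: sections needed for a 273-RB element under each cap */
        printf("TX sections: %d, RX sections: %d\n",
               (273 + nmaxRB_pkt - 1) / nmaxRB_pkt, (273 + rx_cap - 1) / rx_cap);
        return 0;   /* -> TX sections: 2, RX sections: 1 */
    }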
-/**
- * @brief Get the configuration of sector ID
- *
- * @return Configured sector ID
- */
-inline uint8_t xran_get_sectorid(void *pHandle)
+int32_t xran_init_PrbMap_by_symbol_from_cfg(struct xran_prb_map* p_PrbMapIn, struct xran_prb_map* p_PrbMapOut, uint32_t mtu, uint32_t xran_max_prb)
 {
-    return (xran_lib_get_ctx()->sector_id);
+    int32_t i = 0, j = 0, nPrbElm = 0;
+    int16_t iqwidth = p_PrbMapIn->prbMap[0].iqWidth;
+    struct xran_prb_elm *p_prb_elm_src, *p_prb_elm_dst;
+    struct xran_prb_elm prbMapTemp[XRAN_NUM_OF_SYMBOL_PER_SLOT];
+    int32_t nRBStart_tmp, nRBremain, nStartSymb, nEndSymb, nRBStart, nRBEnd, nRBSize;
+    // int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);
+    // int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqwidth);
+    int32_t eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr) - sizeof(struct data_section_hdr);
+    int32_t nmaxRB = (mtu - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/(XRAN_PAYLOAD_1_RB_SZ(iqwidth)+sizeof(struct data_section_hdr));
+
+    if (mtu==9600)
+        nmaxRB--; //for some reason when mtu is 9600, only 195 RBs can be sent, not 196
+
+    memcpy(p_PrbMapOut, p_PrbMapIn, sizeof(struct xran_prb_map));
+    for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
+    {
+        p_prb_elm_dst = &prbMapTemp[i];
+        // nRBStart = 273;
+        nRBStart = xran_max_prb;
+        nRBEnd = 0;
+
+        for(j = 0; j < p_PrbMapIn->nPrbElm; j++)
+        {
+            p_prb_elm_src = &(p_PrbMapIn->prbMap[j]);
+            nStartSymb = p_prb_elm_src->nStartSymb;
+            nEndSymb = nStartSymb + p_prb_elm_src->numSymb;
+
+            if((i >= nStartSymb) && (i < nEndSymb))
+            {
+                if(nRBStart > p_prb_elm_src->nRBStart)
+                {
+                    nRBStart = p_prb_elm_src->nRBStart;
+                }
+                if(nRBEnd < (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize))
+                {
+                    nRBEnd = (p_prb_elm_src->nRBStart + p_prb_elm_src->nRBSize);
+                }
+
+                p_prb_elm_dst->nBeamIndex = p_prb_elm_src->nBeamIndex;
+                p_prb_elm_dst->bf_weight_update = p_prb_elm_src->bf_weight_update;
+                p_prb_elm_dst->compMethod = p_prb_elm_src->compMethod;
+                p_prb_elm_dst->iqWidth = p_prb_elm_src->iqWidth;
+                p_prb_elm_dst->ScaleFactor = p_prb_elm_src->ScaleFactor;
+                p_prb_elm_dst->reMask = p_prb_elm_src->reMask;
+                p_prb_elm_dst->BeamFormingType = p_prb_elm_src->BeamFormingType;
+            }
+        }
+
+        if(nRBEnd < nRBStart)
+        {
+            p_prb_elm_dst->nRBStart = 0;
+            p_prb_elm_dst->nRBSize = 0;
+            p_prb_elm_dst->nStartSymb = i;
+            p_prb_elm_dst->numSymb = 1;
+        }
+        else
+        {
+            p_prb_elm_dst->nRBStart = nRBStart;
+            p_prb_elm_dst->nRBSize = nRBEnd - nRBStart;
+            p_prb_elm_dst->nStartSymb = i;
+            p_prb_elm_dst->numSymb = 1;
+        }
+    }
+
+    for(i = 0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
+    {
+        if((prbMapTemp[i].nRBSize != 0))
+        {
+            nRBStart = prbMapTemp[i].nRBStart;
+            nRBSize = prbMapTemp[i].nRBSize;
+            prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
+            prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
+            prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
+            prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
+            prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
+            prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
+            prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
+            prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
+            prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
+            prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
+            i++;
+            break;
+        }
+    }
+
+    for(; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
+    {
+        if((nRBStart == prbMapTemp[i].nRBStart) && (nRBSize == prbMapTemp[i].nRBSize))
+        {
+            prbMapTemp[nPrbElm].numSymb++;
+        }
+        else
+        {
+            nPrbElm++;
+            prbMapTemp[nPrbElm].nStartSymb = prbMapTemp[i].nStartSymb;
+            prbMapTemp[nPrbElm].nRBStart = prbMapTemp[i].nRBStart;
+            prbMapTemp[nPrbElm].nRBSize = prbMapTemp[i].nRBSize;
+            prbMapTemp[nPrbElm].nBeamIndex = prbMapTemp[i].nBeamIndex;
+            prbMapTemp[nPrbElm].bf_weight_update = prbMapTemp[i].bf_weight_update;
+            prbMapTemp[nPrbElm].compMethod = prbMapTemp[i].compMethod;
+            prbMapTemp[nPrbElm].iqWidth = prbMapTemp[i].iqWidth;
+            prbMapTemp[nPrbElm].ScaleFactor = prbMapTemp[i].ScaleFactor;
+            prbMapTemp[nPrbElm].reMask = prbMapTemp[i].reMask;
+            prbMapTemp[nPrbElm].BeamFormingType = prbMapTemp[i].BeamFormingType;
+
+            nRBStart = prbMapTemp[i].nRBStart;
+            nRBSize = prbMapTemp[i].nRBSize;
+        }
+    }
+
+    for(i = 0; i < nPrbElm; i++)
+    {
+        if(prbMapTemp[i].nRBSize == 0)
+            prbMapTemp[i].nRBSize = 1;
+    }
+
+    if(prbMapTemp[nPrbElm].nRBSize != 0)
+        nPrbElm++;
+
+    j = 0;
+
+    for (i = 0; i < nPrbElm; i++)
+    {
+        p_prb_elm_src = &prbMapTemp[i];
+        p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+        memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+        if (p_prb_elm_src->nRBSize <= nmaxRB) //no fragmentation needed
+        {
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = p_prb_elm_src->nRBSize;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+        }
+        else
+        {
+            nRBStart_tmp = p_prb_elm_src->nRBStart + nmaxRB;
+            nRBremain = p_prb_elm_src->nRBSize - nmaxRB;
+            p_prb_elm_dst->IsNewSect = 1;
+            p_prb_elm_dst->UP_nRBSize = nmaxRB;
+            p_prb_elm_dst->UP_nRBStart = p_prb_elm_src->nRBStart;
+            p_prb_elm_dst->nSectId = i;
+            j++;
+            while (nRBremain > 0)
+            {
+                p_prb_elm_dst = &p_PrbMapOut->prbMap[j];
+                memcpy(p_prb_elm_dst, p_prb_elm_src, sizeof(struct xran_prb_elm));
+                p_prb_elm_dst->IsNewSect = 0;
+                p_prb_elm_dst->UP_nRBSize = RTE_MIN(nmaxRB, nRBremain);
+                p_prb_elm_dst->UP_nRBStart = nRBStart_tmp;
+                nRBremain -= p_prb_elm_dst->UP_nRBSize;
+                nRBStart_tmp += p_prb_elm_dst->UP_nRBSize;
+                p_prb_elm_dst->nSectId = i;
+                j++;
+            }
+        }
+    }
+
+    p_PrbMapOut->nPrbElm = j;
+
+    return 0;
 }
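The three-stage flow above (per-symbol bounding boxes, then merging symbols whose boxes match, then the usual MTU split) is easiest to see on the middle stage. A sketch of the merge with fabricated per-symbol rectangles:

    #include <stdint.h>
    #include <stdio.h>

    struct sym_rect { int32_t start, size, first_sym, num_sym; };

    /* in: one {start,size} rectangle per symbol (14); out: merged sections */
    static int merge_symbols(const int32_t start[14], const int32_t size[14],
                             struct sym_rect *out)
    {
        int n = 0;
        out[0] = (struct sym_rect){ start[0], size[0], 0, 1 };
        for (int s = 1; s < 14; s++) {
            if (start[s] == out[n].start && size[s] == out[n].size) {
                out[n].num_sym++;           /* same footprint: extend the section */
            } else {
                n++;
                out[n] = (struct sym_rect){ start[s], size[s], s, 1 };
            }
        }
        return n + 1;
    }

    int main(void)
    {
        int32_t st[14] = {0}, sz[14];
        for (int s = 0; s < 14; s++)
            sz[s] = (s < 2) ? 20 : 273;     /* e.g. control symbols, then data */
        struct sym_rect r[14];
        int n = merge_symbols(st, sz, r);   /* -> 2 sections: sym 0..1 and 2..13 */
        for (int i = 0; i < n; i++)
            printf("sym %d..%d: start %d size %d\n", r[i].first_sym,
                   r[i].first_sym + r[i].num_sym - 1, r[i].start, r[i].size);
        return 0;
    }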
-/**
- * @brief Get the configuration of the number of component carriers
- *
- * @return Configured number of component carriers
- */
-inline uint8_t xran_get_num_cc(void *pHandle)
+inline void MLogXRANTask(uint32_t taskid, uint64_t ticksstart, uint64_t ticksstop)
 {
-    return (xran_lib_get_ctx_fhcfg()->nCC);
+    if (mlogxranenable)
+    {
+        MLogTask(taskid, ticksstart, ticksstop);
+    }
+    return;
 }

-/**
- * @brief Get the configuration of the number of antennas
- *
- * @return Configured number of antennas
- */
-inline uint8_t xran_get_num_eAxc(void *pHandle)
+inline uint64_t MLogXRANTick(void)
 {
-    return (xran_lib_get_ctx_fhcfg()->neAxc);
+    if (mlogxranenable)
+        return MLogTick();
+    else
+        return 0;
 }
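`MLogXRANTask()` and `MLogXRANTick()` simply gate the MLog instrumentation behind the `mlogxranenable` flag, so the hot path pays only a flag test when tracing is off. The same pattern with a generic monotonic clock standing in for `MLogTick()` (the MLog calls themselves are Intel's tracing API, not reproduced here):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int log_enable;

    static uint64_t tick_now(void)   /* stand-in for MLogTick() */
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    static inline uint64_t task_tick(void)          /* mirrors MLogXRANTick() */
    {
        return log_enable ? tick_now() : 0;
    }

    static inline void task_log(uint32_t id, uint64_t t0, uint64_t t1)
    {
        if (log_enable)                             /* mirrors MLogXRANTask() */
            printf("task %u took %llu ns\n", id, (unsigned long long)(t1 - t0));
    }

    int main(void)
    {
        log_enable = 1;
        uint64_t t0 = task_tick();
        /* ... measured work would go here ... */
        task_log(1, t0, task_tick());
        return 0;
    }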