/* Integer division of X by Y rounded up (ceiling). Arguments are fully
 * parenthesized so expression arguments such as DIV_ROUND_OFFSET(a + b, c)
 * expand correctly; X and Y are each evaluated twice, so avoid side effects. */
#define DIV_ROUND_OFFSET(X,Y) ( (X)/(Y) + (((X)%(Y))?1:0) )
-#define XranOffsetSym(offSym, otaSym, numSymTotal) (((int32_t)offSym > (int32_t)otaSym) ? \
- ((int32_t)otaSym + ((int32_t)numSymTotal) - (uint32_t)offSym) : \
- (((int32_t)otaSym - (int32_t)offSym) >= numSymTotal) ? \
- (((int32_t)otaSym - (int32_t)offSym) - numSymTotal) : \
- ((int32_t)otaSym - (int32_t)offSym))
-
#define MAX_NUM_OF_XRAN_CTX (2)
/* Advance / rewind a context index with wrap-around in [0, MAX_NUM_OF_XRAN_CTX).
 * ctx is parenthesized so expression arguments (e.g. a ternary) expand
 * correctly; ctx is evaluated twice, so avoid side effects. */
#define XranIncrementCtx(ctx)                 (((ctx) >= (MAX_NUM_OF_XRAN_CTX-1)) ? 0 : ((ctx)+1))
#define XranDecrementCtx(ctx)                 (((ctx) == 0) ? (MAX_NUM_OF_XRAN_CTX-1) : ((ctx)-1))
   The GPS epoch (1980-01-06) is 315 964 800 s after the Unix epoch, and GPS
   time is 18 s ahead of UTC (accumulated leap seconds), so the Unix-to-GPS
   offset is 315 964 800 - 18 = 315 964 782.
*/
#define UNIX_TO_GPS_SECONDS_OFFSET 315964782UL
+#define NUM_OF_FRAMES_PER_SFN_PERIOD 1024
#define NUM_OF_FRAMES_PER_SECOND 100
//#define XRAN_CREATE_RBMAP /**< generate slot map base on symbols */
uint32_t tti_to_process;
};
/**
 * Position of a computed symbol index relative to the current
 * symbol-counting period (see XranOffsetSym()).
 */
enum xran_in_period
{
    XRAN_IN_PREV_PERIOD = 0, /**< offset wrapped into the previous period */
    XRAN_IN_CURR_PERIOD = 1, /**< offset stayed within the current period */
    XRAN_IN_NEXT_PERIOD = 2  /**< offset wrapped into the next period     */
};
+
/* Per-port, per-sector library instance handles handed back to the application. */
static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {NULL};
/* One device context per xRAN port. */
static struct xran_device_ctx g_xran_dev_ctx[XRAN_PORTS_NUM] = { 0 };
/* Double-buffered timer context consumed by the TTI timer callbacks. */
struct xran_timer_ctx timer_ctx[MAX_NUM_OF_XRAN_CTX];
/* Rotating pool of contexts for deadline-timer callbacks; written by
   xran_timer_arm_for_deadline() and passed to the armed callback. */
+struct xran_timer_ctx cb_timer_ctx[10*MAX_NUM_OF_XRAN_CTX];
+
/* One-shot timers used to deliver TTI events to the PHY. */
static struct rte_timer tti_to_phy_timer[10];
static struct rte_timer sym_timer;
{
}
-static struct rte_mbuf_ext_shared_info share_data[XRAN_N_FE_BUF_LEN];
+static struct rte_mbuf_ext_shared_info share_data[XRAN_N_FE_BUF_LEN][XRAN_MAX_SECTOR_NR][XRAN_MAX_ANTENNA_NR];
+static struct rte_mbuf_ext_shared_info cp_share_data[XRAN_N_FE_BUF_LEN][XRAN_MAX_SECTOR_NR][XRAN_MAX_ANTENNA_NR];
+
void xran_timer_arm(struct rte_timer *tim, void* arg);
void tti_to_phy_cb(struct rte_timer *tim, void *arg);
void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore);
+
// Return SFN at current second start, 10 bits, [0, 1023]
static inline uint16_t xran_getSfnSecStart(void)
{
}
void xran_updateSfnSecStart(void)
{
+ struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+ struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
uint64_t currentSecond = timing_get_current_second();
// Assume always positive
uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
xran_SFN_at_Sec_Start = sfn;
- tx_bytes_per_sec = tx_bytes_counter;
- rx_bytes_per_sec = rx_bytes_counter;
- tx_bytes_counter = 0;
- rx_bytes_counter = 0;
+ pCnt->tx_bytes_per_sec = pCnt->tx_bytes_counter;
+ pCnt->rx_bytes_per_sec = pCnt->rx_bytes_counter;
+ pCnt->tx_bytes_counter = 0;
+ pCnt->rx_bytes_counter = 0;
}
static inline int32_t xran_getSlotIdxSecond(void)
return (&(xran_dev_get_ctx()->fh_cfg));
}
+static inline int32_t XranOffsetSym(int32_t offSym, int32_t otaSym, int32_t numSymTotal, enum xran_in_period* pInPeriod)
+{
+ int32_t sym;
+
+ // Suppose the offset is usually small
+ if (unlikely(offSym > otaSym))
+ {
+ sym = numSymTotal - offSym + otaSym;
+ *pInPeriod = XRAN_IN_PREV_PERIOD;
+ }
+ else
+ {
+ sym = otaSym - offSym;
+
+ if (unlikely(sym >= numSymTotal))
+ {
+ sym -= numSymTotal;
+ *pInPeriod = XRAN_IN_NEXT_PERIOD;
+ }
+ else
+ {
+ *pInPeriod = XRAN_IN_CURR_PERIOD;
+ }
+ }
+
+ return sym;
+}
+
uint16_t xran_get_beamid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id)
{
return (0); // NO BEAMFORMING
return (XRAN_STATUS_SUCCESS);
}
/**
 * Initialize PRACH configuration for LTE operation.
 *
 * LTE currently reuses the common PRACH setup, so this simply forwards to
 * xran_init_prach().
 *
 * @param pConf          front-haul configuration with PRACH parameters
 * @param p_xran_dev_ctx device context to update
 * @return status code from xran_init_prach()
 */
int xran_init_prach_lte(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
    return xran_init_prach(pConf, p_xran_dev_ctx);
}
int xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
{
if(xran_upul_seq_id_num[cc_id][ant_id] == seq_id) { /* expected sequence */
return (XRAN_STATUS_SUCCESS);
} else {
- print_err("expected seqid %u received %u, slot %u, ant %u cc %u", xran_upul_seq_id_num[cc_id][ant_id], seq_id, slot_id, ant_id, cc_id);
+ print_dbg("expected seqid %u received %u, slot %u, ant %u cc %u", xran_upul_seq_id_num[cc_id][ant_id], seq_id, slot_id, ant_id, cc_id);
xran_upul_seq_id_num[cc_id][ant_id] = seq_id; // for next
return (-1);
}
}
}
+uint32_t xran_slotid_convert(uint16_t slot_id, uint16_t dir) //dir = 0, from PHY slotid to xran spec slotid as defined in 5.3.2, dir=1, from xran slotid to phy slotid
+{
+#ifdef FCN_ADAPT
+ return slot_id;
+#endif
+
+ struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+ uint8_t mu = p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology;
+ uint8_t FR = 1;
+ if (mu > 2)
+ FR=2;
+ if (dir == 0)
+ {
+ if (FR == 1)
+ {
+ return (slot_id << (2-mu));
+ }
+ else
+ {
+ return (slot_id << (3-mu));
+ }
+ }
+ else
+ {
+ if (FR == 1)
+ {
+ return (slot_id >> (2-mu));
+ }
+ else
+ {
+ return (slot_id >> (3-mu));
+ }
+ }
+
+}
static struct xran_section_gen_info cpSections[XRAN_MAX_NUM_SECTIONS];
static struct xran_cp_gen_params cpInfo;
*used_tick += get_ticks_diff(xran_tick(), t3);
}
+#if 0
if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 3){
if(p_xran_dev_ctx->phy_tti_cb_done == 0){
/* rearm timer to deliver TTI event to PHY */
*used_tick += get_ticks_diff(xran_tick(), t3);
}
}
+#endif
+
t3 = xran_tick();
if (xran_process_tx_sym(timer_ctx))
struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+ unsigned tim_lcore = (p_xran_dev_ctx->fh_init.io_cfg.pkt_proc_core) ? p_xran_dev_ctx->pkt_proc_core_id :
+ p_xran_dev_ctx->fh_init.io_cfg.timing_core;
MLogTask(PID_TTI_TIMER, t1, MLogTick());
/* To match TTbox */
mlogVar[mlogVarCnt++] = 0;
MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
- if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_LLS_CU)
+ if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU)
next_tti = xran_lib_ota_tti + 1;
else
next_tti = xran_lib_ota_tti;
print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
- if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_LLS_CU){
+ if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_O_DU){
pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = next_tti;
} else {
pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti & 1)^1].tti_to_process;
}
p_xran_dev_ctx->phy_tti_cb_done = 0;
- xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_dev_ctx->fh_init.io_cfg.timing_core);
+ xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, tim_lcore);
//slot index is increased to next slot at the beginning of current OTA slot
xran_lib_ota_tti++;
MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick());
}
+void xran_timer_arm_for_deadline(struct rte_timer *tim, void* arg)
+{
+ struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+ uint64_t t3 = MLogTick();
+ static int timer_cnt = 0;
+ unsigned tim_lcore = (p_xran_dev_ctx->fh_init.io_cfg.pkt_proc_core) ? p_xran_dev_ctx->pkt_proc_core_id :
+ p_xran_dev_ctx->fh_init.io_cfg.timing_core;
+
+ int32_t rx_tti;
+ int32_t cc_id;
+ uint32_t nFrameIdx;
+ uint32_t nSubframeIdx;
+ uint32_t nSlotIdx;
+ uint64_t nSecond;
+
+
+ xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+ rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+ + nSubframeIdx*SLOTNUM_PER_SUBFRAME
+ + nSlotIdx;
+
+ cb_timer_ctx[timer_cnt].tti_to_process = rx_tti;
+
+ if (xran_if_current_state == XRAN_RUNNING){
+ rte_timer_cb_t fct = (rte_timer_cb_t)arg;
+ rte_timer_init(tim);
+ rte_timer_reset_sync(tim, 0, SINGLE, tim_lcore, fct, &cb_timer_ctx[timer_cnt++]);
+ if (timer_cnt >= 10*MAX_NUM_OF_XRAN_CTX)
+ timer_cnt = 0;
+ }
+
+ MLogTask(PID_TIME_ARM_TIMER_DEADLINE, t3, MLogTick());
+}
+
+
void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore)
{
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick());
}
+uint16_t xran_map_ecpriRtcid_to_vf(int32_t dir, int32_t cc_id, int32_t ru_port_id)
+{
+ return XRAN_CP_VF;
+}
+
+uint16_t xran_map_ecpriPcid_to_vf(int32_t dir, int32_t cc_id, int32_t ru_port_id)
+{
+ return XRAN_UP_VF;
+}
+
int xran_cp_create_and_send_section(void *pHandle, uint8_t ru_port_id, int dir, int tti, int cc_id,
struct xran_prb_map *prbMap, enum xran_category category, uint8_t ctx_id)
{
- struct xran_device_ctx *p_x_ctx = xran_dev_get_ctx();
+ struct xran_device_ctx *p_x_ctx = xran_dev_get_ctx();
+ struct xran_common_counters *pCnt = &p_x_ctx->fh_counters;
struct xran_cp_gen_params params;
struct xran_section_gen_info sect_geninfo[1];
struct rte_mbuf *mbuf;
int ret = 0;
- uint32_t i, j, loc_sym;
+
+ uint32_t i, loc_sym;
uint32_t nsection = 0;
struct xran_prb_elm *pPrbMapElem = NULL;
struct xran_prb_elm *pPrbMapElemPrev = NULL;
print_err("prbMap is NULL\n");
return (-1);
}
+
for (i=0; i<nsection; i++)
{
pPrbMapElem = &prbMap->prbMap[i];
sect_geninfo[0].info.startSymId = params.hdr.startSymId; // for database
sect_geninfo[0].info.iqWidth = params.hdr.iqWidth; // for database
sect_geninfo[0].info.compMeth = params.hdr.compMeth; // for database
+
sect_geninfo[0].info.id = i; /*xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, slot_id);*/
if(sect_geninfo[0].info.id > 7)
/* no extention sections for category */
sect_geninfo[0].info.ef = 0;
sect_geninfo[0].exDataSize = 0;
+ mbuf = xran_ethdi_mbuf_alloc();
} else if (category == XRAN_CATEGORY_B) {
/*add extantion section for BF Weights if update is needed */
if(pPrbMapElem->bf_weight_update){
+ struct rte_mbuf_ext_shared_info * p_share_data = &cp_share_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ru_port_id];
+
+ if (pPrbMapElem->bf_weight.p_ext_start){
+ /* use buffer with BF Weights for mbuf */
+ mbuf = xran_attach_cp_ext_buf(pPrbMapElem->bf_weight.p_ext_start,
+ pPrbMapElem->bf_weight.p_ext_section, pPrbMapElem->bf_weight.ext_section_sz, p_share_data);
+ } else {
+ print_err("Alloc fail!\n");
+ return (-1);
+ }
+
memset(&m_ext1, 0, sizeof (struct xran_sectionext1_info));
m_ext1.bfwNumber = pPrbMapElem->bf_weight.nAntElmTRx;
m_ext1.bfwiqWidth = pPrbMapElem->iqWidth;
sect_geninfo[0].info.ef = 1;
sect_geninfo[0].exDataSize = 1;
} else {
+ mbuf = xran_ethdi_mbuf_alloc();
sect_geninfo[0].info.ef = 0;
sect_geninfo[0].exDataSize = 0;
}
return (-1);
}
- params.numSections = 1;//nsection;
- params.sections = sect_geninfo;
-
- mbuf = xran_ethdi_mbuf_alloc();
if(unlikely(mbuf == NULL)) {
print_err("Alloc fail!\n");
return (-1);
}
+ params.numSections = 1;//nsection;
+ params.sections = sect_geninfo;
+
ret = xran_prepare_ctrl_pkt(mbuf, ¶ms, cc_id, ru_port_id, seq_id);
if(ret < 0) {
print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",
frame_id, subframe_id, slot_id, dir);
} else {
/* add in the ethernet header */
- struct ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
- tx_counter++;
- tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);
- p_x_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI);
+ struct rte_ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
+ pCnt->tx_counter++;
+ pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);
+ p_x_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI, xran_map_ecpriRtcid_to_vf(dir, cc_id, ru_port_id));
/*for(i=0; i<nsection; i++)*/
xran_cp_add_section_info(pHandle,
{
long t1 = MLogTick();
int tti, buf_id;
- int i, ret;
uint32_t slot_id, subframe_id, frame_id;
int cc_id;
uint8_t ctx_id;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
xran_status_t status;
/* half of RX for current TTI as measured against current OTA time */
- int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+ int32_t rx_tti;
int32_t cc_id;
uint32_t nFrameIdx;
uint32_t nSubframeIdx;
uint32_t nSlotIdx;
uint64_t nSecond;
- uint32_t nXranTime = xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+ /*xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+ nSubframeIdx*SLOTNUM_PER_SUBFRAME
- + nSlotIdx;
+ + nSlotIdx;*/
+
+ struct xran_timer_ctx* p_timer_ctx = (struct xran_timer_ctx*)arg;
+ rx_tti = p_timer_ctx->tti_to_process;
if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
return;
for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
+ pTag->cellId = cc_id;
pTag->slotiId = rx_tti;
pTag->symbol = 0; /* last 7 sym means full slot of Symb */
status = XRAN_STATUS_SUCCESS;
uint32_t nSlotIdx;
uint64_t nSecond;
- uint32_t nXranTime = xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
+ xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
+ nSubframeIdx*SLOTNUM_PER_SUBFRAME
+ nSlotIdx;
/* U-Plane */
for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
+ pTag->cellId = cc_id;
pTag->slotiId = rx_tti;
pTag->symbol = 7; /* last 7 sym means full slot of Symb */
status = XRAN_STATUS_SUCCESS;
if(p_xran_dev_ctx->pPrachCallback[cc_id]){
struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
+ pTag->cellId = cc_id;
pTag->slotiId = rx_tti;
pTag->symbol = 7; /* last 7 sym means full slot of Symb */
p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
}
+
+ if(p_xran_dev_ctx->pSrsCallback[cc_id]){
+ struct xran_cb_tag *pTag = p_xran_dev_ctx->pSrsCallbackTag[cc_id];
+ pTag->cellId = cc_id;
+ pTag->slotiId = rx_tti;
+ pTag->symbol = 7; /* last 7 sym means full slot of Symb */
+ p_xran_dev_ctx->pSrsCallback[cc_id](p_xran_dev_ctx->pSrsCallbackTag[cc_id], status);
+ }
}
MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick());
{
long t1 = MLogTick();
int tti, buf_id;
- int i, ret;
+ int ret;
uint32_t slot_id, subframe_id, frame_id;
int32_t cc_id;
int ant_id, prach_port_id;
/* start new section information list */
xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
- (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
+ (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
} /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) == 1 */
} /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
xran_core_used = rte_lcore_id();
printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
+ memset(&sched_param, 0, sizeof(struct sched_param));
+
/* set main thread affinity mask to CPU2 */
- sched_param.sched_priority = 98;
+ sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
CPU_ZERO(&cpuset);
CPU_SET(p_xran_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);
{
printf("pthread_setaffinity_np failed: coreId = 2, result1 = %d\n",result1);
}
- if ((result1 = pthread_setschedparam(pthread_self(), 1, &sched_param)))
+ if ((result1 = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
{
printf("priority is not changed: coreId = 2, result1 = %d\n",result1);
}
}
/* Full slot UL OTA + delay_up_ul */
- cb_elm = xran_create_cb(xran_timer_arm, rx_ul_deadline_full_cb);
+ cb_elm = xran_create_cb(xran_timer_arm_for_deadline, rx_ul_deadline_full_cb);
if(cb_elm){
LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_up_ul],
cb_elm,
}
/* Half slot UL OTA + delay_up_ul*/
- cb_elm = xran_create_cb(xran_timer_arm, rx_ul_deadline_half_cb);
+ cb_elm = xran_create_cb(xran_timer_arm_for_deadline, rx_ul_deadline_half_cb);
if(cb_elm){
LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_up_ul + N_SYM_PER_SLOT/2],
cb_elm,
}
}
- printf("Closing timing source thread...tx counter %lu, rx counter %lu\n", tx_counter, rx_counter);
+ printf("Closing timing source thread...\n");
return res;
}
return MBUF_FREE;
}
- rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
+ xran_dev_get_ctx()->fh_counters.rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
switch(ecpri_hdr->cmnhdr.ecpri_mesg_type) {
case ECPRI_IQ_DATA:
// t1 = MLogTick();
t1 = MLogTick();
if(xran_dev_get_ctx()->fh_init.io_cfg.id == O_RU) {
ret = process_cplane(pkt);
+ xran_dev_get_ctx()->fh_counters.rx_counter++;
} else {
print_err("O-DU recevied C-Plane message!");
}
print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
}
-/* if (symb_id == p_xran_dev_ctx->prach_last_symbol[CC_ID] ){
- p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id]++;
- if(p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] >= xran_get_num_eAxc(pHandle)){
- if(p_xran_dev_ctx->pPrachCallback[0])
- p_xran_dev_ctx->pPrachCallback[0](p_xran_dev_ctx->pPrachCallbackTag[0], status);
- p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] = 0;
- }
- }
-*/
return size;
}
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
uint32_t tti = 0;
xran_status_t status;
- void *pHandle = NULL;
struct rte_mbuf *mb = NULL;
uint16_t iq_sample_size_bits = 16;
print_err("incorrect dev type %d\n", pctx->fh_init.io_cfg.id);
}
- rx_counter++;
+ pCnt->rx_counter++;
pCnt->Rx_on_time++;
pCnt->Total_msgs_rcvd++;
pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
}
} else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){
- if (/*likely (p_xran_dev_ctx->fh_init.mtu >=
- p_xran_dev_ctx->fh_cfg.nULRBs * N_SC_PER_PRB*(iq_sample_size_bits/8)*2)
- && p_xran_dev_ctx->fh_init.io_cfg.id == O_DU*/ 1) {
- if (pRbMap->nPrbElm == 1){
+ if (pRbMap->nPrbElm == 1){
+ if (likely (p_xran_dev_ctx->fh_init.mtu >=
+ p_xran_dev_ctx->fh_cfg.nULRBs * N_SC_PER_PRB*(iq_sample_size_bits/8)*2))
+ {
/* no fragmentation */
mb = p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;
if(mb){
p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;
*mb_free = MBUF_KEEP;
} else {
+ /* packet can be fragmented copy RBs */
+ rte_memcpy(pos, iq_data_start, size);
+ *mb_free = MBUF_FREE;
+ }
+ } else {
prbMapElm = &pRbMap->prbMap[sect_id];
struct xran_section_desc *p_sec_desc = prbMapElm->p_sec_desc[symb_id];
if(p_sec_desc){
}
*mb_free = MBUF_KEEP;
}
- } else {
- /* packet can be fragmented copy RBs */
- rte_memcpy(pos, iq_data_start, size);
- *mb_free = MBUF_FREE;
- }
}
} else {
print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
static inline int
xran_send_burst(struct xran_device_ctx *dev, uint16_t n, uint16_t port)
{
+ struct xran_common_counters * pCnt = NULL;
struct rte_mbuf **m_table;
struct rte_mbuf *m;
int32_t i = 0;
int j;
int32_t ret = 0;
+
+ if(dev)
+ pCnt = &dev->fh_counters;
+ else
+ rte_panic("incorrect dev\n");
+
m_table = (struct rte_mbuf **)dev->tx_mbufs[port].m_table;
for(i = 0; i < n; i++){
rte_mbuf_sanity_check(m_table[i], 0);
/*rte_pktmbuf_dump(stdout, m_table[i], 256);*/
- tx_counter++;
- tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);
- ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI);
+ pCnt->tx_counter++;
+ pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);
+ ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI, port);
}
-
if (unlikely(ret < n)) {
print_err("ret < n\n");
}
int32_t do_srs)
{
int32_t retval = 0;
- uint64_t t1 = MLogTick();
void *pHandle = NULL;
char *pos = NULL;
char *p_sec_iq = NULL;
- char *p_sect_iq = NULL;
+ //char *p_sect_iq = NULL;
void *mb = NULL;
+ void *send_mb = NULL;
int prb_num = 0;
uint16_t iq_sample_size_bits = 16; // TODO: make dynamic per
uint8_t num_ant_elm = 0;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+ struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
num_ant_elm = xran_get_num_ant_elm(pHandle);
struct rte_mbuf *tmp = NULL;
rte_iova_t ext_buff_iova = 0;
- struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN];
+ struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
direction = XRAN_DIR_DL; /* O-DU */
if(xran_fs_get_symbol_type(cc_id, tti, sym_id) == ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SYMBOL_TYPE_DL : XRAN_SYMBOL_TYPE_UL)
|| xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){
- if(iq_sample_size_bits != 16)
- print_err("Incorrect iqWidth %d\n", iq_sample_size_bits );
-
pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
mb = (void*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
prb_map = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
p_sec_desc = prb_map_elm->p_sec_desc[sym_id];
- if(p_sec_desc == NULL){
- rte_panic("p_sec_desc == NULL\n");
- }
-
#if 1
p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);
rte_pktmbuf_reset_headroom(eth_oran_hdr);
- tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct ether_hdr));
+ tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
if (unlikely (( tmp) == NULL)) {
rte_panic("Failed rte_pktmbuf_prepend \n");
}
- mb = eth_oran_hdr;
+ send_mb = eth_oran_hdr;
+
+
+ uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
+ xran_get_updl_seqid(pHandle, cc_id, ant_id) :
+ xran_get_upul_seqid(pHandle, cc_id, ant_id);
+
+
/* first all PRBs */
- prepare_symbol_ex(direction, sec_id,
- mb,
+ int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
+ send_mb,
(struct rb_map *)p_sec_iq,
prb_map_elm->compMethod,
prb_map_elm->iqWidth,
frame_id, subframe_id, slot_id, sym_id,
prb_map_elm->nRBStart, prb_map_elm->nRBSize,
cc_id, ant_id,
- (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
- xran_get_updl_seqid(pHandle, cc_id, ant_id) :
- xran_get_upul_seqid(pHandle, cc_id, ant_id),
+ seq_id,
0);
- rte_mbuf_sanity_check((struct rte_mbuf *)mb, 0);
- tx_counter++;
- tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)mb);
- p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)mb, ETHER_TYPE_ECPRI);
+ rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
+ pCnt->tx_counter++;
+ pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
+ p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, xran_map_ecpriPcid_to_vf(direction, cc_id, ant_id));
#else
p_sect_iq = pos + p_sec_desc->iq_buffer_offset;
prb_num = prb_map_elm->nRBSize;
mb = (void*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_elm_id].sBufferList.pBuffers[sym_id].pCtrl;
if( prb_num > 136 || prb_num == 0) {
- uint16_t sec_id = xran_alloc_sectionid(pHandle, direction, cc_id, ant_id, slot_id);
+ uint16_t sec_id = xran_alloc_sectionid(pHandle, direction, cc_id, ant_elm_id, slot_id);
/* first 136 PRBs */
send_symbol_ex(direction,
sec_id,
return retval;
}
+struct rte_mbuf *
+xran_attach_cp_ext_buf(int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
+ struct rte_mbuf_ext_shared_info * p_share_data)
+{
+ struct rte_mbuf *mb_oran_hdr_ext = NULL;
+ struct rte_mbuf *tmp = NULL;
+ int8_t *ext_buff = NULL;
+ rte_iova_t ext_buff_iova = 0;
+
+ ext_buff = p_ext_buff - (RTE_PKTMBUF_HEADROOM +
+ sizeof(struct xran_ecpri_hdr) +
+ sizeof(struct xran_cp_radioapp_section1_header) +
+ sizeof(struct xran_cp_radioapp_section1));
+
+ ext_buff_len += (RTE_PKTMBUF_HEADROOM +
+ sizeof(struct xran_ecpri_hdr) +
+ sizeof(struct xran_cp_radioapp_section1_header) +
+ sizeof(struct xran_cp_radioapp_section1)) + 18;
+
+ mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
+
+ if (unlikely (( mb_oran_hdr_ext) == NULL)) {
+ rte_panic("Failed rte_pktmbuf_alloc\n");
+ }
+
+ p_share_data->free_cb = extbuf_free_callback;
+ p_share_data->fcb_opaque = NULL;
+ rte_mbuf_ext_refcnt_set(p_share_data, 1);
+
+ ext_buff_iova = rte_malloc_virt2iova(p_ext_buff_start);
+ if (unlikely (( ext_buff_iova) == 0)) {
+ rte_panic("Failed rte_mem_virt2iova \n");
+ }
+
+ if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
+ rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
+ }
+
+ rte_pktmbuf_attach_extbuf(mb_oran_hdr_ext,
+ ext_buff,
+ ext_buff_iova + RTE_PTR_DIFF(ext_buff , p_ext_buff_start),
+ ext_buff_len,
+ p_share_data);
+
+ rte_pktmbuf_reset_headroom(mb_oran_hdr_ext);
+
+ return mb_oran_hdr_ext;
+}
+
+
+struct rte_mbuf *
+xran_attach_up_ext_buf(int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
+ struct rte_mbuf_ext_shared_info * p_share_data,
+ enum xran_compression_method compMeth)
+{
+ struct rte_mbuf *mb_oran_hdr_ext = NULL;
+ struct rte_mbuf *tmp = NULL;
+ int8_t *ext_buff = NULL;
+ rte_iova_t ext_buff_iova = 0;
+
+ ext_buff = p_ext_buff - (RTE_PKTMBUF_HEADROOM +
+ sizeof(struct xran_ecpri_hdr) +
+ sizeof(struct radio_app_common_hdr) +
+ sizeof(struct data_section_hdr));
+
+ ext_buff_len += RTE_PKTMBUF_HEADROOM +
+ sizeof(struct xran_ecpri_hdr) +
+ sizeof(struct radio_app_common_hdr) +
+ sizeof(struct data_section_hdr) + 18;
+
+ if(compMeth != XRAN_COMPMETHOD_NONE) {
+ ext_buff -= sizeof (struct data_section_compression_hdr);
+ ext_buff_len += sizeof (struct data_section_compression_hdr);
+ }
+
+ mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
+
+ if (unlikely (( mb_oran_hdr_ext) == NULL)) {
+ rte_panic("Failed rte_pktmbuf_alloc\n");
+ }
+
+ p_share_data->free_cb = extbuf_free_callback;
+ p_share_data->fcb_opaque = NULL;
+ rte_mbuf_ext_refcnt_set(p_share_data, 1);
+
+ ext_buff_iova = rte_mempool_virt2iova(p_ext_buff_start);
+ if (unlikely (( ext_buff_iova) == 0)) {
+ rte_panic("Failed rte_mem_virt2iova \n");
+ }
+
+ if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
+ rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
+ }
+
+ rte_pktmbuf_attach_extbuf(mb_oran_hdr_ext,
+ ext_buff,
+ ext_buff_iova + RTE_PTR_DIFF(ext_buff , p_ext_buff_start),
+ ext_buff_len,
+ p_share_data);
+
+ rte_pktmbuf_reset_headroom(mb_oran_hdr_ext);
+
+ tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(mb_oran_hdr_ext, sizeof(struct rte_ether_hdr));
+ if (unlikely (( tmp) == NULL)) {
+ rte_panic("Failed rte_pktmbuf_prepend \n");
+ }
+
+ return mb_oran_hdr_ext;
+}
int32_t xran_process_tx_sym_cp_on(uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
uint32_t slot_id, uint32_t sym_id)
{
int32_t retval = 0;
- uint64_t t1 = MLogTick();
struct rte_mbuf *eth_oran_hdr = NULL;
char *ext_buff = NULL;
struct xran_section_info *sectinfo = NULL;
struct xran_device_ctx *p_xran_dev_ctx = xran_dev_get_ctx();
-
- struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
- struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
enum xran_pkt_dir direction;
- struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN];
+ struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
direction = XRAN_DIR_DL; /* O-DU */
pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
mb = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
-#if 1
- p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
-
- /* calculete offset for external buffer */
+ p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
- ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
- sizeof (struct xran_ecpri_hdr) +
- sizeof (struct radio_app_common_hdr) +
- sizeof(struct data_section_hdr));
-
- ext_buff_len += RTE_PKTMBUF_HEADROOM +
- sizeof (struct xran_ecpri_hdr) +
- sizeof (struct radio_app_common_hdr) +
- sizeof(struct data_section_hdr) + 18;
- if(sectinfo->compMeth != XRAN_COMPMETHOD_NONE){
- ext_buff -= sizeof (struct data_section_compression_hdr);
- ext_buff_len += sizeof (struct data_section_compression_hdr);
- }
-
- eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
-
- if (unlikely (( eth_oran_hdr) == NULL)) {
- rte_panic("Failed rte_pktmbuf_alloc\n");
- }
-
- p_share_data->free_cb = extbuf_free_callback;
- p_share_data->fcb_opaque = NULL;
- rte_mbuf_ext_refcnt_set(p_share_data, 1);
-
- ext_buff_iova = rte_mempool_virt2iova(mb);
- if (unlikely (( ext_buff_iova) == 0)) {
- rte_panic("Failed rte_mem_virt2iova \n");
- }
-
- if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
- rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
- }
-
- rte_pktmbuf_attach_extbuf(eth_oran_hdr,
- ext_buff,
- ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
- ext_buff_len,
- p_share_data);
-
- rte_pktmbuf_reset_headroom(eth_oran_hdr);
-
- tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct ether_hdr));
- if (unlikely (( tmp) == NULL)) {
- rte_panic("Failed rte_pktmbuf_prepend \n");
- }
- mb = eth_oran_hdr;
-#else
- rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
-#endif
+ mb = xran_attach_up_ext_buf((int8_t *)mb, (int8_t *) p_sec_iq,
+ (uint16_t) ext_buff_len,
+ p_share_data, (enum xran_compression_method) sectinfo->compMeth);
/* first all PRBs */
prepare_symbol_ex(direction, sectinfo->id,
mb,
for (i = len; i < len + len2; i ++) {
struct rte_mbuf *m;
m = p_xran_dev_ctx->tx_mbufs[0].m_table[i];
- struct ether_hdr *eth_hdr = (struct ether_hdr *)
- rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));
+ struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
if (eth_hdr == NULL) {
rte_panic("No headroom in mbuf.\n");
}
}
/* Transmit packets */
- xran_send_burst(p_xran_dev_ctx, (uint16_t)len, 0);
+ xran_send_burst(p_xran_dev_ctx, (uint16_t)len, xran_map_ecpriPcid_to_vf(direction, cc_id, ant_id));
p_xran_dev_ctx->tx_mbufs[0].len = 0;
retval = 1;
} /* while(section) */
int32_t cc_id = 0;
uint8_t num_eAxc = 0;
uint8_t num_CCPorts = 0;
- uint8_t num_ant_elm = 0;
uint32_t frame_id = 0;
uint32_t subframe_id = 0;
uint32_t slot_id = 0;
uint32_t sym_idx = 0;
uint8_t ctx_id;
- enum xran_pkt_dir direction;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
+ enum xran_in_period inPeriod;
if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
return 0;
/* O-RU: send symb after OTA time with delay (UL) */
/* O-DU: send symb in advance of OTA time (DL) */
- sym_idx = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME*1000);
+ sym_idx = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME*1000, &inPeriod);
tti = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
- frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
+
+ uint16_t sfnSecStart = xran_getSfnSecStart();
+ if (unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
+ {
+ // For DU
+ sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
+ }
+ else if (unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
+ {
+ // For RU
+ if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
+ {
+ sfnSecStart -= NUM_OF_FRAMES_PER_SECOND;
+ }
+ else
+ {
+ sfnSecStart += NUM_OF_FRAMES_PER_SFN_PERIOD - NUM_OF_FRAMES_PER_SECOND;
+ }
+ }
+ frame_id = XranGetFrameNum(tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
// ORAN frameId, 8 bits, [0, 255]
frame_id = (frame_id & 0xff);
int res = 0;
printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
+ memset(&sched_param, 0, sizeof(struct sched_param));
sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
- if ((res = pthread_setschedparam(pthread_self(), 1, &sched_param)))
+ if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
{
printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
}
int32_t i;
int32_t j;
- struct xran_io_loop_cfg *p_io_cfg = (struct xran_io_loop_cfg *)&p_xran_fh_init->io_cfg;
+ struct xran_io_cfg *p_io_cfg = (struct xran_io_cfg *)&p_xran_fh_init->io_cfg;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
- cpu_set_t system_cpuset;
- pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &system_cpuset);
- for (j = 0; j < CPU_SETSIZE; j++)
- if (CPU_ISSET(j, &system_cpuset))
- break;
-
- int32_t lcore_id = j;
-
+ int32_t lcore_id = 0;
char filename[64];
int64_t offset_sec, offset_nsec;
xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
p_io_cfg,
&lcore_id,
- (struct ether_addr *)p_xran_fh_init->p_o_du_addr,
- (struct ether_addr *)p_xran_fh_init->p_o_ru_addr,
- p_xran_fh_init->cp_vlan_tag,
- p_xran_fh_init->up_vlan_tag);
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr,
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr);
else
xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
p_io_cfg,
&lcore_id,
- (struct ether_addr *)p_xran_fh_init->p_o_ru_addr,
- (struct ether_addr *)p_xran_fh_init->p_o_du_addr,
- p_xran_fh_init->cp_vlan_tag,
- p_xran_fh_init->up_vlan_tag);
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_ru_addr,
+ (struct rte_ether_addr *)p_xran_fh_init->p_o_du_addr);
for(i = 0; i < 10; i++ )
rte_timer_init(&tti_to_phy_timer[i]);
}
}
- printf("Set debug stop %d, debug stop count %d\n", p_xran_fh_init->debugStop, p_xran_fh_init->debugStopCount);
- timing_set_debug_stop(p_xran_fh_init->debugStop, p_xran_fh_init->debugStopCount);
for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++){
xran_fs_clear_slot_type(nCellIdx);
snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "ru_%d_cc_%d_idx_%d",
pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex);
- nAllocBufferSize = nBufferSize + sizeof(struct ether_hdr) +
+ nAllocBufferSize = nBufferSize + sizeof(struct rte_ether_hdr) +
sizeof (struct xran_ecpri_hdr) +
sizeof (struct radio_app_common_hdr) +
sizeof(struct data_section_hdr) + 256;
if(mb){
char * start = rte_pktmbuf_append(mb, pXranCc->bufferPoolElmSz[nPoolIndex]);
- char * ethhdr = rte_pktmbuf_prepend(mb, sizeof(struct ether_hdr));
+ char * ethhdr = rte_pktmbuf_prepend(mb, sizeof(struct rte_ether_hdr));
if(start && ethhdr){
char * iq_offset = rte_pktmbuf_mtod(mb, char * );
/* skip headers */
- iq_offset = iq_offset + sizeof(struct ether_hdr) +
+ iq_offset = iq_offset + sizeof(struct rte_ether_hdr) +
sizeof (struct xran_ecpri_hdr) +
sizeof (struct radio_app_common_hdr) +
sizeof(struct data_section_hdr);
void *pCallbackTag)
{
XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
- xran_status_t nStatus = XRAN_STATUS_SUCCESS;
int j, i = 0, z, k;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
- print_dbg("%s\n", __FUNCTION__);
+ print_dbg("%s\n",__FUNCTION__);
if(NULL == pHandle)
{
p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
- p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j];
+ if(pSrcBuffer[z][j])
+ p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j];
/* C-plane TX */
p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
- p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j];
+ if(pSrcCpBuffer[z][j])
+ p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j];
/* U-plane RX */
p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
- p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
+ if(pDstBuffer[z][j])
+ p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
/* C-plane RX */
p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
- p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
+ if(pDstCpBuffer[z][j])
+ p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
}
}
p_xran_dev_ctx->xran2phy_mem_ready = 1;
- return nStatus;
+ return XRAN_STATUS_SUCCESS;
}
int32_t xran_5g_prach_req (void * pHandle,
void *pCallbackTag)
{
XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
- xran_status_t nStatus = XRAN_STATUS_SUCCESS;
int j, i = 0, z;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number.
p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
- p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
+ if(pDstBuffer[z][j])
+ p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
}
}
p_xran_dev_ctx->pPrachCallback[i] = pCallback;
p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
- return 0;
+ return XRAN_STATUS_SUCCESS;
}
void *pCallbackTag)
{
XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
- xran_status_t nStatus = XRAN_STATUS_SUCCESS;
int j, i = 0, z;
struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number.
p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
- p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
+ if(pDstBuffer[z][j])
+ p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
}
}
p_xran_dev_ctx->pSrsCallback[i] = pCallback;
p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
- return 0;
+ return XRAN_STATUS_SUCCESS;
}
uint32_t xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *core_used, uint32_t clear)
return rte_malloc("External buffer", buf_len, RTE_CACHE_LINE_SIZE);
}
+/** Free a buffer previously obtained from xran_malloc() (DPDK rte_malloc pool). */
+void xran_free(void *addr)
+{
+    /* rte_free() returns void: do not 'return' its result — returning a void
+     * expression from a void function violates C11 6.8.6.4p1 (C++-only idiom). */
+    rte_free(addr);
+}
+
+
+/** Advance @dst past the mbuf headroom and the C-Plane protocol headers
+ *  (eCPRI header + radio-app section-1 header + one section-1 entry),
+ *  then round the result up to a 64-byte boundary.
+ *  Returns the aligned pointer where C-Plane payload data may be written.
+ *  NOTE(review): assumes section type 1 framing only — confirm callers
+ *  never use this offset for other section types. */
+uint8_t *xran_add_cp_hdr_offset(uint8_t *dst)
+{
+ dst += (RTE_PKTMBUF_HEADROOM +
+ sizeof(struct xran_ecpri_hdr) +
+ sizeof(struct xran_cp_radioapp_section1_header) +
+ sizeof(struct xran_cp_radioapp_section1));
+
+ /* Cache-line align the payload start (RTE_PTR_ALIGN_CEIL rounds up). */
+ dst = RTE_PTR_ALIGN_CEIL(dst, 64);
+
+ return dst;
+}
+
uint8_t *xran_add_hdr_offset(uint8_t *dst, int16_t compMethod)
{
dst+= (RTE_PKTMBUF_HEADROOM +
memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
if(pConf->log_level)
- printf(" %s: O-RU Category %s\n", __FUNCTION__, (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
+ printf(" %s: %s Category %s\n", __FUNCTION__,
+ (pFhCfg->ru_conf.xranTech == XRAN_RAN_5GNR) ? "5G NR" : "LTE",
+ (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
nNumerology = xran_get_conf_numerology(pHandle);
}
/* setup PRACH configuration for C-Plane */
- xran_init_prach(pConf, p_xran_dev_ctx);
+ if(pConf->ru_conf.xranTech == XRAN_RAN_5GNR)
+ xran_init_prach(pConf, p_xran_dev_ctx);
+ else if (pConf->ru_conf.xranTech == XRAN_RAN_LTE)
+ xran_init_prach_lte(pConf, p_xran_dev_ctx);
+
xran_init_srs(pConf, p_xran_dev_ctx);
xran_cp_init_sectiondb(pHandle);
/* Start packet processing thread */
if((uint16_t)xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF] != 0xFFFF &&
(uint16_t)xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF] != 0xFFFF ){
- if(pConf->log_level){
- print_dbg("XRAN_UP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF]);
- print_dbg("XRAN_CP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF]);
+ if(/*pConf->log_level*/1){
+ printf("XRAN_UP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF]);
+ printf("XRAN_CP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF]);
}
+
+
if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), xran_ethdi_get_ctx()->io_cfg.timing_core))
rte_panic("thread_run() failed to start\n");
+
+ /* Start packet processing thread */
+ if(xran_ethdi_get_ctx()->io_cfg.pkt_proc_core){
+ /* start pkt workers */
+ uint64_t nWorkerCore = 1LL;
+ uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
+ for (i = 0; i < coreNum; i++) {
+ if (nWorkerCore & (uint64_t)xran_ethdi_get_ctx()->io_cfg.pkt_proc_core) {
+ if (rte_eal_remote_launch(ring_processing_thread, NULL, i))
+ rte_panic("ring_processing_thread() failed to start\n");
+ xran_ethdi_get_ctx()->pkt_wrk_cfg[i].f = ring_processing_thread;
+ xran_ethdi_get_ctx()->pkt_wrk_cfg[i].arg = NULL;
+ xran_ethdi_get_ctx()->pkt_wrk_cfg[i].state = 1;
+ if(p_xran_dev_ctx->pkt_proc_core_id == 0)
+ p_xran_dev_ctx->pkt_proc_core_id = i;
+ }
+ nWorkerCore = nWorkerCore << 1;
+ }
+ }
} else if(pConf->log_level){
printf("Eth port was not open. Processing thread was not started\n");
}
int32_t xran_start(void *pHandle)
{
+ /* Device context is needed to read fh_init debug-stop settings below. */
+ struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
if(xran_get_if_state() == XRAN_RUNNING) {
print_err("Already STARTED!!");
return (-1);
}
+ /* Arm the timing debug-stop only when configured (debugStop != 0);
+ * moved here from init so it takes effect at actual start time. */
+ if(p_xran_dev_ctx->fh_init.debugStop){
+ printf("Set debug stop %d, debug stop count %d\n", p_xran_dev_ctx->fh_init.debugStop, p_xran_dev_ctx->fh_init.debugStopCount);
+ timing_set_debug_stop(p_xran_dev_ctx->fh_init.debugStop, p_xran_dev_ctx->fh_init.debugStopCount);
+ }
+
xran_if_current_state = XRAN_RUNNING;
return 0;
}