/******************************************************************************
*
*   Copyright (c) 2019 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/

/**
 * @brief XRAN main functionality module
 *
 * @file xran_main.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/
31 #include <sys/time.h>
\r
32 #include <sys/queue.h>
\r
36 #include <pthread.h>
\r
39 #include <rte_common.h>
\r
40 #include <rte_eal.h>
\r
41 #include <rte_errno.h>
\r
42 #include <rte_lcore.h>
\r
43 #include <rte_cycles.h>
\r
44 #include <rte_memory.h>
\r
45 #include <rte_memzone.h>
\r
46 #include <rte_mbuf.h>
\r
47 #include <rte_ring.h>
\r
49 #include "xran_fh_o_du.h"
\r
52 #include "xran_pkt.h"
\r
53 #include "xran_up_api.h"
\r
54 #include "xran_cp_api.h"
\r
55 #include "xran_sync_api.h"
\r
56 #include "xran_lib_mlog_tasks_id.h"
\r
57 #include "xran_timer.h"
\r
58 #include "xran_common.h"
\r
59 #include "xran_frame_struct.h"
\r
60 #include "xran_printf.h"
\r
61 #include "xran_app_frag.h"
\r
63 #include "xran_mlog_lnx.h"
\r
/* Integer division of X by Y rounded up (ceiling).
 * NOTE(review): arguments are not parenthesized — pass simple expressions only. */
#define DIV_ROUND_OFFSET(X,Y) ( X/Y + ((X%Y)?1:0) )

/* Distance in symbols from offSym to otaSym, wrapped into [0, numSymTotal). */
#define XranOffsetSym(offSym, otaSym, numSymTotal)  (((int32_t)offSym > (int32_t)otaSym) ? \
                            ((int32_t)otaSym + ((int32_t)numSymTotal) - (uint32_t)offSym) : \
                            (((int32_t)otaSym - (int32_t)offSym) >= numSymTotal) ? \
                                (((int32_t)otaSym - (int32_t)offSym) - numSymTotal) : \
                                ((int32_t)otaSym - (int32_t)offSym))

/* Number of ping-pong timer contexts used for TTI processing. */
#define MAX_NUM_OF_XRAN_CTX          (2)
#define XranIncrementCtx(ctx)                             ((ctx >= (MAX_NUM_OF_XRAN_CTX-1)) ? 0 : (ctx+1))
#define XranDecrementCtx(ctx)                             ((ctx == 0) ? (MAX_NUM_OF_XRAN_CTX-1) : (ctx-1))

/* Pool of DPDK timers used round-robin for per-symbol callbacks. */
#define MAX_NUM_OF_DPDK_TIMERS       (10)
#define DpdkTimerIncrementCtx(ctx)           ((ctx >= (MAX_NUM_OF_DPDK_TIMERS-1)) ? 0 : (ctx+1))
#define DpdkTimerDecrementCtx(ctx)           ((ctx == 0) ? (MAX_NUM_OF_DPDK_TIMERS-1) : (ctx-1))
\r
/* Difference between Unix seconds and GPS seconds
   GPS epoch: 1980.1.6 00:00:00 (UTC); Unix time epoch: 1970.1.1 00:00:00 UTC
   Value was calculated on Sep. 6, 2019. It needs to be changed if the International
   Earth Rotation and Reference Systems Service (IERS) adds more leap seconds.
   1970.1.1 - 1980.1.6: 3657 days
   3657*24*3600 = 315 964 800 seconds (Unix seconds value at 1980.1.6 00:00:00 (UTC))
   There are 18 leap seconds inserted after 1980.1.6 00:00:00 (UTC), which means
   GPS is 18 larger. 315 964 800 - 18 = 315 964 782 */
/* Unix seconds at the GPS epoch (1980-01-06) minus 18 leap seconds. */
#define UNIX_TO_GPS_SECONDS_OFFSET 315964782UL

/* 10 ms radio frames per second. */
#define NUM_OF_FRAMES_PER_SECOND 100

//#define XRAN_CREATE_RBMAP /**< generate slot map base on symbols */
96 struct xran_timer_ctx {
\r
97 uint32_t tti_to_process;
\r
100 static xran_cc_handle_t pLibInstanceHandles[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR] = {NULL};
\r
101 static struct xran_device_ctx g_xran_dev_ctx[XRAN_PORTS_NUM] = { 0 };
\r
103 struct xran_timer_ctx timer_ctx[MAX_NUM_OF_XRAN_CTX];
\r
/* DPDK timers: TTI-to-PHY delivery, per-symbol tick, and a round-robin pool. */
static struct rte_timer tti_to_phy_timer[10];
static struct rte_timer sym_timer;
static struct rte_timer dpdk_timer[MAX_NUM_OF_DPDK_TIMERS];

/* TTI interval in microseconds (1000 -> numerology 0). */
uint64_t interval_us = 1000;

uint32_t xran_lib_ota_tti        = 0; /**< Slot index in a second [0:(1000000/TTI-1)] */
uint32_t xran_lib_ota_sym        = 0; /**< Symbol index in a slot [0:13] */
uint32_t xran_lib_ota_sym_idx    = 0; /**< Symbol index in a second [0 : 14*(1000000/TTI)-1]
                                                where TTI is TTI interval in microseconds */
uint16_t xran_SFN_at_Sec_Start   = 0; /**< SFN at current second start */
uint16_t xran_max_frame          = 1023; /**< value of max frame used. expected to be 99 (old compatibility mode) and 1023 as per section 9.7.2 System Frame Number Calculation */

/* Next sequence numbers per cell/direction/eAxC for C-Plane and U-Plane. */
static uint8_t xran_cp_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_DIR_MAX][XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR]; /* XRAN_MAX_ANTENNA_NR * 2 for PUSCH and PRACH */
static uint8_t xran_updl_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR];
static uint8_t xran_upul_seq_id_num[XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR]; /**< PUSCH, PRACH, SRS for Cat B */

/* Section-ID allocation state; curslot == 255 marks "no slot seen yet". */
static uint8_t xran_section_id_curslot[XRAN_DIR_MAX][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2+ XRAN_MAX_ANT_ARRAY_ELM_NR];
static uint16_t xran_section_id[XRAN_DIR_MAX][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2+ XRAN_MAX_ANT_ARRAY_ELM_NR];

/* Core-usage accounting and one-shot start flag. */
static uint64_t xran_total_tick = 0, xran_used_tick = 0;
static uint32_t xran_core_used = 0;
static int32_t first_call = 0;
130 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
\r
134 static struct rte_mbuf_ext_shared_info share_data[XRAN_N_FE_BUF_LEN];
\r
/* Forward declarations for callbacks and workers defined later in this file. */
void xran_timer_arm(struct rte_timer *tim, void* arg);

/* Per-symbol TX processing entry point (driven from sym_ota_cb()). */
int32_t xran_process_tx_sym(void *arg);

/* U-Plane RX handler for PUSCH symbols.
 * NOTE(review): the parameter list below looks incomplete relative to the
 * definition — verify the full signature before relying on it. */
int32_t xran_process_rx_sym(void *arg,
                        struct rte_mbuf *mbuf,
                        void *iq_data_start,
                        uint8_t subframe_id,
                        uint16_t start_prbu,
                        uint32_t *mb_free);

/* U-Plane RX handler for PRACH symbols (same NOTE as above). */
int32_t xran_process_prach_sym(void *arg,
                        struct rte_mbuf *mbuf,
                        void *iq_data_start,
                        uint8_t subframe_id,
                        uint16_t start_prbu,
                        uint32_t *mb_free);

/* U-Plane RX handler for SRS symbols (same NOTE as above). */
int32_t xran_process_srs_sym(void *arg,
                        struct rte_mbuf *mbuf,
                        void *iq_data_start,
                        uint8_t subframe_id,
                        uint16_t start_prbu,
                        uint32_t *mb_free);

void tti_ota_cb(struct rte_timer *tim, void *arg);
void tti_to_phy_cb(struct rte_timer *tim, void *arg);
void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore);
196 // Return SFN at current second start, 10 bits, [0, 1023]
\r
197 static inline uint16_t xran_getSfnSecStart(void)
\r
199 return xran_SFN_at_Sec_Start;
\r
201 void xran_updateSfnSecStart(void)
\r
203 uint64_t currentSecond = timing_get_current_second();
\r
204 // Assume always positive
\r
205 uint64_t gpsSecond = currentSecond - UNIX_TO_GPS_SECONDS_OFFSET;
\r
206 uint64_t nFrames = gpsSecond * NUM_OF_FRAMES_PER_SECOND;
\r
207 uint16_t sfn = (uint16_t)(nFrames % (xran_max_frame + 1));
\r
208 xran_SFN_at_Sec_Start = sfn;
\r
210 tx_bytes_per_sec = tx_bytes_counter;
\r
211 rx_bytes_per_sec = rx_bytes_counter;
\r
212 tx_bytes_counter = 0;
\r
213 rx_bytes_counter = 0;
\r
216 static inline int32_t xran_getSlotIdxSecond(void)
\r
218 int32_t frameIdxSecond = xran_getSfnSecStart();
\r
219 int32_t slotIndxSecond = frameIdxSecond * SLOTS_PER_SYSTEMFRAME;
\r
220 return slotIndxSecond;
\r
223 struct xran_device_ctx *xran_dev_get_ctx(void)
\r
225 return &g_xran_dev_ctx[0];
\r
228 static inline struct xran_fh_config *xran_lib_get_ctx_fhcfg(void)
\r
230 return (&(xran_dev_get_ctx()->fh_cfg));
\r
/* Beam ID lookup; beamforming is not supported, so beam 0 is always used.
 * All parameters are accepted for API compatibility and ignored. */
uint16_t xran_get_beamid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id)
{
    return (0);     // NO BEAMFORMING
}
238 enum xran_if_state xran_get_if_state(void)
\r
240 return xran_if_current_state;
\r
243 int xran_is_prach_slot(uint32_t subframe_id, uint32_t slot_id)
\r
245 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
246 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
\r
247 int32_t is_prach_slot = 0;
\r
249 if (p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology < 2){
\r
250 //for FR1, in 38.211 tab 6.3.3.2-2&3 it is subframe index
\r
251 if (pPrachCPConfig->isPRACHslot[subframe_id] == 1){
\r
252 if (pPrachCPConfig->nrofPrachInSlot != 1)
\r
255 if (p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology == 0)
\r
257 else if (slot_id == 1)
\r
261 } else if (p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology == 3){
\r
262 //for FR2, 38.211 tab 6.3.3.4 it is slot index of 60kHz slot
\r
264 slotidx = subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;
\r
265 if (pPrachCPConfig->nrofPrachInSlot == 2){
\r
266 if (pPrachCPConfig->isPRACHslot[slotidx>>1] == 1)
\r
269 if ((pPrachCPConfig->isPRACHslot[slotidx>>1] == 1) && ((slotidx % 2) == 1)){
\r
274 print_err("Numerology %d not supported", p_xran_dev_ctx->fh_cfg.frame_conf.nNumerology);
\r
275 return is_prach_slot;
\r
278 int xran_init_sectionid(void *pHandle)
\r
280 int cell, ant, dir;
\r
282 for (dir = 0; dir < XRAN_DIR_MAX; dir++){
\r
283 for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) {
\r
284 for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) {
\r
285 xran_section_id[dir][cell][ant] = 0;
\r
286 xran_section_id_curslot[dir][cell][ant] = 255;
\r
294 int xran_init_srs(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
\r
296 struct xran_srs_config *p_srs = &(p_xran_dev_ctx->srs_cfg);
\r
299 p_srs->symbMask = pConf->srs_conf.symbMask;
\r
300 p_srs->eAxC_offset = pConf->srs_conf.eAxC_offset;
\r
301 print_dbg("SRS sym %d\n", p_srs->symbMask );
\r
302 print_dbg("SRS eAxC_offset %d\n", p_srs->eAxC_offset);
\r
304 return (XRAN_STATUS_SUCCESS);
\r
308 int xran_init_prach(struct xran_fh_config* pConf, struct xran_device_ctx * p_xran_dev_ctx)
\r
312 struct xran_prach_config* pPRACHConfig = &(pConf->prach_conf);
\r
313 const xRANPrachConfigTableStruct *pxRANPrachConfigTable;
\r
314 uint8_t nNumerology = pConf->frame_conf.nNumerology;
\r
315 uint8_t nPrachConfIdx = pPRACHConfig->nPrachConfIdx;
\r
316 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
\r
318 if (nNumerology > 2)
\r
319 pxRANPrachConfigTable = &gxranPrachDataTable_mmw[nPrachConfIdx];
\r
320 else if (pConf->frame_conf.nFrameDuplexType == 1)
\r
321 pxRANPrachConfigTable = &gxranPrachDataTable_sub6_tdd[nPrachConfIdx];
\r
323 pxRANPrachConfigTable = &gxranPrachDataTable_sub6_fdd[nPrachConfIdx];
\r
325 uint8_t preambleFmrt = pxRANPrachConfigTable->preambleFmrt[0];
\r
326 const xRANPrachPreambleLRAStruct *pxranPreambleforLRA = &gxranPreambleforLRA[preambleFmrt];
\r
327 memset(pPrachCPConfig, 0, sizeof(struct xran_prach_cp_config));
\r
328 if(pConf->log_level)
\r
329 printf("xRAN open PRACH config: Numerology %u ConfIdx %u, preambleFmrt %u startsymb %u, numSymbol %u, occassionsInPrachSlot %u\n", nNumerology, nPrachConfIdx, preambleFmrt, pxRANPrachConfigTable->startingSym, pxRANPrachConfigTable->duration, pxRANPrachConfigTable->occassionsInPrachSlot);
\r
331 pPrachCPConfig->filterIdx = XRAN_FILTERINDEX_PRACH_ABC; // 3, PRACH preamble format A1~3, B1~4, C0, C2
\r
332 pPrachCPConfig->startSymId = pxRANPrachConfigTable->startingSym;
\r
333 pPrachCPConfig->startPrbc = pPRACHConfig->nPrachFreqStart;
\r
334 pPrachCPConfig->numPrbc = (preambleFmrt >= FORMAT_A1)? 12 : 70;
\r
335 pPrachCPConfig->timeOffset = pxranPreambleforLRA->nRaCp;
\r
336 pPrachCPConfig->freqOffset = xran_get_freqoffset(pPRACHConfig->nPrachFreqOffset, pPRACHConfig->nPrachSubcSpacing);
\r
337 pPrachCPConfig->x = pxRANPrachConfigTable->x;
\r
338 pPrachCPConfig->nrofPrachInSlot = pxRANPrachConfigTable->nrofPrachInSlot;
\r
339 pPrachCPConfig->y[0] = pxRANPrachConfigTable->y[0];
\r
340 pPrachCPConfig->y[1] = pxRANPrachConfigTable->y[1];
\r
341 if (preambleFmrt >= FORMAT_A1)
\r
343 pPrachCPConfig->numSymbol = pxRANPrachConfigTable->duration;
\r
344 pPrachCPConfig->occassionsInPrachSlot = pxRANPrachConfigTable->occassionsInPrachSlot;
\r
348 pPrachCPConfig->numSymbol = 1;
\r
349 pPrachCPConfig->occassionsInPrachSlot = 1;
\r
352 if(pConf->log_level)
\r
353 printf("PRACH: x %u y[0] %u, y[1] %u prach slot: %u ..", pPrachCPConfig->x, pPrachCPConfig->y[0], pPrachCPConfig->y[1], pxRANPrachConfigTable->slotNr[0]);
\r
354 pPrachCPConfig->isPRACHslot[pxRANPrachConfigTable->slotNr[0]] = 1;
\r
355 for (i=1; i < XRAN_PRACH_CANDIDATE_SLOT; i++)
\r
357 slotNr = pxRANPrachConfigTable->slotNr[i];
\r
359 pPrachCPConfig->isPRACHslot[slotNr] = 1;
\r
360 if(pConf->log_level)
\r
361 printf(" %u ..", slotNr);
\r
365 for (i = 0; i < XRAN_MAX_SECTOR_NR; i++){
\r
366 p_xran_dev_ctx->prach_start_symbol[i] = pPrachCPConfig->startSymId;
\r
367 p_xran_dev_ctx->prach_last_symbol[i] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol * pPrachCPConfig->occassionsInPrachSlot - 1;
\r
369 if(pConf->log_level){
\r
370 printf("PRACH start symbol %u lastsymbol %u\n", p_xran_dev_ctx->prach_start_symbol[0], p_xran_dev_ctx->prach_last_symbol[0]);
\r
373 pPrachCPConfig->eAxC_offset = xran_get_num_eAxc(NULL);
\r
374 print_dbg("PRACH eAxC_offset %d\n", pPrachCPConfig->eAxC_offset);
\r
376 return (XRAN_STATUS_SUCCESS);
\r
379 inline uint16_t xran_alloc_sectionid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id)
\r
381 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
382 print_err("Invalid CC ID - %d", cc_id);
\r
385 if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) { //for PRACH, ant_id starts from num_ant
\r
386 print_err("Invalid antenna ID - %d", ant_id);
\r
390 /* if new slot has been started,
\r
391 * then initializes section id again for new start */
\r
392 if(xran_section_id_curslot[dir][cc_id][ant_id] != slot_id) {
\r
393 xran_section_id[dir][cc_id][ant_id] = 0;
\r
394 xran_section_id_curslot[dir][cc_id][ant_id] = slot_id;
\r
397 return(xran_section_id[dir][cc_id][ant_id]++);
\r
400 int xran_init_seqid(void *pHandle)
\r
402 int cell, dir, ant;
\r
404 for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) {
\r
405 for(dir=0; dir < XRAN_DIR_MAX; dir++) {
\r
406 for(ant=0; ant < XRAN_MAX_ANTENNA_NR * 2; ant++)
\r
407 xran_cp_seq_id_num[cell][dir][ant] = 0;
\r
409 for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++)
\r
410 xran_updl_seq_id_num[cell][ant] = 0;
\r
411 for(ant=0; ant < XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR; ant++)
\r
412 xran_upul_seq_id_num[cell][ant] = 0;
\r
418 static inline uint8_t xran_get_cp_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id)
\r
420 if(dir >= XRAN_DIR_MAX) {
\r
421 print_err("Invalid direction - %d", dir);
\r
424 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
425 print_err("Invalid CC ID - %d", cc_id);
\r
428 if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {
\r
429 print_err("Invalid antenna ID - %d", ant_id);
\r
433 return(xran_cp_seq_id_num[cc_id][dir][ant_id]++);
\r
435 static inline uint8_t xran_get_updl_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id)
\r
437 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
438 print_err("Invalid CC ID - %d", cc_id);
\r
441 if(ant_id >= XRAN_MAX_ANTENNA_NR) {
\r
442 print_err("Invalid antenna ID - %d", ant_id);
\r
446 /* Only U-Plane DL needs to get sequence ID in O-DU */
\r
447 return(xran_updl_seq_id_num[cc_id][ant_id]++);
\r
449 static inline uint8_t *xran_get_updl_seqid_addr(void *pHandle, uint8_t cc_id, uint8_t ant_id)
\r
451 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
452 print_err("Invalid CC ID - %d", cc_id);
\r
455 if(ant_id >= XRAN_MAX_ANTENNA_NR) {
\r
456 print_err("Invalid antenna ID - %d", ant_id);
\r
460 /* Only U-Plane DL needs to get sequence ID in O-DU */
\r
461 return(&xran_updl_seq_id_num[cc_id][ant_id]);
\r
463 static inline int8_t xran_check_upul_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id, uint8_t seq_id)
\r
466 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
467 print_err("Invalid CC ID - %d", cc_id);
\r
471 if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {
\r
472 print_err("Invalid antenna ID - %d", ant_id);
\r
476 /* O-DU needs to check the sequence ID of U-Plane UL from O-RU */
\r
477 xran_upul_seq_id_num[cc_id][ant_id]++;
\r
478 if(xran_upul_seq_id_num[cc_id][ant_id] == seq_id) { /* expected sequence */
\r
479 return (XRAN_STATUS_SUCCESS);
\r
481 print_err("expected seqid %u received %u, slot %u, ant %u cc %u", xran_upul_seq_id_num[cc_id][ant_id], seq_id, slot_id, ant_id, cc_id);
\r
482 xran_upul_seq_id_num[cc_id][ant_id] = seq_id; // for next
\r
487 //////////////////////////////////////////
\r
488 // For RU emulation
\r
489 static inline uint8_t xran_get_upul_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id)
\r
491 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
492 print_err("Invalid CC ID - %d", cc_id);
\r
495 if(ant_id >= XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR) {
\r
496 print_err("Invalid antenna ID - %d", ant_id);
\r
500 return(xran_upul_seq_id_num[cc_id][ant_id]++);
\r
502 static inline uint8_t *xran_get_upul_seqid_addr(void *pHandle, uint8_t cc_id, uint8_t ant_id)
\r
504 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
505 print_err("Invalid CC ID - %d", cc_id);
\r
508 if(ant_id >= XRAN_MAX_ANTENNA_NR * 2) {
\r
509 print_err("Invalid antenna ID - %d", ant_id);
\r
513 return(&xran_upul_seq_id_num[cc_id][ant_id]);
\r
515 static inline int8_t xran_check_cp_seqid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t seq_id)
\r
517 if(dir >= XRAN_DIR_MAX) {
\r
518 print_err("Invalid direction - %d", dir);
\r
521 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
522 print_err("Invalid CC ID - %d", cc_id);
\r
525 if(ant_id >= XRAN_MAX_ANTENNA_NR * 2) {
\r
526 print_err("Invalid antenna ID - %d", ant_id);
\r
530 xran_cp_seq_id_num[cc_id][dir][ant_id]++;
\r
531 if(xran_cp_seq_id_num[cc_id][dir][ant_id] == seq_id) { /* expected sequence */
\r
535 xran_cp_seq_id_num[cc_id][dir][ant_id] = seq_id;
\r
539 static inline int8_t xran_check_updl_seqid(void *pHandle, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id, uint8_t seq_id)
\r
541 if(cc_id >= XRAN_MAX_CELLS_PER_PORT) {
\r
542 print_err("Invalid CC ID - %d", cc_id);
\r
546 if(ant_id >= XRAN_MAX_ANTENNA_NR) {
\r
547 print_err("Invalid antenna ID - %d", ant_id);
\r
551 /* O-RU needs to check the sequence ID of U-Plane DL from O-DU */
\r
552 xran_updl_seq_id_num[cc_id][ant_id]++;
\r
553 if(xran_updl_seq_id_num[cc_id][ant_id] == seq_id) {
\r
554 /* expected sequence */
\r
555 /*print_dbg("ant %u cc_id %u : slot_id %u : seq_id %u : expected seq_id %u\n",
\r
556 ant_id, cc_id, slot_id, seq_id, xran_updl_seq_id_num[cc_id][ant_id]);*/
\r
559 /* print_err("ant %u cc_id %u : slot_id %u : seq_id %u : expected seq_id %u\n",
\r
560 ant_id, cc_id, slot_id, seq_id, xran_updl_seq_id_num[cc_id][ant_id]);*/
\r
562 xran_updl_seq_id_num[cc_id][ant_id] = seq_id;
\r
569 static struct xran_section_gen_info cpSections[XRAN_MAX_NUM_SECTIONS];
\r
570 static struct xran_cp_gen_params cpInfo;
\r
571 int process_cplane(struct rte_mbuf *pkt)
\r
573 struct xran_recv_packet_info recv;
\r
575 cpInfo.sections = cpSections;
\r
576 xran_parse_cp_pkt(pkt, &cpInfo, &recv);
\r
578 return (MBUF_FREE);
\r
580 //////////////////////////////////////////
\r
582 void sym_ota_cb(struct rte_timer *tim, void *arg, unsigned long *used_tick)
\r
584 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
585 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;
\r
586 long t1 = MLogTick(), t2;
\r
588 static int32_t ctx = 0;
\r
590 if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 0){
\r
592 tti_ota_cb(NULL, arg);
\r
593 *used_tick += get_ticks_diff(xran_tick(), t3);
\r
596 if(XranGetSymNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT) == 3){
\r
597 if(p_xran_dev_ctx->phy_tti_cb_done == 0){
\r
598 /* rearm timer to deliver TTI event to PHY */
\r
600 p_xran_dev_ctx->phy_tti_cb_done = 0;
\r
601 xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_dev_ctx->fh_init.io_cfg.timing_core);
\r
602 *used_tick += get_ticks_diff(xran_tick(), t3);
\r
607 if (xran_process_tx_sym(timer_ctx))
\r
609 *used_tick += get_ticks_diff(xran_tick(), t3);
\r
612 /* check if there is call back to do something else on this symbol */
\r
614 struct cb_elem_entry *cb_elm;
\r
615 LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[0][xran_lib_ota_sym], pointers){
\r
617 cb_elm->pSymCallback(&dpdk_timer[ctx], cb_elm->pSymCallbackTag);
\r
618 ctx = DpdkTimerIncrementCtx(ctx);
\r
622 // This counter is incremented in advance before it is the time for the next symbol
\r
623 xran_lib_ota_sym++;
\r
624 if(xran_lib_ota_sym >= N_SYM_PER_SLOT){
\r
625 xran_lib_ota_sym=0;
\r
629 MLogTask(PID_SYM_OTA_CB, t1, t2);
\r
632 void tti_ota_cb(struct rte_timer *tim, void *arg)
\r
634 uint32_t frame_id = 0;
\r
635 uint32_t subframe_id = 0;
\r
636 uint32_t slot_id = 0;
\r
637 uint32_t next_tti = 0;
\r
639 uint32_t mlogVar[10];
\r
640 uint32_t mlogVarCnt = 0;
\r
641 uint64_t t1 = MLogTick();
\r
643 uint32_t reg_tti = 0;
\r
644 uint32_t reg_sfn = 0;
\r
645 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;
\r
646 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
648 MLogTask(PID_TTI_TIMER, t1, MLogTick());
\r
650 /* To match TTbox */
\r
651 if(xran_lib_ota_tti == 0)
\r
652 reg_tti = xran_fs_get_max_slot() - 1;
\r
654 reg_tti = xran_lib_ota_tti -1;
\r
655 MLogIncrementCounter();
\r
656 reg_sfn = XranGetFrameNum(reg_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME)*10 + XranGetSubFrameNum(reg_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);;
\r
657 /* subframe and slot */
\r
658 MLogRegisterFrameSubframe(reg_sfn, reg_tti % (SLOTNUM_PER_SUBFRAME));
\r
661 slot_id = XranGetSlotNum(xran_lib_ota_tti, SLOTNUM_PER_SUBFRAME);
\r
662 subframe_id = XranGetSubFrameNum(xran_lib_ota_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
\r
663 frame_id = XranGetFrameNum(xran_lib_ota_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
\r
665 pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process = xran_lib_ota_tti;
\r
667 mlogVar[mlogVarCnt++] = 0x11111111;
\r
668 mlogVar[mlogVarCnt++] = xran_lib_ota_tti;
\r
669 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx;
\r
670 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx / 14;
\r
671 mlogVar[mlogVarCnt++] = frame_id;
\r
672 mlogVar[mlogVarCnt++] = subframe_id;
\r
673 mlogVar[mlogVarCnt++] = slot_id;
\r
674 mlogVar[mlogVarCnt++] = 0;
\r
675 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
\r
677 if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_LLS_CU)
\r
678 next_tti = xran_lib_ota_tti + 1;
\r
680 next_tti = xran_lib_ota_tti;
\r
682 if(next_tti>= xran_fs_get_max_slot()){
\r
683 print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
\r
687 slot_id = XranGetSlotNum(next_tti, SLOTNUM_PER_SUBFRAME);
\r
688 subframe_id = XranGetSubFrameNum(next_tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
\r
689 frame_id = XranGetFrameNum(next_tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
\r
691 print_dbg("[%d]SFN %d sf %d slot %d\n",next_tti, frame_id, subframe_id, slot_id);
\r
693 if(p_xran_dev_ctx->fh_init.io_cfg.id == ID_LLS_CU){
\r
694 pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = next_tti;
\r
696 pTCtx[(xran_lib_ota_tti & 1)].tti_to_process = pTCtx[(xran_lib_ota_tti & 1)^1].tti_to_process;
\r
699 p_xran_dev_ctx->phy_tti_cb_done = 0;
\r
700 xran_timer_arm_ex(&tti_to_phy_timer[xran_lib_ota_tti % 10], tti_to_phy_cb, (void*)pTCtx, p_xran_dev_ctx->fh_init.io_cfg.timing_core);
\r
702 //slot index is increased to next slot at the beginning of current OTA slot
\r
703 xran_lib_ota_tti++;
\r
704 if(xran_lib_ota_tti >= xran_fs_get_max_slot()){
\r
705 print_dbg("[%d]SFN %d sf %d slot %d\n",xran_lib_ota_tti, frame_id, subframe_id, slot_id);
\r
706 xran_lib_ota_tti=0;
\r
708 MLogTask(PID_TTI_CB, t1, MLogTick());
\r
711 void xran_timer_arm(struct rte_timer *tim, void* arg)
\r
713 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
714 uint64_t t3 = MLogTick();
\r
716 if (xran_if_current_state == XRAN_RUNNING){
\r
717 rte_timer_cb_t fct = (rte_timer_cb_t)arg;
\r
718 rte_timer_init(tim);
\r
719 rte_timer_reset_sync(tim, 0, SINGLE, p_xran_dev_ctx->fh_init.io_cfg.timing_core, fct, &timer_ctx[0]);
\r
721 MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick());
\r
724 void xran_timer_arm_ex(struct rte_timer *tim, void* CbFct, void *CbArg, unsigned tim_lcore)
\r
726 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
727 uint64_t t3 = MLogTick();
\r
729 if (xran_if_current_state == XRAN_RUNNING){
\r
730 rte_timer_cb_t fct = (rte_timer_cb_t)CbFct;
\r
731 rte_timer_init(tim);
\r
732 rte_timer_reset_sync(tim, 0, SINGLE, tim_lcore, fct, CbArg);
\r
734 MLogTask(PID_TIME_ARM_TIMER, t3, MLogTick());
\r
/* Build a C-Plane section-type-1 packet per PRB-map element, send it on the
 * eCPRI ring, and record the section in the section database.
 * NOTE(review): multiple control-flow lines (opening/closing braces, NULL
 * guards, return statements) appear to be missing from this body — verify the
 * whole function against the canonical source before building. Two
 * mis-encoded tokens were repaired: `&params` and `&sect_geninfo`. */
int xran_cp_create_and_send_section(void *pHandle, uint8_t ru_port_id, int dir, int tti, int cc_id,
        struct xran_prb_map *prbMap, enum xran_category category,  uint8_t ctx_id)
    struct xran_device_ctx *p_x_ctx = xran_dev_get_ctx();
    struct xran_cp_gen_params params;
    struct xran_section_gen_info sect_geninfo[1];
    struct rte_mbuf *mbuf;
    uint32_t i, j, loc_sym;
    uint32_t nsection = 0;
    struct xran_prb_elm *pPrbMapElem = NULL;
    struct xran_prb_elm *pPrbMapElemPrev = NULL;
    /* Decompose the TTI into ORAN frame/subframe/slot coordinates. */
    uint32_t slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
    uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);
    uint32_t frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
    frame_id = (frame_id & 0xff); /* ORAN frameId, 8 bits, [0, 255] */
    uint8_t seq_id = 0;
    struct xran_sectionext1_info m_ext1;
    nsection = prbMap->nPrbElm;
    pPrbMapElem = &prbMap->prbMap[0];
    print_dbg("cp[%d:%d:%d] ru_port_id %d dir=%d nsection %d\n",
            frame_id, subframe_id, slot_id, ru_port_id, dir, nsection);
    print_err("prbMap is NULL\n");
    /* One C-Plane packet per PRB-map element. */
    for (i=0; i<nsection; i++)
        pPrbMapElem = &prbMap->prbMap[i];
        params.sectionType = XRAN_CP_SECTIONTYPE_1;        /* Most DL/UL Radio Channels */
        params.hdr.filterIdx    = XRAN_FILTERINDEX_STANDARD;
        params.hdr.frameId      = frame_id;
        params.hdr.subframeId   = subframe_id;
        params.hdr.slotId       = slot_id;
        params.hdr.startSymId   = pPrbMapElem->nStartSymb;
        params.hdr.iqWidth      = pPrbMapElem->iqWidth; /*xran_get_conf_iqwidth(pHandle);*/
        params.hdr.compMeth     = pPrbMapElem->compMethod;
        print_dbg("cp[%d:%d:%d] ru_port_id %d dir=%d\n",
                frame_id, subframe_id, slot_id, ru_port_id, dir);
        seq_id = xran_get_cp_seqid(pHandle, XRAN_DIR_DL, cc_id, ru_port_id);
        sect_geninfo[0].info.type        = params.sectionType;       // for database
        sect_geninfo[0].info.startSymId  = params.hdr.startSymId;    // for database
        sect_geninfo[0].info.iqWidth     = params.hdr.iqWidth;       // for database
        sect_geninfo[0].info.compMeth    = params.hdr.compMeth;      // for database
        sect_geninfo[0].info.id          = i; /*xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, slot_id);*/
        if(sect_geninfo[0].info.id > 7)
            print_err("sectinfo->id %d\n", sect_geninfo[0].info.id);
        if (dir == XRAN_DIR_UL) {
            /* UL: free any previously attached control mbufs per symbol. */
            for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++){
                struct xran_section_desc *p_sec_desc = pPrbMapElem->p_sec_desc[loc_sym];
                    p_sec_desc->section_id   = sect_geninfo[0].info.id;
                    if(p_sec_desc->pCtrl) {
                        rte_pktmbuf_free(p_sec_desc->pCtrl);
                        p_sec_desc->pCtrl = NULL;
                        p_sec_desc->pData = NULL;
                    print_err("section desc is NULL\n");
        sect_geninfo[0].info.rb          = XRAN_RBIND_EVERY;
        sect_geninfo[0].info.startPrbc   = pPrbMapElem->nRBStart;
        sect_geninfo[0].info.numPrbc     = pPrbMapElem->nRBSize;
        sect_geninfo[0].info.numSymbol   = pPrbMapElem->numSymb;
        sect_geninfo[0].info.reMask      = 0xfff;
        sect_geninfo[0].info.beamId      = pPrbMapElem->nBeamIndex;
        /* Record per-symbol IQ buffer offsets in the section database. */
        for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++){
            struct xran_section_desc *p_sec_desc = pPrbMapElem->p_sec_desc[loc_sym];
                p_sec_desc->section_id   = sect_geninfo[0].info.id;
                sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_offset = p_sec_desc->iq_buffer_offset;
                sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_len    = p_sec_desc->iq_buffer_len;
                print_err("section desc is NULL\n");
        /* symInc: INC only when this element continues the previous element's
         * symbol range; consistency of numSymb/startSymb is checked below. */
            sect_geninfo[0].info.symInc      = XRAN_SYMBOLNUMBER_NOTINC;
            pPrbMapElemPrev = &prbMap->prbMap[i-1];
            if (pPrbMapElemPrev->nStartSymb == pPrbMapElem->nStartSymb)
                sect_geninfo[0].info.symInc      = XRAN_SYMBOLNUMBER_NOTINC;
                if (pPrbMapElemPrev->numSymb != pPrbMapElem->numSymb)
                    print_err("section info error: previous numSymb %d not equal to current numSymb %d\n", pPrbMapElemPrev->numSymb, pPrbMapElem->numSymb);
                sect_geninfo[0].info.symInc      = XRAN_SYMBOLNUMBER_INC;
                if (pPrbMapElem->nStartSymb != (pPrbMapElemPrev->nStartSymb + pPrbMapElemPrev->numSymb))
                    print_err("section info error: current startSym %d not equal to previous endSymb %d\n", pPrbMapElem->nStartSymb, pPrbMapElemPrev->nStartSymb + pPrbMapElemPrev->numSymb);
        if(category == XRAN_CATEGORY_A){
            /* no extention sections for category */
            sect_geninfo[0].info.ef          = 0;
            sect_geninfo[0].exDataSize       = 0;
        } else if (category == XRAN_CATEGORY_B) {
            /*add extantion section for BF Weights if update is needed */
            if(pPrbMapElem->bf_weight_update){
                memset(&m_ext1, 0, sizeof (struct xran_sectionext1_info));
                m_ext1.bfwNumber      = pPrbMapElem->bf_weight.nAntElmTRx;
                m_ext1.bfwiqWidth     = pPrbMapElem->iqWidth;
                m_ext1.bfwCompMeth    = pPrbMapElem->compMethod;
                m_ext1.p_bfwIQ        = (int16_t*)pPrbMapElem->bf_weight.p_ext_section;
                m_ext1.bfwIQ_sz       = pPrbMapElem->bf_weight.ext_section_sz;
                sect_geninfo[0].exData[0].type = XRAN_CP_SECTIONEXTCMD_1;
                sect_geninfo[0].exData[0].len  = sizeof(m_ext1);
                sect_geninfo[0].exData[0].data = &m_ext1;
                sect_geninfo[0].info.ef       = 1;
                sect_geninfo[0].exDataSize    = 1;
                sect_geninfo[0].info.ef          = 0;
                sect_geninfo[0].exDataSize       = 0;
            print_err("Unsupported Category %d\n", category);
        /* One section per packet. */
        params.numSections    = 1;//nsection;
        params.sections       = sect_geninfo;
        mbuf = xran_ethdi_mbuf_alloc();
        if(unlikely(mbuf == NULL)) {
            print_err("Alloc fail!\n");
        ret = xran_prepare_ctrl_pkt(mbuf, &params, cc_id, ru_port_id, seq_id);
            print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",
                        frame_id, subframe_id, slot_id, dir);
            /* add in the ethernet header */
            struct ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
            tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);
            p_x_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI);
            /*for(i=0; i<nsection; i++)*/
                xran_cp_add_section_info(pHandle,
                        dir, cc_id, ru_port_id,
                        &sect_geninfo[0].info);
/* DL C-Plane transmit callback: for the TTI published by tti_ota_cb(), walk
 * every eAxC and component carrier, reset the section database, and send
 * section-type-1 packets for slots marked as DL.
 * NOTE(review): several declaration lines (pHandle, tti, buf_id, ctx_id,
 * num_list, cc_id) and braces appear to be missing from this body — verify
 * against the canonical source before building. */
void tx_cp_dl_cb(struct rte_timer *tim, void *arg)
    long t1 = MLogTick();
    uint32_t slot_id, subframe_id, frame_id;
    uint8_t ant_id, num_eAxc, num_CCPorts;
    struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
    struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;
    pHandle = NULL;     // TODO: temporary implementation
    num_eAxc    = xran_get_num_eAxc(pHandle);
    num_CCPorts = xran_get_num_cc(pHandle);
    /* Only send C-Plane once the system has started and C-Plane is enabled. */
    if(first_call && p_xran_dev_ctx->enableCP) {
        /* Pick up the TTI prepared by tti_ota_cb() in the other context. */
        tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process;
        buf_id = tti % XRAN_N_FE_BUF_LEN;
        slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
        subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME,  SUBFRAMES_PER_SYSTEMFRAME);
        frame_id    = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
            /* Wrap around to next second */
            frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
        ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME) % XRAN_MAX_SECTIONDB_CTX;
        print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
        for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
            for(cc_id = 0; cc_id < num_CCPorts; cc_id++ ) {
                /* start new section information list */
                xran_cp_reset_section_info(pHandle, XRAN_DIR_DL, cc_id, ant_id, ctx_id);
                if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) {
                    if(p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData){
                        num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_DL, tti, cc_id,
                            (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
                            p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
                        print_err("[%d]SFN %d sf %d slot %d: ant_id %d cc_id %d \n", tti, frame_id, subframe_id, slot_id, ant_id, cc_id);
                } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_DL) == 1) */
            } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
        } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
    MLogTask(PID_CP_DL_CB, t1, MLogTick());
/*
 * rx_ul_deadline_half_cb() - rte_timer callback fired at the half-slot UL
 * receive deadline; notifies L1 (per CC) that the first half of the current
 * slot's UL symbols should be available, unless packets already triggered
 * the callback (tracker != 0).
 */
963 void rx_ul_deadline_half_cb(struct rte_timer *tim, void *arg)
965 long t1 = MLogTick();
966 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
967 xran_status_t status;
968 /* half of RX for current TTI as measured against current OTA time */
969 int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
971 uint32_t nFrameIdx;
972 uint32_t nSubframeIdx;
/* Recompute rx_tti from the PTP-derived slot index (overrides the OTA value above). */
976 uint32_t nXranTime = xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
977 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
978 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
/* Bail out early if the xran-to-PHY buffers are not ready yet. */
981 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
984 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
/* Fire the deadline callback only if packet RX has not already done so. */
985 if(p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] == 0){
986 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
987 pTag->slotiId = rx_tti;
988 pTag->symbol = 0; /* symbol 0 marks the half-slot notification (7 marks full slot) */
989 status = XRAN_STATUS_SUCCESS;
990 if(p_xran_dev_ctx->pCallback[cc_id])
991 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
/* else branch (elided): clear the tracker for the next use of this buffer slot. */
993 p_xran_dev_ctx->rx_packet_callback_tracker[rx_tti % XRAN_N_FE_BUF_LEN][cc_id] = 0;
996 MLogTask(PID_UP_UL_HALF_DEAD_LINE_CB, t1, MLogTick());
\r
/*
 * rx_ul_deadline_full_cb() - rte_timer callback fired at the full-slot UL
 * receive deadline; notifies L1 (per CC) that the whole previous slot's UL
 * symbols are available, and additionally invokes the PRACH callback.
 */
999 void rx_ul_deadline_full_cb(struct rte_timer *tim, void *arg)
1001 long t1 = MLogTick();
1002 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1003 xran_status_t status = 0;
1004 int32_t rx_tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1005 int32_t cc_id = 0;
1006 uint32_t nFrameIdx;
1007 uint32_t nSubframeIdx;
1008 uint32_t nSlotIdx;
/* Recompute rx_tti from the PTP-derived slot index (overrides the OTA value above). */
1011 uint32_t nXranTime = xran_get_slot_idx(&nFrameIdx, &nSubframeIdx, &nSlotIdx, &nSecond);
1012 rx_tti = nFrameIdx*SUBFRAMES_PER_SYSTEMFRAME*SLOTNUM_PER_SUBFRAME
1013 + nSubframeIdx*SLOTNUM_PER_SUBFRAME
/* Wrap to the last slot of the SFN period -- guard condition elided in this
 * listing; presumably taken when rx_tti would underflow (TODO confirm). */
1017 rx_tti = (xran_fs_get_max_slot_SFN()-1);
1019 rx_tti -= 1; /* end of RX for prev TTI as measured against current OTA time */
/* Bail out early if the xran-to-PHY buffers are not ready yet. */
1021 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
1025 for(cc_id = 0; cc_id < xran_get_num_cc(p_xran_dev_ctx); cc_id++) {
1026 struct xran_cb_tag *pTag = p_xran_dev_ctx->pCallbackTag[cc_id];
1027 pTag->slotiId = rx_tti;
1028 pTag->symbol = 7; /* last (7th) sym means full slot of symbols received */
1029 status = XRAN_STATUS_SUCCESS;
1030 if(p_xran_dev_ctx->pCallback[cc_id])
1031 p_xran_dev_ctx->pCallback[cc_id](p_xran_dev_ctx->pCallbackTag[cc_id], status);
/* PRACH completion is reported with its own tag/callback pair. */
1033 if(p_xran_dev_ctx->pPrachCallback[cc_id]){
1034 struct xran_cb_tag *pTag = p_xran_dev_ctx->pPrachCallbackTag[cc_id];
1035 pTag->slotiId = rx_tti;
1036 pTag->symbol = 7; /* last 7 sym means full slot of Symb */
1037 p_xran_dev_ctx->pPrachCallback[cc_id](p_xran_dev_ctx->pPrachCallbackTag[cc_id], status);
1041 MLogTask(PID_UP_UL_FULL_DEAD_LINE_CB, t1, MLogTick());
\r
/*
 * tx_cp_ul_cb() - rte_timer callback that builds and transmits UL C-Plane
 * section messages (and, when enabled, PRACH C-Plane messages) for the TTI
 * scheduled in the timer context.
 *
 * FIX(review): the listing contained HTML-entity mojibake -- "&params" had
 * been decoded to the pilcrow form "(pilcrow)ms" at embedded lines 1113 and
 * 1118; restored to "&params" to match the local 'struct xran_cp_gen_params
 * params' declared at embedded line 1105.
 *
 * NOTE(review): this excerpt is a numbered listing with lines elided
 * (declarations of tti/buf_id/cc_id/ctx_id/num_list/beam_id/ret/pHandle and
 * several braces are missing) -- do not treat as compilable as-is.
 */
1045 void tx_cp_ul_cb(struct rte_timer *tim, void *arg)
1047 long t1 = MLogTick();
1050 uint32_t slot_id, subframe_id, frame_id;
1052 int ant_id, prach_port_id;
1054 uint8_t num_eAxc, num_CCPorts;
1060 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1061 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
1062 struct xran_timer_ctx *pTCtx = (struct xran_timer_ctx *)arg;
1064 pHandle = NULL; // TODO: temporary implementation -- handle not yet plumbed through
/* Category A RUs use the DL eAxC count for UL as well; otherwise use the UL count. */
1066 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
1067 num_eAxc = xran_get_num_eAxc(pHandle);
1069 num_eAxc = xran_get_num_eAxcUl(pHandle);
1071 num_CCPorts = xran_get_num_cc(pHandle);
/* Double-buffered timer ctx: process the entry NOT owned by the current OTA TTI. */
1072 tti = pTCtx[(xran_lib_ota_tti & 1) ^ 1].tti_to_process;
1073 buf_id = tti % XRAN_N_FE_BUF_LEN;
1074 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
1075 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
1076 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
1078 //Wrap around to next second; SFN is 10 bits (0..1023), hence the & 0x3ff.
1079 frame_id = (frame_id + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
1081 ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME) % XRAN_MAX_SECTIONDB_CTX;
1083 if(first_call && p_xran_dev_ctx->enableCP) {
1085 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
1087 for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
1088 for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
/* UL C-Plane is sent for UL and special (SP) slots. */
1089 if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) == 1 ||
1090 xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) == 1 ){
1091 /* start new section information list */
1092 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, ant_id, ctx_id);
1093 num_list = xran_cp_create_and_send_section(pHandle, ant_id, XRAN_DIR_UL, tti, cc_id,
1094 (struct xran_prb_map *)p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[buf_id][cc_id][ant_id].sBufferList.pBuffers->pData,
1095 p_xran_dev_ctx->fh_cfg.ru_conf.xranCat, ctx_id);
1096 } /* if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) == 1 */
1097 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
1098 } /* for(ant_id = 0; ant_id < num_eAxc; ++ant_id) */
1100 if(p_xran_dev_ctx->enablePrach) {
1101 uint32_t is_prach_slot = xran_is_prach_slot(subframe_id, slot_id);
/* PRACH occasion check: frame matches the x/y periodicity AND this is a PRACH slot. */
1102 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0]) && (is_prach_slot==1)) { //is prach slot
1103 for(ant_id = 0; ant_id < num_eAxc; ++ant_id) {
1104 for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
1105 struct xran_cp_gen_params params;
1106 struct xran_section_gen_info sect_geninfo[8];
1107 struct rte_mbuf *mbuf = xran_ethdi_mbuf_alloc();
/* PRACH eAxC IDs sit directly after the regular UL eAxC range. */
1108 prach_port_id = ant_id + num_eAxc;
1109 /* start new section information list */
1110 xran_cp_reset_section_info(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, ctx_id);
1112 beam_id = xran_get_beamid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id);
1113 ret = generate_cpmsg_prach(pHandle, &params, sect_geninfo, mbuf, p_xran_dev_ctx,
1114 frame_id, subframe_id, slot_id,
1115 beam_id, cc_id, prach_port_id,
1116 xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));
1117 if (ret == XRAN_STATUS_SUCCESS)
1118 send_cpmsg(pHandle, mbuf, &params, sect_geninfo,
1119 cc_id, prach_port_id, xran_get_cp_seqid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id));
1124 } /* if(p_xran_dev_ctx->enableCP) */
1126 MLogTask(PID_CP_UL_CB, t1, MLogTick());
\r
/*
 * ul_up_full_slot_cb() - rte_timer callback placeholder for the full-slot
 * UL U-Plane event; currently only records an MLog task span (any body
 * between the two timestamps is elided in this listing).
 */
1129 void ul_up_full_slot_cb(struct rte_timer *tim, void *arg)
1131 long t1 = MLogTick();
1133 MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
\r
/*
 * tti_to_phy_cb() - rte_timer callback that delivers the per-TTI tick to the
 * PHY: invokes the registered XRAN_CB_TTI callback unless a skip count is
 * pending, and detects the end-of-SFN-period boundary.
 */
1136 void tti_to_phy_cb(struct rte_timer *tim, void *arg)
1138 long t1 = MLogTick();
1139 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1141 p_xran_dev_ctx->phy_tti_cb_done = 1; /* DPDK called CB */
1143 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
/* SkipTti > 0 suppresses the callback and is decremented instead. */
1144 if(p_xran_dev_ctx->SkipTti[XRAN_CB_TTI] <= 0){
1145 p_xran_dev_ctx->ttiCb[XRAN_CB_TTI](p_xran_dev_ctx->TtiCbParam[XRAN_CB_TTI]);
1147 p_xran_dev_ctx->SkipTti[XRAN_CB_TTI]--;
/* Second branch (guard lines elided in this listing): runs when the first
 * branch was not taken -- presumably the "CB not yet done" path; TODO confirm. */
1151 if(p_xran_dev_ctx->ttiCb[XRAN_CB_TTI]){
1152 int32_t tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
1153 uint32_t slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
1154 uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
1155 uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
/* Detect the very last slot of the SFN wrap period. */
1156 if((frame_id == xran_max_frame)&&(subframe_id==9)&&(slot_id == SLOTNUM_PER_SUBFRAME-1)) { //(tti == xran_fs_get_max_slot()-1)
1162 MLogTask(PID_TTI_CB_TO_PHY, t1, MLogTick());
\r
/*
 * xran_timing_source_thread() - the real-time timing thread. Pins itself to
 * the configured timing core at SCHED priority 98, waits for a top-of-second
 * alignment, derives the C-Plane/U-Plane trigger offsets (in symbols) from
 * the configured T1a/Ta3/Ta4 windows, registers per-symbol callbacks, then
 * polls symbol ticks until the library is stopped.
 *
 * NOTE(review): numbered listing with many lines elided (do/while heads,
 * else arms, 'buff' declaration, loop around poll_next_tick, cleanup
 * braces); not compilable as excerpted.
 */
1165 int xran_timing_source_thread(void *args)
1169 int32_t do_reset = 0;
1172 int32_t result1,i,j;
1173 uint32_t delay_cp_dl;
1174 uint32_t delay_cp_ul;
1175 uint32_t delay_up;
1176 uint32_t delay_up_ul;
1177 uint32_t delay_cp2up;
1178 uint32_t sym_cp_dl;
1179 uint32_t sym_cp_ul;
1180 uint32_t sym_up_ul;
1182 struct sched_param sched_param;
1183 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1184 uint64_t tWake = 0, tWakePrev = 0, tUsed = 0;
1185 struct cb_elem_entry * cb_elm = NULL;
1187 /* ToS = Top of Second start +- 1.5us */
1188 struct timespec ts;
1192 xran_core_used = rte_lcore_id();
1193 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
1195 /* set main thread affinity mask to CPU2 */
1196 sched_param.sched_priority = 98;
1198 CPU_ZERO(&cpuset);
1199 CPU_SET(p_xran_dev_ctx->fh_init.io_cfg.timing_core, &cpuset);
/* NOTE(review): assignment inside the condition (no extra parens) -- legal
 * but warning-prone; pthread_setaffinity_np returns non-zero on failure. */
1200 if (result1 = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset))
1202 printf("pthread_setaffinity_np failed: coreId = 2, result1 = %d\n",result1);
1204 if ((result1 = pthread_setschedparam(pthread_self(), 1, &sched_param)))
1206 printf("priority is not changed: coreId = 2, result1 = %d\n",result1);
/* O-DU side: busy-wait until just past a second boundary (tv_nsec <= 1500ns). */
1209 if (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
1211 timespec_get(&ts, TIME_UTC);
1212 }while (ts.tv_nsec >1500);
1213 struct tm * ptm = gmtime(&ts.tv_sec);
1215 strftime(buff, sizeof buff, "%D %T", ptm);
1216 printf("O-DU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us);
/* Trigger offsets: C-Plane fires (interval - T1a_max) after TTI start;
 * U-Plane DL leads OTA by T1a_max_up; U-Plane UL deadline trails by Ta4_max. */
1219 delay_cp_dl = interval_us - p_xran_dev_ctx->fh_init.T1a_max_cp_dl;
1220 delay_cp_ul = interval_us - p_xran_dev_ctx->fh_init.T1a_max_cp_ul;
1221 delay_up = p_xran_dev_ctx->fh_init.T1a_max_up;
1222 delay_up_ul = p_xran_dev_ctx->fh_init.Ta4_max;
1224 delay_cp2up = delay_up-delay_cp_dl;
/* Convert microsecond delays into symbol indices ((interval_us*1000)/N_SYM
 * is the symbol duration in ns); +1 rounds up to the next symbol tick. */
1226 sym_cp_dl = delay_cp_dl*1000/(interval_us*1000/N_SYM_PER_SLOT)+1;
1227 sym_cp_ul = delay_cp_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1;
1228 sym_up_ul = delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT);
1229 p_xran_dev_ctx->sym_up = sym_up = -(delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT));
1230 p_xran_dev_ctx->sym_up_ul = sym_up_ul = (delay_up_ul*1000/(interval_us*1000/N_SYM_PER_SLOT)+1);
1232 printf("Start C-plane DL %d us after TTI  [trigger on sym %d]\n", delay_cp_dl, sym_cp_dl);
1233 printf("Start C-plane UL %d us after TTI  [trigger on sym %d]\n", delay_cp_ul, sym_cp_ul);
1234 printf("Start U-plane DL %d us before OTA [offset  in sym %d]\n", delay_up, sym_up);
1235 printf("Start U-plane UL %d us OTA        [offset  in sym %d]\n", delay_up_ul, sym_up_ul);
1237 printf("C-plane to U-plane delay %d us after TTI\n", delay_cp2up);
1238 printf("Start Sym timer %ld ns\n", TX_TIMER_INTERVAL/N_SYM_PER_SLOT);
/* Register per-symbol callbacks on the sym_cb_list for sector 0. */
1240 cb_elm = xran_create_cb(xran_timer_arm, tx_cp_dl_cb);
1242 LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_cp_dl],
1246 print_err("cb_elm is NULL\n");
1251 cb_elm = xran_create_cb(xran_timer_arm, tx_cp_ul_cb);
1253 LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_cp_ul],
1257 print_err("cb_elm is NULL\n");
1262 /* Full slot UL OTA + delay_up_ul */
1263 cb_elm = xran_create_cb(xran_timer_arm, rx_ul_deadline_full_cb);
1265 LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_up_ul],
1269 print_err("cb_elm is NULL\n");
1274 /* Half slot UL OTA + delay_up_ul*/
1275 cb_elm = xran_create_cb(xran_timer_arm, rx_ul_deadline_half_cb);
1277 LIST_INSERT_HEAD(&p_xran_dev_ctx->sym_cb_list_head[0][sym_up_ul + N_SYM_PER_SLOT/2],
1281 print_err("cb_elm is NULL\n");
1285 } else {    // APP_O_RU
1286 /* calculate when to send UL U-plane */
1287 delay_up = p_xran_dev_ctx->fh_init.Ta3_min;
1288 p_xran_dev_ctx->sym_up = sym_up = delay_up*1000/(interval_us*1000/N_SYM_PER_SLOT)+1;
1289 printf("Start UL U-plane %d us after OTA [offset in sym %d]\n", delay_up, sym_up);
/* O-RU side: same top-of-second alignment as the O-DU branch above. */
1291 timespec_get(&ts, TIME_UTC);
1292 }while (ts.tv_nsec >1500);
1293 struct tm * ptm = gmtime(&ts.tv_sec);
1295 strftime(buff, sizeof buff, "%D %T", ptm);
1296 printf("RU: thread_run start time: %s.%09ld UTC [%ld]\n", buff, ts.tv_nsec, interval_us);
1300 printf("interval_us %ld\n", interval_us);
/* Spin until a fresh second begins (tv_nsec just ticked past 0). */
1302 timespec_get(&ts, TIME_UTC);
1303 }while (ts.tv_nsec == 0);
/* --- main symbol-tick loop (loop head elided in this listing) --- */
1306 /* Update Usage Stats */
1307 tWake = xran_tick();
1308 xran_used_tick += tUsed;
1311 xran_total_tick += get_ticks_diff(tWake, tWakePrev);
1313 tWakePrev = tWake;
1316 delta = poll_next_tick(interval_us*1000L/N_SYM_PER_SLOT, &tUsed);
1317 if (XRAN_STOPPED == xran_if_current_state)
1320 if (likely(XRAN_RUNNING == xran_if_current_state))
1321 sym_ota_cb(&sym_timer, timer_ctx, &tUsed);
/* Teardown: free every registered per-symbol callback element. */
1325 for (i = 0; i< XRAN_MAX_SECTOR_NR; i++){
1326 for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){
1327 struct cb_elem_entry *cb_elm;
1328 LIST_FOREACH(cb_elm, &p_xran_dev_ctx->sym_cb_list_head[i][j], pointers){
1330 LIST_REMOVE(cb_elm, pointers);
1331 xran_destroy_cb(cb_elm);
1337 printf("Closing timing source thread...tx counter %lu, rx counter %lu\n", tx_counter, rx_counter);
\r
1341 /* Handle ecpri format. */
/*
 * handle_ecpri_ethertype() - entry point for a received ECPRI ethertype
 * frame: validates the eCPRI common header, accounts RX bytes, then
 * dispatches U-Plane (IQ data) or C-Plane (RT control) processing.
 * Returns MBUF_FREE/MBUF_KEEP-style ownership disposition via 'ret'.
 */
1342 int handle_ecpri_ethertype(struct rte_mbuf *pkt, uint64_t rx_time)
1344 const struct xran_ecpri_hdr *ecpri_hdr;
1346 int32_t ret = MBUF_FREE;
/* Reject frames shorter than the eCPRI header. */
1348 if (rte_pktmbuf_data_len(pkt) < sizeof(struct xran_ecpri_hdr)) {
1349 print_err("Packet too short - %d bytes", rte_pktmbuf_data_len(pkt));
1353 /* check eCPRI header. */
1354 ecpri_hdr = rte_pktmbuf_mtod(pkt, struct xran_ecpri_hdr *);
1355 if(ecpri_hdr == NULL){
1356 print_err("ecpri_hdr error\n");
1360 rx_bytes_counter += rte_pktmbuf_pkt_len(pkt);
1361 switch(ecpri_hdr->cmnhdr.ecpri_mesg_type) {
1362 case ECPRI_IQ_DATA:
1363 //            t1 = MLogTick();
1364 ret = process_mbuf(pkt);
1365 //            MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());
/* NOTE(review): the 'break' between these cases is elided in this listing --
 * presumably present in the full source; verify there is no fallthrough. */
1367 // For RU emulation
1368 case ECPRI_RT_CONTROL_DATA:
/* C-Plane is only valid at the O-RU; the O-DU logs receipt as an error. */
1370 if(xran_dev_get_ctx()->fh_init.io_cfg.id == O_RU) {
1371 ret = process_cplane(pkt);
1373 print_err("O-DU recevied C-Plane message!");
1375 MLogTask(PID_PROCESS_CP_PKT, t1, MLogTick());
/* default case (label elided): unknown eCPRI message type. */
1378 print_err("Invalid eCPRI message type - %d", ecpri_hdr->cmnhdr.ecpri_mesg_type);
\r
/*
 * xran_process_prach_sym() - store one received PRACH symbol's IQ payload
 * into the PRACH RX buffer for (tti, CC_ID, Ant_ID). For little-endian CPU
 * byte order the IQ samples are byte-swapped and copied (mbuf is freed);
 * for the network-byte-order path (else branch, elided here) the mbuf is
 * retained zero-copy and its data pointer is installed in the buffer list.
 * *mb_free tells the caller whether it may free 'mbuf'.
 *
 * NOTE(review): numbered listing with parameter lines and braces elided.
 */
1384 int xran_process_prach_sym(void *arg,
1385 struct rte_mbuf *mbuf,
1386 void *iq_data_start,
1391 uint8_t subframe_id,
1394 uint16_t num_prbu,
1395 uint16_t start_prbu,
1399 uint32_t *mb_free)
1402 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1403 uint8_t symb_id_offset;
1405 xran_status_t status;
1406 void *pHandle = NULL;
1407 struct rte_mbuf *mb;
1409 uint16_t iq_sample_size_bits = 16;
/* Drop work until the xran-to-PHY buffers are ready. */
1411 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
1414 tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;
/* Status encodes (tti << 16) | symbol for the callback tag. */
1416 status = tti << 16 | symb_id;
1418 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
1419 symb_id_offset = symb_id - p_xran_dev_ctx->prach_start_symbol[CC_ID]; //make the storing of prach packets to start from 0 for easy of processing within PHY
1420 pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData;
1421 if(pos && iq_data_start && size){
1422 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
1424 uint16_t *psrc = (uint16_t *)iq_data_start;
1425 uint16_t *pdst = (uint16_t *)pos;
1426 /* network byte (be) order of IQ to CPU byte order (le) */
1427 for (idx = 0; idx < size/sizeof(int16_t); idx++){
1428 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
/* Copy path: payload duplicated into the BBU buffer, so the mbuf can go. */
1430 *mb_free = MBUF_FREE;
/* Zero-copy path (guard elided): swap the previously-held mbuf out of the
 * buffer list, free it, and install this packet's data/ctrl in its place. */
1432 mb = p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl;
1434 rte_pktmbuf_free(mb);
1436 print_err("mb==NULL\n");
1438 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData = iq_data_start;
1439 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl = mbuf;
1440 *mb_free = MBUF_KEEP;
1443 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
1446 print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
/* Dead code kept for reference: per-symbol PRACH completion tracking. */
1449 /*    if (symb_id == p_xran_dev_ctx->prach_last_symbol[CC_ID] ){
1450 p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id]++;
1451 if(p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] >= xran_get_num_eAxc(pHandle)){
1452 if(p_xran_dev_ctx->pPrachCallback[0])
1453 p_xran_dev_ctx->pPrachCallback[0](p_xran_dev_ctx->pPrachCallbackTag[0], status);
1454 p_xran_dev_ctx->rx_packet_prach_tracker[tti % XRAN_N_FE_BUF_LEN][CC_ID][symb_id] = 0;
\r
/*
 * xran_process_srs_sym() - store one received SRS symbol's IQ payload into
 * the SRS RX buffer for (tti, CC_ID, Ant_ID). LE byte-order path is
 * explicitly unsupported (rte_panic). For network byte order: if the whole
 * UL bandwidth fits the MTU the mbuf is retained zero-copy, otherwise the
 * (possibly fragmented) payload is memcpy'd and the mbuf freed.
 *
 * NOTE(review): numbered listing with parameter lines and braces elided.
 */
1461 int32_t xran_process_srs_sym(void *arg,
1462 struct rte_mbuf *mbuf,
1463 void *iq_data_start,
1468 uint8_t subframe_id,
1471 uint16_t num_prbu,
1472 uint16_t start_prbu,
1476 uint32_t *mb_free)
1479 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1481 xran_status_t status;
1482 void *pHandle = NULL;
1483 struct rte_mbuf *mb = NULL;
1485 uint16_t iq_sample_size_bits = 16;
/* Drop work until the xran-to-PHY buffers are ready. */
1487 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
1490 tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;
1492 status = tti << 16 | symb_id;
/* Ant_ID here indexes antenna elements (nAntElmTRx), not regular eAxC. */
1494 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < p_xran_dev_ctx->fh_cfg.nAntElmTRx && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT) {
1495 pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
/* Advance to this section's PRB offset: N_SC_PER_PRB subcarriers * 2 bytes * I+Q. */
1496 pos += start_prbu * N_SC_PER_PRB*(iq_sample_size_bits/8)*2;
1497 if(pos && iq_data_start && size){
1498 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
1500 uint16_t *psrc = (uint16_t *)iq_data_start;
1501 uint16_t *pdst = (uint16_t *)pos;
/* LE ordering deliberately unsupported for SRS: hard stop. */
1502 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
1503 /* network byte (be) order of IQ to CPU byte order (le) */
1504 for (idx = 0; idx < size/sizeof(int16_t); idx++){
1505 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
1507 } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){
1508 if (likely (p_xran_dev_ctx->fh_init.mtu >=
1509 p_xran_dev_ctx->fh_cfg.nULRBs * N_SC_PER_PRB*(iq_sample_size_bits/8)*2)) {
1510 /* no fragmentation: keep the mbuf, swap it into the buffer list */
1511 mb = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;
1513 rte_pktmbuf_free(mb);
1515 print_err("mb==NULL\n");
1517 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData = iq_data_start;
1518 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;
1519 *mb_free = MBUF_KEEP;
1521 /* packet can be fragmented copy RBs */
1522 rte_memcpy(pos, iq_data_start, size);
1523 *mb_free = MBUF_FREE;
1527 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
1530 print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
\r
/*
 * xran_pkt_validate() - validate a received U-Plane packet's eCPRI sequence
 * id for the local role (O-DU checks UL seq ids, O-RU checks DL seq ids);
 * duplicates/out-of-order increment Rx_pkt_dupl and fail, otherwise the
 * on-time and total counters are bumped.
 *
 * Returns XRAN_STATUS_SUCCESS or XRAN_STATUS_FAIL.
 */
1536 int32_t xran_pkt_validate(void *arg,
1537 struct rte_mbuf *mbuf,
1538 void *iq_data_start,
1543 uint8_t subframe_id,
1546 struct ecpri_seq_id *seq_id,
1547 uint16_t num_prbu,
1548 uint16_t start_prbu,
1553 struct xran_device_ctx * pctx = xran_dev_get_ctx();
1554 struct xran_common_counters *pCnt = &pctx->fh_counters;
1556 if(pctx->fh_init.io_cfg.id == O_DU) {
1557 if(xran_check_upul_seqid(NULL, CC_ID, Ant_ID, slot_id, seq_id->seq_id) != XRAN_STATUS_SUCCESS) {
1558 pCnt->Rx_pkt_dupl++;
1559 return (XRAN_STATUS_FAIL);
1561 }else if(pctx->fh_init.io_cfg.id == O_RU) {
1562 if(xran_check_updl_seqid(NULL, CC_ID, Ant_ID, slot_id, seq_id->seq_id) != XRAN_STATUS_SUCCESS) {
1563 pCnt->Rx_pkt_dupl++;
1564 return (XRAN_STATUS_FAIL);
/* else branch (elided): neither O-DU nor O-RU -- configuration error. */
1567 print_err("incorrect dev type %d\n", pctx->fh_init.io_cfg.id);
1572 pCnt->Rx_on_time++;
1573 pCnt->Total_msgs_rcvd++;
1575 return XRAN_STATUS_SUCCESS;
\r
/*
 * xran_process_rx_sym() - store one received U-Plane symbol's IQ payload
 * into the fronthaul RX buffer for (tti, CC_ID, Ant_ID), using the PRB map's
 * section descriptors. Single-section maps swap the mbuf in zero-copy at the
 * buffer-list level; multi-section maps install it in the per-section
 * descriptor; the fallback path memcpy's and frees. *mb_free reports mbuf
 * ownership back to the caller.
 *
 * NOTE(review): numbered listing with parameter lines and braces elided
 * (sect_id/size/idx/pos/tti declarations are among the missing lines).
 */
1578 int32_t xran_process_rx_sym(void *arg,
1579 struct rte_mbuf *mbuf,
1580 void *iq_data_start,
1585 uint8_t subframe_id,
1588 uint16_t num_prbu,
1589 uint16_t start_prbu,
1593 uint32_t *mb_free)
1596 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
1598 xran_status_t status;
1599 void *pHandle = NULL;
1600 struct rte_mbuf *mb = NULL;
1601 struct xran_prb_map * pRbMap    = NULL;
1602 struct xran_prb_elm * prbMapElm = NULL;
1604 uint16_t iq_sample_size_bits = 16;
1606 tti = frame_id * SLOTS_PER_SYSTEMFRAME + subframe_id * SLOTNUM_PER_SUBFRAME + slot_id;
/* Status encodes (tti << 16) | symbol for the callback tag. */
1608 status = tti << 16 | symb_id;
1610 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
1611 pos = (char*) p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
1612 pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;
/* Validate the section id against the PRB map before using it. */
1614 prbMapElm = &pRbMap->prbMap[sect_id];
1615 if(sect_id >= pRbMap->nPrbElm) {
1616 print_err("sect_id %d !=pRbMap->nPrbElm %d\n", sect_id,pRbMap->nPrbElm);
1617 *mb_free = MBUF_FREE;
/* else branch (elided): no PRB map at all -- drop the packet. */
1621 print_err("pRbMap==NULL\n");
1622 *mb_free = MBUF_FREE;
/* Advance to this section's PRB offset: N_SC_PER_PRB subcarriers * 2 bytes * I+Q. */
1626 pos += start_prbu * N_SC_PER_PRB*(iq_sample_size_bits/8)*2;
1627 if(pos && iq_data_start && size){
1628 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
1630 uint16_t *psrc = (uint16_t *)iq_data_start;
1631 uint16_t *pdst = (uint16_t *)pos;
/* LE ordering deliberately unsupported on this path: hard stop. */
1632 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
1633 /* network byte (be) order of IQ to CPU byte order (le) */
1634 for (idx = 0; idx < size/sizeof(int16_t); idx++){
1635 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
1637 } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){
/* MTU/role gating disabled: condition forced true, original kept for reference. */
1638 if (/*likely (p_xran_dev_ctx->fh_init.mtu >=
1639 p_xran_dev_ctx->fh_cfg.nULRBs * N_SC_PER_PRB*(iq_sample_size_bits/8)*2)
1640 && p_xran_dev_ctx->fh_init.io_cfg.id == O_DU*/ 1) {
1641 if (pRbMap->nPrbElm == 1){
1642 /* no fragmentation: keep the mbuf, swap it into the buffer list */
1643 mb = p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;
1645 rte_pktmbuf_free(mb);
1647 print_err("mb==NULL\n");
1649 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData = iq_data_start;
1650 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;
1651 *mb_free = MBUF_KEEP;
/* Multi-section map: install the mbuf into this section's descriptor. */
1653 prbMapElm = &pRbMap->prbMap[sect_id];
1654 struct xran_section_desc *p_sec_desc = prbMapElm->p_sec_desc[symb_id];
1656 mb = p_sec_desc->pCtrl;
1658 rte_pktmbuf_free(mb);
1660 p_sec_desc->pData = iq_data_start;
1661 p_sec_desc->pCtrl = mbuf;
1662 p_sec_desc->iq_buffer_len = size;
1663 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
1665 print_err("p_sec_desc==NULL\n");
1666 *mb_free = MBUF_FREE;
1669 *mb_free = MBUF_KEEP;
1672 /* packet can be fragmented copy RBs */
1673 rte_memcpy(pos, iq_data_start, size);
1674 *mb_free = MBUF_FREE;
1678 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
1681 print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
\r
1687 /* Send burst of packets on an output interface */
/*
 * xran_send_burst() - drain up to n queued mbufs for 'port' from the
 * device's tx_mbufs staging table onto the U-Plane TX ring, accounting TX
 * bytes. Logs an error if fewer than n were accepted by the ring.
 * (Return type line is elided in this listing; 'ret' accumulates the
 * per-mbuf enqueue results.)
 */
1689 xran_send_burst(struct xran_device_ctx *dev, uint16_t n, uint16_t port)
1691 struct rte_mbuf **m_table;
1692 struct rte_mbuf *m;
1697 m_table = (struct rte_mbuf **)dev->tx_mbufs[port].m_table;
1699 for(i = 0; i < n; i++){
/* Cheap consistency check on each mbuf before handing it to the ring. */
1700 rte_mbuf_sanity_check(m_table[i], 0);
1701 /*rte_pktmbuf_dump(stdout, m_table[i], 256);*/
1703 tx_bytes_counter += rte_pktmbuf_pkt_len(m_table[i]);
1704 ret += dev->send_upmbuf2ring(m_table[i], ETHER_TYPE_ECPRI);
1708 if (unlikely(ret < n)) {
1709 print_err("ret < n\n");
\r
1715 int32_t xran_process_tx_sym_cp_off(uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id,
\r
1718 int32_t retval = 0;
\r
1719 uint64_t t1 = MLogTick();
\r
1721 void *pHandle = NULL;
\r
1723 char *p_sec_iq = NULL;
\r
1724 char *p_sect_iq = NULL;
\r
1727 uint16_t iq_sample_size_bits = 16; // TODO: make dynamic per
\r
1729 struct xran_prb_map *prb_map = NULL;
\r
1730 uint8_t num_ant_elm = 0;
\r
1732 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
1733 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
\r
1734 struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
\r
1735 num_ant_elm = xran_get_num_ant_elm(pHandle);
\r
1736 enum xran_pkt_dir direction;
\r
1738 struct rte_mbuf *eth_oran_hdr = NULL;
\r
1739 char *ext_buff = NULL;
\r
1740 uint16_t ext_buff_len = 0;
\r
1741 struct rte_mbuf *tmp = NULL;
\r
1742 rte_iova_t ext_buff_iova = 0;
\r
1744 struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN];
\r
1746 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
\r
1747 direction = XRAN_DIR_DL; /* O-DU */
\r
1748 prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
\r
1750 direction = XRAN_DIR_UL; /* RU */
\r
1751 prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
\r
1754 if(xran_fs_get_slot_type(cc_id, tti, ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SLOT_TYPE_DL : XRAN_SLOT_TYPE_UL)) == 1
\r
1755 || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_SP) == 1
\r
1756 || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_FDD) == 1){
\r
1758 if(xran_fs_get_symbol_type(cc_id, tti, sym_id) == ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SYMBOL_TYPE_DL : XRAN_SYMBOL_TYPE_UL)
\r
1759 || xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){
\r
1761 if(iq_sample_size_bits != 16)
\r
1762 print_err("Incorrect iqWidth %d\n", iq_sample_size_bits );
\r
1764 pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
\r
1765 mb = (void*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
\r
1766 prb_map = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
\r
1770 int32_t elmIdx = 0;
\r
1771 for (elmIdx = 0; elmIdx < prb_map->nPrbElm; elmIdx++){
\r
1772 uint16_t sec_id = elmIdx;
\r
1773 struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
\r
1774 struct xran_section_desc * p_sec_desc = NULL;
\r
1776 if(prb_map_elm == NULL){
\r
1777 rte_panic("p_sec_desc == NULL\n");
\r
1780 p_sec_desc = prb_map_elm->p_sec_desc[sym_id];
\r
1782 if(p_sec_desc == NULL){
\r
1783 rte_panic("p_sec_desc == NULL\n");
\r
1787 p_sec_iq = ((char*)pos + p_sec_desc->iq_buffer_offset);
\r
1789 /* calculete offset for external buffer */
\r
1790 ext_buff_len = p_sec_desc->iq_buffer_len;
\r
1791 ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
\r
1792 sizeof (struct xran_ecpri_hdr) +
\r
1793 sizeof (struct radio_app_common_hdr) +
\r
1794 sizeof(struct data_section_hdr));
\r
1796 ext_buff_len += RTE_PKTMBUF_HEADROOM +
\r
1797 sizeof (struct xran_ecpri_hdr) +
\r
1798 sizeof (struct radio_app_common_hdr) +
\r
1799 sizeof(struct data_section_hdr) + 18;
\r
1801 if(prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE){
\r
1802 ext_buff -= sizeof (struct data_section_compression_hdr);
\r
1803 ext_buff_len += sizeof (struct data_section_compression_hdr);
\r
1806 eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
\r
1808 if (unlikely (( eth_oran_hdr) == NULL)) {
\r
1809 rte_panic("Failed rte_pktmbuf_alloc\n");
\r
1812 p_share_data->free_cb = extbuf_free_callback;
\r
1813 p_share_data->fcb_opaque = NULL;
\r
1814 rte_mbuf_ext_refcnt_set(p_share_data, 1);
\r
1816 ext_buff_iova = rte_mempool_virt2iova(mb);
\r
1817 if (unlikely (( ext_buff_iova) == 0)) {
\r
1818 rte_panic("Failed rte_mem_virt2iova \n");
\r
1821 if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
\r
1822 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
\r
1825 rte_pktmbuf_attach_extbuf(eth_oran_hdr,
\r
1827 ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
\r
1831 rte_pktmbuf_reset_headroom(eth_oran_hdr);
\r
1833 tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct ether_hdr));
\r
1834 if (unlikely (( tmp) == NULL)) {
\r
1835 rte_panic("Failed rte_pktmbuf_prepend \n");
\r
1837 mb = eth_oran_hdr;
\r
1839 /* first all PRBs */
\r
1840 prepare_symbol_ex(direction, sec_id,
\r
1842 (struct rb_map *)p_sec_iq,
\r
1843 prb_map_elm->compMethod,
\r
1844 prb_map_elm->iqWidth,
\r
1845 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1846 frame_id, subframe_id, slot_id, sym_id,
\r
1847 prb_map_elm->nRBStart, prb_map_elm->nRBSize,
\r
1849 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1850 xran_get_updl_seqid(pHandle, cc_id, ant_id) :
\r
1851 xran_get_upul_seqid(pHandle, cc_id, ant_id),
\r
1854 rte_mbuf_sanity_check((struct rte_mbuf *)mb, 0);
\r
1856 tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)mb);
\r
1857 p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)mb, ETHER_TYPE_ECPRI);
\r
1859 p_sect_iq = pos + p_sec_desc->iq_buffer_offset;
\r
1860 prb_num = prb_map_elm->nRBSize;
\r
1862 if( prb_num > 136 || prb_num == 0) {
\r
1863 /* first 136 PRBs */
\r
1864 rte_panic("first 136 PRBs\n");
\r
1865 send_symbol_ex(direction,
\r
1868 (struct rb_map *)p_sect_iq,
\r
1869 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1870 frame_id, subframe_id, slot_id, sym_id,
\r
1873 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1874 xran_get_updl_seqid(pHandle, cc_id, ant_id) :
\r
1875 xran_get_upul_seqid(pHandle, cc_id, ant_id));
\r
1877 pos += 136 * N_SC_PER_PRB * (iq_sample_size_bits/8)*2;
\r
1878 /* last 137 PRBs */
\r
1879 send_symbol_ex(direction, sec_id,
\r
1881 (struct rb_map *)p_sect_iq,
\r
1882 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1883 frame_id, subframe_id, slot_id, sym_id,
\r
1886 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1887 xran_get_updl_seqid(pHandle, cc_id, ant_id) :
\r
1888 xran_get_upul_seqid(pHandle, cc_id, ant_id));
\r
1891 send_symbol_ex(direction,
\r
1892 sec_id, /* xran_alloc_sectionid(pHandle, direction, cc_id, ant_id, slot_id)*/
\r
1893 /*(struct rte_mbuf *)mb*/ NULL,
\r
1894 (struct rb_map *)p_sect_iq,
\r
1895 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1896 frame_id, subframe_id, slot_id, sym_id,
\r
1897 prb_map_elm->nRBStart, prb_map_elm->nRBSize,
\r
1899 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1900 xran_get_updl_seqid(pHandle, cc_id, ant_id) :
\r
1901 xran_get_upul_seqid(pHandle, cc_id, ant_id));
\r
1909 printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, sym_id);
\r
1912 if(p_xran_dev_ctx->enablePrach
\r
1913 && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)) { /* Only RU needs to send PRACH I/Q */
\r
1914 uint32_t is_prach_slot = xran_is_prach_slot(subframe_id, slot_id);
\r
1915 if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
\r
1916 && (is_prach_slot == 1)
\r
1917 && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])
\r
1918 && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) { //is prach slot
\r
1919 int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;
\r
1920 pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pData;
\r
1921 pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;
\r
1922 mb = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
\r
1924 send_symbol_ex(direction,
\r
1925 xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, slot_id),
\r
1926 (struct rte_mbuf *)mb,
\r
1927 (struct rb_map *)pos,
\r
1928 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1929 frame_id, subframe_id, slot_id, sym_id,
\r
1930 pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
\r
1931 cc_id, prach_port_id,
\r
1932 xran_get_upul_seqid(pHandle, cc_id, prach_port_id));
\r
1934 } /* if((frame_id % pPrachCPConfig->x == pPrachCPConfig->y[0]) .... */
\r
1935 } /* if(p_xran_dev_ctx->enablePrach ..... */
\r
1938 if(p_xran_dev_ctx->enableSrs && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)){
\r
1939 if( p_srs_cfg->symbMask & (1 << sym_id) /* is SRS symbol */
\r
1941 int32_t ant_elm_id = 0;
\r
1943 for (ant_elm_id = 0; ant_elm_id < num_ant_elm; ant_elm_id++){
\r
1944 int32_t ant_elm_eAxC_id = ant_elm_id + p_srs_cfg->eAxC_offset;
\r
1946 pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_elm_id].sBufferList.pBuffers[sym_id].pData;
\r
1947 mb = (void*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_elm_id].sBufferList.pBuffers[sym_id].pCtrl;
\r
1949 if( prb_num > 136 || prb_num == 0) {
\r
1950 uint16_t sec_id = xran_alloc_sectionid(pHandle, direction, cc_id, ant_id, slot_id);
\r
1951 /* first 136 PRBs */
\r
1952 send_symbol_ex(direction,
\r
1955 (struct rb_map *)pos,
\r
1956 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1957 frame_id, subframe_id, slot_id, sym_id,
\r
1959 cc_id, ant_elm_eAxC_id,
\r
1960 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1961 xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :
\r
1962 xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id));
\r
1964 pos += 136 * N_SC_PER_PRB * (iq_sample_size_bits/8)*2;
\r
1965 /* last 137 PRBs */
\r
1966 send_symbol_ex(direction, sec_id,
\r
1968 (struct rb_map *)pos,
\r
1969 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1970 frame_id, subframe_id, slot_id, sym_id,
\r
1972 cc_id, ant_elm_eAxC_id,
\r
1973 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1974 xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :
\r
1975 xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id));
\r
1977 send_symbol_ex(direction,
\r
1978 xran_alloc_sectionid(pHandle, direction, cc_id, ant_elm_eAxC_id, slot_id),
\r
1979 (struct rte_mbuf *)mb,
\r
1980 (struct rb_map *)pos,
\r
1981 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
1982 frame_id, subframe_id, slot_id, sym_id,
\r
1984 cc_id, ant_elm_eAxC_id,
\r
1985 (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
\r
1986 xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :
\r
1987 xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id));
\r
1990 } /* for ant elem */
\r
1991 } /* SRS symbol */
\r
1992 } /* SRS enabled */
\r
1993 } /* RU mode or C-Plane is not used */
\r
/*
 * Transmit U-Plane packets for one symbol using the C-Plane section database
 * (the "C-Plane on" path, used by O-DU with enableCP set).  For every section
 * scheduled on this symbol the IQ payload already residing in the TX buffer is
 * attached to a freshly allocated header mbuf as an EXTERNAL buffer (zero-copy)
 * and sent, fragmenting when the section does not fit the configured MTU.
 *
 * NOTE(review): this chunk is an extraction with dropped lines (the embedded
 * numbering jumps, e.g. 2002, 2044, 2048, 2198); braces, continue/return
 * statements and some declarations (pos, mb, prb_num, len2, i) are not
 * visible here — confirm against the full file before editing.
 */
2000 int32_t xran_process_tx_sym_cp_on(uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
\r
2001 uint32_t slot_id, uint32_t sym_id)
\r
2003 int32_t retval = 0;
\r
2004 uint64_t t1 = MLogTick();
\r
2006 struct rte_mbuf *eth_oran_hdr = NULL;
\r
2007 char *ext_buff = NULL;
\r
2008 uint16_t ext_buff_len = 0;
\r
2009 struct rte_mbuf *tmp = NULL;
\r
2010 rte_iova_t ext_buff_iova = 0;
\r
2011 void *pHandle = NULL;
\r
2013 char *p_sec_iq = NULL;
\r
2016 uint16_t iq_sample_size_bits = 16; // TODO: make dynamic per
\r
2017 uint32_t next = 0;
\r
2018 int32_t num_sections = 0;
\r
2020 struct xran_section_info *sectinfo = NULL;
\r
2021 struct xran_device_ctx *p_xran_dev_ctx = xran_dev_get_ctx();
\r
2023 struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
\r
2024 struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
\r
2025 enum xran_pkt_dir direction;
\r
/* Shared-info slot is reused per TTI; assumes previous extbuf users of this
 * slot are done — TODO confirm lifetime against xran_send_burst. */
2027 struct rte_mbuf_ext_shared_info * p_share_data = &share_data[tti % XRAN_N_FE_BUF_LEN];
\r
/* Direction and PRB count follow the node role: O-DU transmits DL, O-RU UL. */
2029 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
\r
2030 direction = XRAN_DIR_DL; /* O-DU */
\r
2031 prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
\r
2033 direction = XRAN_DIR_UL; /* RU */
\r
2034 prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
\r
2038 num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);
\r
2039 /* iterate C-Plane configuration to generate corresponding U-Plane */
\r
2040 while(next < num_sections) {
\r
2041 sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);
\r
2043 if(sectinfo == NULL)
\r
/* Only section type 1 (regular UL/DL channel data) is generated here. */
2046 if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) { /* only supports type 1 */
\r
2047 print_err("Invalid section type in section DB - %d", sectinfo->type);
\r
2051 /* skip, if not scheduled */
\r
2052 if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)
\r
/* With compression enabled the on-wire sample width comes from the section. */
2055 if(sectinfo->compMeth)
\r
2056 iq_sample_size_bits = sectinfo->iqWidth;
\r
2058 print_dbg(">>> sym %2d [%d] type%d, id %d, startPrbc=%d, numPrbc=%d, numSymbol=%d\n", sym_id, next,
\r
2059 sectinfo->type, sectinfo->id, sectinfo->startPrbc,
\r
2060 sectinfo->numPrbc, sectinfo->numSymbol);
\r
2062 p_xran_dev_ctx->tx_mbufs[0].len = 0;
\r
2063 uint16_t len = p_xran_dev_ctx->tx_mbufs[0].len;
\r
2067 //Added for Klocworks
\r
2068 if (len >= MBUF_TABLE_SIZE)
\r
2069 len = MBUF_TABLE_SIZE - 1;
\r
2071 pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
\r
2072 mb = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
\r
2075 p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
\r
2077 /* calculete offset for external buffer */
\r
/* Back the external-buffer start up by headroom + eCPRI/app/section headers so
 * prepare_symbol_ex() can write headers in front of the IQ data in place. */
2078 ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
\r
2079 ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
\r
2080 sizeof (struct xran_ecpri_hdr) +
\r
2081 sizeof (struct radio_app_common_hdr) +
\r
2082 sizeof(struct data_section_hdr));
\r
/* The trailing "+ 18" pads for the Ethernet header added later —
 * TODO confirm the exact constant against the full file. */
2084 ext_buff_len += RTE_PKTMBUF_HEADROOM +
\r
2085 sizeof (struct xran_ecpri_hdr) +
\r
2086 sizeof (struct radio_app_common_hdr) +
\r
2087 sizeof(struct data_section_hdr) + 18;
\r
2089 if(sectinfo->compMeth != XRAN_COMPMETHOD_NONE){
\r
2090 ext_buff -= sizeof (struct data_section_compression_hdr);
\r
2091 ext_buff_len += sizeof (struct data_section_compression_hdr);
\r
/* Small mbuf carries only headers; IQ stays put and is attached below. */
2094 eth_oran_hdr = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
\r
2096 if (unlikely (( eth_oran_hdr) == NULL)) {
\r
2097 rte_panic("Failed rte_pktmbuf_alloc\n");
\r
2100 p_share_data->free_cb = extbuf_free_callback;
\r
2101 p_share_data->fcb_opaque = NULL;
\r
2102 rte_mbuf_ext_refcnt_set(p_share_data, 1);
\r
2104 ext_buff_iova = rte_mempool_virt2iova(mb);
\r
2105 if (unlikely (( ext_buff_iova) == 0)) {
\r
2106 rte_panic("Failed rte_mem_virt2iova \n");
\r
2109 if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
\r
2110 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
\r
/* Zero-copy attach: IOVA of the IQ region is derived from the pool mbuf's
 * IOVA plus the virtual-address delta between ext_buff and mb. */
2113 rte_pktmbuf_attach_extbuf(eth_oran_hdr,
\r
2115 ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
\r
2119 rte_pktmbuf_reset_headroom(eth_oran_hdr);
\r
2121 tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct ether_hdr));
\r
2122 if (unlikely (( tmp) == NULL)) {
\r
2123 rte_panic("Failed rte_pktmbuf_prepend \n");
\r
2125 mb = eth_oran_hdr;
\r
2127 rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
\r
2129 /* first all PRBs */
\r
2130 prepare_symbol_ex(direction, sectinfo->id,
\r
2132 (struct rb_map *)p_sec_iq,
\r
2133 sectinfo->compMeth,
\r
2134 sectinfo->iqWidth,
\r
2135 p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
\r
2136 frame_id, subframe_id, slot_id, sym_id,
\r
2137 sectinfo->startPrbc, sectinfo->numPrbc,
\r
2139 xran_get_updl_seqid(pHandle, cc_id, ant_id),
\r
2142 /* if we don't need to do any fragmentation */
\r
2143 if (likely (p_xran_dev_ctx->fh_init.mtu >=
\r
2144 sectinfo->numPrbc * (3*iq_sample_size_bits + 1))) {
\r
2145 /* no fragmentation */
\r
2146 p_xran_dev_ctx->tx_mbufs[0].m_table[len] = mb;
\r
2149 /* fragmentation */
\r
2150 uint8_t * seq_num = xran_get_updl_seqid_addr(pHandle, cc_id, ant_id);
\r
2154 rte_panic("pointer to seq number is NULL [CC %d Ant %d]\n", cc_id, ant_id);
\r
2156 len2 = xran_app_fragment_packet(mb,
\r
2157 &p_xran_dev_ctx->tx_mbufs[0].m_table[len],
\r
2158 (uint16_t)(MBUF_TABLE_SIZE - len),
\r
2159 p_xran_dev_ctx->fh_init.mtu,
\r
2160 p_xran_dev_ctx->direct_pool,
\r
2161 p_xran_dev_ctx->indirect_pool,
\r
2165 /* Free input packet */
\r
2166 rte_pktmbuf_free(mb);
\r
2168 /* If we fail to fragment the packet */
\r
2169 if (unlikely (len2 < 0)){
\r
2170 print_err("len2= %d\n", len2);
\r
/* Each fragment needs its own Ethernet header prepended. */
2176 for (i = len; i < len + len2; i ++) {
\r
2177 struct rte_mbuf *m;
\r
2178 m = p_xran_dev_ctx->tx_mbufs[0].m_table[i];
\r
2179 struct ether_hdr *eth_hdr = (struct ether_hdr *)
\r
2180 rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));
\r
2181 if (eth_hdr == NULL) {
\r
2182 rte_panic("No headroom in mbuf.\n");
\r
2189 if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {
\r
2190 rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
\r
2193 /* Transmit packets */
\r
2194 xran_send_burst(p_xran_dev_ctx, (uint16_t)len, 0);
\r
2195 p_xran_dev_ctx->tx_mbufs[0].len = 0;
\r
2197 } /* while(section) */
\r
/*
 * Per-symbol TX entry point (invoked from the timing source).  Derives the
 * frame/subframe/slot/symbol indices from the over-the-air symbol counter,
 * logs an MLog record, then walks every eAxC antenna stream and component
 * carrier, dispatching either the C-Plane-driven path (O-DU with CP enabled)
 * or the static prb_map path (xran_process_tx_sym_cp_off).
 *
 * NOTE(review): extraction gaps here — 'tti'/'ctx_id' declarations, the
 * early-return body for xran2phy_mem_ready, and the else keywords are not
 * visible (embedded numbering jumps).
 */
2202 int32_t xran_process_tx_sym(void *arg)
\r
2204 int32_t retval = 0;
\r
2207 uint32_t mlogVar[10];
\r
2208 uint32_t mlogVarCnt = 0;
\r
2210 unsigned long t1 = MLogTick();
\r
2212 void *pHandle = NULL;
\r
2213 int32_t ant_id = 0;
\r
2214 int32_t cc_id = 0;
\r
2215 uint8_t num_eAxc = 0;
\r
2216 uint8_t num_CCPorts = 0;
\r
2217 uint8_t num_ant_elm = 0;
\r
2218 uint32_t frame_id = 0;
\r
2219 uint32_t subframe_id = 0;
\r
2220 uint32_t slot_id = 0;
\r
2221 uint32_t sym_id = 0;
\r
2222 uint32_t sym_idx = 0;
\r
2225 enum xran_pkt_dir direction;
\r
2226 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
/* Nothing to send until the PHY has handed over its buffers. */
2228 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
\r
2231 /* O-RU: send symb after OTA time with delay (UL) */
\r
2232 /* O-DU: send symb in advance of OTA time (DL) */
\r
/* sym_up offsets the OTA symbol index with wraparound over one second
 * worth of symbols (see XranOffsetSym in the header). */
2233 sym_idx = XranOffsetSym(p_xran_dev_ctx->sym_up, xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME*1000);
\r
2235 tti = XranGetTtiNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
\r
2236 slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
\r
2237 subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
\r
2238 frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
\r
2239 // ORAN frameId, 8 bits, [0, 255]
\r
2240 frame_id = (frame_id & 0xff);
\r
2242 sym_id = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
\r
/* Section DB context rotates per slot within the system frame. */
2243 ctx_id = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME) % XRAN_MAX_SECTIONDB_CTX;
\r
2245 print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
\r
/* MLog trace record: 0xAAAAAAAA marker plus the derived timing indices. */
2248 mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
\r
2249 mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx;
\r
2250 mlogVar[mlogVarCnt++] = sym_idx;
\r
2251 mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);
\r
2252 mlogVar[mlogVarCnt++] = tti;
\r
2253 mlogVar[mlogVarCnt++] = frame_id;
\r
2254 mlogVar[mlogVarCnt++] = subframe_id;
\r
2255 mlogVar[mlogVarCnt++] = slot_id;
\r
2256 mlogVar[mlogVarCnt++] = sym_id;
\r
2257 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
\r
/* Category B O-RU may have a different UL eAxC count than DL. */
2260 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
\r
2261 num_eAxc = xran_get_num_eAxcUl(pHandle);
\r
2263 num_eAxc = xran_get_num_eAxc(pHandle);
\r
2266 num_CCPorts = xran_get_num_cc(pHandle);
\r
2268 for(ant_id = 0; ant_id < num_eAxc; ant_id++) {
\r
2269 for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
\r
2270 if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP){
\r
2271 retval = xran_process_tx_sym_cp_on(ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
\r
/* Last-antenna flag lets the cp_off path emit PRACH/SRS once per symbol. */
2273 retval = xran_process_tx_sym_cp_off(ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, (ant_id == (num_eAxc - 1)));
\r
2275 } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
\r
2276 } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
\r
2278 MLogTask(PID_PROCESS_TX_SYM, t1, MLogTick());
\r
/*
 * Dedicated lcore loop that services the DPDK timer subsystem: calls
 * rte_timer_manage() whenever TIMER_RESOLUTION_CYCLES TSC ticks have elapsed,
 * until the library state becomes XRAN_STOPPED.
 *
 * NOTE(review): the surrounding loop construct (likely while(1)) and the
 * return statement are not visible in this extraction — confirm in full file.
 */
2282 int xran_packet_and_dpdk_timer_thread(void *args)
\r
2284 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
\r
2286 uint64_t prev_tsc = 0;
\r
2287 uint64_t cur_tsc = rte_rdtsc();
\r
2288 uint64_t diff_tsc = cur_tsc - prev_tsc;
\r
2290 struct sched_param sched_param;
\r
2292 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
\r
/* Elevate to real-time priority; policy argument 1 is SCHED_FIFO —
 * TODO confirm; a symbolic constant would be clearer. */
2294 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
\r
2296 if ((res = pthread_setschedparam(pthread_self(), 1, &sched_param)))
\r
2298 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
\r
/* Rate-limit rte_timer_manage() to the configured TSC resolution. */
2303 cur_tsc = rte_rdtsc();
\r
2304 diff_tsc = cur_tsc - prev_tsc;
\r
2305 if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
\r
2306 rte_timer_manage();
\r
2307 prev_tsc = cur_tsc;
\r
2310 if (XRAN_STOPPED == xran_if_current_state)
\r
2314 printf("Closing pkts timer thread...\n");
\r
/*
 * One-time library initialization.  Zeroes and populates the device context
 * from the caller-supplied xran_fh_init, registers the eCPRI ethertype
 * handler, brings up the DPDK I/O layer (source/destination MACs swapped
 * depending on O-DU vs O-RU role), initializes all rte timers and slot-type
 * tables, and computes the GPS alpha/beta PTP time offset.  On success the
 * device context pointer is returned to the caller via pXranLayerHandle.
 *
 * NOTE(review): extraction gaps — loop-variable declarations (i, j), some
 * closing braces and the final return are not visible here.
 */
2319 int32_t xran_init(int argc, char *argv[],
\r
2320 struct xran_fh_init *p_xran_fh_init, char *appName, void ** pXranLayerHandle)
\r
2325 struct xran_io_loop_cfg *p_io_cfg = (struct xran_io_loop_cfg *)&p_xran_fh_init->io_cfg;
\r
2326 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
2328 int32_t lcore_id = 0;
\r
2329 char filename[64];
\r
2330 int64_t offset_sec, offset_nsec;
\r
2332 memset(p_xran_dev_ctx, 0, sizeof(struct xran_device_ctx));
\r
/* Config snapshot is copied by value; caller's struct can be released. */
2335 p_xran_dev_ctx->fh_init = *p_xran_fh_init;
\r
2337 printf(" %s: MTU %d\n", __FUNCTION__, p_xran_dev_ctx->fh_init.mtu);
\r
2339 xran_if_current_state = XRAN_INIT;
\r
2341 memcpy(&(p_xran_dev_ctx->eAxc_id_cfg), &(p_xran_fh_init->eAxCId_conf), sizeof(struct xran_eaxcid_config));
\r
2343 p_xran_dev_ctx->enableCP = p_xran_fh_init->enableCP;
\r
2344 p_xran_dev_ctx->enablePrach = p_xran_fh_init->prachEnable;
\r
2345 p_xran_dev_ctx->enableSrs = p_xran_fh_init->srsEnable;
\r
2346 p_xran_dev_ctx->DynamicSectionEna = p_xran_fh_init->DynamicSectionEna;
\r
2348 /* To make sure to set default functions */
\r
/* NULL here means xran_open() will install the default ring-send functions. */
2349 p_xran_dev_ctx->send_upmbuf2ring = NULL;
\r
2350 p_xran_dev_ctx->send_cpmbuf2ring = NULL;
\r
2352 xran_register_ethertype_handler(ETHER_TYPE_ECPRI, handle_ecpri_ethertype);
\r
/* io_cfg.id == 0 is the O-DU role: local MAC = o_du, remote = o_ru;
 * otherwise (O-RU) the two addresses are swapped. */
2353 if (p_io_cfg->id == 0)
\r
2354 xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
\r
2357 (struct ether_addr *)p_xran_fh_init->p_o_du_addr,
\r
2358 (struct ether_addr *)p_xran_fh_init->p_o_ru_addr,
\r
2359 p_xran_fh_init->cp_vlan_tag,
\r
2360 p_xran_fh_init->up_vlan_tag);
\r
2362 xran_ethdi_init_dpdk_io(p_xran_fh_init->filePrefix,
\r
2365 (struct ether_addr *)p_xran_fh_init->p_o_ru_addr,
\r
2366 (struct ether_addr *)p_xran_fh_init->p_o_du_addr,
\r
2367 p_xran_fh_init->cp_vlan_tag,
\r
2368 p_xran_fh_init->up_vlan_tag);
\r
/* 10 == number of tti_to_phy timers; a named constant would be clearer. */
2370 for(i = 0; i < 10; i++ )
\r
2371 rte_timer_init(&tti_to_phy_timer[i]);
\r
2373 rte_timer_init(&sym_timer);
\r
2374 for (i = 0; i< MAX_NUM_OF_DPDK_TIMERS; i++)
\r
2375 rte_timer_init(&dpdk_timer[i]);
\r
2377 p_xran_dev_ctx->direct_pool = socket_direct_pool;
\r
2378 p_xran_dev_ctx->indirect_pool = socket_indirect_pool;
\r
/* Per-sector / per-symbol callback lists start empty. */
2380 for (i = 0; i< XRAN_MAX_SECTOR_NR; i++){
\r
2381 for (j = 0; j< XRAN_NUM_OF_SYMBOL_PER_SLOT; j++){
\r
2382 LIST_INIT (&p_xran_dev_ctx->sym_cb_list_head[i][j]);
\r
2386 printf("Set debug stop %d, debug stop count %d\n", p_xran_fh_init->debugStop, p_xran_fh_init->debugStopCount);
\r
2387 timing_set_debug_stop(p_xran_fh_init->debugStop, p_xran_fh_init->debugStopCount);
\r
2389 for (uint32_t nCellIdx = 0; nCellIdx < XRAN_MAX_SECTOR_NR; nCellIdx++){
\r
2390 xran_fs_clear_slot_type(nCellIdx);
\r
2393 *pXranLayerHandle = p_xran_dev_ctx;
\r
/* GPS beta has 10 ms resolution: whole seconds plus 10ms-units converted
 * to nanoseconds (1e7 ns per unit), alpha added as a ns-scale correction. */
2395 if(p_xran_fh_init->GPS_Alpha || p_xran_fh_init->GPS_Beta ){
\r
2396 offset_sec = p_xran_fh_init->GPS_Beta / 100; //resolution of beta is 10ms
\r
2397 offset_nsec = (p_xran_fh_init->GPS_Beta - offset_sec * 100) * 1e7 + p_xran_fh_init->GPS_Alpha;
\r
2398 p_xran_dev_ctx->offset_sec = offset_sec;
\r
2399 p_xran_dev_ctx->offset_nsec = offset_nsec;
\r
2401 p_xran_dev_ctx->offset_sec = 0;
\r
2402 p_xran_dev_ctx->offset_nsec = 0;
\r
/*
 * Allocate per-sector (component-carrier) handle structures.  Each handle is
 * 64-byte aligned (_mm_malloc), zeroed, tagged with its CC index and xRAN
 * port, stored in the library-wide pLibInstanceHandles table and returned to
 * the caller through pSectorInstanceHandles.
 * Returns XRAN_STATUS_SUCCESS, or XRAN_STATUS_INVALID_PARAM /
 * XRAN_STATUS_RESOURCE on bad arguments or allocation failure.
 * Ownership: handles allocated here are owned by the library table —
 * NOTE(review): no matching free is visible in this chunk; confirm lifecycle.
 */
2408 int32_t xran_sector_get_instances (void * pDevHandle, uint16_t nNumInstances,
\r
2409 xran_cc_handle_t * pSectorInstanceHandles)
\r
2411 xran_status_t nStatus = XRAN_STATUS_FAIL;
\r
2412 struct xran_device_ctx *pDev = (struct xran_device_ctx *)pDevHandle;
\r
2413 XranSectorHandleInfo *pCcHandle = NULL;
\r
2416 /* Check for the Valid Parameters */
\r
2417 CHECK_NOT_NULL (pSectorInstanceHandles, XRAN_STATUS_INVALID_PARAM);
\r
2419 if (!nNumInstances) {
\r
2420 print_dbg("Instance is not assigned for this function !!! \n");
\r
2421 return XRAN_STATUS_INVALID_PARAM;
\r
2424 for (i = 0; i < nNumInstances; i++) {
\r
2426 /* Allocate Memory for CC handles */
\r
2427 pCcHandle = (XranSectorHandleInfo *) _mm_malloc( /*"xran_cc_handles",*/ sizeof (XranSectorHandleInfo), 64);
\r
/* NOTE(review): earlier handles leak if a later allocation fails here. */
2429 if(pCcHandle == NULL)
\r
2430 return XRAN_STATUS_RESOURCE;
\r
2432 memset (pCcHandle, 0, (sizeof (XranSectorHandleInfo)));
\r
2434 pCcHandle->nIndex = i;
\r
2435 pCcHandle->nXranPort = pDev->xran_port_id;
\r
2437 printf("%s [%d]: CC %d handle %p\n", __FUNCTION__, pDev->xran_port_id, i, pCcHandle);
\r
2438 pLibInstanceHandles[pDev->xran_port_id][i] = pSectorInstanceHandles[i] = pCcHandle;
\r
2440 printf("Handle: %p Instance: %p\n",
\r
2441 &pSectorInstanceHandles[i], pSectorInstanceHandles[i]);
\r
2444 return XRAN_STATUS_SUCCESS;
\r
/*
 * Memory-management init hook.  Intentionally a no-op: all buffer memory is
 * taken from DPDK mbuf pools created later via xran_bm_init(); the size
 * parameters are accepted only for API compatibility.
 */
2447 int32_t xran_mm_init (void * pHandle, uint64_t nMemorySize,
\r
2448 uint32_t nMemorySegmentSize)
\r
2450 /* we use mbuf from dpdk memory */
\r
/*
 * Create one rte_pktmbuf pool for a sector's buffer manager.  The per-element
 * size is the caller's payload size plus Ethernet/eCPRI/radio-app/section
 * headers and 256 bytes of slack; the pool name encodes port/CC/pool index.
 * The new pool's index is returned through pPoolIndex and the sector's pool
 * counter is advanced.  Panics (rte_panic) on oversize elements or pool
 * creation failure rather than returning an error.
 */
2454 int32_t xran_bm_init (void * pHandle, uint32_t * pPoolIndex, uint32_t nNumberOfBuffers, uint32_t nBufferSize)
\r
2456 XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
\r
2457 uint32_t nAllocBufferSize;
\r
2459 char pool_name[RTE_MEMPOOL_NAMESIZE];
\r
2461 snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "ru_%d_cc_%d_idx_%d",
\r
2462 pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex);
\r
/* Reserve room in front of the payload for every protocol header this
 * library prepends, plus 256 bytes of margin. */
2464 nAllocBufferSize = nBufferSize + sizeof(struct ether_hdr) +
\r
2465 sizeof (struct xran_ecpri_hdr) +
\r
2466 sizeof (struct radio_app_common_hdr) +
\r
2467 sizeof(struct data_section_hdr) + 256;
\r
/* rte_pktmbuf data room size is a uint16_t; larger requests cannot work. */
2469 if(nAllocBufferSize >= UINT16_MAX) {
\r
2470 rte_panic("nAllocBufferSize is failed [ handle %p %d %d ] [nPoolIndex %d] nNumberOfBuffers %d nBufferSize %d nAllocBufferSize %d\n",
\r
2471 pXranCc, pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex, nNumberOfBuffers, nBufferSize, nAllocBufferSize);
\r
2475 printf("%s: [ handle %p %d %d ] [nPoolIndex %d] nNumberOfBuffers %d nBufferSize %d\n", pool_name,
\r
2476 pXranCc, pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex, nNumberOfBuffers, nBufferSize);
\r
2478 pXranCc->p_bufferPool[pXranCc->nBufferPoolIndex] = rte_pktmbuf_pool_create(pool_name, nNumberOfBuffers,
\r
2479 MBUF_CACHE, 0, nAllocBufferSize, rte_socket_id());
\r
2481 if(pXranCc->p_bufferPool[pXranCc->nBufferPoolIndex] == NULL){
\r
2482 rte_panic("rte_pktmbuf_pool_create failed [ handle %p %d %d ] [nPoolIndex %d] nNumberOfBuffers %d nBufferSize %d errno %s\n",
\r
2483 pXranCc, pXranCc->nXranPort, pXranCc->nIndex, pXranCc->nBufferPoolIndex, nNumberOfBuffers, nBufferSize, rte_strerror(rte_errno));
\r
/* Bookkeeping: remember element size/count for later allocations. */
2487 pXranCc->bufferPoolElmSz[pXranCc->nBufferPoolIndex] = nBufferSize;
\r
2488 pXranCc->bufferPoolNumElm[pXranCc->nBufferPoolIndex] = nNumberOfBuffers;
\r
2490 printf("CC:[ handle %p ru %d cc_idx %d ] [nPoolIndex %d] mb pool %p \n",
\r
2491 pXranCc, pXranCc->nXranPort, pXranCc->nIndex,
\r
2492 pXranCc->nBufferPoolIndex, pXranCc->p_bufferPool[pXranCc->nBufferPoolIndex]);
\r
/* Hand back the index used, then bump for the next pool.
 * NOTE(review): no bounds check on nBufferPoolIndex is visible here. */
2494 *pPoolIndex = pXranCc->nBufferPoolIndex++;
\r
/*
 * Allocate one buffer from the sector's pool.  Returns to the caller both the
 * controlling mbuf (*ppCtrl) and a data pointer (*ppData) positioned past the
 * Ethernet/eCPRI/radio-app/section headers, i.e. directly at the IQ payload
 * area.  Ownership of the mbuf transfers to the caller (freed via
 * xran_bm_free_buffer).
 * NOTE(review): error-path return statements are not visible in this
 * extraction; the if(0) compression branch is dead code kept as a marker.
 */
2499 int32_t xran_bm_allocate_buffer(void * pHandle, uint32_t nPoolIndex, void **ppData, void **ppCtrl)
\r
2501 XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
\r
2505 struct rte_mbuf * mb = rte_pktmbuf_alloc(pXranCc->p_bufferPool[nPoolIndex]);
\r
/* Size the mbuf data area to the configured element size, then reclaim
 * headroom for the Ethernet header in front of it. */
2508 char * start = rte_pktmbuf_append(mb, pXranCc->bufferPoolElmSz[nPoolIndex]);
\r
2509 char * ethhdr = rte_pktmbuf_prepend(mb, sizeof(struct ether_hdr));
\r
2511 if(start && ethhdr){
\r
2512 char * iq_offset = rte_pktmbuf_mtod(mb, char * );
\r
2513 /* skip headers */
\r
2514 iq_offset = iq_offset + sizeof(struct ether_hdr) +
\r
2515 sizeof (struct xran_ecpri_hdr) +
\r
2516 sizeof (struct radio_app_common_hdr) +
\r
2517 sizeof(struct data_section_hdr);
\r
2519 if (0) /* if compression */
\r
2520 iq_offset += sizeof (struct data_section_compression_hdr);
\r
2522 *ppData = (void *)iq_offset;
\r
2523 *ppCtrl = (void *)mb;
\r
2525 print_err("[nPoolIndex %d] start ethhdr failed \n", nPoolIndex );
\r
2529 print_err("[nPoolIndex %d] mb alloc failed \n", nPoolIndex );
\r
2533 if (*ppData == NULL){
\r
2534 print_err("[nPoolIndex %d] rte_pktmbuf_append for %d failed \n", nPoolIndex, pXranCc->bufferPoolElmSz[nPoolIndex]);
\r
/*
 * Return a buffer allocated by xran_bm_allocate_buffer().  Only pCtrl (the
 * rte_mbuf) is released; pData points inside that mbuf and needs no separate
 * free.  pHandle is cast but otherwise unused in the visible code.
 */
2541 int32_t xran_bm_free_buffer(void * pHandle, void *pData, void *pCtrl)
\r
2543 XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
\r
2546 rte_pktmbuf_free(pCtrl);
\r
/*
 * Register the PHY <-> xRAN buffer lists and the receive callback for one
 * component carrier.  For every TTI slot (XRAN_N_FE_BUF_LEN) and antenna,
 * the four I/O control blocks (TX data, TX PRB map, RX data, RX PRB map) are
 * reset and then overwritten with the caller-supplied buffer lists.  Finally
 * sets xran2phy_mem_ready so the TX path starts processing.
 * Returns XRAN_STATUS_FAIL on a NULL handle or callback.
 * NOTE(review): the pBuffers assignment on each block is immediately
 * overwritten by the whole-struct copy from the caller's list — the reset
 * lines appear to serve only as defaults; confirm intent in the full file.
 */
2551 int32_t xran_5g_fronthault_config (void * pHandle,
\r
2552 struct xran_buffer_list *pSrcBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
\r
2553 struct xran_buffer_list *pSrcCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
\r
2554 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
\r
2555 struct xran_buffer_list *pDstCpBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
\r
2556 xran_transport_callback_fn pCallback,
\r
2557 void *pCallbackTag)
\r
2559 XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
\r
2560 xran_status_t nStatus = XRAN_STATUS_SUCCESS;
\r
2561 int j, i = 0, z, k;
\r
2562 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
2564 print_dbg("%s\n", __FUNCTION__);
\r
2566 if(NULL == pHandle)
\r
2568 printf("Handle is NULL!\n");
\r
2569 return XRAN_STATUS_FAIL;
\r
2572 if (pCallback == NULL)
\r
2574 printf ("no callback\n");
\r
2575 return XRAN_STATUS_FAIL;
\r
/* i is the CC index this handle was created for. */
2578 i = pXranCc->nIndex;
\r
2580 for(j=0; j<XRAN_N_FE_BUF_LEN; j++)
\r
2582 for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
\r
/* TX U-Plane data buffers (DL IQ from PHY). */
2585 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].bValid = 0;
\r
2586 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
\r
2587 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
\r
2588 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
\r
2589 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
\r
2590 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxBuffers[j][i][z][0];
\r
2592 p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[j][i][z].sBufferList = *pSrcBuffer[z][j];
\r
/* TX PRB map (C-Plane section layout for DL). */
2595 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
\r
2596 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
\r
2597 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
\r
2598 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
\r
2599 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
\r
2600 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulTxPrbMapBuffers[j][i][z][0];
\r
2602 p_xran_dev_ctx->sFrontHaulTxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pSrcCpBuffer[z][j];
\r
/* RX U-Plane data buffers (UL IQ toward PHY). */
2606 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].bValid = 0;
\r
2607 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
\r
2608 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
\r
2609 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
\r
2610 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
\r
2611 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxBuffers[j][i][z][0];
\r
2613 p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
\r
/* RX PRB map (C-Plane section layout for UL). */
2616 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].bValid = 0;
\r
2617 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
\r
2618 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
\r
2619 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
\r
2620 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_NUM_OF_SYMBOL_PER_SLOT;
\r
2621 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFrontHaulRxPrbMapBuffers[j][i][z][0];
\r
2623 p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[j][i][z].sBufferList = *pDstCpBuffer[z][j];
\r
2628 p_xran_dev_ctx->pCallback[i] = pCallback;
\r
2629 p_xran_dev_ctx->pCallbackTag[i] = pCallbackTag;
\r
/* Signal the symbol TX path that PHY buffers are now available. */
2631 p_xran_dev_ctx->xran2phy_mem_ready = 1;
\r
/*
 * Register the PHY buffer lists and callback for received PRACH IQ data.
 * Mirrors xran_5g_fronthault_config but for the single PRACH RX control
 * block per TTI slot/antenna.  Returns XRAN_STATUS_FAIL on NULL handle
 * or callback.
 */
2636 int32_t xran_5g_prach_req (void * pHandle,
\r
2637 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANTENNA_NR][XRAN_N_FE_BUF_LEN],
\r
2638 xran_transport_callback_fn pCallback,
\r
2639 void *pCallbackTag)
\r
2641 XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
\r
2642 xran_status_t nStatus = XRAN_STATUS_SUCCESS;
\r
2644 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
2646 if(NULL == pHandle)
\r
2648 printf("Handle is NULL!\n");
\r
2649 return XRAN_STATUS_FAIL;
\r
2651 if (pCallback == NULL)
\r
2653 printf ("no callback\n");
\r
2654 return XRAN_STATUS_FAIL;
\r
2657 i = pXranCc->nIndex;
\r
2659 for(j=0; j<XRAN_N_FE_BUF_LEN; j++)
\r
2661 for(z = 0; z < XRAN_MAX_ANTENNA_NR; z++){
\r
2662 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].bValid = 0;
\r
2663 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
\r
2664 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
\r
2665 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
\r
/* Unlike the fronthaul lists (symbols), PRACH lists are sized per antenna. */
2666 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANTENNA_NR; // ant number.
\r
2667 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHPrachRxBuffers[j][i][z][0];
\r
2668 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
\r
2672 p_xran_dev_ctx->pPrachCallback[i] = pCallback;
\r
2673 p_xran_dev_ctx->pPrachCallbackTag[i] = pCallbackTag;
\r
/*
 * Register the PHY buffer lists and callback for received SRS IQ data.
 * Identical in structure to xran_5g_prach_req but iterates antenna-array
 * elements (XRAN_MAX_ANT_ARRAY_ELM_NR) instead of antenna ports.
 * Returns XRAN_STATUS_FAIL on NULL handle or callback.
 */
2679 int32_t xran_5g_srs_req (void * pHandle,
\r
2680 struct xran_buffer_list *pDstBuffer[XRAN_MAX_ANT_ARRAY_ELM_NR][XRAN_N_FE_BUF_LEN],
\r
2681 xran_transport_callback_fn pCallback,
\r
2682 void *pCallbackTag)
\r
2684 XranSectorHandleInfo* pXranCc = (XranSectorHandleInfo*) pHandle;
\r
2685 xran_status_t nStatus = XRAN_STATUS_SUCCESS;
\r
2687 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
2689 if(NULL == pHandle)
\r
2691 printf("Handle is NULL!\n");
\r
2692 return XRAN_STATUS_FAIL;
\r
2694 if (pCallback == NULL)
\r
2696 printf ("no callback\n");
\r
2697 return XRAN_STATUS_FAIL;
\r
2700 i = pXranCc->nIndex;
\r
2702 for(j=0; j<XRAN_N_FE_BUF_LEN; j++)
\r
2704 for(z = 0; z < XRAN_MAX_ANT_ARRAY_ELM_NR; z++){
\r
2705 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].bValid = 0;
\r
2706 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegGenerated = -1;
\r
2707 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegToBeGen = -1;
\r
2708 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].nSegTransferred = 0;
\r
/* Buffer list sized per antenna-array element for SRS. */
2709 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.nNumBuffers = XRAN_MAX_ANT_ARRAY_ELM_NR; // ant number.
\r
2710 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList.pBuffers = &p_xran_dev_ctx->sFHSrsRxBuffers[j][i][z][0];
\r
2711 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[j][i][z].sBufferList = *pDstBuffer[z][j];
\r
2715 p_xran_dev_ctx->pSrsCallback[i] = pCallback;
\r
2716 p_xran_dev_ctx->pSrsCallbackTag[i] = pCallbackTag;
\r
/*
 * Report timing-core utilization counters: total TSC ticks, ticks spent in
 * xRAN work, and the core id running the timing loop.  When 'clear' is
 * non-zero the tick accumulators are reset after being read (the visible
 * code resets unconditionally only inside the branch whose condition line
 * is not shown — NOTE(review): confirm the clear condition in the full file).
 */
2721 uint32_t xran_get_time_stats(uint64_t *total_time, uint64_t *used_time, uint32_t *core_used, uint32_t clear)
\r
2723 *total_time = xran_total_tick;
\r
2724 *used_time = xran_used_tick;
\r
2725 *core_used = xran_core_used;
\r
2729 xran_total_tick = 0;
\r
2730 xran_used_tick = 0;
\r
/*
 * Allocate cache-line-aligned memory from the DPDK heap.  Caller owns the
 * returned pointer and must release it with the matching rte_free-based
 * free path; returns NULL on failure (per rte_malloc semantics).
 */
2736 void * xran_malloc(size_t buf_len)
\r
2738 return rte_malloc("External buffer", buf_len, RTE_CACHE_LINE_SIZE);
\r
/*
 * Advance a buffer pointer past the mbuf headroom plus all protocol headers
 * (eCPRI, radio-app, section header, and the compression header when a
 * compression method is in use), then round the result up to a 64-byte
 * boundary.  Returns the aligned payload start.
 */
2741 uint8_t *xran_add_hdr_offset(uint8_t *dst, int16_t compMethod)
\r
2743 dst+= (RTE_PKTMBUF_HEADROOM +
\r
2744 sizeof (struct xran_ecpri_hdr) +
\r
2745 sizeof (struct radio_app_common_hdr) +
\r
2746 sizeof(struct data_section_hdr));
\r
2748 if(compMethod != XRAN_COMPMETHOD_NONE)
\r
2749 dst += sizeof (struct data_section_compression_hdr);
\r
/* 64-byte alignment matches the xran_malloc cache-line alignment. */
2751 dst = RTE_PTR_ALIGN_CEIL(dst, 64);
\r
/*
 * Apply the runtime fronthaul configuration and start the timing thread.
 * Copies the caller's xran_fh_config into the device context, validates CC
 * count and IQ/byte order, initializes PRACH/SRS C-Plane state and the
 * section database, programs numerology/TTI interval and slot types, wires
 * up default mbuf-send functions (unless the app overrode them between
 * xran_init and xran_open), and launches xran_timing_source_thread on the
 * configured timing core when the Ethernet ports are open.
 * Returns XRAN_STATUS_FAIL on unsupported IQ/byte order.
 */
2756 int32_t xran_open(void *pHandle, struct xran_fh_config* pConf)
\r
2759 uint8_t nNumerology = 0;
\r
2760 int32_t lcore_id = 0;
\r
2761 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
2762 struct xran_fh_config *pFhCfg;
\r
2763 pFhCfg = &(p_xran_dev_ctx->fh_cfg);
\r
2765 memcpy(pFhCfg, pConf, sizeof(struct xran_fh_config));
\r
2767 if(pConf->log_level)
\r
2768 printf(" %s: O-RU Category %s\n", __FUNCTION__, (pFhCfg->ru_conf.xranCat == XRAN_CATEGORY_A) ? "A" : "B");
\r
2770 nNumerology = xran_get_conf_numerology(pHandle);
\r
/* Clamp the CC count rather than failing — callers are warned via log. */
2772 if (pConf->nCC > XRAN_MAX_SECTOR_NR)
\r
2774 if(pConf->log_level)
\r
2775 printf("Number of cells %d exceeds max number supported %d!\n", pConf->nCC, XRAN_MAX_SECTOR_NR);
\r
2776 pConf->nCC = XRAN_MAX_SECTOR_NR;
\r
/* Only I/Q order with network byte order is implemented. */
2779 if(pConf->ru_conf.iqOrder != XRAN_I_Q_ORDER
\r
2780 || pConf->ru_conf.byteOrder != XRAN_NE_BE_BYTE_ORDER ){
\r
2782 print_err("Byte order and/or IQ order is not supported [IQ %d byte %d]\n", pConf->ru_conf.iqOrder, pConf->ru_conf.byteOrder);
\r
2783 return XRAN_STATUS_FAIL;
\r
2786 /* setup PRACH configuration for C-Plane */
\r
2787 xran_init_prach(pConf, p_xran_dev_ctx);
\r
2788 xran_init_srs(pConf, p_xran_dev_ctx);
\r
2790 xran_cp_init_sectiondb(pHandle);
\r
2791 xran_init_sectionid(pHandle);
\r
2792 xran_init_seqid(pHandle);
\r
2794 if(pConf->ru_conf.xran_max_frame) {
\r
2795 xran_max_frame = pConf->ru_conf.xran_max_frame;
\r
2796 printf("xran_max_frame %d\n", xran_max_frame);
\r
2799 interval_us = xran_fs_get_tti_interval(nNumerology);
\r
2801 if(pConf->log_level){
\r
2802 printf("%s: interval_us=%ld\n", __FUNCTION__, interval_us);
\r
2804 timing_set_numerology(nNumerology);
\r
2806 for(i = 0 ; i <pConf->nCC; i++){
\r
2807 xran_fs_set_slot_type(i, pConf->frame_conf.nFrameDuplexType, pConf->frame_conf.nTddPeriod,
\r
2808 pConf->frame_conf.sSlotConfig);
\r
2811 xran_fs_slot_limit_init(xran_fs_get_tti_interval(nNumerology));
\r
2813 if(xran_ethdi_get_ctx()->io_cfg.bbdev_mode != XRAN_BBDEV_NOT_USED){
\r
2814 p_xran_dev_ctx->bbdev_dec = pConf->bbdev_dec;
\r
2815 p_xran_dev_ctx->bbdev_enc = pConf->bbdev_enc;
\r
2818 /* if send_xpmbuf2ring needs to be changed from default functions,
\r
2819 * then those should be set between xran_init and xran_open */
\r
2820 if(p_xran_dev_ctx->send_cpmbuf2ring == NULL)
\r
2821 p_xran_dev_ctx->send_cpmbuf2ring = xran_ethdi_mbuf_send_cp;
\r
2822 if(p_xran_dev_ctx->send_upmbuf2ring == NULL)
\r
2823 p_xran_dev_ctx->send_upmbuf2ring = xran_ethdi_mbuf_send;
\r
2825 /* Start packet processing thread */
\r
/* 0xFFFF marks an unconfigured VF port; both U-Plane and C-Plane ports
 * must be open before the timing thread is launched. */
2826 if((uint16_t)xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF] != 0xFFFF &&
\r
2827 (uint16_t)xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF] != 0xFFFF ){
\r
2828 if(pConf->log_level){
\r
2829 print_dbg("XRAN_UP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_UP_VF]);
\r
2830 print_dbg("XRAN_CP_VF: 0x%04x\n", xran_ethdi_get_ctx()->io_cfg.port[XRAN_CP_VF]);
\r
2832 if (rte_eal_remote_launch(xran_timing_source_thread, xran_dev_get_ctx(), xran_ethdi_get_ctx()->io_cfg.timing_core))
\r
2833 rte_panic("thread_run() failed to start\n");
\r
2834 } else if(pConf->log_level){
\r
2835 printf("Eth port was not open. Processing thread was not started\n");
\r
2841 int32_t xran_start(void *pHandle)
\r
2843 if(xran_get_if_state() == XRAN_RUNNING) {
\r
2844 print_err("Already STARTED!!");
\r
2848 xran_if_current_state = XRAN_RUNNING;
\r
2852 int32_t xran_stop(void *pHandle)
\r
2854 if(xran_get_if_state() == XRAN_STOPPED) {
\r
2855 print_err("Already STOPPED!!");
\r
2859 xran_if_current_state = XRAN_STOPPED;
\r
2863 int32_t xran_close(void *pHandle)
\r
2865 xran_if_current_state = XRAN_STOPPED;
\r
2866 //TODO: fix memory leak xran_cp_free_sectiondb(pHandle);
\r
2867 //rte_eal_mp_wait_lcore();
\r
2868 //xran_ethdi_ports_stats();
\r
2870 #ifdef RTE_LIBRTE_PDUMP
\r
2871 /* uninitialize packet capture framework */
\r
2872 rte_pdump_uninit();
\r
2877 int32_t xran_mm_destroy (void * pHandle)
\r
2879 if(xran_get_if_state() == XRAN_RUNNING) {
\r
2880 print_err("Please STOP first !!");
\r
2884 /* functionality is not yet implemented */
\r
2888 int32_t xran_reg_sym_cb(void *pHandle, xran_callback_sym_fn symCb, void * symCbParam, uint8_t symb, uint8_t ant)
\r
2890 if(xran_get_if_state() == XRAN_RUNNING) {
\r
2891 print_err("Cannot register callback while running!!\n");
\r
2895 /* functionality is not yet implemented */
\r
2896 print_err("Functionality is not yet implemented !");
\r
2900 int32_t xran_reg_physide_cb(void *pHandle, xran_fh_tti_callback_fn Cb, void *cbParam, int skipTtiNum, enum callback_to_phy_id id)
\r
2902 struct xran_device_ctx * p_xran_dev_ctx = xran_dev_get_ctx();
\r
2904 if(xran_get_if_state() == XRAN_RUNNING) {
\r
2905 print_err("Cannot register callback while running!!\n");
\r
2909 p_xran_dev_ctx->ttiCb[id] = Cb;
\r
2910 p_xran_dev_ctx->TtiCbParam[id] = cbParam;
\r
2911 p_xran_dev_ctx->SkipTti[id] = skipTtiNum;
\r
2916 /* send_cpmbuf2ring and send_upmbuf2ring should be set between xran_init and xran_open
\r
2917 * each cb will be set by default duing open if it is set by NULL */
\r
2918 int xran_register_cb_mbuf2ring(xran_ethdi_mbuf_send_fn mbuf_send_cp, xran_ethdi_mbuf_send_fn mbuf_send_up)
\r
2920 struct xran_device_ctx *p_xran_dev_ctx;
\r
2922 if(xran_get_if_state() == XRAN_RUNNING) {
\r
2923 print_err("Cannot register callback while running!!\n");
\r
2927 p_xran_dev_ctx = xran_dev_get_ctx();
\r
2929 p_xran_dev_ctx->send_cpmbuf2ring = mbuf_send_cp;
\r
2930 p_xran_dev_ctx->send_upmbuf2ring = mbuf_send_up;
\r
2936 int32_t xran_get_slot_idx (uint32_t *nFrameIdx, uint32_t *nSubframeIdx, uint32_t *nSlotIdx, uint64_t *nSecond)
\r
2940 tti = (int32_t)XranGetTtiNum(xran_lib_ota_sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
\r
2941 *nSlotIdx = (uint32_t)XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME);
\r
2942 *nSubframeIdx = (uint32_t)XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME, SUBFRAMES_PER_SYSTEMFRAME);
\r
2943 *nFrameIdx = (uint32_t)XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME);
\r
2944 *nSecond = timing_get_current_second();
\r
2951 * @brief Get the configuration of eAxC ID
\r
2953 * @return the pointer of configuration
\r
2955 inline struct xran_eaxcid_config *xran_get_conf_eAxC(void *pHandle)
\r
2957 return (&(xran_dev_get_ctx()->eAxc_id_cfg));
\r
2961 * @brief Get the configuration of the total number of beamforming weights on RU
\r
2963 * @return Configured the number of beamforming weights
\r
2965 inline uint8_t xran_get_conf_num_bfweights(void *pHandle)
\r
2967 return (xran_dev_get_ctx()->fh_init.totalBfWeights);
\r
2971 * @brief Get the configuration of subcarrier spacing for PRACH
\r
2973 * @return subcarrier spacing value for PRACH
\r
2975 inline uint8_t xran_get_conf_prach_scs(void *pHandle)
\r
2977 return (xran_lib_get_ctx_fhcfg()->prach_conf.nPrachSubcSpacing);
\r
2981 * @brief Get the configuration of FFT size for RU
\r
2983 * @return FFT size value for RU
\r
2985 inline uint8_t xran_get_conf_fftsize(void *pHandle)
\r
2987 return (xran_lib_get_ctx_fhcfg()->ru_conf.fftSize);
\r
2991 * @brief Get the configuration of nummerology
\r
2993 * @return Configured numerology
\r
2995 inline uint8_t xran_get_conf_numerology(void *pHandle)
\r
2997 return (xran_lib_get_ctx_fhcfg()->frame_conf.nNumerology);
\r
3001 * @brief Get the configuration of IQ bit width for RU
\r
3003 * @return IQ bit width for RU
\r
3005 inline uint8_t xran_get_conf_iqwidth(void *pHandle)
\r
3007 struct xran_fh_config *pFhCfg;
\r
3009 pFhCfg = xran_lib_get_ctx_fhcfg();
\r
3010 return ((pFhCfg->ru_conf.iqWidth==16)?0:pFhCfg->ru_conf.iqWidth);
\r
3014 * @brief Get the configuration of compression method for RU
\r
3016 * @return Compression method for RU
\r
3018 inline uint8_t xran_get_conf_compmethod(void *pHandle)
\r
3020 return (xran_lib_get_ctx_fhcfg()->ru_conf.compMeth);
\r
3025 * @brief Get the configuration of the number of component carriers
\r
3027 * @return Configured the number of component carriers
\r
3029 inline uint8_t xran_get_num_cc(void *pHandle)
\r
3031 return (xran_lib_get_ctx_fhcfg()->nCC);
\r
3035 * @brief Get the configuration of the number of antenna for UL
\r
3037 * @return Configured the number of antenna
\r
3039 inline uint8_t xran_get_num_eAxc(void *pHandle)
\r
3041 return (xran_lib_get_ctx_fhcfg()->neAxc);
\r
3045 * @brief Get configuration of O-RU (Cat A or Cat B)
\r
3047 * @return Configured the number of antenna
\r
3049 inline enum xran_category xran_get_ru_category(void *pHandle)
\r
3051 return (xran_lib_get_ctx_fhcfg()->ru_conf.xranCat);
\r
3055 * @brief Get the configuration of the number of antenna
\r
3057 * @return Configured the number of antenna
\r
3059 inline uint8_t xran_get_num_eAxcUl(void *pHandle)
\r
3061 return (xran_lib_get_ctx_fhcfg()->neAxcUl);
\r
3065 * @brief Get the configuration of the number of antenna elements
\r
3067 * @return Configured the number of antenna
\r
3069 inline uint8_t xran_get_num_ant_elm(void *pHandle)
\r
3071 return (xran_lib_get_ctx_fhcfg()->nAntElmTRx);
\r
3074 int32_t xran_get_common_counters(void *pXranLayerHandle, struct xran_common_counters *pStats)
\r
3076 struct xran_device_ctx* pDev = (struct xran_device_ctx*)pXranLayerHandle;
\r
3078 if(pStats && pDev) {
\r
3079 *pStats = pDev->fh_counters;
\r
3080 return XRAN_STATUS_SUCCESS;
\r
3082 return XRAN_STATUS_INVALID_PARAM;
\r