1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN RX module
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
50 #include "xran_fh_o_du.h"
54 #include "xran_up_api.h"
55 #include "xran_cp_api.h"
56 #include "xran_sync_api.h"
57 #include "xran_lib_mlog_tasks_id.h"
58 #include "xran_timer.h"
59 #include "xran_common.h"
61 #include "xran_frame_struct.h"
62 #include "xran_printf.h"
63 #include "xran_rx_proc.h"
64 #include "xran_cp_proc.h"
66 #include "xran_mlog_lnx.h"
68 int xran_process_prach_sym(void *arg,
69 struct rte_mbuf *mbuf,
/*
 * Process one received PRACH U-plane symbol: locate the per-TTI/CC/antenna
 * PRACH RX buffer and either copy the incoming IQ payload into it or re-point
 * the buffer at the payload (zero-copy), byte-swapping first when the RU
 * delivers IQ in CPU/LE byte order.
 *
 * NOTE(review): this listing is fragmentary — the embedded source line
 * numbers are non-contiguous, so declarations (tti, ttt_det, mb, pdata, idx),
 * several braces and else-branches are not visible here. Comments below
 * describe only the visible statements; confirm control flow against the
 * complete source.
 */
86 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
87 uint8_t symb_id_offset;
90 //xran_status_t status;
92 uint32_t interval = p_xran_dev_ctx->interval_us_local;
/* Bail out until the xran->PHY shared buffers have been published. */
94 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
/* Flatten frame/subframe/slot into a linear TTI used as the buffer ring index. */
97 tti = frame_id * SLOTS_PER_SYSTEMFRAME(interval) + subframe_id * SLOTNUM_PER_SUBFRAME(interval) + slot_id;
99 //status = tti << 16 | symb_id;
102 struct xran_prach_cp_config *pPrachCPConfig;
103 uint32_t StartUsedFirstSym;
/* DSS enabled: technology[] selects NR (1) or LTE PRACH config per DSS-period slot. */
104 if(p_xran_dev_ctx->dssEnable){
105 int i = tti % p_xran_dev_ctx->dssPeriod;
106 if(p_xran_dev_ctx->technology[i]==1) {
107 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
110 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
/* DSS disabled: always the NR PRACH config. */
114 pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
/* Long-format PRACH (filter index 0/1/2) at MTU 1500: one PRACH occasion
 * spans multiple packets, so the first used symbol needs special handling. */
118 if (1500 == p_xran_dev_ctx->fh_init.mtu && pPrachCPConfig->filterIdx == XRAN_FILTERINDEX_PRACH_012)
120 /* one PRACH spread over more than one packet */
121 StartUsedFirstSym = 1;
124 StartUsedFirstSym = 0;
/* Validate CC/antenna/symbol indices before touching the buffer arrays. */
128 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
129 uint8_t numerology = xran_get_conf_numerology(p_xran_dev_ctx);
/* ttt_det shifts the TTI ring index for long-format PRACH at numerology > 0
 * (presumably to land the occasion in the correct slot — TODO confirm). */
130 if (numerology > 0 && pPrachCPConfig->filterIdx == XRAN_FILTERINDEX_PRACH_012) ttt_det = (1<<numerology) - 1;
/* ---- Copy path: multi-packet PRACH, payload is copied at an offset. ---- */
133 if (1 == StartUsedFirstSym)
135 uint8_t compMeth = p_xran_dev_ctx->fh_cfg.ru_conf.compMeth;
136 uint8_t iqWidth = p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth;
137 uint32_t iqLenPrePrb,dataOffset,dataLen;
140 if (XRAN_COMPMETHOD_NONE == compMeth)
/* Compressed PRB: 12 IQ pairs * 2 * iqWidth/8 = 3*iqWidth bytes, +1 udCompParam byte. */
146 iqLenPrePrb = 3*iqWidth+1;
148 dataOffset = start_prbu*iqLenPrePrb;
149 dataLen = num_prbu*iqLenPrePrb;
151 if(iq_data_start && size) {
152 pdata = p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[(tti + ttt_det)% XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData + dataOffset;
153 mb = p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[(tti + ttt_det)% XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl;
/* Release the mbuf previously parked on this buffer slot. */
155 rte_pktmbuf_free(mb);
/* LE RU: byteswap 16-bit IQ words from network order while copying. */
157 if(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
159 uint16_t *psrc = (uint16_t *)iq_data_start;
160 uint16_t *pdst = (uint16_t *)pdata;
/* NOTE(review): loop bound is dataLen (bytes) over 16-bit words — looks like
 * it iterates twice the word count; confirm against the complete source. */
161 for (idx = 0; idx < dataLen; idx++){
162 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
164 //*mb_free = MBUF_FREE;
/* BE RU: straight copy, no byteswap needed. */
167 memcpy(pdata,iq_data_start,dataLen);
/* Park the current mbuf on the slot; caller must not free it. */
170 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[(tti + ttt_det) % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl = mbuf;
171 *mb_free = MBUF_KEEP;
174 //print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
175 print_err("iq_data_start %p size %d\n", iq_data_start, size);
/* ---- Zero-copy path: single-packet PRACH symbol. ---- */
181 symb_id_offset = symb_id - p_xran_dev_ctx->prach_start_symbol[CC_ID]; //make the storing of prach packets to start from 0 for easy of processing within PHY
182 // pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData;
183 if(iq_data_start && size) {
184 mb = p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[(tti + ttt_det) % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl;
186 rte_pktmbuf_free(mb);
188 if(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
/* In-place byteswap: pdst intentionally aliases psrc, since the payload
 * buffer itself is handed to the PHY below (zero-copy). */
190 uint16_t *psrc = (uint16_t *)iq_data_start;
191 uint16_t *pdst = (uint16_t *)iq_data_start;
192 for (idx = 0; idx < size/sizeof(int16_t); idx++){
193 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
195 //*mb_free = MBUF_FREE;
/* Hand the payload (and its owning mbuf) directly to the PHY buffer slot. */
198 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[(tti + ttt_det) % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData = iq_data_start;
199 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[(tti + ttt_det) % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl = mbuf;
200 *mb_free = MBUF_KEEP;
203 //print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
204 print_err("iq_data_start %p size %d\n", iq_data_start, size);
/* Index validation failed: log the offending TTI/CC/antenna/symbol. */
210 print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
216 int32_t xran_process_srs_sym(void *arg,
217 struct rte_mbuf *mbuf,
/*
 * Process one received SRS U-plane symbol. Two major paths:
 *   - SRS C-plane disabled (enableSrsCp == 0): record the payload in the
 *     section descriptor of prbMap[0] for the PHY to pick up.
 *   - SRS C-plane enabled: look up the PRB element by section id and either
 *     hand the payload over zero-copy (packet fits the whole symbol) or
 *     memcpy fragmented RBs into the symbol buffer.
 * CPU/LE RU byte order is not supported on this path (rte_panic).
 *
 * NOTE(review): fragmentary listing — embedded source line numbers are
 * non-contiguous; declarations (tti, pos, idx), braces and else-branches are
 * missing from view. Comments describe only the visible statements.
 */
237 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
239 struct rte_mbuf *mb = NULL;
240 struct xran_prb_map * pRbMap = NULL;
241 struct xran_prb_elm * prbMapElm = NULL;
242 uint16_t iq_sample_size_bits = 16;
243 uint16_t sec_desc_idx;
244 uint32_t interval = p_xran_dev_ctx->interval_us_local;
/* Use the RU-configured IQ width (visible assignment; the guarding condition
 * for compressed IQ is not visible here). */
247 iq_sample_size_bits = iqWidth;
/* Bail out until the xran->PHY shared buffers have been published. */
249 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
252 tti = frame_id * SLOTS_PER_SYSTEMFRAME(interval) + subframe_id * SLOTNUM_PER_SUBFRAME(interval) + slot_id;
255 rte_panic("CC_ID != 0");
/* Validate CC/antenna-element/symbol indices before touching buffers. */
258 if(CC_ID < XRAN_MAX_SECTOR_NR
259 && Ant_ID < p_xran_dev_ctx->fh_cfg.nAntElmTRx
260 && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT)
/* ---- Path A: SRS without C-plane; use prbMap[0] section descriptors. ---- */
262 if (0 == p_xran_dev_ctx->enableSrsCp)
265 struct xran_section_desc *p_sec_desc = NULL;
266 pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
267 pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;
270 if(pRbMap && pRbMap->nPrbElm > 0)
272 prbMapElm = &pRbMap->prbMap[0];
/* Drop packets for symbols outside the configured SRS symbol window. */
273 if (symb_id < prbMapElm->nStartSymb || symb_id >= (prbMapElm->nStartSymb + prbMapElm->numSymb))
275 print_err("%dnot srs symbole, srs sym start is %d,num is %d\n", symb_id,prbMapElm->nStartSymb,prbMapElm->numSymb);
276 *mb_free = MBUF_FREE;
/* nSecDesc[0] is a running descriptor counter; sec_desc[0][0] is the base
 * of a flat descriptor array indexed by it (see += sec_desc_idx below). */
279 sec_desc_idx = prbMapElm->nSecDesc[0];
280 p_sec_desc = &(prbMapElm->sec_desc[0][0]);
281 if(sec_desc_idx >= XRAN_NUM_OF_SYMBOL_PER_SLOT*XRAN_MAX_FRAGMENT)
283 print_err("sec_desc_idx %d is more then %d\n", sec_desc_idx,XRAN_NUM_OF_SYMBOL_PER_SLOT*XRAN_MAX_FRAGMENT);
284 *mb_free = MBUF_FREE;
288 pos += start_prbu * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits);
289 if(pos && iq_data_start && size)
291 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER)
/* NOTE(review): format string "0x16%lx" looks like a typo for "0x%16lx". */
293 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
295 else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER))
297 p_sec_desc += sec_desc_idx;
/* Free any mbuf previously attached to this descriptor slot. */
300 mb = p_sec_desc->pCtrl;
303 rte_pktmbuf_free(mb);
/* Zero-copy hand-off: descriptor now owns the incoming mbuf/payload. */
305 p_sec_desc->pData = iq_data_start;
306 p_sec_desc->pCtrl = mbuf;
307 p_sec_desc->start_prbu = start_prbu;
308 p_sec_desc->num_prbu = num_prbu;
309 p_sec_desc->iq_buffer_len = size;
310 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
311 prbMapElm->nSecDesc[0] += 1;
315 print_err("p_sec_desc==NULL tti %u ant %d symb_id %d sec_desc_idx %d\n", tti, Ant_ID, symb_id, sec_desc_idx);
316 *mb_free = MBUF_FREE;
319 *mb_free = MBUF_KEEP;
320 } /* else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)) */
321 } /* if(pos && iq_data_start && size) */
324 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
330 print_err("pRbMap==NULL\n");
331 *mb_free = MBUF_FREE;
/* ---- Path B: SRS with C-plane; PRB element selected by section id. ---- */
338 pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
339 pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;
/* NOTE(review): prbMap[sect_id] is indexed before the bounds check below —
 * confirm ordering against the complete source. */
342 prbMapElm = &pRbMap->prbMap[sect_id];
343 if(sect_id >= pRbMap->nPrbElm)
345 print_err("sect_id %d !=pRbMap->nPrbElm %d\n", sect_id,pRbMap->nPrbElm);
346 *mb_free = MBUF_FREE;
352 print_err("pRbMap==NULL\n");
353 *mb_free = MBUF_FREE;
357 pos += start_prbu * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits);
358 if(pos && iq_data_start && size)
/* LE RU order: dead-ends in rte_panic; the byteswap loop below it is
 * unreachable in the visible listing. */
360 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER)
363 uint16_t *psrc = (uint16_t *)iq_data_start;
364 uint16_t *pdst = (uint16_t *)pos;
365 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
366 /* network byte (be) order of IQ to CPU byte order (le) */
367 for (idx = 0; idx < size/sizeof(int16_t); idx++)
369 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
372 else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER))
374 /*if (pRbMap->nPrbElm == 1)
/* Whole symbol fits one MTU: zero-copy hand-off of the mbuf payload. */
376 if (likely (p_xran_dev_ctx->fh_init.mtu >=
377 p_xran_dev_ctx->fh_cfg.nULRBs * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits)))
380 mb = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;
382 rte_pktmbuf_free(mb);
384 print_err("mb==NULL\n");
386 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData = iq_data_start;
387 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;
388 *mb_free = MBUF_KEEP;
392 // packet can be fragmented copy RBs
393 memcpy(pos, iq_data_start, size);
394 *mb_free = MBUF_FREE;
/* Per-symbol section-descriptor bookkeeping (multi-element PRB map case). */
399 struct xran_section_desc *p_sec_desc = NULL;
400 prbMapElm = &pRbMap->prbMap[sect_id];
401 // sec_desc_idx = 0;//prbMapElm->nSecDesc[symb_id];
402 sec_desc_idx = prbMapElm->nSecDesc[symb_id];
404 if (sec_desc_idx < XRAN_MAX_FRAGMENT)
406 p_sec_desc = &prbMapElm->sec_desc[symb_id][sec_desc_idx];
/* Too many fragments for this symbol: reset the counter and drop. */
410 print_err("[p %d]sect_id %d: sec_desc_idx %d tti %u ant %d symb_id %d sec_desc_idx %d\n", p_xran_dev_ctx->xran_port_id, sect_id, sec_desc_idx, tti, Ant_ID, symb_id, sec_desc_idx);
411 prbMapElm->nSecDesc[symb_id] = 0;
412 *mb_free = MBUF_FREE;
418 mb = p_sec_desc->pCtrl;
421 rte_pktmbuf_free(mb);
/* Zero-copy hand-off into the fragment descriptor. */
423 p_sec_desc->pData = iq_data_start;
424 p_sec_desc->pCtrl = mbuf;
425 p_sec_desc->start_prbu = start_prbu;
426 p_sec_desc->num_prbu = num_prbu;
427 p_sec_desc->iq_buffer_len = size;
428 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
429 prbMapElm->nSecDesc[symb_id] += 1;
433 print_err("p_sec_desc==NULL tti %u ant %d symb_id %d sec_desc_idx %d\n", tti, Ant_ID, symb_id, sec_desc_idx);
434 *mb_free = MBUF_FREE;
437 *mb_free = MBUF_KEEP;
439 } /* else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)) */
440 } /* if(pos && iq_data_start && size) */
443 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
446 } /* if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < p_xran_dev_ctx->fh_cfg.nAntElmTRx && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT) */
449 print_err("o-xu%d: TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",p_xran_dev_ctx->xran_port_id, tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
455 int32_t xran_pkt_validate(void *arg,
456 struct rte_mbuf *mbuf,
465 union ecpri_seq_id *seq_id,
/*
 * Validate a received eCPRI packet's sequence id against the per-CC/antenna
 * expected value, direction-dependent: an O-DU checks UL (RU->DU) sequence
 * numbers, an O-RU checks DL (DU->RU). On success the total received-message
 * counter is incremented.
 *
 * Returns XRAN_STATUS_SUCCESS or XRAN_STATUS_FAIL (the return path for the
 * unknown-device-type branch is not visible in this fragmentary listing).
 */
472 struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *)arg;
473 struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
/* O-DU side: verify uplink U-plane sequence id. */
475 if(p_dev_ctx->fh_init.io_cfg.id == O_DU) {
476 if(xran_check_upul_seqid(p_dev_ctx, CC_ID, Ant_ID, slot_id, seq_id->bits.seq_id) != XRAN_STATUS_SUCCESS) {
478 return (XRAN_STATUS_FAIL);
/* O-RU side: verify downlink U-plane sequence id. */
480 } else if(p_dev_ctx->fh_init.io_cfg.id == O_RU) {
481 if(xran_check_updl_seqid(p_dev_ctx, CC_ID, Ant_ID, slot_id, seq_id->bits.seq_id) != XRAN_STATUS_SUCCESS) {
483 return (XRAN_STATUS_FAIL);
486 print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
/* Sequence check passed: account the packet. */
492 pCnt->Total_msgs_rcvd++;
494 return XRAN_STATUS_SUCCESS;
497 int32_t xran_process_rx_sym(void *arg,
498 struct rte_mbuf *mbuf,
/*
 * Process one received PUSCH/U-plane symbol: map the section id to a PRB-map
 * element (accounting for BFW extType==1 sections that carry multiple
 * beamforming-weight sets), then record the IQ payload in the element's
 * per-symbol section descriptor (zero-copy; the mbuf is kept).
 * CPU/LE RU byte order dead-ends in rte_panic on this path.
 *
 * NOTE(review): fragmentary listing — embedded source line numbers are
 * non-contiguous and the function appears to continue beyond the visible
 * end. Declarations (tti, pos, idx), braces and else-branches are missing
 * from view; comments describe only the visible statements.
 */
518 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
520 //xran_status_t status;
521 struct rte_mbuf *mb = NULL;
522 struct xran_prb_map * pRbMap = NULL;
523 struct xran_prb_elm * prbMapElm = NULL;
524 uint16_t iq_sample_size_bits = 16;
525 uint16_t sec_desc_idx, prb_elem_id=0;
526 uint32_t interval = p_xran_dev_ctx->interval_us_local;
527 uint16_t i=0, total_sections=0;
/* Use the RU-configured IQ width (guarding condition not visible here). */
530 iq_sample_size_bits = iqWidth;
/* Flatten frame/subframe/slot into a linear TTI used as the buffer ring index. */
532 tti = frame_id * SLOTS_PER_SYSTEMFRAME(interval) + subframe_id * SLOTNUM_PER_SUBFRAME(interval) + slot_id;
534 //status = tti << 16 | symb_id;
536 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
537 pos = (char*) p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
538 pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;
540 /** Get the prb_elem_id */
/* extType 1: each PRB element may emit several BFW sections; walk the map
 * accumulating section counts until sect_id falls inside an element. */
542 if(pRbMap->prbMap[0].bf_weight.extType == 1)
544 for(i=0 ; i < pRbMap->nPrbElm ; i++)
546 total_sections += pRbMap->prbMap[i].bf_weight.numSetBFWs;
547 if(total_sections >= (sect_id + 1))
/* Non-extType-1 case: section id maps 1:1 to the PRB element. */
556 prb_elem_id = sect_id;
/* NOTE(review): prbMap[prb_elem_id] is indexed before the bounds check
 * below — confirm ordering against the complete source. */
559 prbMapElm = &pRbMap->prbMap[prb_elem_id];
560 if(prb_elem_id >= pRbMap->nPrbElm) {
561 print_err("sect id %d prb_elem_id %d !=pRbMap->nPrbElm %d\n",sect_id, prb_elem_id,pRbMap->nPrbElm);
562 *mb_free = MBUF_FREE;
566 print_err("pRbMap==NULL\n");
567 *mb_free = MBUF_FREE;
571 pos += start_prbu * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits);
572 if(pos && iq_data_start && size){
/* LE RU order: dead-ends in rte_panic; the byteswap loop below it is
 * unreachable in the visible listing. (Format "0x16%lx" looks like a typo
 * for "0x%16lx".) */
573 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
575 uint16_t *psrc = (uint16_t *)iq_data_start;
576 uint16_t *pdst = (uint16_t *)pos;
577 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
578 /* network byte (be) order of IQ to CPU byte order (le) */
579 for (idx = 0; idx < size/sizeof(int16_t); idx++){
580 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
582 } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){
583 struct xran_section_desc *p_sec_desc = NULL;
584 prbMapElm = &pRbMap->prbMap[prb_elem_id];
585 sec_desc_idx = prbMapElm->nSecDesc[symb_id];
587 if (sec_desc_idx < XRAN_MAX_FRAGMENT) {
588 p_sec_desc = &prbMapElm->sec_desc[symb_id][sec_desc_idx];
/* Too many fragments for this symbol: reset the counter and drop. */
590 print_err("[p: %d] sect_id %d: sec_desc_idx %d tti %u ant %d symb_id %d sec_desc_idx %d\n",p_xran_dev_ctx->xran_port_id,
591 sect_id, sec_desc_idx, tti, Ant_ID, symb_id, sec_desc_idx);
592 prbMapElm->nSecDesc[symb_id] = 0;
593 *mb_free = MBUF_FREE;
/* Free any mbuf previously attached to this descriptor slot. */
598 mb = p_sec_desc->pCtrl;
600 rte_pktmbuf_free(mb);
/* Zero-copy hand-off: descriptor now owns the incoming mbuf/payload. */
602 p_sec_desc->pData = iq_data_start;
603 p_sec_desc->pCtrl = mbuf;
604 p_sec_desc->start_prbu = start_prbu;
605 p_sec_desc->num_prbu = num_prbu;
606 p_sec_desc->iq_buffer_len = size;
607 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
608 prbMapElm->nSecDesc[symb_id] += 1;
610 print_err("p_sec_desc==NULL tti %u ant %d symb_id %d sec_desc_idx %d\n", tti, Ant_ID, symb_id, sec_desc_idx);
611 *mb_free = MBUF_FREE;
614 *mb_free = MBUF_KEEP;
618 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
621 print_err("o-xu%d: TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",p_xran_dev_ctx->xran_port_id, tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);