1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN RX module
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
50 #include "xran_fh_o_du.h"
54 #include "xran_up_api.h"
55 #include "xran_cp_api.h"
56 #include "xran_sync_api.h"
57 #include "xran_lib_mlog_tasks_id.h"
58 #include "xran_timer.h"
59 #include "xran_common.h"
61 #include "xran_frame_struct.h"
62 #include "xran_printf.h"
63 #include "xran_app_frag.h"
64 #include "xran_rx_proc.h"
65 #include "xran_cp_proc.h"
67 #include "xran_mlog_lnx.h"
/**
 * @brief Consume one received PRACH U-plane symbol and hand its IQ payload
 *        to the PHY-facing PRACH buffer list (zero-copy: the mbuf itself is
 *        retained, the caller is told via *mb_free whether to free it).
 *
 * NOTE(review): this listing is a sampled extract — each line is prefixed by
 * its original file line number and intermediate lines (full parameter list,
 * declarations of tti/idx/status/mb, closing braces) are not visible. The
 * comments below only describe what the visible lines establish.
 *
 * @param arg   device context (struct xran_device_ctx *)
 * @param mbuf  received packet; ownership transfers to the buffer list on success
 * @return status word (presumably; return statements are outside this extract)
 */
69 int xran_process_prach_sym(void *arg,
70 struct rte_mbuf *mbuf,
87 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
88 uint8_t symb_id_offset;
/* Numerology-dependent slot interval drives frame/subframe-to-TTI math below. */
93 uint32_t interval = p_xran_dev_ctx->interval_us_local;
/* Drop early if PHY shared memory is not yet initialized (body of the if is
 * outside this extract). */
95 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
/* Absolute slot index within the system frame period. */
98 tti = frame_id * SLOTS_PER_SYSTEMFRAME(interval) + subframe_id * SLOTNUM_PER_SUBFRAME(interval) + slot_id;
/* Packed (tti, symbol) status word. NOTE(review): not read anywhere in the
 * visible lines — confirm it is returned/used in the elided code. */
100 status = tti << 16 | symb_id;
/* Bounds-check CC / antenna / symbol before indexing the 3-D buffer arrays. */
102 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
103 symb_id_offset = symb_id - p_xran_dev_ctx->prach_start_symbol[CC_ID]; //make the storing of prach packets to start from 0 for easy of processing within PHY
104 // pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData;
105 if(iq_data_start && size) {
/* Free the mbuf that previously backed this buffer slot before taking
 * ownership of the new one (ring of XRAN_N_FE_BUF_LEN TTIs). */
106 mb = p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl;
108 rte_pktmbuf_free(mb);
/* RU delivering CPU-LE order: swap each 16-bit IQ word in place
 * (psrc == pdst == iq_data_start), i.e. BE<->LE conversion inside the mbuf. */
110 if(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
112 uint16_t *psrc = (uint16_t *)iq_data_start;
113 uint16_t *pdst = (uint16_t *)iq_data_start;
114 for (idx = 0; idx < size/sizeof(int16_t); idx++){
115 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
117 //*mb_free = MBUF_FREE;
/* Zero-copy hand-off: point the PHY "Decomp" buffer slot at the payload
 * inside the received mbuf and keep the mbuf as its backing control block. */
120 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pData = iq_data_start;
121 p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrlDecomp[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id_offset].pCtrl = mbuf;
/* Ownership transferred above — caller must NOT free this mbuf. */
123 *mb_free = MBUF_KEEP;
126 //print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
/* Empty/invalid payload pointer or size. */
127 print_err("iq_data_start %p size %d\n", iq_data_start, size);
/* Out-of-range CC/antenna/symbol — log full timing context. */
130 print_err("TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
/**
 * @brief Consume one received SRS U-plane symbol: either copy its IQ payload
 *        into the per-symbol SRS buffer (fragmented path) or attach the mbuf
 *        payload zero-copy to the PRB-map section descriptor.
 *
 * NOTE(review): sampled extract — embedded numbers are original file line
 * numbers; several lines (parameter list, tti/idx/pos declarations, else/
 * closing braces) are elided. Comments describe only the visible lines.
 *
 * @param arg   device context (struct xran_device_ctx *)
 * @param mbuf  received packet; kept (MBUF_KEEP) when attached zero-copy,
 *              freed by caller (MBUF_FREE) on copy or error paths
 */
136 int32_t xran_process_srs_sym(void *arg,
137 struct rte_mbuf *mbuf,
157 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
159 xran_status_t status;
160 void *pHandle = NULL;
161 struct rte_mbuf *mb = NULL;
162 struct xran_prb_map * pRbMap = NULL;
163 struct xran_prb_elm * prbMapElm = NULL;
/* Default 16-bit samples; overridden below by the packet's actual IQ width. */
164 uint16_t iq_sample_size_bits = 16;
165 uint16_t sec_desc_idx;
166 uint32_t interval = p_xran_dev_ctx->interval_us_local;
169 iq_sample_size_bits = iqWidth;
/* Drop early if PHY shared memory is not ready (if-body elided). */
171 if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
174 tti = frame_id * SLOTS_PER_SYSTEMFRAME(interval) + subframe_id * SLOTNUM_PER_SUBFRAME(interval) + slot_id;
/* NOTE(review): packed status word appears unused in the visible lines. */
176 status = tti << 16 | symb_id;
/* Guard (condition elided): SRS path only supports component carrier 0. */
179 rte_panic("CC_ID != 0");
/* Antenna bound here is the number of TRX antenna elements, not
 * XRAN_MAX_ANTENNA_NR as in the PRACH/PUSCH paths. */
181 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < p_xran_dev_ctx->fh_cfg.nAntElmTRx && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT) {
182 pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
183 pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;
185 prbMapElm = &pRbMap->prbMap[sect_id];
/* Reject section ids beyond the PRB map's element count. */
186 if(sect_id >= pRbMap->nPrbElm) {
187 print_err("sect_id %d !=pRbMap->nPrbElm %d\n", sect_id,pRbMap->nPrbElm);
188 *mb_free = MBUF_FREE;
192 print_err("pRbMap==NULL\n");
193 *mb_free = MBUF_FREE;
/* Advance destination to the first PRB of this section. */
196 pos += start_prbu * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits);
197 if(pos && iq_data_start && size){
198 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
200 uint16_t *psrc = (uint16_t *)iq_data_start;
201 uint16_t *pdst = (uint16_t *)pos;
/* LE RU byte order is rejected for SRS; the swap loop below is dead code.
 * NOTE(review): format string "0x16%lx" is garbled — almost certainly meant
 * "0x%16lx" — and mb is still NULL here (set at decl, not yet assigned),
 * so the printed value is always 0. Worth fixing in the full source. */
202 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
203 /* network byte (be) order of IQ to CPU byte order (le) */
204 for (idx = 0; idx < size/sizeof(int16_t); idx++){
205 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
207 } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){
/* Single-element/no-fragmentation fast path is disabled here (commented
 * out); compare xran_process_rx_sym where the equivalent path is active. */
208 /*if (pRbMap->nPrbElm == 1){
209 if (likely (p_xran_dev_ctx->fh_init.mtu >=
210 p_xran_dev_ctx->fh_cfg.nULRBs * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits)))
/* Release the mbuf previously backing this symbol slot, then take
 * ownership of the new one (zero-copy attach). */
213 mb = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl;
215 rte_pktmbuf_free(mb);
217 print_err("mb==NULL\n");
219 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData = iq_data_start;
220 p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pCtrl = mbuf;
221 *mb_free = MBUF_KEEP;
223 // packet can be fragmented copy RBs
/* Fragmented path: copy payload into the pre-allocated buffer; mbuf can
 * then be returned to the pool. */
224 memcpy(pos, iq_data_start, size);
225 *mb_free = MBUF_FREE;
/* Section-descriptor path: record where this section's IQ lives inside
 * the retained mbuf. */
228 struct xran_section_desc *p_sec_desc = NULL;
229 prbMapElm = &pRbMap->prbMap[sect_id];
/* Fragment reassembly disabled: only descriptor 0 is used (nSecDesc
 * bookkeeping is commented out here and below). */
230 sec_desc_idx = 0;//prbMapElm->nSecDesc[symb_id];
232 if (sec_desc_idx < XRAN_MAX_FRAGMENT) {
233 p_sec_desc = prbMapElm->p_sec_desc[symb_id][sec_desc_idx];
235 print_err("sect_id %d: sec_desc_idx %d tti %u ant %d symb_id %d sec_desc_idx %d\n", sect_id, sec_desc_idx, tti, Ant_ID, symb_id, sec_desc_idx);
236 prbMapElm->nSecDesc[symb_id] = 0;
237 *mb_free = MBUF_FREE;
/* Free the mbuf previously attached to this descriptor, then publish
 * the new payload location, PRB range and length to the PHY. */
242 mb = p_sec_desc->pCtrl;
244 rte_pktmbuf_free(mb);
246 p_sec_desc->pData = iq_data_start;
247 p_sec_desc->pCtrl = mbuf;
248 p_sec_desc->start_prbu = start_prbu;
249 p_sec_desc->num_prbu = num_prbu;
250 p_sec_desc->iq_buffer_len = size;
/* Byte offset of the IQ payload from the start of the mbuf structure. */
251 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
252 //prbMapElm->nSecDesc[symb_id] += 1;
254 print_err("p_sec_desc==NULL tti %u ant %d symb_id %d sec_desc_idx %d\n", tti, Ant_ID, symb_id, sec_desc_idx);
255 *mb_free = MBUF_FREE;
/* Descriptor now owns the mbuf — caller must not free it. */
258 *mb_free = MBUF_KEEP;
262 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
265 print_err("o-xu%d: TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",p_xran_dev_ctx->xran_port_id, tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);
/**
 * @brief Validate a received U-plane packet's eCPRI sequence id for the
 *        device's role and count it.
 *
 * O-DU validates uplink sequence ids (xran_check_upul_seqid); O-RU validates
 * downlink ones (xran_check_updl_seqid). Any mismatch fails the packet.
 * NOTE(review): sampled extract — lines between the visible ones (closing
 * braces, counters incremented on the failure paths) are elided.
 *
 * @param arg     device context (struct xran_device_ctx *)
 * @param mbuf    received packet (not consumed here)
 * @param seq_id  eCPRI sequence-id field from the packet header
 * @return XRAN_STATUS_SUCCESS on valid sequence, XRAN_STATUS_FAIL otherwise
 */
271 int32_t xran_pkt_validate(void *arg,
272 struct rte_mbuf *mbuf,
281 union ecpri_seq_id *seq_id,
288 struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx *)arg;
289 struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
291 if(p_dev_ctx->fh_init.io_cfg.id == O_DU) {
292 if(xran_check_upul_seqid(p_dev_ctx, CC_ID, Ant_ID, slot_id, seq_id->bits.seq_id) != XRAN_STATUS_SUCCESS) {
294 return (XRAN_STATUS_FAIL);
296 } else if(p_dev_ctx->fh_init.io_cfg.id == O_RU) {
297 if(xran_check_updl_seqid(p_dev_ctx, CC_ID, Ant_ID, slot_id, seq_id->bits.seq_id) != XRAN_STATUS_SUCCESS) {
299 return (XRAN_STATUS_FAIL);
/* Neither O-DU nor O-RU: configuration error. */
302 print_err("incorrect dev type %d\n", p_dev_ctx->fh_init.io_cfg.id);
/* Count every message that reaches this point. */
308 pCnt->Total_msgs_rcvd++;
310 return XRAN_STATUS_SUCCESS;
/**
 * @brief Consume one received uplink (front-haul) U-plane symbol: zero-copy
 *        attach when a single unfragmented section covers the slot, otherwise
 *        copy into the PRB-map-described buffer or record the fragment in a
 *        section descriptor.
 *
 * Largely parallel to xran_process_srs_sym but targets the sFrontHaulRx*
 * buffer lists, and here the single-PrbElm no-fragmentation fast path is
 * active (it is commented out in the SRS variant).
 * NOTE(review): sampled extract — embedded numbers are original file line
 * numbers; parameter list, tti/idx/pos declarations and several braces are
 * elided. Comments describe only the visible lines.
 *
 * @param arg   device context (struct xran_device_ctx *)
 * @param mbuf  received packet; MBUF_KEEP when attached zero-copy,
 *              MBUF_FREE on copy or error paths
 */
313 int32_t xran_process_rx_sym(void *arg,
314 struct rte_mbuf *mbuf,
334 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)arg;
336 xran_status_t status;
337 void *pHandle = NULL;
338 struct rte_mbuf *mb = NULL;
339 struct xran_prb_map * pRbMap = NULL;
340 struct xran_prb_elm * prbMapElm = NULL;
/* Default 16-bit samples; overridden by the packet's actual IQ width. */
341 uint16_t iq_sample_size_bits = 16;
342 uint16_t sec_desc_idx;
343 uint32_t interval = p_xran_dev_ctx->interval_us_local;
346 iq_sample_size_bits = iqWidth;
/* Absolute slot index within the system frame period. */
348 tti = frame_id * SLOTS_PER_SYSTEMFRAME(interval) + subframe_id * SLOTNUM_PER_SUBFRAME(interval) + slot_id;
/* NOTE(review): packed status word appears unused in the visible lines. */
350 status = tti << 16 | symb_id;
352 if(CC_ID < XRAN_MAX_SECTOR_NR && Ant_ID < XRAN_MAX_ANTENNA_NR && symb_id < XRAN_NUM_OF_SYMBOL_PER_SLOT){
353 pos = (char*) p_xran_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers[symb_id].pData;
354 pRbMap = (struct xran_prb_map *) p_xran_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID][Ant_ID].sBufferList.pBuffers->pData;
356 prbMapElm = &pRbMap->prbMap[sect_id];
/* Reject section ids beyond the PRB map's element count. */
357 if(sect_id >= pRbMap->nPrbElm) {
358 print_err("sect_id %d !=pRbMap->nPrbElm %d\n", sect_id,pRbMap->nPrbElm);
359 *mb_free = MBUF_FREE;
363 print_err("pRbMap==NULL\n");
364 *mb_free = MBUF_FREE;
/* Advance destination to the first PRB of this section. */
368 pos += start_prbu * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits);
369 if(pos && iq_data_start && size){
370 if (p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_CPU_LE_BYTE_ORDER) {
372 uint16_t *psrc = (uint16_t *)iq_data_start;
373 uint16_t *pdst = (uint16_t *)pos;
/* LE RU byte order is rejected; the swap loop below is dead code.
 * NOTE(review): same defect as the SRS path — format string "0x16%lx" is
 * garbled (likely "0x%16lx") and mb is still NULL here, so it prints 0. */
374 rte_panic("XRAN_CPU_LE_BYTE_ORDER is not supported 0x16%lx\n", (long)mb);
375 /* network byte (be) order of IQ to CPU byte order (le) */
376 for (idx = 0; idx < size/sizeof(int16_t); idx++){
377 pdst[idx] = (psrc[idx]>>8) | (psrc[idx]<<8); //rte_be_to_cpu_16(psrc[idx]);
379 } else if (likely(p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder == XRAN_NE_BE_BYTE_ORDER)){
/* Fast path: exactly one PRB element and the whole section fits in one
 * MTU — attach the mbuf payload to descriptor 0 without copying. */
380 if (pRbMap->nPrbElm == 1){
381 prbMapElm = &pRbMap->prbMap[0];
382 if (likely (p_xran_dev_ctx->fh_init.mtu >=
383 prbMapElm->nRBSize * XRAN_PAYLOAD_1_RB_SZ(iq_sample_size_bits)))
385 /* no fragmentation */
386 struct xran_section_desc *p_sec_desc = NULL;
387 sec_desc_idx = 0;//prbMapElm->nSecDesc[symb_id];
388 p_sec_desc = prbMapElm->p_sec_desc[symb_id][sec_desc_idx];
/* Release the previously attached mbuf, then publish the new payload
 * location, PRB range and length to the PHY. */
391 mb = p_sec_desc->pCtrl;
393 rte_pktmbuf_free(mb);
395 p_sec_desc->pData = iq_data_start;
396 p_sec_desc->pCtrl = mbuf;
397 p_sec_desc->start_prbu = start_prbu;
398 p_sec_desc->num_prbu = num_prbu;
399 p_sec_desc->iq_buffer_len = size;
/* Byte offset of the IQ payload from the start of the mbuf structure. */
400 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
402 print_err("p_sec_desc==NULL tti %u ant %d symb_id %d sec_desc_idx %d\n", tti, Ant_ID, symb_id, sec_desc_idx);
403 *mb_free = MBUF_FREE;
/* Descriptor owns the mbuf now — caller must not free it. */
406 *mb_free = MBUF_KEEP;
408 /* packet can be fragmented copy RBs */
409 memcpy(pos, iq_data_start, size);
410 *mb_free = MBUF_FREE;
/* Multi-section path: record this section's payload in its descriptor. */
413 struct xran_section_desc *p_sec_desc = NULL;
414 prbMapElm = &pRbMap->prbMap[sect_id];
/* Fragment reassembly disabled: only descriptor 0 is used (nSecDesc
 * bookkeeping commented out here and below). */
415 sec_desc_idx = 0;//prbMapElm->nSecDesc[symb_id];
417 if (sec_desc_idx < XRAN_MAX_FRAGMENT) {
418 p_sec_desc = prbMapElm->p_sec_desc[symb_id][sec_desc_idx];
420 print_err("sect_id %d: sec_desc_idx %d tti %u ant %d symb_id %d sec_desc_idx %d\n", sect_id, sec_desc_idx, tti, Ant_ID, symb_id, sec_desc_idx);
421 prbMapElm->nSecDesc[symb_id] = 0;
422 *mb_free = MBUF_FREE;
/* Free the mbuf previously attached to this descriptor, then publish
 * the new payload (same pattern as the fast path above — NOTE(review):
 * duplicated logic, candidate for a shared static helper). */
427 mb = p_sec_desc->pCtrl;
429 rte_pktmbuf_free(mb);
431 p_sec_desc->pData = iq_data_start;
432 p_sec_desc->pCtrl = mbuf;
433 p_sec_desc->start_prbu = start_prbu;
434 p_sec_desc->num_prbu = num_prbu;
435 p_sec_desc->iq_buffer_len = size;
436 p_sec_desc->iq_buffer_offset = RTE_PTR_DIFF(iq_data_start, mbuf);
437 //prbMapElm->nSecDesc[symb_id] += 1;
439 print_err("p_sec_desc==NULL tti %u ant %d symb_id %d sec_desc_idx %d\n", tti, Ant_ID, symb_id, sec_desc_idx);
440 *mb_free = MBUF_FREE;
443 *mb_free = MBUF_KEEP;
447 print_err("pos %p iq_data_start %p size %d\n",pos, iq_data_start, size);
450 print_err("o-xu%d: TTI %d(f_%d sf_%d slot_%d) CC %d Ant_ID %d symb_id %d\n",p_xran_dev_ctx->xran_port_id, tti, frame_id, subframe_id, slot_id, CC_ID, Ant_ID, symb_id);