1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN layer common functionality for both O-DU and O-RU as well as C-plane and
23 * @ingroup group_source_xran
24 * @author Intel Corporation
30 #include <arpa/inet.h>
34 #include <immintrin.h>
37 #include "xran_common.h"
40 #include "xran_pkt_up.h"
41 #include "xran_cp_api.h"
42 #include "xran_up_api.h"
43 #include "xran_cp_proc.h"
45 #include "xran_lib_mlog_tasks_id.h"
47 #include "xran_printf.h"
48 #include "xran_mlog_lnx.h"
/* Sleep interval used by worker/ring threads when io_cfg.io_sleep is set
 * (1E3 ns = 1 us; only tv_nsec is initialized, tv_sec is zero). */
50 static struct timespec sleeptime = {.tv_nsec = 1E3 }; /* 1 us */
/* Forward declarations of the U-plane symbol handlers (PUSCH/PRACH/SRS) and
 * the packet validator implemented in other translation units. Parameter
 * lists are truncated in this listing. */
54 extern int32_t xran_process_rx_sym(void *arg,
55 struct rte_mbuf *mbuf,
75 extern int xran_process_prach_sym(void *arg,
76 struct rte_mbuf *mbuf,
92 extern int32_t xran_process_srs_sym(void *arg,
93 struct rte_mbuf *mbuf,
112 extern int32_t xran_pkt_validate(void *arg,
113 struct rte_mbuf *mbuf,
122 union ecpri_seq_id *seq_id,
/**
 * Process a batch of MBUFS_CNT received U-plane mbufs in one pass.
 *
 * For each packet: parse the eCPRI / radio-application / section headers
 * (and the compression header when dynamic compression is expected),
 * extract frame/subframe/slot/symbol coordinates, classify the packet as
 * SRS, PRACH or PUSCH by its eAxC antenna-id range, update RX counters and
 * per-(port, CC, antenna) sequence tracking, and finally either hand the
 * payload to the PRACH/SRS symbol handlers or attach the PUSCH IQ payload
 * to the per-TTI RX buffer descriptors.
 *
 * @param pkt_q    batch of received mbufs (code indexes 0..MBUFS_CNT-1;
 *                 the `num` parameter is not used in the visible code —
 *                 TODO confirm against the full source)
 * @param handle   struct xran_device_ctx* of the receiving xRAN port
 * @param p_cid    pre-decoded eAxC info; ccId==0xFF && ruPortId==0xFF means
 *                 "decode CC/antenna from each packet's ecpri_xtc_id"
 * @param ret_data per-packet ownership verdict out-array: MBUF_FREE or
 *                 MBUF_KEEP (MBUF_KEEP when the mbuf is retained as an IQ
 *                 buffer and must not be freed by the caller)
 * @return status (return statements are not visible in this truncated
 *         listing)
 */
129 int process_mbuf_batch(struct rte_mbuf* pkt_q[], void* handle, int16_t num, struct xran_eaxc_info *p_cid, uint32_t* ret_data)
131 struct rte_mbuf* pkt;
132 struct xran_device_ctx* p_dev_ctx = (struct xran_device_ctx*)handle;
133 void* iq_samp_buf[MBUFS_CNT];
134 union ecpri_seq_id seq[MBUFS_CNT];
/* Running byte count of a (possibly fragmented) symbol, keyed by
 * (port, CC, antenna). `static` makes it persist across calls but also
 * makes this function non-reentrant across threads — NOTE(review): confirm
 * single-threaded access per port. */
135 static int symbol_total_bytes[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR][XRAN_MAX_ANTENNA_NR] = { 0 };
136 int num_bytes[MBUFS_CNT] = { 0 }, num_bytes_pusch[MBUFS_CNT] = { 0 };
139 struct xran_common_counters* pCnt = &p_dev_ctx->fh_counters;
/* Per-packet parsed fields, one slot per mbuf in the batch. */
141 uint8_t CC_ID[MBUFS_CNT] = { 0 };
142 uint8_t Ant_ID[MBUFS_CNT] = { 0 };
143 uint8_t frame_id[MBUFS_CNT] = { 0 };
144 uint8_t subframe_id[MBUFS_CNT] = { 0 };
145 uint8_t slot_id[MBUFS_CNT] = { 0 };
146 uint8_t symb_id[MBUFS_CNT] = { 0 };
148 uint16_t num_prbu[MBUFS_CNT];
149 uint16_t start_prbu[MBUFS_CNT];
150 uint16_t sym_inc[MBUFS_CNT];
151 uint16_t rb[MBUFS_CNT];
152 uint16_t sect_id[MBUFS_CNT];
154 uint8_t compMeth[MBUFS_CNT] = { 0 };
155 uint8_t iqWidth[MBUFS_CNT] = { 0 };
156 uint8_t compMeth_ini = 0;
157 uint8_t iqWidth_ini = 0;
159 uint32_t pkt_size[MBUFS_CNT];
161 void* pHandle = NULL;
162 int32_t valid_res, res_loc;
/* Dynamic compression header is only present on the wire when compression
 * is enabled AND the compression header type is not static. */
163 int expect_comp = (p_dev_ctx->fh_cfg.ru_conf.compMeth != XRAN_COMPMETHOD_NONE);
164 enum xran_comp_hdr_type staticComp = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
166 int16_t num_pusch = 0, num_prach = 0, num_srs = 0;
167 int16_t pusch_idx[MBUFS_CNT] = { 0 }, prach_idx[MBUFS_CNT] = { 0 }, srs_idx[MBUFS_CNT] = { 0 };
168 int8_t xran_port = xran_dev_ctx_get_port_id(p_dev_ctx);
169 int16_t max_ant_num = 0;
170 uint8_t *ptr_seq_id_num_port;
171 struct xran_eaxcid_config* conf;
172 uint8_t seq_id[MBUFS_CNT];
173 uint16_t cid[MBUFS_CNT];
/* Header pointers into each mbuf's data area, laid out back-to-back:
 * eCPRI | radio-app | section | [compression]. */
175 struct xran_ecpri_hdr* ecpri_hdr[MBUFS_CNT];
176 struct radio_app_common_hdr* radio_hdr[MBUFS_CNT];
177 struct data_section_hdr* data_hdr[MBUFS_CNT];
178 struct data_section_compression_hdr* data_compr_hdr[MBUFS_CNT];
180 const int16_t ecpri_size = sizeof(struct xran_ecpri_hdr);
181 const int16_t rad_size = sizeof(struct radio_app_common_hdr);
182 const int16_t data_size = sizeof(struct data_section_hdr);
183 const int16_t compr_size = sizeof(struct data_section_compression_hdr);
185 char* buf_start[MBUFS_CNT];
186 uint16_t start_off[MBUFS_CNT];
187 uint16_t iq_offset[MBUFS_CNT];
188 uint16_t last[MBUFS_CNT];
191 struct rte_mbuf* mb = NULL;
192 struct xran_prb_map* pRbMap = NULL;
193 struct xran_prb_elm* prbMapElm = NULL;
194 uint16_t iq_sample_size_bits;
/* MLog tracing scratch. NOTE(review): mlogVarCnt is initialized once here;
 * a per-packet reset is not visible in this listing — confirm mlogVar[10]
 * cannot overflow when several packets are logged. */
197 uint32_t mlogVar[10];
198 uint32_t mlogVarCnt = 0;
202 print_err("Invalid pHandle - %p", pHandle);
/* NOTE(review): bounds check looks off-by-one — a port id equal to
 * XRAN_PORTS_NUM would pass here yet index arrays of that size; expected
 * `>=`. Confirm against the full source. */
206 if (xran_port > XRAN_PORTS_NUM) {
207 print_err("Invalid port - %d", xran_port);
211 conf = &(p_dev_ctx->eAxc_id_cfg);
213 rte_panic("conf == NULL");
/* Select the sequence-number table and antenna cap for the local role:
 * O-DU receives UL (PUSCH + PRACH + SRS eAxC ranges), O-RU receives DL. */
216 if (p_dev_ctx->fh_init.io_cfg.id == O_DU)
218 max_ant_num = XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR;
219 ptr_seq_id_num_port = &xran_upul_seq_id_num[xran_port][0][0];
221 else if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
223 max_ant_num = XRAN_MAX_ANTENNA_NR;
224 ptr_seq_id_num_port = &xran_updl_seq_id_num[xran_port][0][0];
228 rte_panic("incorrect fh_init.io_cfg.id");
/* Static compression: width/method come from configuration, not from a
 * per-packet compression header. */
231 if (staticComp == XRAN_COMP_HDR_TYPE_STATIC)
233 compMeth_ini = p_dev_ctx->fh_cfg.ru_conf.compMeth;
234 iqWidth_ini = p_dev_ctx->fh_cfg.ru_conf.iqWidth;
237 for (i = 0; i < MBUFS_CNT; i++)
239 pkt_size[i] = pkt_q[i]->pkt_len;
240 buf_start[i] = (char*)pkt_q[i]->buf_addr;
241 start_off[i] = pkt_q[i]->data_off;
/* --- Parse path 1: dynamic compression header present on the wire --- */
244 if (expect_comp && (staticComp != XRAN_COMP_HDR_TYPE_STATIC))
246 #pragma vector always
247 for (i = 0; i < MBUFS_CNT; i++)
252 ecpri_hdr[i] = (void*)(buf_start[i] + start_off[i]);
253 radio_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size);
254 data_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size + rad_size);
255 data_compr_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size + rad_size + data_size);
256 seq[i] = ecpri_hdr[i]->ecpri_seq_id;
257 seq_id[i] = seq[i].bits.seq_id;
258 last[i] = seq[i].bits.e_bit;
/* IQ payload starts after all four headers. */
260 iq_offset[i] = ecpri_size + rad_size + data_size + compr_size;
262 iq_samp_buf[i] = (void*)(buf_start[i] + start_off[i] + iq_offset[i]);
263 num_bytes[i] = pkt_size[i] - iq_offset[i];
265 if (ecpri_hdr[i] == NULL ||
266 radio_hdr[i] == NULL ||
267 data_hdr[i] == NULL ||
268 data_compr_hdr[i] == NULL ||
269 iq_samp_buf[i] == NULL)
271 num_bytes[i] = 0; /* packet too short */
275 if(radio_hdr[i] != NULL && data_hdr[i] != NULL)
277 mlogVar[mlogVarCnt++] = 0xBBBBBBBB;
278 mlogVar[mlogVarCnt++] = xran_lib_ota_tti;
279 mlogVar[mlogVarCnt++] = radio_hdr[i]->frame_id;
280 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.subframe_id;
281 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.slot_id;
282 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.symb_id;
283 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.sect_id;
284 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.start_prbu;
285 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.num_prbu;
286 mlogVar[mlogVarCnt++] = rte_pktmbuf_pkt_len(pkt_q[i]);
287 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
/* --- Parse path 2: no compression header (uncompressed or static) --- */
294 #pragma vector always
295 for (i = 0; i < MBUFS_CNT; i++)
300 ecpri_hdr[i] = (void*)(buf_start[i] + start_off[i]);
301 radio_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size);
302 data_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size + rad_size);
303 seq[i] = ecpri_hdr[i]->ecpri_seq_id;
304 seq_id[i] = seq[i].bits.seq_id;
305 last[i] = seq[i].bits.e_bit;
307 iq_offset[i] = ecpri_size + rad_size + data_size;
308 iq_samp_buf[i] = (void*)(buf_start[i] + start_off[i] + iq_offset[i]);
309 num_bytes[i] = pkt_size[i] - iq_offset[i];
311 if (ecpri_hdr[i] == NULL ||
312 radio_hdr[i] == NULL ||
313 data_hdr[i] == NULL ||
314 iq_samp_buf[i] == NULL)
316 num_bytes[i] = 0; /* packet too short */
320 if (radio_hdr[i] != NULL && data_hdr[i] != NULL)
322 mlogVar[mlogVarCnt++] = 0xBBBBBBBB;
323 mlogVar[mlogVarCnt++] = xran_lib_ota_tti;
324 mlogVar[mlogVarCnt++] = radio_hdr[i]->frame_id;
325 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.subframe_id;
326 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.slot_id;
327 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.symb_id;
328 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.sect_id;
329 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.start_prbu;
330 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.num_prbu;
331 mlogVar[mlogVarCnt++] = rte_pktmbuf_pkt_len(pkt_q[i]);
332 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
/* Resolve CC / antenna per packet: either decode the eCPRI eAxC id with
 * the configured masks/shifts, or use the caller-supplied identity. */
338 for (i = 0; i < MBUFS_CNT; i++) {
339 if(p_cid->ccId == 0xFF && p_cid->ruPortId == 0xFF) {
340 cid[i] = rte_be_to_cpu_16((uint16_t)ecpri_hdr[i]->ecpri_xtc_id);
341 if (num_bytes[i] > 0) {
342 CC_ID[i] = (cid[i] & conf->mask_ccId) >> conf->bit_ccId;
343 Ant_ID[i] = (cid[i] & conf->mask_ruPortId) >> conf->bit_ruPortId;
346 if (num_bytes[i] > 0) {
347 CC_ID[i] = p_cid->ccId;
348 Ant_ID[i] = p_cid->ruPortId;
/* Convert multi-byte header fields to host byte order in place. */
353 for (i = 0; i < MBUFS_CNT; i++)
355 radio_hdr[i]->sf_slot_sym.value = rte_be_to_cpu_16(radio_hdr[i]->sf_slot_sym.value);
356 data_hdr[i]->fields.all_bits = rte_be_to_cpu_32(data_hdr[i]->fields.all_bits);
/* Validate each packet and classify it into the SRS / PRACH / PUSCH
 * index lists for batched processing below. */
359 for (i = 0; i < MBUFS_CNT; i++)
361 if (num_bytes[i] > 0)
363 compMeth[i] = compMeth_ini;
364 iqWidth[i] = iqWidth_ini;
365 valid_res = XRAN_STATUS_SUCCESS;
367 frame_id[i] = radio_hdr[i]->frame_id;
368 subframe_id[i] = radio_hdr[i]->sf_slot_sym.subframe_id;
369 slot_id[i] = radio_hdr[i]->sf_slot_sym.slot_id;
370 symb_id[i] = radio_hdr[i]->sf_slot_sym.symb_id;
372 num_prbu[i] = data_hdr[i]->fields.num_prbu;
373 start_prbu[i] = data_hdr[i]->fields.start_prbu;
374 sym_inc[i] = data_hdr[i]->fields.sym_inc;
375 rb[i] = data_hdr[i]->fields.rb;
376 sect_id[i] = data_hdr[i]->fields.sect_id;
/* Dynamic compression: per-packet method/width override the defaults. */
378 if (expect_comp && (staticComp != XRAN_COMP_HDR_TYPE_STATIC))
380 compMeth[i] = data_compr_hdr[i]->ud_comp_hdr.ud_comp_meth;
381 iqWidth[i] = data_compr_hdr[i]->ud_comp_hdr.ud_iq_width;
/* Range-check CC/antenna/symbol before using them as array indices. */
384 if (CC_ID[i] >= XRAN_MAX_CELLS_PER_PORT || Ant_ID[i] >= max_ant_num || symb_id[i] >= XRAN_NUM_OF_SYMBOL_PER_SLOT)
386 ptr_seq_id_num_port[CC_ID[i] * max_ant_num + Ant_ID[i]] = seq_id[i]; // for next
387 valid_res = XRAN_STATUS_FAIL;
389 // print_err("Invalid CC ID - %d or antenna ID or Symbol ID- %d", CC_ID[i], Ant_ID[i], symb_id[i]);
393 ptr_seq_id_num_port[CC_ID[i] * max_ant_num + Ant_ID[i]]++;
398 pCnt->Total_msgs_rcvd++;
/* eAxC id ranges: SRS above srs eAxC_offset, PRACH above prach
 * eAxC_offset, everything else is PUSCH. Offsets are subtracted so
 * Ant_ID becomes a zero-based index within its stream type. */
400 if (Ant_ID[i] >= p_dev_ctx->srs_cfg.eAxC_offset && p_dev_ctx->fh_cfg.srsEnable)
402 Ant_ID[i] -= p_dev_ctx->srs_cfg.eAxC_offset;
405 srs_idx[num_srs] = i;
407 pCnt->rx_srs_packets++;
410 else if (Ant_ID[i] >= p_dev_ctx->PrachCPConfig.eAxC_offset && p_dev_ctx->fh_cfg.prachEnable)
412 Ant_ID[i] -= p_dev_ctx->PrachCPConfig.eAxC_offset;
415 prach_idx[num_prach] = i;
417 pCnt->rx_prach_packets[Ant_ID[i]]++;
424 pusch_idx[num_pusch] = i;
426 pCnt->rx_pusch_packets[Ant_ID[i]]++;
/* Accumulate fragmented-symbol bytes; snapshot the running total for
 * this packet, then reset (reset path appears conditional on the
 * e-bit in the full source — truncated here). */
429 symbol_total_bytes[xran_port][CC_ID[i]][Ant_ID[i]] += num_bytes[i];
430 num_bytes_pusch[i] = symbol_total_bytes[xran_port][CC_ID[i]][Ant_ID[i]];
432 symbol_total_bytes[xran_port][CC_ID[i]][Ant_ID[i]] = 0;
/* --- PRACH packets: hand each one to the PRACH symbol handler --- */
436 for (j = 0; j < num_prach; j++)
441 print_dbg("Completed receiving PRACH symbol %d, size=%d bytes\n", symb_id[i], num_bytes[i]);
443 int16_t res = xran_process_prach_sym(p_dev_ctx,
/* --- SRS packets: hand each one to the SRS symbol handler --- */
461 for (j = 0; j < num_srs; j++)
466 print_dbg("SRS receiving symbol %d, size=%d bytes\n",
467 symb_id[i], symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID[i]][Ant_ID[i]]);
469 uint64_t t1 = MLogTick();
470 int16_t res = xran_process_srs_sym(p_dev_ctx,
489 MLogTask(PID_PROCESS_UP_PKT_SRS, t1, MLogTick());
/* --- PUSCH fast path: the whole batch is PUSCH, iterate 0..MBUFS_CNT
 * directly and attach each packet's IQ payload to the RX PRB map /
 * buffer descriptors for its TTI. Mbufs that become IQ buffers are
 * marked MBUF_KEEP (freed later by the consumer). --- */
492 if (num_pusch == MBUFS_CNT)
494 for (i = 0; i < MBUFS_CNT; i++)
496 iq_sample_size_bits = 16;
498 iq_sample_size_bits = iqWidth[i];
/* Linear TTI index within the system frame period. */
500 tti = frame_id[i] * SLOTS_PER_SYSTEMFRAME(p_dev_ctx->interval_us_local) +
501 subframe_id[i] * SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local) + slot_id[i];
503 pRbMap = (struct xran_prb_map*)p_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers->pData;
507 prbMapElm = &pRbMap->prbMap[sect_id[i]];
508 if (sect_id[i] >= pRbMap->nPrbElm)
510 // print_err("sect_id %d !=pRbMap->nPrbElm %d\n", sect_id[i], pRbMap->nPrbElm);
511 ret_data[i] = MBUF_FREE;
517 // print_err("pRbMap==NULL\n");
518 ret_data[i] = MBUF_FREE;
/* Single PRB element: point the symbol buffer straight at the packet. */
522 if (pRbMap->nPrbElm == 1)
524 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pData = iq_samp_buf[i];
525 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pCtrl = pkt_q[i];
526 ret_data[i] = MBUF_KEEP;
/* Multiple PRB elements: attach via the per-section descriptor,
 * freeing any previously attached mbuf first. */
530 struct xran_section_desc* p_sec_desc = NULL;
531 prbMapElm = &pRbMap->prbMap[sect_id[i]];
532 p_sec_desc = prbMapElm->p_sec_desc[symb_id[i]][0];
536 mb = p_sec_desc->pCtrl;
538 rte_pktmbuf_free(mb);
540 p_sec_desc->pCtrl = pkt_q[i];
541 p_sec_desc->pData = iq_samp_buf[i];
542 p_sec_desc->start_prbu = start_prbu[i];
543 p_sec_desc->num_prbu = num_prbu[i];
544 p_sec_desc->iq_buffer_len = num_bytes_pusch[i];
545 p_sec_desc->iq_buffer_offset = iq_offset[i];
546 ret_data[i] = MBUF_KEEP;
550 // print_err("p_sec_desc==NULL tti %u ant %d symb_id %d\n", tti, Ant_ID[i], symb_id[i]);
551 ret_data[i] = MBUF_FREE;
/* --- PUSCH slow path: mixed batch, iterate only the PUSCH indices.
 * Body mirrors the fast path above (i is derived from pusch_idx[j] in
 * the full source — truncated here). --- */
558 for (j = 0; j < num_pusch; j++)
562 iq_sample_size_bits = 16;
564 iq_sample_size_bits = iqWidth[i];
566 tti = frame_id[i] * SLOTS_PER_SYSTEMFRAME(p_dev_ctx->interval_us_local) +
567 subframe_id[i] * SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local) + slot_id[i];
569 pRbMap = (struct xran_prb_map*)p_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers->pData;
573 prbMapElm = &pRbMap->prbMap[sect_id[i]];
574 if (sect_id[i] >= pRbMap->nPrbElm)
576 // print_err("sect_id %d !=pRbMap->nPrbElm %d\n", sect_id[i], pRbMap->nPrbElm);
577 ret_data[i] = MBUF_FREE;
583 // print_err("pRbMap==NULL\n");
584 ret_data[i] = MBUF_FREE;
588 if (pRbMap->nPrbElm == 1)
590 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pData = iq_samp_buf[i];
591 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pCtrl = pkt_q[i];
592 ret_data[i] = MBUF_KEEP;
596 struct xran_section_desc* p_sec_desc = NULL;
597 prbMapElm = &pRbMap->prbMap[sect_id[i]];
598 p_sec_desc = prbMapElm->p_sec_desc[symb_id[i]][0];
602 mb = p_sec_desc->pCtrl;
604 rte_pktmbuf_free(mb);
606 p_sec_desc->pCtrl = pkt_q[i];
607 p_sec_desc->pData = iq_samp_buf[i];
608 p_sec_desc->start_prbu = start_prbu[i];
609 p_sec_desc->num_prbu = num_prbu[i];
610 p_sec_desc->iq_buffer_len = num_bytes_pusch[i];
611 p_sec_desc->iq_buffer_offset = iq_offset[i];
612 ret_data[i] = MBUF_KEEP;
616 // print_err("p_sec_desc==NULL tti %u ant %d symb_id %d\n", tti, Ant_ID[i], symb_id[i]);
617 ret_data[i] = MBUF_FREE;
/**
 * Process one received U-plane mbuf (scalar counterpart of
 * process_mbuf_batch).
 *
 * Extracts the IQ payload and headers via xran_extract_iq_samples(),
 * validates the packet, classifies it as SRS / PRACH / PUSCH by the eAxC
 * antenna-id ranges, and accumulates payload bytes per (port, CC, antenna)
 * until the eCPRI e-bit signals the last fragment of the symbol, at which
 * point the matching symbol handler is invoked and the accumulator reset.
 * eCPRI transport-layer fragmentation itself is not supported (only the
 * e-bit == 1 path processes data).
 *
 * @param pkt    received mbuf
 * @param handle struct xran_device_ctx* of the receiving port
 * @param p_cid  pre-decoded eAxC identity (CC and RU port id)
 * @return mbuf ownership verdict (return statements not visible in this
 *         truncated listing; `mb_free` is declared for that purpose)
 */
626 process_mbuf(struct rte_mbuf *pkt, void* handle, struct xran_eaxc_info *p_cid)
628 uint64_t tt1 = MLogTick();
629 struct xran_device_ctx *p_dev_ctx = (struct xran_device_ctx *)handle;
631 union ecpri_seq_id seq;
/* Persistent per-(port, CC, antenna) byte accumulator for fragmented
 * symbols; shared shape with process_mbuf_batch but a separate static
 * instance. Non-reentrant — NOTE(review): confirm single-threaded use. */
632 static int symbol_total_bytes[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR][XRAN_MAX_ANTENNA_NR] = {0};
635 struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
637 uint8_t CC_ID = p_cid->ccId;
638 uint8_t Ant_ID = p_cid->ruPortId;
639 uint8_t frame_id = 0;
640 uint8_t subframe_id = 0;
650 uint8_t compMeth = 0;
653 void *pHandle = NULL;
655 uint32_t mb_free = 0;
656 int32_t valid_res = 0;
657 int expect_comp = (p_dev_ctx->fh_cfg.ru_conf.compMeth != XRAN_COMPMETHOD_NONE);
658 enum xran_comp_hdr_type staticComp = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* Static compression: take method/width from configuration instead of a
 * per-packet compression header. */
660 if (staticComp == XRAN_COMP_HDR_TYPE_STATIC)
662 compMeth = p_dev_ctx->fh_cfg.ru_conf.compMeth;
663 iqWidth = p_dev_ctx->fh_cfg.ru_conf.iqWidth;
/* Drop packets until the xran-to-PHY shared memory is ready. */
666 if(p_dev_ctx->xran2phy_mem_ready == 0)
/* Parse headers and locate the IQ payload; returns payload byte count. */
669 num_bytes = xran_extract_iq_samples(pkt,
688 print_err("num_bytes is wrong [%d]\n", num_bytes);
692 valid_res = xran_pkt_validate(p_dev_ctx,
710 print_dbg("valid_res is wrong [%d] ant %u (%u : %u : %u : %u) seq %u num_bytes %d\n", valid_res, Ant_ID, frame_id, subframe_id, slot_id, symb_id, seq.seq_id, num_bytes);
714 MLogTask(PID_PROCESS_UP_PKT_PARSE, tt1, MLogTick());
715 if (Ant_ID >= p_dev_ctx->srs_cfg.eAxC_offset && p_dev_ctx->fh_cfg.srsEnable) {
716 /* SRS packet has ruportid = 2*num_eAxc + ant_id */
717 Ant_ID -= p_dev_ctx->srs_cfg.eAxC_offset;
718 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] += num_bytes;
/* e-bit set: this packet completes the symbol — process it. */
720 if (seq.bits.e_bit == 1) {
721 print_dbg("SRS receiving symbol %d, size=%d bytes\n",
722 symb_id, symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]);
724 if (symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]) {
725 uint64_t t1 = MLogTick();
726 int16_t res = xran_process_srs_sym(p_dev_ctx,
/* Handler returns bytes consumed; mismatch means partial processing. */
745 if(res == symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]) {
748 print_err("res != symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]\n");
750 pCnt->rx_srs_packets++;
751 MLogTask(PID_PROCESS_UP_PKT_SRS, t1, MLogTick());
753 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] = 0;
756 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
759 } else if (Ant_ID >= p_dev_ctx->PrachCPConfig.eAxC_offset && p_dev_ctx->fh_cfg.prachEnable) {
760 /* PRACH packet has ruportid = num_eAxc + ant_id */
761 Ant_ID -= p_dev_ctx->PrachCPConfig.eAxC_offset;
762 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] += num_bytes;
763 if (seq.bits.e_bit == 1) {
764 print_dbg("Completed receiving PRACH symbol %d, size=%d bytes\n",
767 if (symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]) {
768 int16_t res = xran_process_prach_sym(p_dev_ctx,
784 if(res == symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]) {
787 print_err("res != symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]\n");
789 pCnt->rx_prach_packets[Ant_ID]++;
791 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] = 0;
793 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
/* Default branch: regular PUSCH symbol data. */
797 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] += num_bytes;
799 if (seq.bits.e_bit == 1) {
800 print_dbg("Completed receiving symbol %d, size=%d bytes\n",
801 symb_id, symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]);
803 if (symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]) {
804 uint64_t t1 = MLogTick();
805 int res = xran_process_rx_sym(p_dev_ctx,
808 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID],
824 if(res == symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]) {
827 print_err("res != symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]\n");
829 pCnt->rx_pusch_packets[Ant_ID]++;
830 MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());
832 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] = 0;
834 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
/**
 * Encode an IQ bit width into the compression header's ud_iq_width field.
 * Per the O-RAN U-plane encoding, the maximum width (MAX_IQ_BIT_WIDTH, i.e.
 * 16) is transmitted as 0; all other widths are transmitted as-is.
 * (The `else` keyword and the return statement are not visible in this
 * truncated listing.)
 */
842 static int set_iq_bit_width(uint8_t iq_bit_width, struct data_section_compression_hdr *compr_hdr)
843 if (iq_bit_width == MAX_IQ_BIT_WIDTH)
844 compr_hdr->ud_comp_hdr.ud_iq_width = (uint8_t) 0;
846 compr_hdr->ud_comp_hdr.ud_iq_width = iq_bit_width;
852 /* Send a single 5G symbol over multiple packets */
/**
 * Build one U-plane packet carrying (part of) a 5G symbol into `mb`.
 *
 * Computes the payload size from iqWidth/compMeth/prb_num, fills the radio
 * application header and data-section header (converting both to network
 * byte order), optionally includes a compression header when compression is
 * enabled with dynamic header type, and calls
 * xran_prepare_iq_symbol_portion() to emit headers + IQ data into the mbuf.
 * Finally stamps the mbuf's pkt/data length with the total packet size.
 *
 * @return number of prepared bytes (exact return expression not visible in
 *         this truncated listing; `prep_bytes` holds the portion size)
 */
853 inline int32_t prepare_symbol_ex(enum xran_pkt_dir direction,
859 const enum xran_input_byte_order iq_buf_byte_order,
870 enum xran_comp_hdr_type staticEn)
877 struct xran_up_pkt_gen_params xp = { 0 };
/* Width 0 is shorthand for uncompressed 16-bit IQ. */
880 iqWidth = (iqWidth==0) ? 16 : iqWidth;
/* parm_size = per-PRB compression parameter bytes (e.g. 1-byte exponent
 * for block floating point). */
882 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
883 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
/* 3 * iqWidth bytes per PRB = 12 REs * 2 (I+Q) * iqWidth bits / 8. */
887 n_bytes = (3 * iqWidth + parm_size) * prb_num;
888 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
890 nPktSize = sizeof(struct rte_ether_hdr)
891 + sizeof(struct xran_ecpri_hdr)
892 + sizeof(struct radio_app_common_hdr)
893 + sizeof(struct data_section_hdr)
895 if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
896 nPktSize += sizeof(struct data_section_compression_hdr);
898 /* radio app header */
/* 0x10 sets the payload-version bits; direction overrides the top bit. */
899 xp.app_params.data_feature.value = 0x10;
900 xp.app_params.data_feature.data_direction = direction;
901 //xp.app_params.payl_ver = 1;
902 //xp.app_params.filter_id = 0;
903 xp.app_params.frame_id = frame_id;
904 xp.app_params.sf_slot_sym.subframe_id = subframe_id;
905 xp.app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
906 xp.app_params.sf_slot_sym.symb_id = symbol_no;
908 /* convert to network byte order */
909 xp.app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp.app_params.sf_slot_sym.value);
911 xp.sec_hdr.fields.all_bits = 0;
912 xp.sec_hdr.fields.sect_id = section_id;
/* NOTE(review): prb_num/prb_start are cast to uint8_t; num_prbu == 0 is
 * the O-RAN encoding for "all PRBs", but values > 255 would silently
 * truncate — confirm callers bound these. */
913 xp.sec_hdr.fields.num_prbu = (uint8_t)prb_num;
914 xp.sec_hdr.fields.start_prbu = (uint8_t)prb_start;
915 //xp.sec_hdr.fields.sym_inc = 0;
916 //xp.sec_hdr.fields.rb = 0;
919 xp.compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
920 xp.compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
921 xp.compr_hdr_param.rsrvd = 0;
923 /* network byte order */
924 xp.sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp.sec_hdr.fields.all_bits);
928 errx(1, "out of mbufs after %d packets", 1);
931 prep_bytes = xran_prepare_iq_symbol_portion(mb,
942 errx(1, "failed preparing symbol");
/* Stamp the final packet length on the mbuf (single-segment packet). */
944 rte_pktmbuf_pkt_len(mb) = nPktSize;
945 rte_pktmbuf_data_len(mb) = nPktSize;
948 printf("Symbol %2d prep_bytes (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/**
 * Fill the radio application header of a U-plane packet-generation
 * parameter block with the given frame/subframe/slot/symbol coordinates
 * and convert the packed sf_slot_sym field to network byte order.
 *
 * @param direction XRAN_DIR_UL or XRAN_DIR_DL, written into data_direction
 * @param xp        output: generation params whose app_params are populated
 * @return status (return statement not visible in this truncated listing)
 */
954 int32_t prepare_sf_slot_sym (enum xran_pkt_dir direction,
959 struct xran_up_pkt_gen_params *xp)
961 /* radio app header */
/* 0x10 sets the payload-version bits; direction overrides the top bit. */
962 xp->app_params.data_feature.value = 0x10;
963 xp->app_params.data_feature.data_direction = direction;
964 //xp->app_params.payl_ver = 1;
965 //xp->app_params.filter_id = 0;
966 xp->app_params.frame_id = frame_id;
967 xp->app_params.sf_slot_sym.subframe_id = subframe_id;
968 xp->app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
969 xp->app_params.sf_slot_sym.symb_id = symbol_no;
971 /* convert to network byte order */
972 xp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp->app_params.sf_slot_sym.value);
980 /* Send a single 5G symbol over multiple packets */
/**
 * Allocate (or reuse) an mbuf for one U-plane symbol packet, prepare it via
 * prepare_symbol_ex(), and enqueue it on the U-plane TX ring for the VF
 * mapped to (direction, CC_ID, RU_Port_ID).
 *
 * When `mb` is NULL a fresh mbuf is allocated (and freed later by the Ethernet
 * layer) and the IQ samples are copied in; when the caller supplies `mb`, its
 * refcount is bumped so the Ethernet layer's free does not release it.
 *
 * @return number of bytes sent (exact return expression not visible in this
 *         truncated listing; `sent` holds prepare_symbol_ex()'s result)
 */
981 int send_symbol_ex(void *handle,
982 enum xran_pkt_dir direction,
984 struct rte_mbuf *mb, uint8_t *data,
985 uint8_t compMeth, uint8_t iqWidth,
986 const enum xran_input_byte_order iq_buf_byte_order,
987 uint8_t frame_id, uint8_t subframe_id,
988 uint8_t slot_id, uint8_t symbol_no,
989 int prb_start, int prb_num,
990 uint8_t CC_ID, uint8_t RU_Port_ID, uint8_t seq_id)
992 uint32_t do_copy = 0;
994 int hdr_len, parm_size;
996 struct xran_device_ctx *p_dev_ctx = (struct xran_device_ctx *)handle;
997 struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
998 enum xran_comp_hdr_type staticEn= XRAN_COMP_HDR_TYPE_DYNAMIC;
1001 if (p_dev_ctx != NULL)
1003 staticEn = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
/* xRAN headers only; the Ethernet header is prepended separately below. */
1005 hdr_len = sizeof(struct xran_ecpri_hdr)
1006 + sizeof(struct radio_app_common_hdr)
1007 + sizeof(struct data_section_hdr);
1008 if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
1009 hdr_len += sizeof(struct data_section_compression_hdr);
1012 case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
1013 case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
/* 3 * iqWidth bytes per PRB plus per-PRB compression parameter bytes. */
1017 n_bytes = (3 * iqWidth + parm_size) * prb_num;
1020 char * pChar = NULL;
1021 mb = xran_ethdi_mbuf_alloc(); /* will be freed by ETH */
1024 errx(1, "out of mbufs after %d packets", 1);
1026 pChar = rte_pktmbuf_append(mb, hdr_len + n_bytes);
1029 errx(1, "incorrect mbuf size %d packets", 1);
1031 pChar = rte_pktmbuf_prepend(mb, sizeof(struct rte_ether_hdr));
1034 errx(1, "incorrect mbuf size %d packets", 1);
1036 do_copy = 1; /* new mbuf hence copy of IQs */
/* Caller-owned mbuf: extra reference keeps it alive after ETH TX free. */
1039 rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
1042 sent = prepare_symbol_ex(direction,
1063 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mb);
1064 p_dev_ctx->send_upmbuf2ring(mb, ETHER_TYPE_ECPRI, xran_map_ecpriPcid_to_vf(p_dev_ctx, direction, CC_ID, RU_Port_ID));
1068 printf("Symbol %2d sent (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/**
 * Transmit a prepared C-plane message and record its sections.
 *
 * Prepends the Ethernet header to the already-built C-plane mbuf, enqueues
 * it on the C-plane TX ring for the VF mapped to (dir, cc_id, ru_port_id),
 * then stores every section of the message in the section database so the
 * matching U-plane packets can be correlated later.
 *
 * @return status (`ret`; return statement not visible in this truncated
 *         listing)
 */
1074 int send_cpmsg(void *pHandle, struct rte_mbuf *mbuf,struct xran_cp_gen_params *params,
1075 struct xran_section_gen_info *sect_geninfo, uint8_t cc_id, uint8_t ru_port_id, uint8_t seq_id)
1077 int ret = 0, nsection, i;
1078 uint8_t subframe_id = params->hdr.subframeId;
1079 uint8_t slot_id = params->hdr.slotId;
1080 uint8_t dir = params->dir;
1081 struct xran_device_ctx *p_dev_ctx =(struct xran_device_ctx *) pHandle;
1082 struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
1084 nsection = params->numSections;
1086 /* add in the ethernet header */
1087 struct rte_ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
1090 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);
1091 p_dev_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI, xran_map_ecpriRtcid_to_vf(p_dev_ctx, dir, cc_id, ru_port_id));
/* Section-DB context index cycles with the slot within the subframe. */
1092 for(i=0; i<nsection; i++)
1093 xran_cp_add_section_info(pHandle, dir, cc_id, ru_port_id,
1094 (slot_id + subframe_id*SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local))%XRAN_MAX_SECTIONDB_CTX,
/* NOTE(review): "§_geninfo" is a mis-encoding of "&sect_geninfo" (HTML
 * entity "&sect;" garbled in this listing); kept byte-identical here —
 * restore "&sect_geninfo[i].info" in the real source. */
1095 §_geninfo[i].info);
/**
 * Build a C-plane section type 1 message (regular DL/UL radio channels).
 *
 * Populates the common header from the frame/subframe/slot coordinates and
 * one section entry (id, PRB range, symbol range, beam, compression) in
 * `sect_geninfo`, records the IQ buffer offsets for every symbol of the
 * slot, then serializes the message into `mbuf` via xran_prepare_ctrl_pkt().
 * On serialization failure the mbuf is freed and an error is logged.
 *
 * @return status (`ret`; final return statement not visible in this
 *         truncated listing)
 */
1100 int generate_cpmsg_dlul(void *pHandle, struct xran_cp_gen_params *params, struct xran_section_gen_info *sect_geninfo, struct rte_mbuf *mbuf,
1101 enum xran_pkt_dir dir, uint8_t frame_id, uint8_t subframe_id, uint8_t slot_id,
1102 uint8_t startsym, uint8_t numsym, uint16_t prb_start, uint16_t prb_num,int16_t iq_buffer_offset, int16_t iq_buffer_len,
1103 uint16_t beam_id, uint8_t cc_id, uint8_t ru_port_id, uint8_t comp_method, uint8_t iqWidth, uint8_t seq_id, uint8_t symInc)
1105 int ret = 0, nsection, loc_sym;
1109 params->sectionType = XRAN_CP_SECTIONTYPE_1; // Most DL/UL Radio Channels
1110 params->hdr.filterIdx = XRAN_FILTERINDEX_STANDARD;
1111 params->hdr.frameId = frame_id;
1112 params->hdr.subframeId = subframe_id;
1113 params->hdr.slotId = slot_id;
1114 params->hdr.startSymId = startsym; // start Symbol ID
1115 params->hdr.iqWidth = iqWidth;
1116 params->hdr.compMeth = comp_method;
/* Section entry (nsection indexes the entry being filled; its
 * initialization is not visible in this truncated listing). */
1119 sect_geninfo[nsection].info.type = params->sectionType; // for database
1120 sect_geninfo[nsection].info.startSymId = params->hdr.startSymId; // for database
1121 sect_geninfo[nsection].info.iqWidth = params->hdr.iqWidth; // for database
1122 sect_geninfo[nsection].info.compMeth = params->hdr.compMeth; // for database
1123 sect_geninfo[nsection].info.id = xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, slot_id);
1124 sect_geninfo[nsection].info.rb = XRAN_RBIND_EVERY;
1125 sect_geninfo[nsection].info.symInc = symInc;
1126 sect_geninfo[nsection].info.startPrbc = prb_start;
1127 sect_geninfo[nsection].info.numPrbc = prb_num;
1128 sect_geninfo[nsection].info.numSymbol = numsym;
1129 sect_geninfo[nsection].info.reMask = 0xfff;
1130 sect_geninfo[nsection].info.beamId = beam_id;
/* NOTE(review): the loop writes sect_geninfo[0] while the fields above use
 * [nsection]; consistent only if nsection == 0 here — confirm. */
1132 for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++) {
1133 sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_offset = iq_buffer_offset;
1134 sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_len = iq_buffer_len;
1137 sect_geninfo[nsection].info.ef = 0;
1138 sect_geninfo[nsection].exDataSize = 0;
1139 // sect_geninfo[nsection].exData = NULL;
1142 params->numSections = nsection;
1143 params->sections = sect_geninfo;
1145 if(unlikely(mbuf == NULL)) {
1146 print_err("Alloc fail!\n");
/* Serialize header + sections into the mbuf; free it on failure. */
1150 ret = xran_prepare_ctrl_pkt(mbuf, params, cc_id, ru_port_id, seq_id);
1152 print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",
1153 frame_id, subframe_id, slot_id, dir);
1154 rte_pktmbuf_free(mbuf);
/**
 * Build a C-plane section type 3 message for a PRACH occasion.
 *
 * Derives the PRACH time offset (CP length per 38.211 plus the elapsed
 * symbols before this occasion, scaled from Tc to Ts by the numerology),
 * fills the section type 3 header and one section entry from the PRACH
 * C-plane configuration, records the slot's last PRACH symbol, and
 * serializes the message into `mbuf`. On failure the mbuf is freed.
 *
 * @param occasionid index of the PRACH occasion within the slot; shifts the
 *                   start symbol by occasionid * numSymbol
 * @return status (`ret`; final return statement not visible in this
 *         truncated listing)
 */
1160 int generate_cpmsg_prach(void *pHandle, struct xran_cp_gen_params *params, struct xran_section_gen_info *sect_geninfo, struct rte_mbuf *mbuf, struct xran_device_ctx *pxran_lib_ctx,
1161 uint8_t frame_id, uint8_t subframe_id, uint8_t slot_id,
1162 uint16_t beam_id, uint8_t cc_id, uint8_t prach_port_id, uint16_t occasionid, uint8_t seq_id)
1165 struct xran_prach_cp_config *pPrachCPConfig = &(pxran_lib_ctx->PrachCPConfig);
1166 uint16_t timeOffset;
1167 uint16_t nNumerology = pxran_lib_ctx->fh_cfg.frame_conf.nNumerology;
1170 if(unlikely(mbuf == NULL)) {
1171 print_err("Alloc fail!\n");
1175 printf("%d:%d:%d:%d - filter=%d, startSym=%d[%d:%d], numSym=%d, occasions=%d, freqOff=%d\n",
1176 frame_id, subframe_id, slot_id, prach_port_id,
1177 pPrachCPConfig->filterIdx,
1178 pPrachCPConfig->startSymId,
1179 pPrachCPConfig->startPrbc,
1180 pPrachCPConfig->numPrbc,
1181 pPrachCPConfig->numSymbol,
1182 pPrachCPConfig->occassionsInPrachSlot,
1183 pPrachCPConfig->freqOffset);
1185 timeOffset = pPrachCPConfig->timeOffset; //this is the CP value per 38.211 tab 6.3.3.1-1&2
1186 startSymId = pPrachCPConfig->startSymId + occasionid * pPrachCPConfig->numSymbol;
/* Each elapsed symbol contributes 2048 samples + 144 CP samples (Tc). */
1189 timeOffset += startSymId * (2048 + 144);
1191 timeOffset = timeOffset >> nNumerology; //original number is Tc, convert to Ts based on mu
/* First slot of the subframe (or of its second half) carries the long CP
 * adjustment (the adjustment itself is truncated from this listing). */
1192 if ((slot_id == 0) || (slot_id == (SLOTNUM_PER_SUBFRAME(pxran_lib_ctx->interval_us_local) >> 1)))
1195 params->dir = XRAN_DIR_UL;
1196 params->sectionType = XRAN_CP_SECTIONTYPE_3;
1197 params->hdr.filterIdx = pPrachCPConfig->filterIdx;
1198 params->hdr.frameId = frame_id;
1199 params->hdr.subframeId = subframe_id;
1200 params->hdr.slotId = slot_id;
1201 params->hdr.startSymId = startSymId;
1202 params->hdr.iqWidth = xran_get_conf_iqwidth_prach(pHandle);
1203 params->hdr.compMeth = xran_get_conf_compmethod_prach(pHandle);
1204 /* use timeOffset field for the CP length value for prach sequence */
1205 params->hdr.timeOffset = timeOffset;
1206 params->hdr.fftSize = xran_get_conf_fftsize(pHandle);
1207 params->hdr.scs = xran_get_conf_prach_scs(pHandle);
1208 params->hdr.cpLength = 0;
/* Section entry (nsection initialization not visible in this listing). */
1211 sect_geninfo[nsection].info.type = params->sectionType; // for database
1212 sect_geninfo[nsection].info.startSymId = params->hdr.startSymId; // for database
1213 sect_geninfo[nsection].info.iqWidth = params->hdr.iqWidth; // for database
1214 sect_geninfo[nsection].info.compMeth = params->hdr.compMeth; // for database
1215 sect_geninfo[nsection].info.id = xran_alloc_sectionid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id);
1216 sect_geninfo[nsection].info.rb = XRAN_RBIND_EVERY;
1217 sect_geninfo[nsection].info.symInc = XRAN_SYMBOLNUMBER_NOTINC;
1218 sect_geninfo[nsection].info.startPrbc = pPrachCPConfig->startPrbc;
/* NOTE(review): line ends with a comma (comma operator), not a semicolon;
 * harmless here but should be a semicolon. */
1219 sect_geninfo[nsection].info.numPrbc = pPrachCPConfig->numPrbc,
1220 sect_geninfo[nsection].info.numSymbol = pPrachCPConfig->numSymbol;
1221 sect_geninfo[nsection].info.reMask = 0xfff;
1222 sect_geninfo[nsection].info.beamId = beam_id;
1223 sect_geninfo[nsection].info.freqOffset = pPrachCPConfig->freqOffset;
/* Remember where PRACH ends in this slot for downstream scheduling. */
1225 pxran_lib_ctx->prach_last_symbol[cc_id] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol*pPrachCPConfig->occassionsInPrachSlot - 1;
1227 sect_geninfo[nsection].info.ef = 0;
1228 sect_geninfo[nsection].exDataSize = 0;
1229 // sect_geninfo[nsection].exData = NULL;
1232 params->numSections = nsection;
1233 params->sections = sect_geninfo;
1235 ret = xran_prepare_ctrl_pkt(mbuf, params, cc_id, prach_port_id, seq_id);
1237 print_err("Fail to build prach control packet - [%d:%d:%d]\n", frame_id, subframe_id, slot_id);
1238 rte_pktmbuf_free(mbuf);
/**
 * Drain up to MBUFS_CNT mbufs from one RX ring and pass them to the packet
 * filter for classification/processing.
 *
 * @param r       ring to drain
 * @param ring_id id of the ring (forwarded to the filter)
 * @param q_id    RX queue id (forwarded to the filter)
 * @return status (return statement not visible in this truncated listing;
 *         `remaining` suggests the caller can poll again while the ring is
 *         non-empty)
 */
1244 int process_ring(struct rte_ring *r, uint16_t ring_id, uint16_t q_id)
1248 struct rte_mbuf *mbufs[MBUFS_CNT];
1252 const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
1253 RTE_DIM(mbufs), &remaining);
1260 xran_ethdi_filter_packet(mbufs, ring_id, q_id, dequeued);
1261 //MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());
1265 /** FH RX AND BBDEV */
/**
 * Main polling loop body for fronthaul RX and BBDEV offload: polls the
 * optional BBDEV decoder/encoder callbacks (logging each poll via MLog),
 * then drains every RX queue of every VF through process_ring(). Exits when
 * xran_if_current_state becomes XRAN_STOPPED (loop construct truncated from
 * this listing).
 *
 * @param args unused in the visible code — TODO confirm
 */
1266 int32_t ring_processing_func(void* args)
1268 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1269 int16_t retPoll = 0;
/* BBDEV UL decode poll; retPoll offsets the MLog task id. */
1276 if (ctx->bbdev_dec) {
1278 retPoll = ctx->bbdev_dec();
1282 MLogTask(PID_XRAN_BBDEV_UL_POLL + retPoll, t1, t2);
/* BBDEV DL encode poll. */
1286 if (ctx->bbdev_enc) {
1288 retPoll = ctx->bbdev_enc();
1292 MLogTask(PID_XRAN_BBDEV_DL_POLL + retPoll, t1, t2);
/* Drain every RX queue of every configured VF. */
1296 for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i++){
1297 for(qi = 0; qi < ctx->rxq_per_port[i]; qi++) {
1298 if (process_ring(ctx->rx_ring[i][qi], i, qi))
1303 if (XRAN_STOPPED == xran_if_current_state)
1309 /** Generic thread to perform task on specific core */
/**
 * Entry point for a generic xRAN worker thread pinned to a specific core.
 *
 * Raises itself to SCHED_FIFO at XRAN_THREAD_DEFAULT_PRIO (failure is only
 * logged, not fatal), names the thread from its context, then repeatedly
 * invokes pThCtx->task_func(task_arg) until it returns non-zero or the
 * library state becomes XRAN_STOPPED, optionally sleeping 1 us per
 * iteration when io_sleep is configured (loop construct truncated from
 * this listing).
 *
 * @param args struct xran_worker_th_ctx* describing the task and thread name
 */
1311 xran_generic_worker_thread(void *args)
1314 struct xran_worker_th_ctx* pThCtx = (struct xran_worker_th_ctx*)args;
1315 struct sched_param sched_param;
1316 struct xran_io_cfg * const p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
1318 memset(&sched_param, 0, sizeof(struct sched_param));
1320 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
1321 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1322 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))){
1323 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
1325 pThCtx->worker_policy = SCHED_FIFO;
1326 if ((res = pthread_setname_np(pthread_self(), pThCtx->worker_name))) {
1327 printf("[core %d] pthread_setname_np = %d\n",rte_lcore_id(), res);
/* Worker loop: run the task until it fails or the library stops. */
1331 if(pThCtx->task_func) {
1332 if(pThCtx->task_func(pThCtx->task_arg) != 0)
1336 if (XRAN_STOPPED == xran_if_current_state)
/* Optional 1 us sleep per iteration to reduce busy-polling. */
1339 if(p_io_cfg->io_sleep)
1340 nanosleep(&sleeptime,NULL);
1343 printf("%s worker thread finished on core %d [worker id %d]\n",pThCtx->worker_name, rte_lcore_id(), pThCtx->worker_id);
/**
 * Thread entry point wrapping ring_processing_func(): raises itself to
 * SCHED_FIFO at XRAN_THREAD_DEFAULT_PRIO (failure only logged), then runs
 * the ring-processing loop until it returns non-zero, with an optional 1 us
 * nanosleep per iteration as a kernel workaround when io_sleep is set
 * (loop construct truncated from this listing).
 *
 * @param args forwarded to ring_processing_func()
 */
1347 int ring_processing_thread(void *args)
1349 struct sched_param sched_param;
1350 struct xran_io_cfg * const p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
1353 memset(&sched_param, 0, sizeof(struct sched_param));
1355 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
1356 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1357 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))){
1358 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
1362 if(ring_processing_func(args) != 0)
1365 /* work around for some kernel */
1366 if(p_io_cfg->io_sleep)
1367 nanosleep(&sleeptime,NULL);
1370 puts("Pkt processing thread finished.")