1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN layer common functionality for both O-DU and O-RU as well as C-plane and U-plane
23 * @ingroup group_source_xran
24 * @author Intel Corporation
30 #include <arpa/inet.h>
34 #include <immintrin.h>
39 #include "xran_common.h"
42 #include "xran_pkt_up.h"
43 #include "xran_cp_api.h"
44 #include "xran_up_api.h"
45 #include "xran_cp_proc.h"
47 #include "xran_lib_mlog_tasks_id.h"
49 #include "xran_printf.h"
50 #include "xran_mlog_lnx.h"
/* 1-microsecond pause interval for nanosleep()-style waits.
 * Use an integer literal: tv_nsec is an integral field, so the original
 * floating constant 1E3 forced an implicit double->long conversion. */
static struct timespec sleeptime = {.tv_nsec = 1000 }; /* 1 us */
54 extern int32_t first_call;
58 extern int32_t xran_process_rx_sym(void *arg,
59 struct rte_mbuf *mbuf,
79 extern int xran_process_prach_sym(void *arg,
80 struct rte_mbuf *mbuf,
96 extern int32_t xran_process_srs_sym(void *arg,
97 struct rte_mbuf *mbuf,
116 extern int32_t xran_pkt_validate(void *arg,
117 struct rte_mbuf *mbuf,
126 union ecpri_seq_id *seq_id,
133 int process_mbuf_batch(struct rte_mbuf* pkt_q[], void* handle, int16_t num, struct xran_eaxc_info *p_cid, uint32_t* ret_data)
135 struct rte_mbuf* pkt;
136 struct xran_device_ctx* p_dev_ctx = (struct xran_device_ctx*)handle;
137 void* iq_samp_buf[MBUFS_CNT];
138 union ecpri_seq_id seq[MBUFS_CNT];
139 static int symbol_total_bytes[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR][XRAN_MAX_ANTENNA_NR] = { 0 };
140 int num_bytes[MBUFS_CNT] = { 0 }, num_bytes_pusch[MBUFS_CNT] = { 0 };
143 struct xran_common_counters* pCnt = &p_dev_ctx->fh_counters;
145 uint8_t CC_ID[MBUFS_CNT] = { 0 };
146 uint8_t Ant_ID[MBUFS_CNT] = { 0 };
147 uint8_t frame_id[MBUFS_CNT] = { 0 };
148 uint8_t subframe_id[MBUFS_CNT] = { 0 };
149 uint8_t slot_id[MBUFS_CNT] = { 0 };
150 uint8_t symb_id[MBUFS_CNT] = { 0 };
152 uint16_t num_prbu[MBUFS_CNT];
153 uint16_t start_prbu[MBUFS_CNT];
154 uint16_t sym_inc[MBUFS_CNT];
155 uint16_t rb[MBUFS_CNT];
156 uint16_t sect_id[MBUFS_CNT];
157 uint16_t prb_elem_id[MBUFS_CNT] = {0};
159 uint8_t compMeth[MBUFS_CNT] = { 0 };
160 uint8_t iqWidth[MBUFS_CNT] = { 0 };
161 uint8_t compMeth_ini = 0;
162 uint8_t iqWidth_ini = 0;
164 uint32_t pkt_size[MBUFS_CNT];
166 int expect_comp = (p_dev_ctx->fh_cfg.ru_conf.compMeth != XRAN_COMPMETHOD_NONE);
167 enum xran_comp_hdr_type staticComp = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
169 int16_t num_pusch = 0, num_prach = 0, num_srs = 0;
170 int16_t pusch_idx[MBUFS_CNT] = { 0 }, prach_idx[MBUFS_CNT] = { 0 }, srs_idx[MBUFS_CNT] = { 0 };
171 int8_t xran_port = xran_dev_ctx_get_port_id(p_dev_ctx);
172 int16_t max_ant_num = 0;
173 uint8_t *ptr_seq_id_num_port;
174 struct xran_eaxcid_config* conf;
175 uint8_t seq_id[MBUFS_CNT];
176 uint16_t cid[MBUFS_CNT];
178 struct xran_ecpri_hdr* ecpri_hdr[MBUFS_CNT];
179 struct radio_app_common_hdr* radio_hdr[MBUFS_CNT];
180 struct data_section_hdr* data_hdr[MBUFS_CNT];
181 struct data_section_compression_hdr* data_compr_hdr[MBUFS_CNT];
183 const int16_t ecpri_size = sizeof(struct xran_ecpri_hdr);
184 const int16_t rad_size = sizeof(struct radio_app_common_hdr);
185 const int16_t data_size = sizeof(struct data_section_hdr);
186 const int16_t compr_size = sizeof(struct data_section_compression_hdr);
188 char* buf_start[MBUFS_CNT];
189 uint16_t start_off[MBUFS_CNT];
190 uint16_t iq_offset[MBUFS_CNT];
191 uint16_t last[MBUFS_CNT];
194 struct rte_mbuf* mb = NULL;
195 struct xran_prb_map* pRbMap = NULL;
196 struct xran_prb_elm* prbMapElm = NULL;
197 //uint16_t iq_sample_size_bits;
198 uint16_t idxElm = 0, total_sections = 0;
201 uint32_t mlogVar[10];
202 uint32_t mlogVarCnt = 0;
206 print_err("Invalid pHandle");
210 if (xran_port > XRAN_PORTS_NUM) {
211 print_err("Invalid port - %d", xran_port);
215 if(first_call == 0) {
216 for(i = 0; i < num; i++ )
217 ret_data[i] = MBUF_FREE;
221 conf = &(p_dev_ctx->eAxc_id_cfg);
223 rte_panic("conf == NULL");
226 if (p_dev_ctx->fh_init.io_cfg.id == O_DU)
228 max_ant_num = XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR;
229 ptr_seq_id_num_port = &xran_upul_seq_id_num[xran_port][0][0];
231 else if (p_dev_ctx->fh_init.io_cfg.id == O_RU)
233 max_ant_num = XRAN_MAX_ANTENNA_NR;
234 ptr_seq_id_num_port = &xran_updl_seq_id_num[xran_port][0][0];
238 rte_panic("incorrect fh_init.io_cfg.id");
241 if (staticComp == XRAN_COMP_HDR_TYPE_STATIC)
243 compMeth_ini = p_dev_ctx->fh_cfg.ru_conf.compMeth;
244 iqWidth_ini = p_dev_ctx->fh_cfg.ru_conf.iqWidth;
247 for (i = 0; i < MBUFS_CNT; i++)
249 pkt_size[i] = pkt_q[i]->pkt_len;
250 buf_start[i] = (char*)pkt_q[i]->buf_addr;
251 start_off[i] = pkt_q[i]->data_off;
254 if (expect_comp && (staticComp != XRAN_COMP_HDR_TYPE_STATIC))
256 #pragma vector always
257 for (i = 0; i < MBUFS_CNT; i++)
262 ecpri_hdr[i] = (void*)(buf_start[i] + start_off[i]);
263 radio_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size);
264 data_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size + rad_size);
265 data_compr_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size + rad_size + data_size);
266 seq[i] = ecpri_hdr[i]->ecpri_seq_id;
267 seq_id[i] = seq[i].bits.seq_id;
268 last[i] = seq[i].bits.e_bit;
270 iq_offset[i] = ecpri_size + rad_size + data_size + compr_size;
272 iq_samp_buf[i] = (void*)(buf_start[i] + start_off[i] + iq_offset[i]);
273 num_bytes[i] = pkt_size[i] - iq_offset[i];
275 if (ecpri_hdr[i] == NULL ||
276 radio_hdr[i] == NULL ||
277 data_hdr[i] == NULL ||
278 data_compr_hdr[i] == NULL ||
279 iq_samp_buf[i] == NULL)
281 num_bytes[i] = 0; /* packet too short */
285 if(radio_hdr[i] != NULL && data_hdr[i] != NULL)
287 mlogVar[mlogVarCnt++] = 0xBBBBBBBB;
288 mlogVar[mlogVarCnt++] = xran_lib_ota_tti;
289 mlogVar[mlogVarCnt++] = radio_hdr[i]->frame_id;
290 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.subframe_id;
291 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.slot_id;
292 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.symb_id;
293 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.sect_id;
294 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.start_prbu;
295 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.num_prbu;
296 mlogVar[mlogVarCnt++] = rte_pktmbuf_pkt_len(pkt_q[i]);
297 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
304 #pragma vector always
305 for (i = 0; i < MBUFS_CNT; i++)
310 ecpri_hdr[i] = (void*)(buf_start[i] + start_off[i]);
311 radio_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size);
312 data_hdr[i] = (void*)(buf_start[i] + start_off[i] + ecpri_size + rad_size);
313 seq[i] = ecpri_hdr[i]->ecpri_seq_id;
314 seq_id[i] = seq[i].bits.seq_id;
315 last[i] = seq[i].bits.e_bit;
317 iq_offset[i] = ecpri_size + rad_size + data_size;
318 iq_samp_buf[i] = (void*)(buf_start[i] + start_off[i] + iq_offset[i]);
319 num_bytes[i] = pkt_size[i] - iq_offset[i];
321 if (ecpri_hdr[i] == NULL ||
322 radio_hdr[i] == NULL ||
323 data_hdr[i] == NULL ||
324 iq_samp_buf[i] == NULL)
326 num_bytes[i] = 0; /* packet too short */
330 if (radio_hdr[i] != NULL && data_hdr[i] != NULL)
332 mlogVar[mlogVarCnt++] = 0xBBBBBBBB;
333 mlogVar[mlogVarCnt++] = xran_lib_ota_tti;
334 mlogVar[mlogVarCnt++] = radio_hdr[i]->frame_id;
335 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.subframe_id;
336 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.slot_id;
337 mlogVar[mlogVarCnt++] = radio_hdr[i]->sf_slot_sym.symb_id;
338 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.sect_id;
339 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.start_prbu;
340 mlogVar[mlogVarCnt++] = data_hdr[i]->fields.num_prbu;
341 mlogVar[mlogVarCnt++] = rte_pktmbuf_pkt_len(pkt_q[i]);
342 MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
348 for (i = 0; i < MBUFS_CNT; i++) {
349 if(p_cid->ccId == 0xFF && p_cid->ruPortId == 0xFF) {
350 cid[i] = rte_be_to_cpu_16((uint16_t)ecpri_hdr[i]->ecpri_xtc_id);
351 if (num_bytes[i] > 0) {
352 CC_ID[i] = (cid[i] & conf->mask_ccId) >> conf->bit_ccId;
353 Ant_ID[i] = (cid[i] & conf->mask_ruPortId) >> conf->bit_ruPortId;
356 if (num_bytes[i] > 0) {
357 CC_ID[i] = p_cid->ccId;
358 Ant_ID[i] = p_cid->ruPortId;
363 for (i = 0; i < MBUFS_CNT; i++)
365 radio_hdr[i]->sf_slot_sym.value = rte_be_to_cpu_16(radio_hdr[i]->sf_slot_sym.value);
366 data_hdr[i]->fields.all_bits = rte_be_to_cpu_32(data_hdr[i]->fields.all_bits);
369 for (i = 0; i < MBUFS_CNT; i++)
371 if (num_bytes[i] > 0)
373 compMeth[i] = compMeth_ini;
374 iqWidth[i] = iqWidth_ini;
376 frame_id[i] = radio_hdr[i]->frame_id;
377 subframe_id[i] = radio_hdr[i]->sf_slot_sym.subframe_id;
378 slot_id[i] = radio_hdr[i]->sf_slot_sym.slot_id;
379 symb_id[i] = radio_hdr[i]->sf_slot_sym.symb_id;
381 num_prbu[i] = data_hdr[i]->fields.num_prbu;
382 start_prbu[i] = data_hdr[i]->fields.start_prbu;
383 sym_inc[i] = data_hdr[i]->fields.sym_inc;
384 rb[i] = data_hdr[i]->fields.rb;
385 sect_id[i] = data_hdr[i]->fields.sect_id;
387 if (num_prbu[i] == 0)
388 num_prbu[i] = p_dev_ctx->fh_cfg.nULRBs;
390 if (expect_comp && (staticComp != XRAN_COMP_HDR_TYPE_STATIC))
392 compMeth[i] = data_compr_hdr[i]->ud_comp_hdr.ud_comp_meth;
393 iqWidth[i] = data_compr_hdr[i]->ud_comp_hdr.ud_iq_width;
396 if (CC_ID[i] >= XRAN_MAX_CELLS_PER_PORT || Ant_ID[i] >= max_ant_num || symb_id[i] >= XRAN_NUM_OF_SYMBOL_PER_SLOT)
398 ptr_seq_id_num_port[CC_ID[i] * max_ant_num + Ant_ID[i]] = seq_id[i]; // for next
400 // print_err("Invalid CC ID - %d or antenna ID or Symbol ID- %d", CC_ID[i], Ant_ID[i], symb_id[i]);
404 ptr_seq_id_num_port[CC_ID[i] * max_ant_num + Ant_ID[i]]++;
409 pCnt->Total_msgs_rcvd++;
410 struct xran_prach_cp_config *PrachCfg = NULL;
411 if(p_dev_ctx->dssEnable){
412 tti = frame_id[i] * SLOTS_PER_SYSTEMFRAME(p_dev_ctx->interval_us_local) +
413 subframe_id[i] * SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local) + slot_id[i];
414 int techSlot = (tti % p_dev_ctx->dssPeriod);
415 if(p_dev_ctx->technology[techSlot] == 1)
416 PrachCfg = &(p_dev_ctx->PrachCPConfig);
418 PrachCfg = &(p_dev_ctx->PrachCPConfigLTE);
421 PrachCfg = &(p_dev_ctx->PrachCPConfig);
424 if (Ant_ID[i] >= p_dev_ctx->srs_cfg.eAxC_offset && p_dev_ctx->fh_cfg.srsEnable)
426 Ant_ID[i] -= p_dev_ctx->srs_cfg.eAxC_offset;
429 srs_idx[num_srs] = i;
431 pCnt->rx_srs_packets++;
434 else if (Ant_ID[i] >= PrachCfg->eAxC_offset && p_dev_ctx->fh_cfg.prachEnable)
436 Ant_ID[i] -= PrachCfg->eAxC_offset;
439 prach_idx[num_prach] = i;
441 pCnt->rx_prach_packets[Ant_ID[i]]++;
448 pusch_idx[num_pusch] = i;
450 pCnt->rx_pusch_packets[Ant_ID[i]]++;
453 symbol_total_bytes[xran_port][CC_ID[i]][Ant_ID[i]] += num_bytes[i];
454 num_bytes_pusch[i] = symbol_total_bytes[xran_port][CC_ID[i]][Ant_ID[i]];
456 symbol_total_bytes[xran_port][CC_ID[i]][Ant_ID[i]] = 0;
460 for (j = 0; j < num_prach; j++)
465 print_dbg("Completed receiving PRACH symbol %d, size=%d bytes\n", symb_id[i], num_bytes[i]);
467 xran_process_prach_sym(p_dev_ctx,
485 for (j = 0; j < num_srs; j++)
490 print_dbg("SRS receiving symbol %d, size=%d bytes\n",
491 symb_id[i], symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID[i]][Ant_ID[i]]);
493 uint64_t t1 = MLogXRANTick();
494 xran_process_srs_sym(p_dev_ctx,
513 MLogXRANTask(PID_PROCESS_UP_PKT_SRS, t1, MLogXRANTick());
516 if (num_pusch == MBUFS_CNT)
518 for (i = 0; i < MBUFS_CNT; i++)
520 //iq_sample_size_bits = 16;
522 // iq_sample_size_bits = iqWidth[i];
524 tti = frame_id[i] * SLOTS_PER_SYSTEMFRAME(p_dev_ctx->interval_us_local) +
525 subframe_id[i] * SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local) + slot_id[i];
527 pRbMap = (struct xran_prb_map*)p_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers->pData;
531 /** Get the prb_elem_id */
533 if(pRbMap->prbMap[0].bf_weight.extType == 1)
535 for(idxElm=0 ; idxElm < pRbMap->nPrbElm ; idxElm++)
537 total_sections += pRbMap->prbMap[idxElm].bf_weight.numSetBFWs;
538 if(total_sections >= (sect_id[i] + 1))
540 prb_elem_id[i] = idxElm;
547 prb_elem_id[i] = sect_id[i];
550 if (prb_elem_id[i] >= pRbMap->nPrbElm)
552 print_err("sect_id %d, prb_elem_id %d !=pRbMap->nPrbElm %d\n", sect_id[i], prb_elem_id[i], pRbMap->nPrbElm);
553 ret_data[i] = MBUF_FREE;
559 // print_err("pRbMap==NULL\n");
560 ret_data[i] = MBUF_FREE;
564 if (pRbMap->nPrbElm == 1)
566 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pData = iq_samp_buf[i];
567 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pCtrl = pkt_q[i];
568 ret_data[i] = MBUF_KEEP;
572 struct xran_section_desc* p_sec_desc = NULL;
573 prbMapElm = &pRbMap->prbMap[prb_elem_id[i]];
574 int16_t nSecDesc = prbMapElm->nSecDesc[symb_id[i]];
575 p_sec_desc = &prbMapElm->sec_desc[symb_id[i]][nSecDesc];
579 mb = p_sec_desc->pCtrl;
581 rte_pktmbuf_free(mb);
583 p_sec_desc->pCtrl = pkt_q[i];
584 p_sec_desc->pData = iq_samp_buf[i];
585 p_sec_desc->start_prbu = start_prbu[i];
586 p_sec_desc->num_prbu = num_prbu[i];
587 p_sec_desc->iq_buffer_len = num_bytes_pusch[i];
588 p_sec_desc->iq_buffer_offset = iq_offset[i];
589 ret_data[i] = MBUF_KEEP;
590 prbMapElm->nSecDesc[symb_id[i]] += 1;
594 // print_err("p_sec_desc==NULL tti %u ant %d symb_id %d\n", tti, Ant_ID[i], symb_id[i]);
595 ret_data[i] = MBUF_FREE;
602 for (j = 0; j < num_pusch; j++)
606 //iq_sample_size_bits = 16;
608 // iq_sample_size_bits = iqWidth[i];
610 tti = frame_id[i] * SLOTS_PER_SYSTEMFRAME(p_dev_ctx->interval_us_local) +
611 subframe_id[i] * SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local) + slot_id[i];
613 pRbMap = (struct xran_prb_map*)p_dev_ctx->sFrontHaulRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers->pData;
617 /** Get the prb_elem_id */
619 if(pRbMap->prbMap[0].bf_weight.extType == 1)
621 for(idxElm=0 ; idxElm < pRbMap->nPrbElm ; idxElm++)
623 total_sections += pRbMap->prbMap[idxElm].bf_weight.numSetBFWs;
624 if(total_sections >= (sect_id[i] + 1))
626 prb_elem_id[i] = idxElm;
633 prb_elem_id[i] = sect_id[i];
636 if (prb_elem_id[i] >= pRbMap->nPrbElm)
638 print_err("sect_id %d, prb_elem_id %d !=pRbMap->nPrbElm %d\n", sect_id[i], prb_elem_id[i], pRbMap->nPrbElm);
639 ret_data[i] = MBUF_FREE;
645 // print_err("pRbMap==NULL\n");
646 ret_data[i] = MBUF_FREE;
650 if (pRbMap->nPrbElm == 1)
652 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pData = iq_samp_buf[i];
653 p_dev_ctx->sFrontHaulRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][CC_ID[i]][Ant_ID[i]].sBufferList.pBuffers[symb_id[i]].pCtrl = pkt_q[i];
654 ret_data[i] = MBUF_KEEP;
658 struct xran_section_desc* p_sec_desc = NULL;
659 prbMapElm = &pRbMap->prbMap[prb_elem_id[i]];
660 int16_t nSecDesc = prbMapElm->nSecDesc[symb_id[i]];
661 p_sec_desc = &prbMapElm->sec_desc[symb_id[i]][nSecDesc];
665 mb = p_sec_desc->pCtrl;
667 rte_pktmbuf_free(mb);
669 p_sec_desc->pCtrl = pkt_q[i];
670 p_sec_desc->pData = iq_samp_buf[i];
671 p_sec_desc->start_prbu = start_prbu[i];
672 p_sec_desc->num_prbu = num_prbu[i];
673 p_sec_desc->iq_buffer_len = num_bytes_pusch[i];
674 p_sec_desc->iq_buffer_offset = iq_offset[i];
675 ret_data[i] = MBUF_KEEP;
676 prbMapElm->nSecDesc[symb_id[i]] += 1;
680 // print_err("p_sec_desc==NULL tti %u ant %d symb_id %d\n", tti, Ant_ID[i], symb_id[i]);
681 ret_data[i] = MBUF_FREE;
690 process_mbuf(struct rte_mbuf *pkt, void* handle, struct xran_eaxc_info *p_cid)
692 uint64_t tt1 = MLogXRANTick();
693 struct xran_device_ctx *p_dev_ctx = (struct xran_device_ctx *)handle;
695 union ecpri_seq_id seq;
696 static int symbol_total_bytes[XRAN_PORTS_NUM][XRAN_MAX_SECTOR_NR][XRAN_MAX_ANTENNA_NR] = {0};
699 struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
701 uint8_t CC_ID = p_cid->ccId;
702 uint8_t Ant_ID = p_cid->ruPortId;
703 uint8_t frame_id = 0;
704 uint8_t subframe_id = 0;
714 uint8_t compMeth = 0;
718 uint32_t mb_free = 0;
719 int32_t valid_res = 0;
720 int expect_comp = (p_dev_ctx->fh_cfg.ru_conf.compMeth != XRAN_COMPMETHOD_NONE);
721 enum xran_comp_hdr_type staticComp = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
726 if (staticComp == XRAN_COMP_HDR_TYPE_STATIC)
728 compMeth = p_dev_ctx->fh_cfg.ru_conf.compMeth;
729 iqWidth = p_dev_ctx->fh_cfg.ru_conf.iqWidth;
732 if(p_dev_ctx->xran2phy_mem_ready == 0 || first_call == 0)
735 num_bytes = xran_extract_iq_samples(pkt, &iq_samp_buf,
736 &CC_ID, &Ant_ID, &frame_id, &subframe_id, &slot_id, &symb_id, &seq,
737 &num_prbu, &start_prbu, &sym_inc, &rb, §_id,
738 expect_comp, staticComp, &compMeth, &iqWidth);
741 print_err("num_bytes is wrong [%d]\n", num_bytes);
745 num_prbu = p_dev_ctx->fh_cfg.nULRBs;
747 MLogXRANTask(PID_PROCESS_UP_PKT_PARSE, tt1, MLogXRANTick());
748 /* do not validate for NDM SRS */
749 if (Ant_ID >= p_dev_ctx->srs_cfg.eAxC_offset && p_dev_ctx->fh_cfg.srsEnable)
751 /* SRS packet has ruportid = 2*num_eAxc + ant_id */
752 Ant_ID -= p_dev_ctx->srs_cfg.eAxC_offset;
753 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] += num_bytes;
755 if (seq.bits.e_bit == 1)
757 print_dbg("SRS receiving symbol %d, size=%d bytes\n",
758 symb_id, symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]);
760 if (symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID])
762 uint64_t t1 = MLogXRANTick();
763 int16_t res = xran_process_srs_sym(p_dev_ctx,
764 pkt, iq_samp_buf, num_bytes,
765 CC_ID, Ant_ID, frame_id, subframe_id, slot_id, symb_id,
766 num_prbu, start_prbu, sym_inc, rb, sect_id,
767 &mb_free, expect_comp, compMeth, iqWidth);
768 if(res == symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID])
771 print_err("res != symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]\n");
773 pCnt->rx_srs_packets++;
774 MLogXRANTask(PID_PROCESS_UP_PKT_SRS, t1, MLogXRANTick());
776 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] = 0;
779 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
780 } /* if (Ant_ID >= p_dev_ctx->srs_cfg.eAxC_offset && p_dev_ctx->fh_cfg.srsEnable) */
784 valid_res = xran_pkt_validate(p_dev_ctx,
785 pkt, iq_samp_buf, num_bytes,
786 CC_ID, Ant_ID, frame_id, subframe_id, slot_id, symb_id,
787 &seq, num_prbu, start_prbu, sym_inc, rb, sect_id);
791 print_dbg("valid_res is wrong [%d] ant %u (%u : %u : %u : %u) seq %u num_bytes %d\n", valid_res, Ant_ID, frame_id, subframe_id, slot_id, symb_id, seq.bits.seq_id, num_bytes);
796 struct xran_prach_cp_config *PrachCfg = NULL;
797 if(p_dev_ctx->dssEnable){
798 tti = frame_id * SLOTS_PER_SYSTEMFRAME(p_dev_ctx->interval_us_local) +
799 subframe_id * SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local) + slot_id;
800 int techSlot = (tti % p_dev_ctx->dssPeriod);
801 if(p_dev_ctx->technology[techSlot] == 1)
802 PrachCfg = &(p_dev_ctx->PrachCPConfig);
804 PrachCfg = &(p_dev_ctx->PrachCPConfigLTE);
807 PrachCfg = &(p_dev_ctx->PrachCPConfig);
810 if (Ant_ID >= PrachCfg->eAxC_offset && p_dev_ctx->fh_cfg.prachEnable)
812 /* PRACH packet has ruportid = num_eAxc + ant_id */
813 Ant_ID -= PrachCfg->eAxC_offset;
814 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] += num_bytes;
815 if (seq.bits.e_bit == 1)
817 print_dbg("Completed receiving PRACH symbol %d, size=%d bytes\n",
820 if (symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID])
822 int16_t res = xran_process_prach_sym(p_dev_ctx,
823 pkt, iq_samp_buf, num_bytes,
824 CC_ID, Ant_ID, frame_id, subframe_id, slot_id, symb_id,
825 num_prbu, start_prbu, sym_inc, rb, sect_id, &mb_free);
826 if(res == symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID])
829 print_err("res != symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]\n");
831 pCnt->rx_prach_packets[Ant_ID]++;
833 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] = 0;
836 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
841 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] += num_bytes;
843 if (seq.bits.e_bit == 1)
845 print_dbg("Completed receiving symbol %d, size=%d bytes\n",
846 symb_id, symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]);
848 if (symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID])
850 uint64_t t1 = MLogXRANTick();
851 int res = xran_process_rx_sym(p_dev_ctx,
852 pkt, iq_samp_buf, symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID],
853 CC_ID, Ant_ID, frame_id, subframe_id, slot_id, symb_id,
854 num_prbu, start_prbu, sym_inc, rb, sect_id,
855 &mb_free, expect_comp, compMeth, iqWidth);
856 if(res == symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID])
859 print_err("res != symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID]\n");
861 pCnt->rx_pusch_packets[Ant_ID]++;
862 MLogXRANTask(PID_PROCESS_UP_PKT, t1, MLogXRANTick());
864 symbol_total_bytes[p_dev_ctx->xran_port_id][CC_ID][Ant_ID] = 0;
867 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
/* Encode an IQ bit width into the compression header's udIqWidth field.
 * The maximum width (MAX_IQ_BIT_WIDTH) is encoded as 0, any narrower
 * width is carried verbatim — NOTE(review): 0 == full width matches the
 * usual O-RAN udIqWidth convention; confirm against the U-plane header
 * definitions. The else/return lines are not visible in this fragment. */
static int set_iq_bit_width(uint8_t iq_bit_width, struct data_section_compression_hdr *compr_hdr)
    if (iq_bit_width == MAX_IQ_BIT_WIDTH)
        compr_hdr->ud_comp_hdr.ud_iq_width = (uint8_t) 0; /* full width encoded as 0 */
        compr_hdr->ud_comp_hdr.ud_iq_width = iq_bit_width;
/* Send a single 5G symbol over multiple packets */
/* Build the eCPRI/radio-app/section headers for one symbol, split into
 * num_sections U-plane sections inside one mbuf, then serialize it with
 * xran_prepare_iq_symbol_portion(). Section IDs start at section_id_start;
 * PRBs are divided evenly across sections with the remainder handled on the
 * last section. Returns the prepared byte count (via prep_bytes path). */
inline int32_t prepare_symbol_ex(enum xran_pkt_dir direction,
        uint16_t section_id_start,
        const enum xran_input_byte_order iq_buf_byte_order,
        enum xran_comp_hdr_type staticEn,
        uint16_t num_sections,
    int32_t n_bytes , iq_len_aggr = 0;
    int16_t nPktSize,idx, nprb_per_section;
    uint32_t curr_sect_id;
    struct xran_up_pkt_gen_params xp[XRAN_MAX_SECTIONS_PER_SLOT] = { 0 };
    bool prbElemBegin , prbElemEnd;
    /* iqWidth 0 means uncompressed 16-bit samples */
    iqWidth = (iqWidth==0) ? 16 : iqWidth;
    /* per-PRB compression parameter bytes depend on the method */
    case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
    case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
    nprb_per_section = prb_num/num_sections;
    if(prb_num%num_sections)
    /* payload bytes per section: 3 bytes per PRB per IQ-width unit + params */
    n_bytes = (3 * iqWidth + parm_size)*nprb_per_section;
    // n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
    for(idx=0 ; idx < num_sections ; idx++)
        prbElemBegin = (idx == 0) ? 1 : 0;
        prbElemEnd = (idx + 1 == num_sections) ? 1 : 0;
        curr_sect_id = section_id_start + idx ;
        iq_len_aggr += n_bytes;
        nPktSize = sizeof(struct rte_ether_hdr)
            + sizeof(struct xran_ecpri_hdr)
            + sizeof(struct radio_app_common_hdr) ;
        /* last section absorbs the PRB remainder */
        if(((idx+1)*nprb_per_section) > prb_num){
            nprb_per_section = (prb_num - idx*nprb_per_section);
            // n_bytes = (3 * iqWidth + parm_size)*(nprb_per_section);
        nPktSize += sizeof(struct data_section_hdr);
        /* compression header is present only for dynamic comp-header mode */
        if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
            nPktSize += sizeof(struct data_section_compression_hdr);
        * Setting app_params is redundant , its needed only once to create common Radio app header.
        xp[idx].app_params.data_feature.value = 0x10;
        xp[idx].app_params.data_feature.data_direction = direction;
        // xp[idx].app_params.payl_ver = 1;
        // xp[idx].app_params.filter_id = 0;
        xp[idx].app_params.frame_id = frame_id;
        xp[idx].app_params.sf_slot_sym.subframe_id = subframe_id;
        xp[idx].app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
        xp[idx].app_params.sf_slot_sym.symb_id = symbol_no;
        /* convert to network byte order */
        xp[idx].app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp[idx].app_params.sf_slot_sym.value);
        // printf("start_prbu = %d, prb_num = %d,num_sections = %d, nprb_per_section = %d,curr_sect_id = %d\n",(prb_start + idx*nprb_per_section),prb_num,num_sections,nprb_per_section,curr_sect_id);
        xp[idx].sec_hdr.fields.all_bits = 0;
        xp[idx].sec_hdr.fields.sect_id = curr_sect_id;
        xp[idx].sec_hdr.fields.num_prbu = XRAN_CONVERT_NUMPRBC(nprb_per_section); //(uint8_t)prb_num;
        xp[idx].sec_hdr.fields.start_prbu = prb_start;
        xp[idx].sec_hdr.fields.sym_inc = 0;
        xp[idx].sec_hdr.fields.rb = 0;
        xp[idx].compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
        xp[idx].compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
        xp[idx].compr_hdr_param.rsrvd = 0;
        /* advance the PRB window for the next section */
        prb_start += nprb_per_section;
        printf("\nidx %hu num_prbu %u sect_id %u start_prbu %u sym_inc %u curr_sec_id %u",idx,(uint32_t)xp[idx].sec_hdr.fields.num_prbu,
            (uint32_t)xp[idx].sec_hdr.fields.sect_id,
            (uint32_t)xp[idx].sec_hdr.fields.start_prbu,
            (uint32_t)xp[idx].sec_hdr.fields.sym_inc,curr_sect_id);
        /* network byte order */
        xp[idx].sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp[idx].sec_hdr.fields.all_bits);
        errx(1, "out of mbufs after %d packets", 1);
    } /* for(idx=0 ; idx < num_sections ; idx++) */
    //printf("\niq_len_aggr %u",iq_len_aggr);
    /* serialize all prepared sections into the mbuf */
    prep_bytes = xran_prepare_iq_symbol_portion(mb,
    if (prep_bytes <= 0)
        errx(1, "failed preparing symbol");
    rte_pktmbuf_pkt_len(mb) = nPktSize;
    rte_pktmbuf_data_len(mb) = nPktSize;
    printf("Symbol %2d prep_bytes (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/* Fill the common radio application header of a U-plane packet-generation
 * parameter block: direction, frame/subframe/slot/symbol identifiers, with
 * the slot id converted via xran_slotid_convert() and the packed
 * sf_slot_sym word swapped to network byte order.
 * (The full parameter list and return are elided in this fragment.) */
int32_t prepare_sf_slot_sym (enum xran_pkt_dir direction,
        uint8_t subframe_id,
        struct xran_up_pkt_gen_params *xp)
    /* radio app header */
    xp->app_params.data_feature.value = 0x10;
    xp->app_params.data_feature.data_direction = direction;
    //xp->app_params.payl_ver = 1;
    //xp->app_params.filter_id = 0;
    xp->app_params.frame_id = frame_id;
    xp->app_params.sf_slot_sym.subframe_id = subframe_id;
    xp->app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
    xp->app_params.sf_slot_sym.symb_id = symbol_no;
    /* convert to network byte order */
    xp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp->app_params.sf_slot_sym.value);
/* Transmit one symbol's IQ data split over three U-plane sections/packets.
 * For each of the 3 slices: allocate a fresh mbuf, copy the IQ slice into it,
 * build headers via prepare_symbol_ex(), then enqueue it on the UP TX ring
 * for the VF mapped to (direction, CC_ID, RU_Port_ID). The per-packet
 * sequence id is re-read from xran_get_upul_seqid(), so the seq_id argument
 * is effectively overridden. */
int send_symbol_mult_section_ex(void *handle,
        enum xran_pkt_dir direction,
        uint16_t section_id,
        struct rte_mbuf *mb, uint8_t *data,
        uint8_t compMeth, uint8_t iqWidth,
        const enum xran_input_byte_order iq_buf_byte_order,
        uint8_t frame_id, uint8_t subframe_id,
        uint8_t slot_id, uint8_t symbol_no,
        int prb_start, int prb_num,
        uint8_t CC_ID, uint8_t RU_Port_ID, uint8_t seq_id)
    uint32_t do_copy = 0;
    int hdr_len, parm_size;
    struct xran_device_ctx *p_dev_ctx = (struct xran_device_ctx *)handle;
    struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
    enum xran_comp_hdr_type staticEn= XRAN_COMP_HDR_TYPE_DYNAMIC;
    if (p_dev_ctx != NULL)
        staticEn = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
    /* fixed header size; compression header only in dynamic mode */
    hdr_len = sizeof(struct xran_ecpri_hdr)
        + sizeof(struct radio_app_common_hdr)
        + sizeof(struct data_section_hdr);
    if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
        hdr_len += sizeof(struct data_section_compression_hdr);
    /* per-PRB compression parameter bytes depend on the method */
    case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
    case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
    /* ceil(prb_num / 3): PRBs per section slice */
    int prb_num_pre_sec = (prb_num+2)/3;
    int data_offset = 0;
    rte_iova_t ext_buff_iova = 0;
    struct rte_mbuf *send_mb;
    char *p_sec_iq = NULL;
    char *ext_buff = NULL;
    uint16_t ext_buff_len = 0;
    struct rte_mbuf_ext_shared_info * p_share_data = NULL;
    struct rte_mbuf *eth_oran_hdr = NULL;
    struct rte_mbuf *tmp = NULL;
    for (loop = 0; loop < 3;loop++)
        seq_id = xran_get_upul_seqid(handle, CC_ID, RU_Port_ID);
        /* last slice absorbs the PRB remainder */
        prb_num_sec = ((loop+1)*prb_num_pre_sec > prb_num) ? (prb_num - loop*prb_num_pre_sec) : prb_num_pre_sec;
        n_bytes = (3 * iqWidth + parm_size) * prb_num_sec;
        char * pChar = NULL;
        send_mb = xran_ethdi_mbuf_alloc(); /* will be freed by ETH */
        if(send_mb == NULL) {
            errx(1, "out of mbufs after %d packets", 1);
        pChar = rte_pktmbuf_append(send_mb, hdr_len + n_bytes);
            errx(1, "incorrect mbuf size %d packets", 1);
        pChar = rte_pktmbuf_prepend(send_mb, sizeof(struct rte_ether_hdr));
            errx(1, "incorrect mbuf size %d packets", 1);
        do_copy = 1; /* new mbuf hence copy of IQs */
        pChar = rte_pktmbuf_mtod(send_mb, char*);
        char *pdata_start = (pChar + sizeof(struct rte_ether_hdr) + hdr_len);
        memcpy(pdata_start,data + data_offset,n_bytes);
        sent = prepare_symbol_ex(direction,
            prb_start+prb_offset,
            0); /*Send a single section */
        prb_offset += prb_num_sec;
        data_offset += n_bytes;
        pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(send_mb);
        p_dev_ctx->send_upmbuf2ring(send_mb, ETHER_TYPE_ECPRI, xran_map_ecpriPcid_to_vf(p_dev_ctx, direction, CC_ID, RU_Port_ID));
    printf("Symbol %2d sent (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/* Send a single 5G symbol over multiple packets */
/* Transmit one symbol's IQ data as a single U-plane section. A fresh mbuf is
 * allocated (the original is kept alive via a refcount bump on the failure
 * path visible below), the IQ data is copied in behind the headers, headers
 * are built by prepare_symbol_ex(), and the packet is enqueued on the UP TX
 * ring for the VF mapped to (direction, CC_ID, RU_Port_ID). */
int send_symbol_ex(void *handle,
        enum xran_pkt_dir direction,
        uint16_t section_id,
        struct rte_mbuf *mb, uint8_t *data,
        uint8_t compMeth, uint8_t iqWidth,
        const enum xran_input_byte_order iq_buf_byte_order,
        uint8_t frame_id, uint8_t subframe_id,
        uint8_t slot_id, uint8_t symbol_no,
        int prb_start, int prb_num,
        uint8_t CC_ID, uint8_t RU_Port_ID, uint8_t seq_id)
    uint32_t do_copy = 0;
    int hdr_len, parm_size;
    struct xran_device_ctx *p_dev_ctx = (struct xran_device_ctx *)handle;
    struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
    enum xran_comp_hdr_type staticEn= XRAN_COMP_HDR_TYPE_DYNAMIC;
    if (p_dev_ctx != NULL)
        staticEn = p_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
    /* fixed header size; compression header only in dynamic mode */
    hdr_len = sizeof(struct xran_ecpri_hdr)
        + sizeof(struct radio_app_common_hdr)
        + sizeof(struct data_section_hdr);
    if ((compMeth != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
        hdr_len += sizeof(struct data_section_compression_hdr);
    /* per-PRB compression parameter bytes depend on the method */
    case XRAN_COMPMETHOD_BLKFLOAT: parm_size = 1; break;
    case XRAN_COMPMETHOD_MODULATION: parm_size = 0; break;
    n_bytes = (3 * iqWidth + parm_size) * prb_num;
    char * pChar = NULL;
    mb = xran_ethdi_mbuf_alloc(); /* will be freed by ETH */
        errx(1, "out of mbufs after %d packets", 1);
    pChar = rte_pktmbuf_append(mb, hdr_len + n_bytes);
        errx(1, "incorrect mbuf size %d packets", 1);
    pChar = rte_pktmbuf_prepend(mb, sizeof(struct rte_ether_hdr));
        errx(1, "incorrect mbuf size %d packets", 1);
    do_copy = 1; /* new mbuf hence copy of IQs */
    /**copy prach data start**/
    pChar = rte_pktmbuf_mtod(mb, char*);
    char *pdata_start = (pChar + sizeof(struct rte_ether_hdr) + hdr_len);
    memcpy(pdata_start,data,n_bytes);
    /**copy prach data end**/
    rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
    sent = prepare_symbol_ex(direction,
        0); /*Send a single section */
    pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mb);
    p_dev_ctx->send_upmbuf2ring(mb, ETHER_TYPE_ECPRI, xran_map_ecpriPcid_to_vf(p_dev_ctx, direction, CC_ID, RU_Port_ID));
    printf("Symbol %2d sent (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/* Transmit a prepared C-plane message.
 * Prepends the Ethernet header to the already-built C-plane mbuf, updates
 * the TX byte counter, enqueues the packet on the C-plane TX ring for the
 * VF mapped to (dir, cc_id, ru_port_id), and records every section of the
 * message in the section database (keyed by slot index modulo
 * XRAN_MAX_SECTIONDB_CTX) for later U-plane lookup. */
int send_cpmsg(void *pHandle, struct rte_mbuf *mbuf,struct xran_cp_gen_params *params,
    struct xran_section_gen_info *sect_geninfo, uint8_t cc_id, uint8_t ru_port_id, uint8_t seq_id)
    int ret = 0, nsection, i;
    uint8_t subframe_id = params->hdr.subframeId;
    uint8_t slot_id = params->hdr.slotId;
    uint8_t dir = params->dir;
    struct xran_device_ctx *p_dev_ctx =(struct xran_device_ctx *) pHandle;
    struct xran_common_counters *pCnt = &p_dev_ctx->fh_counters;
    nsection = params->numSections;
    /* add in the ethernet header */
    struct rte_ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
    pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);
    p_dev_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI, xran_map_ecpriRtcid_to_vf(p_dev_ctx, dir, cc_id, ru_port_id));
    /* remember each section for U-plane section-id lookup later */
    for(i=0; i<nsection; i++)
        xran_cp_add_section_info(pHandle, dir, cc_id, ru_port_id,
            (slot_id + subframe_id*SLOTNUM_PER_SUBFRAME(p_dev_ctx->interval_us_local))%XRAN_MAX_SECTIONDB_CTX,
            sect_geninfo[i].info);
/**
 * @brief Build a Section Type 1 (DL/UL radio channel) C-Plane packet.
 *
 * Fills @p params with the common header fields, populates a single section
 * descriptor in @p sect_geninfo (RB indicator, PRB range, symbol count,
 * reMask 0xfff = all REs, beam id), records the per-symbol IQ buffer
 * offset/length for all XRAN_NUM_OF_SYMBOL_PER_SLOT symbols, then serializes
 * the packet into @p mbuf via xran_prepare_ctrl_pkt(). On serialization
 * failure the mbuf is freed here.
 *
 * @return 0 on success; error path frees @p mbuf (full return path not
 *         visible in this view)
 *
 * NOTE(review): lines are elided in this view — in particular the
 * initialization of `nsection` before its first use at the sect_geninfo
 * assignments is not visible; confirm it is set (presumably to 0) upstream.
 */
1306 int generate_cpmsg_dlul(void *pHandle, struct xran_cp_gen_params *params, struct xran_section_gen_info *sect_geninfo, struct rte_mbuf *mbuf,
1307 enum xran_pkt_dir dir, uint8_t frame_id, uint8_t subframe_id, uint8_t slot_id,
1308 uint8_t startsym, uint8_t numsym, uint16_t prb_start, uint16_t prb_num,int16_t iq_buffer_offset, int16_t iq_buffer_len,
1309 uint16_t beam_id, uint8_t cc_id, uint8_t ru_port_id, uint8_t comp_method, uint8_t iqWidth, uint8_t seq_id, uint8_t symInc)
1311 int ret = 0, nsection, loc_sym;
/* Common radio-application header for Section Type 1. */
1315 params->sectionType = XRAN_CP_SECTIONTYPE_1; // Most DL/UL Radio Channels
1316 params->hdr.filterIdx = XRAN_FILTERINDEX_STANDARD;
1317 params->hdr.frameId = frame_id;
1318 params->hdr.subframeId = subframe_id;
1319 params->hdr.slotId = slot_id;
1320 params->hdr.startSymId = startsym; // start Symbol ID
1321 params->hdr.iqWidth = iqWidth;
1322 params->hdr.compMeth = comp_method;
/* Section descriptor: mirrored into the section database ("for database"). */
1325 sect_geninfo[nsection].info->type = params->sectionType; // for database
1326 sect_geninfo[nsection].info->startSymId = params->hdr.startSymId; // for database
1327 sect_geninfo[nsection].info->iqWidth = params->hdr.iqWidth; // for database
1328 sect_geninfo[nsection].info->compMeth = params->hdr.compMeth; // for database
1329 sect_geninfo[nsection].info->id = xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, subframe_id, slot_id);
1330 sect_geninfo[nsection].info->rb = XRAN_RBIND_EVERY;
1331 sect_geninfo[nsection].info->symInc = symInc;
1332 sect_geninfo[nsection].info->startPrbc = prb_start;
1333 sect_geninfo[nsection].info->numPrbc = prb_num;
1334 sect_geninfo[nsection].info->numSymbol = numsym;
1335 sect_geninfo[nsection].info->reMask = 0xfff;
1336 sect_geninfo[nsection].info->beamId = beam_id;
/* Same IQ buffer offset/length recorded for every symbol of the slot. */
1338 for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++) {
1339 sect_geninfo[0].info->sec_desc[loc_sym].iq_buffer_offset = iq_buffer_offset;
1340 sect_geninfo[0].info->sec_desc[loc_sym].iq_buffer_len = iq_buffer_len;
/* No section extensions for plain DL/UL sections. */
1343 sect_geninfo[nsection].info->ef = 0;
1344 sect_geninfo[nsection].exDataSize = 0;
1345 // sect_geninfo[nsection].exData = NULL;
1348 params->numSections = nsection;
1349 params->sections = sect_geninfo;
1351 if(unlikely(mbuf == NULL)) {
1352 print_err("Alloc fail!\n");
/* Serialize the C-Plane packet; seq_id is passed through, last arg 0. */
1356 ret = xran_prepare_ctrl_pkt(mbuf, params, cc_id, ru_port_id, seq_id,0);
1358 print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",
1359 frame_id, subframe_id, slot_id, dir);
/* Builder failed: release the mbuf here so the caller does not double-free. */
1360 rte_pktmbuf_free(mbuf);
/**
 * @brief Build a Section Type 3 (PRACH / mixed-numerology) C-Plane packet.
 *
 * Selects the PRACH C-Plane configuration (with DSS enabled, the technology
 * for this TTI — value 1 selects the NR config, otherwise the LTE config —
 * is taken from technology[tti % dssPeriod]); computes the timeOffset field
 * as the CP length per 38.211 tab 6.3.3.1-1/2 plus the symbol offset of this
 * PRACH occasion; fills the Type 3 header (filterIdx, fftSize, scs mapped
 * from the filter index per O-RAN eCPRI scs encoding); populates one section
 * descriptor; and serializes via xran_prepare_ctrl_pkt(), freeing @p mbuf on
 * failure.
 *
 * @return 0 on success (full return path not visible in this view)
 *
 * NOTE(review): this view elides lines — the `break` statements between the
 * switch cases and several else-branches are presumably in the elided lines;
 * confirm against the full file. Also note the stray double semicolon on the
 * pPrachCPConfig declaration (harmless empty statement).
 */
1366 int generate_cpmsg_prach(void *pHandle, struct xran_cp_gen_params *params, struct xran_section_gen_info *sect_geninfo, struct rte_mbuf *mbuf, struct xran_device_ctx *pxran_lib_ctx,
1367 uint8_t frame_id, uint8_t subframe_id, uint8_t slot_id, int tti,
1368 uint16_t beam_id, uint8_t cc_id, uint8_t prach_port_id, uint16_t occasionid, uint8_t seq_id)
1371 struct xran_prach_cp_config *pPrachCPConfig = NULL;;
/* DSS: pick NR or LTE PRACH config based on the technology scheduled this TTI. */
1373 if(pxran_lib_ctx->dssEnable){
1374 i = tti % pxran_lib_ctx->dssPeriod;
1375 if(pxran_lib_ctx->technology[i]==1) {
1376 pPrachCPConfig = &(pxran_lib_ctx->PrachCPConfig);
1380 pPrachCPConfig = &(pxran_lib_ctx->PrachCPConfigLTE);
1384 pPrachCPConfig = &(pxran_lib_ctx->PrachCPConfig);
1386 uint16_t timeOffset;
1387 uint16_t nNumerology = pxran_lib_ctx->fh_cfg.frame_conf.nNumerology;
1390 if(unlikely(mbuf == NULL)) {
1391 print_err("Alloc fail!\n");
1395 printf("%d:%d:%d:%d - filter=%d, startSym=%d[%d:%d], numSym=%d, occasions=%d, freqOff=%d\n",
1396 frame_id, subframe_id, slot_id, prach_port_id,
1397 pPrachCPConfig->filterIdx,
1398 pPrachCPConfig->startSymId,
1399 pPrachCPConfig->startPrbc,
1400 pPrachCPConfig->numPrbc,
1401 pPrachCPConfig->numSymbol,
1402 pPrachCPConfig->occassionsInPrachSlot,
1403 pPrachCPConfig->freqOffset);
1405 timeOffset = pPrachCPConfig->timeOffset; //this is the CP value per 38.211 tab 6.3.3.1-1&2
/* First symbol of this PRACH occasion within the slot. */
1406 startSymId = pPrachCPConfig->startSymId + occasionid * pPrachCPConfig->numSymbol;
/* 2048 + 144: nominal symbol duration + CP in Tc units — TODO confirm units. */
1409 timeOffset += startSymId * (2048 + 144);
1412 if(XRAN_FILTERINDEX_PRACH_ABC == pPrachCPConfig->filterIdx)
1414 timeOffset = timeOffset >> nNumerology; //original number is Tc, convert to Ts based on mu
/* Slot 0 and the mid-subframe slot carry the long CP adjustment (elided here). */
1415 if ((slot_id == 0) || (slot_id == (SLOTNUM_PER_SUBFRAME(pxran_lib_ctx->interval_us_local) >> 1)))
1420 //when prach scs lower than 15khz, timeOffset base 15khz not need to adjust.
/* Section Type 3 header (always UL for PRACH). */
1423 params->dir = XRAN_DIR_UL;
1424 params->sectionType = XRAN_CP_SECTIONTYPE_3;
1425 params->hdr.filterIdx = pPrachCPConfig->filterIdx;
1426 params->hdr.frameId = frame_id;
1427 params->hdr.subframeId = subframe_id;
1428 params->hdr.slotId = slot_id;
1429 params->hdr.startSymId = startSymId;
1430 params->hdr.iqWidth = xran_get_conf_iqwidth_prach(pHandle);
1431 params->hdr.compMeth = xran_get_conf_compmethod_prach(pHandle);
1432 /* use timeOffset field for the CP length value for prach sequence */
1433 params->hdr.timeOffset = timeOffset;
1434 params->hdr.fftSize = xran_get_conf_fftsize(pHandle);
1435 /*convert to o-ran ecpri specs scs index*/
1436 switch(pPrachCPConfig->filterIdx)
1438 case XRAN_FILTERINDEX_PRACH_012:
1439 params->hdr.scs = 12;
1441 case XRAN_FILTERINDEX_NPRACH:
1442 params->hdr.scs = 13;
1444 case XRAN_FILTERINDEX_PRACH_3:
1445 params->hdr.scs = 14;
1447 case XRAN_FILTERINDEX_LTE4:
1448 params->hdr.scs = 15;
1450 case XRAN_FILTERINDEX_PRACH_ABC:
1451 params->hdr.scs = xran_get_conf_prach_scs(pHandle);
/* default: unknown filter index — report and fall back to scs = 0. */
1454 print_err("prach filterIdx error - [%d:%d:%d]--%d\n", frame_id, subframe_id, slot_id,pPrachCPConfig->filterIdx);
1455 params->hdr.scs = 0;
1458 params->hdr.cpLength = 0;
/* Single section describing the PRACH PRB range for this occasion. */
1461 sect_geninfo[nsection].info->type = params->sectionType; // for database
1462 sect_geninfo[nsection].info->startSymId = params->hdr.startSymId; // for database
1463 sect_geninfo[nsection].info->iqWidth = params->hdr.iqWidth; // for database
1464 sect_geninfo[nsection].info->compMeth = params->hdr.compMeth; // for database
1465 sect_geninfo[nsection].info->id = xran_alloc_sectionid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, subframe_id, slot_id);
1466 sect_geninfo[nsection].info->rb = XRAN_RBIND_EVERY;
1467 sect_geninfo[nsection].info->symInc = XRAN_SYMBOLNUMBER_NOTINC;
1468 sect_geninfo[nsection].info->startPrbc = pPrachCPConfig->startPrbc;
1469 sect_geninfo[nsection].info->numPrbc = pPrachCPConfig->numPrbc,
1470 sect_geninfo[nsection].info->numSymbol = pPrachCPConfig->numSymbol;
1471 sect_geninfo[nsection].info->reMask = 0xfff;
1472 sect_geninfo[nsection].info->beamId = beam_id;
1473 sect_geninfo[nsection].info->freqOffset = pPrachCPConfig->freqOffset;
1474 sect_geninfo[nsection].info->prbElemBegin = 1;
1475 sect_geninfo[nsection].info->prbElemEnd = 1;
/* Remember the last PRACH symbol of the slot for this CC (used elsewhere). */
1478 pxran_lib_ctx->prach_last_symbol[cc_id] = pPrachCPConfig->startSymId + pPrachCPConfig->numSymbol*pPrachCPConfig->occassionsInPrachSlot - 1;
1480 sect_geninfo[nsection].info->ef = 0;
1481 sect_geninfo[nsection].exDataSize = 0;
1482 // sect_geninfo[nsection].exData = NULL;
1485 params->numSections = nsection;
1486 params->sections = sect_geninfo;
1488 ret = xran_prepare_ctrl_pkt(mbuf, params, cc_id, prach_port_id, seq_id,0);
1490 print_err("Fail to build prach control packet - [%d:%d:%d]\n", frame_id, subframe_id, slot_id);
/* Builder failed: release the mbuf here so the caller does not double-free. */
1491 rte_pktmbuf_free(mbuf);
/**
 * @brief Drain one RX ring and dispatch the dequeued packets.
 *
 * Dequeues up to MBUFS_CNT mbufs in one burst from @p r and passes them to
 * xran_ethdi_filter_packet() tagged with the originating ring/queue ids.
 *
 * @param r        DPDK ring to drain
 * @param ring_id  logical ring (VF) index, forwarded to the filter
 * @param q_id     RX queue index, forwarded to the filter
 * @return not visible in this view — presumably 0 / remaining-based; confirm
 *
 * NOTE(review): the early-exit when `dequeued` is 0 is in elided lines.
 */
1497 int process_ring(struct rte_ring *r, uint16_t ring_id, uint16_t q_id)
1501 struct rte_mbuf *mbufs[MBUFS_CNT];
1504 const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
1505 RTE_DIM(mbufs), &remaining);
1512 xran_ethdi_filter_packet(mbufs, ring_id, q_id, dequeued);
1513 //MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());
1517 /** FH RX AND BBDEV */
/**
 * @brief Main polling body for FH RX + BBDEV processing.
 *
 * Per iteration: polls the optional BBDEV UL-decoder and DL-encoder
 * callbacks (each timed with MLogXRANTask; the callback's return value is
 * added to the task id — presumably to distinguish poll outcomes, confirm),
 * then drains every RX queue of every configured VF via process_ring().
 * Exits when xran_if_current_state becomes XRAN_STOPPED.
 *
 * @param args  unused in the visible lines
 * @return not visible in this view
 */
1518 int32_t ring_processing_func(void* args)
1520 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
1521 int16_t retPoll = 0;
/* Poll BBDEV UL decoder if registered, with MLog timing around the call. */
1528 if (ctx->bbdev_dec) {
1529 t1 = MLogXRANTick();
1530 retPoll = ctx->bbdev_dec();
1533 t2 = MLogXRANTick();
1534 MLogXRANTask(PID_XRAN_BBDEV_UL_POLL + retPoll, t1, t2);
/* Poll BBDEV DL encoder if registered, with MLog timing around the call. */
1538 if (ctx->bbdev_enc) {
1539 t1 = MLogXRANTick();
1540 retPoll = ctx->bbdev_enc();
1543 t2 = MLogXRANTick();
1544 MLogXRANTask(PID_XRAN_BBDEV_DL_POLL + retPoll, t1, t2);
/* Drain every RX queue of every VF (bounded by XRAN_VF_MAX). */
1548 for (i = 0; i < ctx->io_cfg.num_vfs && i < XRAN_VF_MAX; i++){
1549 for(qi = 0; qi < ctx->rxq_per_port[i]; qi++) {
1550 if (process_ring(ctx->rx_ring[i][qi], i, qi))
/* Terminate the polling loop once the interface is stopped. */
1555 if (XRAN_STOPPED == xran_if_current_state)
1561 /** Generic thread to perform task on specific core */
/**
 * @brief Generic worker-thread entry: elevate to real-time and run a task loop.
 *
 * Raises the calling thread to SCHED_FIFO at XRAN_THREAD_DEFAULT_PRIO
 * (failure is logged, not fatal), names the thread after the worker context,
 * then repeatedly invokes pThCtx->task_func(task_arg) until the interface is
 * stopped, optionally nanosleep()ing ~1us per iteration when io_sleep is set.
 *
 * @param args  struct xran_worker_th_ctx* describing the task to run
 * @return not visible in this view
 */
1563 xran_generic_worker_thread(void *args)
1566 struct xran_worker_th_ctx* pThCtx = (struct xran_worker_th_ctx*)args;
1567 struct sched_param sched_param;
1568 struct xran_io_cfg * const p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
1570 memset(&sched_param, 0, sizeof(struct sched_param));
1572 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
/* Best-effort switch to real-time FIFO scheduling; continue on failure. */
1573 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1574 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))){
1575 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
1577 pThCtx->worker_policy = SCHED_FIFO;
1578 if ((res = pthread_setname_np(pthread_self(), pThCtx->worker_name))) {
1579 printf("[core %d] pthread_setname_np = %d\n",rte_lcore_id(), res);
/* Worker loop: run the registered task; exit handling is in elided lines. */
1583 if(pThCtx->task_func) {
1584 if(pThCtx->task_func(pThCtx->task_arg) != 0)
1588 if (XRAN_STOPPED == xran_if_current_state)
/* Optional ~1us sleep per iteration (see file-scope `sleeptime`). */
1591 if(p_io_cfg->io_sleep)
1592 nanosleep(&sleeptime,NULL);
1595 printf("%s worker thread finished on core %d [worker id %d]\n",pThCtx->worker_name, rte_lcore_id(), pThCtx->worker_id);
/**
 * @brief Dedicated thread entry for RX ring processing.
 *
 * Raises the thread to SCHED_FIFO at XRAN_THREAD_DEFAULT_PRIO (failure is
 * logged, not fatal), then loops ring_processing_func() — with an optional
 * ~1us nanosleep per iteration noted as a "work around for some kernel" —
 * until the function reports a non-zero result (loop exit is in elided lines).
 *
 * @param args  forwarded to ring_processing_func()
 * @return not visible in this view
 */
1599 int ring_processing_thread(void *args)
1601 struct sched_param sched_param;
1602 struct xran_io_cfg * const p_io_cfg = &(xran_ethdi_get_ctx()->io_cfg);
1605 memset(&sched_param, 0, sizeof(struct sched_param));
1607 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
/* Best-effort switch to real-time FIFO scheduling; continue on failure. */
1608 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
1609 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))){
1610 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
/* Processing loop body; termination condition is in elided lines. */
1614 if(ring_processing_func(args) != 0)
1617 /* work around for some kernel */
1618 if(p_io_cfg->io_sleep)
1619 nanosleep(&sleeptime,NULL);
1622 puts("Pkt processing thread finished.");