1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
/**
 * @brief xRAN C-plane processing functionality and helper functions
 * @file xran_cp_proc.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/
32 #include <sys/queue.h>
38 #include <immintrin.h>
40 #include <rte_common.h>
42 #include <rte_errno.h>
43 #include <rte_lcore.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
50 #include "xran_fh_o_du.h"
54 #include "xran_up_api.h"
55 #include "xran_cp_api.h"
56 #include "xran_sync_api.h"
57 #include "xran_lib_mlog_tasks_id.h"
58 #include "xran_timer.h"
59 #include "xran_common.h"
61 #include "xran_frame_struct.h"
62 #include "xran_printf.h"
63 #include "xran_app_frag.h"
64 #include "xran_cp_proc.h"
65 #include "xran_tx_proc.h"
67 #include "xran_mlog_lnx.h"
/* Per-port sequence-number and section-id bookkeeping tables.
 * Indexed by [port][cell][dir][antenna] (or permutations thereof below);
 * antenna dimension is doubled (and extended by array-element count) to
 * cover PUSCH + PRACH (+ SRS) streams on the same cell. */
69 uint8_t xran_cp_seq_id_num[XRAN_PORTS_NUM][XRAN_MAX_CELLS_PER_PORT][XRAN_DIR_MAX][XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR]; /* XRAN_MAX_ANTENNA_NR * 2 for PUSCH and PRACH */
70 uint8_t xran_updl_seq_id_num[XRAN_PORTS_NUM][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR];
71 uint8_t xran_upul_seq_id_num[XRAN_PORTS_NUM][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR]; /**< PUSCH, PRACH, SRS for Cat B */
/* Last slot (tti) for which a section id was issued per stream; 255 marks
 * "no slot seen yet" so the first allocation always resets the counter. */
72 uint8_t xran_section_id_curslot[XRAN_PORTS_NUM][XRAN_DIR_MAX][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2+ XRAN_MAX_ANT_ARRAY_ELM_NR];
73 uint16_t xran_section_id[XRAN_PORTS_NUM][XRAN_DIR_MAX][XRAN_MAX_CELLS_PER_PORT][XRAN_MAX_ANTENNA_NR * 2+ XRAN_MAX_ANT_ARRAY_ELM_NR];
/* Scratch state for parsing received C-plane packets (RU emulation path). */
75 struct xran_recv_packet_info parse_recv[XRAN_PORTS_NUM];
77 //////////////////////////////////////////
/* NOTE(review): initializer assumes XRAN_PORTS_NUM == 4 — if the constant
 * changes, trailing slots are still zero-initialized, but the literal list
 * should be kept in sync (or replaced with = {NULL}). */
79 struct xran_section_recv_info *recvSections[XRAN_PORTS_NUM] = {NULL,NULL,NULL,NULL};
80 struct xran_cp_recv_params recvCpInfo[XRAN_PORTS_NUM];
/* Free callback registered with rte_pktmbuf_attach_extbuf(): intentionally a
 * no-op because the attached external buffer (BFW / C-plane section memory)
 * is owned and released elsewhere, not by the mbuf subsystem.
 * (MLog instrumentation kept below, commented out, for debug use.) */
83 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
85 /*long t1 = MLogTick();
86 MLogTask(77777, t1, t1+100);*/
/* Reset per-stream section-id state for one xRAN device (port).
 * Section ids restart at 0 and the "current slot" marker is set to 255
 * (an impossible slot value) so the first allocation in any slot resets
 * the counter. Invalid pHandle is reported via print_err (error path
 * partially elided in this view). */
90 xran_init_sectionid(void *pHandle)
93 struct xran_device_ctx* p_dev = NULL;
94 uint8_t xran_port_id = 0;
97 p_dev = (struct xran_device_ctx* )pHandle;
98 xran_port_id = p_dev->xran_port_id;
100 print_err("Invalid pHandle - %p", pHandle);
104 for (dir = 0; dir < XRAN_DIR_MAX; dir++){
105 for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) {
106 for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++) {
107 xran_section_id[xran_port_id][dir][cell][ant] = 0;
/* 255 = "no slot seen yet" sentinel (see xran_section_id_curslot). */
108 xran_section_id_curslot[xran_port_id][dir][cell][ant] = 255;
/* Zero all C-plane and U-plane sequence-number counters for one port.
 * C-plane counters span 2x antennas (PUSCH + PRACH); UL U-plane counters
 * additionally cover SRS array elements (Cat B).
 * NOTE(review): the C-plane loop bound here is XRAN_MAX_ANTENNA_NR * 2
 * while the table's last dimension is * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR —
 * the SRS tail of xran_cp_seq_id_num is left untouched; confirm intended. */
117 xran_init_seqid(void *pHandle)
120 int8_t xran_port = 0;
121 if((xran_port = xran_dev_ctx_get_port_id(pHandle)) < 0 ){
122 print_err("Invalid pHandle - %p", pHandle);
127 for(cell=0; cell < XRAN_MAX_CELLS_PER_PORT; cell++) {
128 for(dir=0; dir < XRAN_DIR_MAX; dir++) {
129 for(ant=0; ant < XRAN_MAX_ANTENNA_NR * 2; ant++)
130 xran_cp_seq_id_num[xran_port][cell][dir][ant] = 0;
132 for(ant=0; ant < XRAN_MAX_ANTENNA_NR; ant++)
133 xran_updl_seq_id_num[xran_port][cell][ant] = 0;
134 for(ant=0; ant < XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR; ant++)
135 xran_upul_seq_id_num[xran_port][cell][ant] = 0;
/* Entry point for a received C-plane packet on this device.
 * Parsing is only performed when debugStop is set on port 0's config
 * (i.e. when running standard conformance tests); otherwise the packet
 * is not inspected here. Results land in the per-port recvCpInfo /
 * parse_recv scratch structures. */
142 process_cplane(struct rte_mbuf *pkt, void* handle)
144 struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)handle;
147 if(xran_dev_get_ctx_by_id(0)->fh_cfg.debugStop) /* check CP with standard tests only */
148 xran_parse_cp_pkt(pkt, &recvCpInfo[p_xran_dev_ctx->xran_port_id], &parse_recv[p_xran_dev_ctx->xran_port_id]);
/* Clamp a requested symbol range [start_sym, start_sym+numsym_in) to the
 * run of consecutive symbols of the given type (DL/UL/guard) within the
 * slot, for special-subframe handling.
 * Returns (via return value, elided here) the adjusted start symbol and
 * writes the adjusted symbol count to *numsym_out.
 * NOTE: cannot handle two or more disjoint runs of the same symbol type
 * in one slot — only the first run is considered. */
155 xran_check_symbolrange(int symbol_type, uint32_t PortId, int cc_id, int tti,
156 int start_sym, int numsym_in, int *numsym_out)
159 int first_pos, last_pos;
160 int start_pos, end_pos;
162 first_pos = last_pos = -1;
164 /* Find first symbol which is same with given symbol type */
165 for(i=0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
166 if(xran_fs_get_symbol_type(PortId, cc_id, tti, i) == symbol_type) {
167 first_pos = i; break;
171 // for(i=0; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
172 // printf("symbol_type %d - %d:%d\n", symbol_type, i, xran_fs_get_symbol_type(cc_id, tti, i));
177 /* Find the rest of consecutive symbols which are same with given symbol type */
178 for( ; i < XRAN_NUM_OF_SYMBOL_PER_SLOT; i++)
179 if(xran_fs_get_symbol_type(PortId, cc_id, tti, i) != symbol_type)
/* Intersect the requested range with [first_pos, last_pos]. */
183 start_pos = (first_pos > start_sym) ? first_pos : start_sym;
184 end_pos = ((start_sym + numsym_in) > last_pos) ? last_pos : (start_sym + numsym_in);
185 *numsym_out = end_pos - start_pos;
/* Build a zero-copy C-plane mbuf whose data area is an externally owned
 * buffer holding beamforming weights.
 *
 * p_ext_buff points at the BFW payload; the function backs the pointer up
 * by enough room for headroom + eCPRI header + section-type-1 headers so
 * that the full packet can be written in place, then attaches that region
 * to a freshly allocated indirect mbuf via rte_pktmbuf_attach_extbuf().
 * The no-op extbuf_free_callback is installed because the buffer's
 * lifetime is managed by the caller, not by mbuf refcounting.
 * Returns the prepared mbuf; panics on allocation/IOVA failure.
 * NOTE(review): the "+ 18" slack added to ext_buff_len is a magic number —
 * presumably Ethernet header + margin; confirm and name the constant. */
191 xran_attach_cp_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
192 struct rte_mbuf_ext_shared_info * p_share_data)
194 struct rte_mbuf *mb_oran_hdr_ext = NULL;
195 struct rte_mbuf *tmp = NULL;
196 int8_t *ext_buff = NULL;
197 rte_iova_t ext_buff_iova = 0;
199 ext_buff = p_ext_buff - (RTE_PKTMBUF_HEADROOM +
200 sizeof(struct xran_ecpri_hdr) +
201 sizeof(struct xran_cp_radioapp_section1_header) +
202 sizeof(struct xran_cp_radioapp_section1));
204 ext_buff_len += (RTE_PKTMBUF_HEADROOM +
205 sizeof(struct xran_ecpri_hdr) +
206 sizeof(struct xran_cp_radioapp_section1_header) +
207 sizeof(struct xran_cp_radioapp_section1)) + 18;
209 // mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_small);
210 mb_oran_hdr_ext = xran_ethdi_mbuf_indir_alloc();
212 if (unlikely (( mb_oran_hdr_ext) == NULL)) {
213 rte_panic("Failed rte_pktmbuf_alloc\n");
/* Shared info: no-op free cb — external buffer is owned by the caller. */
216 p_share_data->free_cb = extbuf_free_callback;
217 p_share_data->fcb_opaque = NULL;
218 rte_mbuf_ext_refcnt_set(p_share_data, 1);
/* IOVA must be taken from the start of the rte_malloc'd region, then
 * offset to where the packet actually begins. */
220 ext_buff_iova = rte_malloc_virt2iova(p_ext_buff_start);
221 if (unlikely (( ext_buff_iova) == 0)) {
222 rte_panic("Failed rte_mem_virt2iova \n");
225 if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
226 rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
229 rte_pktmbuf_attach_extbuf(mb_oran_hdr_ext,
231 ext_buff_iova + RTE_PTR_DIFF(ext_buff , p_ext_buff_start),
235 rte_pktmbuf_reset_headroom(mb_oran_hdr_ext);
237 return mb_oran_hdr_ext;
/* Build and transmit one C-plane (section type 1) message per PRB-map
 * element for the given direction/tti/cc/antenna stream.
 *
 * For each element: validate/clamp the symbol range on special subframes,
 * fill the common header and section fields, optionally append section
 * extensions (ext4 for modulation compression; ext1 or ext11 for BFWs on
 * Category B), prepend the Ethernet header, enqueue the packet on the
 * C-plane ring for vf_id, and register the section info for later U-plane
 * matching. Returns a status code (elided in this view).
 * Note: parts of this function are elided in this chunk; comments below
 * annotate only the visible lines. */
241 xran_cp_create_and_send_section(void *pHandle, uint8_t ru_port_id, int dir, int tti, int cc_id,
242 struct xran_prb_map *prbMap, enum xran_category category, uint8_t ctx_id)
245 struct xran_device_ctx *p_x_ctx = (struct xran_device_ctx *)pHandle;
246 struct xran_common_counters *pCnt = &p_x_ctx->fh_counters;
247 struct xran_cp_gen_params params;
248 struct xran_section_gen_info sect_geninfo[1];
249 struct rte_mbuf *mbuf;
250 uint32_t interval = p_x_ctx->interval_us_local;
251 uint8_t PortId = p_x_ctx->xran_port_id;
254 uint32_t i, j, loc_sym;
255 uint32_t nsection = 0;
256 struct xran_prb_elm *pPrbMapElem = NULL;
257 struct xran_prb_elm *pPrbMapElemPrev = NULL;
/* Derive SFN/subframe/slot from tti and the configured numerology. */
258 uint32_t slot_id = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
259 uint32_t subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval), SUBFRAMES_PER_SYSTEMFRAME);
260 uint32_t frame_id = XranGetFrameNum(tti,xran_getSfnSecStart(),SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
266 struct xran_sectionext1_info ext1;
267 struct xran_sectionext4_info ext4 = {0};
268 struct xran_sectionext11_info ext11;
270 //frame_id = (frame_id & 0xff); /* ORAN frameId, 8 bits, [0, 255] */
/* tti==0 wraps to the next second; fold into the 8-bit ORAN frameId. */
271 frame_id = ((frame_id + ((0 == tti)?NUM_OF_FRAMES_PER_SECOND:0)) & 0xff); /* ORAN frameId, 8 bits, [0, 255] */
274 nsection = prbMap->nPrbElm;
275 pPrbMapElem = &prbMap->prbMap[0];
277 print_err("prbMap is NULL\n");
281 /* Generate a C-Plane message per each section,
282 * not a C-Plane message with multi sections */
283 for (i = 0; i < nsection; i++) {
284 int startSym, numSyms;
286 pPrbMapElem = &prbMap->prbMap[i];
288 /* For Special Subframe,
289 * Check validity of given symbol range with slot configuration
290 * and adjust symbol range accordingly. */
291 if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_FDD) != 1
292 && xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) == 1) {
293 /* This function cannot handle two or more groups of consecutive same type of symbol.
294 * If there are two or more, then it might cause an error */
295 startSym = xran_check_symbolrange(
296 ((dir==XRAN_DIR_DL)?XRAN_SYMBOL_TYPE_DL:XRAN_SYMBOL_TYPE_UL),
298 pPrbMapElem->nStartSymb,
299 pPrbMapElem->numSymb, &numSyms);
300 if(startSym < 0 || numSyms == 0) {
301 /* if start symbol is not valid, then skip this section */
302 print_err("Skip section %d due to invalid symbol range - [%d:%d], [%d:%d]",
304 pPrbMapElem->nStartSymb, pPrbMapElem->numSymb,
/* Non-special slot: use the element's symbol range as-is. */
309 startSym = pPrbMapElem->nStartSymb;
310 numSyms = pPrbMapElem->numSymb;
313 vf_id = xran_map_ecpriRtcid_to_vf(p_x_ctx, dir, cc_id, ru_port_id);
/* Common radio-application header for section type 1. */
315 params.sectionType = XRAN_CP_SECTIONTYPE_1;
316 params.hdr.filterIdx = XRAN_FILTERINDEX_STANDARD;
317 params.hdr.frameId = frame_id;
318 params.hdr.subframeId = subframe_id;
319 params.hdr.slotId = slot_id;
320 params.hdr.startSymId = startSym;
321 params.hdr.iqWidth = pPrbMapElem->iqWidth;
322 params.hdr.compMeth = pPrbMapElem->compMethod;
324 print_dbg("cp[%d:%d:%d] ru_port_id %d dir=%d\n",
325 frame_id, subframe_id, slot_id, ru_port_id, dir);
/* NOTE(review): sequence id is always drawn from the XRAN_DIR_DL pool
 * even when dir == XRAN_DIR_UL — confirm this is intentional (C-plane
 * for UL is still DU->RU, but the per-direction counter choice should
 * be verified against xran_get_cp_seqid's contract). */
327 seq_id = xran_get_cp_seqid(pHandle, XRAN_DIR_DL, cc_id, ru_port_id);
329 sect_geninfo[0].info.type = params.sectionType;
330 sect_geninfo[0].info.startSymId = params.hdr.startSymId;
331 sect_geninfo[0].info.iqWidth = params.hdr.iqWidth;
332 sect_geninfo[0].info.compMeth = params.hdr.compMeth;
/* Section id == PRB-map element index, so U-plane can match by index. */
334 sect_geninfo[0].info.id = i; /* do not revert 'i' to
335 xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, slot_id); */
/* NOTE(review): '>' bound check — if XRAN_MAX_SECTIONS_PER_SLOT is a
 * count (ids valid in [0, MAX-1]) this should be '>='; confirm. */
337 if(sect_geninfo[0].info.id > XRAN_MAX_SECTIONS_PER_SLOT)
338 print_err("sectinfo->id %d\n", sect_geninfo[0].info.id);
340 if (dir == XRAN_DIR_UL) {
/* UL: release any control mbufs still held by this element's section
 * descriptors from a previous use of the buffer context. */
341 for(loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++) {
342 int32_t sec_desc_idx = pPrbMapElem->nSecDesc[loc_sym];
343 struct xran_section_desc *p_sec_desc = pPrbMapElem->p_sec_desc[loc_sym][0];
345 p_sec_desc->section_id = sect_geninfo[0].info.id;
346 if(p_sec_desc->pCtrl) {
347 rte_pktmbuf_free(p_sec_desc->pCtrl);
348 p_sec_desc->pCtrl = NULL;
349 p_sec_desc->pData = NULL;
353 print_err("section desc is NULL\n");
356 pPrbMapElem->nSecDesc[loc_sym] = 0;
/* Section payload fields (PRB range, beam, symbol count). */
361 sect_geninfo[0].info.rb = XRAN_RBIND_EVERY;
362 sect_geninfo[0].info.startPrbc = pPrbMapElem->nRBStart;
363 sect_geninfo[0].info.numPrbc = pPrbMapElem->nRBSize;
364 sect_geninfo[0].info.numSymbol = numSyms;
365 sect_geninfo[0].info.reMask = 0xfff;
366 sect_geninfo[0].info.beamId = pPrbMapElem->nBeamIndex;
367 sect_geninfo[0].info.symInc = XRAN_SYMBOLNUMBER_NOTINC;
/* Propagate section id and IQ buffer geometry to every symbol's
 * section descriptor for U-plane transmission. */
369 for(loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++) {
370 struct xran_section_desc *p_sec_desc = pPrbMapElem->p_sec_desc[loc_sym][0];
372 p_sec_desc->section_id = sect_geninfo[0].info.id;
374 sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_offset = p_sec_desc->iq_buffer_offset;
375 sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_len = p_sec_desc->iq_buffer_len;
377 print_err("section desc is NULL\n");
381 if(unlikely((category != XRAN_CATEGORY_A) && (category != XRAN_CATEGORY_B))) {
382 print_err("Unsupported Category %d\n", category);
386 /* Add extentions if required */
388 sect_geninfo[0].exDataSize = 0;
390 /* Extension 4 for modulation compression */
391 if(pPrbMapElem->compMethod == XRAN_COMPMETHOD_MODULATION) {
392 mbuf = xran_ethdi_mbuf_alloc();
394 ext4.csf = 0; //no shift for now only
395 ext4.modCompScaler = pPrbMapElem->ScaleFactor;
396 sect_geninfo[0].exData[next].type = XRAN_CP_SECTIONEXTCMD_4;
397 sect_geninfo[0].exData[next].len = sizeof(ext4);
398 sect_geninfo[0].exData[next].data = &ext4;
400 sect_geninfo[0].info.ef = 1;
401 sect_geninfo[0].exDataSize++;
405 /* Extension 1 or 11 for Beam forming weights */
406 if((category == XRAN_CATEGORY_B) && (pPrbMapElem->bf_weight_update)) {
407 /* add extantion section for BF Weights if update is needed */
408 if(pPrbMapElem->bf_weight.numBundPrb == 0) {
409 /* No bundled PRBs, using Extension 1 */
410 struct rte_mbuf_ext_shared_info * p_share_data = &p_x_ctx->cp_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ru_port_id][sect_geninfo[0].info.id];
412 /*add extention section for BF Weights if update is needed */
413 if(pPrbMapElem->bf_weight.p_ext_start) {
414 /* use buffer with BF Weights for mbuf */
415 mbuf = xran_attach_cp_ext_buf(vf_id, pPrbMapElem->bf_weight.p_ext_start,
416 pPrbMapElem->bf_weight.p_ext_section,
417 pPrbMapElem->bf_weight.ext_section_sz, p_share_data);
419 print_err("p %d cc %d dir %d Alloc fail!\n", PortId, cc_id, dir);
423 memset(&ext1, 0, sizeof (struct xran_sectionext1_info));
424 ext1.bfwNumber = pPrbMapElem->bf_weight.nAntElmTRx;
425 ext1.bfwIqWidth = pPrbMapElem->iqWidth;
426 ext1.bfwCompMeth = pPrbMapElem->compMethod;
427 ext1.p_bfwIQ = (int16_t*)pPrbMapElem->bf_weight.p_ext_section;
428 ext1.bfwIQ_sz = pPrbMapElem->bf_weight.ext_section_sz;
430 sect_geninfo[0].exData[next].type = XRAN_CP_SECTIONEXTCMD_1;
431 sect_geninfo[0].exData[next].len = sizeof(ext1);
432 sect_geninfo[0].exData[next].data = &ext1;
434 sect_geninfo[0].info.ef = 1;
435 sect_geninfo[0].exDataSize++;
437 } else { /* if(pPrbMapElem->bf_weight.numBundPrb == 0) */
438 /* Using Extension 11 */
439 struct rte_mbuf_ext_shared_info *shared_info;
441 shared_info = &p_x_ctx->bfw_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ru_port_id][sect_geninfo[0].info.id];
444 shared_info->free_cb = NULL;
445 shared_info->fcb_opaque = NULL;
447 mbuf = xran_ethdi_mbuf_indir_alloc();
448 if(unlikely(mbuf == NULL)) {
449 rte_panic("Alloc fail!\n");
452 //mbuf = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
453 if(xran_cp_attach_ext_buf(mbuf, (uint8_t *)pPrbMapElem->bf_weight.p_ext_start, pPrbMapElem->bf_weight.maxExtBufSize, shared_info) < 0) {
454 rte_pktmbuf_free(mbuf);
/* Refcount update by 0: initializes/keeps the external buffer refcnt
 * without taking an extra reference — the buffer outlives the mbuf. */
458 rte_mbuf_ext_refcnt_update(shared_info, 0);
460 ext11.RAD = pPrbMapElem->bf_weight.RAD;
461 ext11.disableBFWs = pPrbMapElem->bf_weight.disableBFWs;
463 ext11.numBundPrb = pPrbMapElem->bf_weight.numBundPrb;
464 ext11.numSetBFWs = pPrbMapElem->bf_weight.numSetBFWs;
466 ext11.bfwCompMeth = pPrbMapElem->bf_weight.bfwCompMeth;
467 ext11.bfwIqWidth = pPrbMapElem->bf_weight.bfwIqWidth;
469 ext11.maxExtBufSize = pPrbMapElem->bf_weight.maxExtBufSize;
470 ext11.pExtBufShinfo = shared_info;
472 ext11.pExtBuf = (uint8_t *)pPrbMapElem->bf_weight.p_ext_start;
473 ext11.totalBfwIQLen = pPrbMapElem->bf_weight.ext_section_sz;
475 sect_geninfo[0].exData[next].type = XRAN_CP_SECTIONEXTCMD_11;
476 sect_geninfo[0].exData[next].len = sizeof(ext11);
477 sect_geninfo[0].exData[next].data = &ext11;
479 sect_geninfo[0].info.ef = 1;
480 sect_geninfo[0].exDataSize++;
483 } else { /* if((category == XRAN_CATEGORY_B) && (pPrbMapElem->bf_weight_update)) */
/* No BFW update needed: plain mbuf, no extensions. */
484 mbuf = xran_ethdi_mbuf_alloc();
485 sect_geninfo[0].info.ef = 0;
486 sect_geninfo[0].exDataSize = 0;
489 if(unlikely(mbuf == NULL)) {
490 print_err("Alloc fail!\n");
494 params.numSections = 1;//nsection;
495 params.sections = sect_geninfo;
/* Serialize headers + section (+extensions) into the mbuf. */
497 ret = xran_prepare_ctrl_pkt(mbuf, &params, cc_id, ru_port_id, seq_id);
499 print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",
500 frame_id, subframe_id, slot_id, dir);
504 /* add in the ethernet header */
505 struct rte_ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
506 pkt_len = rte_pktmbuf_pkt_len(mbuf);
508 pCnt->tx_bytes_counter += pkt_len; //rte_pktmbuf_pkt_len(mbuf);
509 if(pkt_len > p_x_ctx->fh_init.mtu)
510 rte_panic("section %d: pkt_len = %d maxExtBufSize %d\n", i, pkt_len, pPrbMapElem->bf_weight.maxExtBufSize);
511 //rte_mbuf_sanity_check(mbuf, 0);
512 cp_sent = p_x_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI, vf_id);
514 rte_pktmbuf_free(mbuf);
/* Record section info so received U-plane data can be matched later. */
516 xran_cp_add_section_info(pHandle, dir, cc_id, ru_port_id, ctx_id, &sect_geninfo[0].info);
518 } /* for (i=0; i<nsection; i++) */
/* Allocate the per-port receive-section buffer used by RU emulation and
 * hook it into recvCpInfo. Fails if memory is already allocated for this
 * port or the port id is out of range. */
524 xran_ruemul_init(void *pHandle)
526 uint16_t xran_port_id;
527 struct xran_device_ctx* p_dev = NULL;
530 p_dev = (struct xran_device_ctx* )pHandle;
531 xran_port_id = p_dev->xran_port_id;
533 print_err("Invalid pHandle - %p", pHandle);
534 return (XRAN_STATUS_FAIL);
537 if(xran_port_id < XRAN_PORTS_NUM) {
538 if(recvSections[xran_port_id]) {
539 print_err("Memory already allocated!");
543 recvSections[xran_port_id] = malloc(sizeof(struct xran_section_recv_info) * XRAN_MAX_NUM_SECTIONS);
/* NOTE(review): BUG — this tests the array 'recvSections' itself, which
 * is never NULL; the malloc failure check must be
 * 'recvSections[xran_port_id] == NULL'. As written, a failed malloc is
 * silently accepted. */
544 if(recvSections == NULL) {
545 print_err("Fail to allocate memory!");
549 recvCpInfo[xran_port_id].sections = recvSections[xran_port_id];
551 print_err("Incorrect xran port %d\n", xran_port_id);
/* Release the per-port receive-section buffer allocated by
 * xran_ruemul_init and clear the recvCpInfo hook.
 * NOTE(review): after free(), recvSections[xran_port_id] is not reset to
 * NULL — the pointer dangles and xran_ruemul_init's "already allocated"
 * guard will wrongly fire (or a double free becomes possible) on re-init.
 * Recommend adding 'recvSections[xran_port_id] = NULL;' after the free. */
560 xran_ruemul_release(void *pHandle)
562 uint16_t xran_port_id;
563 struct xran_device_ctx* p_dev = NULL;
566 p_dev = (struct xran_device_ctx* )pHandle;
567 xran_port_id = p_dev->xran_port_id;
569 print_err("Invalid pHandle - %p", pHandle);
570 return (XRAN_STATUS_FAIL);
573 if(xran_port_id < XRAN_PORTS_NUM){
574 if(recvSections[xran_port_id]) {
575 free(recvSections[xran_port_id]);
576 recvCpInfo[xran_port_id].sections = NULL;
579 print_err("Incorrect xran port %d\n", xran_port_id);