1 /******************************************************************************
3 * Copyright (c) 2019 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN layer common functionality for both O-DU and O-RU as well as C-plane and
23 * @ingroup group_source_xran
24 * @author Intel Corporation
29 #include <arpa/inet.h>
33 #include "xran_common.h"
36 #include "xran_pkt_up.h"
37 #include "xran_up_api.h"
38 #include "xran_lib_mlog_tasks_id.h"
40 #include "../src/xran_printf.h"
42 #include "xran_mlog_lnx.h"
/* Sleep period used by ring_processing_thread when io_sleep is enabled.
 * .tv_nsec = 1E3 is 1000 ns = 1 us (matches trailing comment); tv_sec is
 * implicitly zero-initialized by the designated initializer. */
44 static struct timespec sleeptime = {.tv_nsec = 1E3 }; /* 1 us */
/* TTI duration in microseconds; defined in another translation unit. */
48 extern long interval_us;
/* U-plane RX handlers implemented elsewhere; their full parameter lists are
 * elided in this view -- see the defining translation unit. */
50 extern int xran_process_rx_sym(void *arg,
51 struct rte_mbuf *mbuf,
/* PRACH symbol handler. */
68 extern int xran_process_prach_sym(void *arg,
69 struct rte_mbuf *mbuf,
/* SRS symbol handler. */
85 extern int32_t xran_process_srs_sym(void *arg,
86 struct rte_mbuf *mbuf,
/* Validates an incoming packet's ORAN/eCPRI header fields before dispatch. */
102 extern int32_t xran_pkt_validate(void *arg,
103 struct rte_mbuf *mbuf,
112 struct ecpri_seq_id *seq_id,
/**
 * Allocate a symbol-callback list element and store the callback function
 * pointer plus its opaque user tag.
 *
 * @param cb_fn   callback to invoke on the symbol event
 * @param cb_data opaque pointer handed back to cb_fn
 * @return new element (return statement elided in this view); caller frees
 *         via xran_destroy_cb
 *
 * NOTE(review): cb_elm is dereferenced right after malloc(); the elided
 * line between allocation and the stores may hold a NULL check -- confirm,
 * otherwise an out-of-memory condition dereferences NULL.
 */
120 struct cb_elem_entry *xran_create_cb(XranSymCallbackFn cb_fn, void *cb_data)
122 struct cb_elem_entry * cb_elm = (struct cb_elem_entry *)malloc(sizeof(struct cb_elem_entry));
124 cb_elm->pSymCallback = cb_fn;
125 cb_elm->pSymCallbackTag = cb_data;
/**
 * Free a callback list element created by xran_create_cb.
 * Body elided in this view; presumably free(cb_elm) -- confirm.
 */
131 int xran_destroy_cb(struct cb_elem_entry * cb_elm)
/**
 * Process one received U-plane mbuf: extract IQ samples, validate the ORAN
 * header, then dispatch to the SRS / PRACH / PUSCH handler chosen by the
 * eAxC (RU port) id relative to the configured per-channel offsets.
 *
 * NOTE(review): symbol_total_bytes is a function-level *static* accumulator
 * shared by ALL antennas and channel types passing through this function; if
 * fragments of different streams ever interleave, their byte counts mix.
 * Confirm whether the call path guarantees per-stream ordering.
 */
138 int process_mbuf(struct rte_mbuf *pkt)
141 struct ecpri_seq_id seq;
142 static int symbol_total_bytes = 0;
144 struct xran_device_ctx * p_x_ctx = xran_dev_get_ctx();
145 struct xran_common_counters *pCnt = &p_x_ctx->fh_counters;
149 uint8_t frame_id = 0;
150 uint8_t subframe_id = 0;
160 uint8_t compMeth = 0;
163 void *pHandle = NULL;
165 uint32_t mb_free = 0;
166 int32_t valid_res = 0;
167 int expect_comp = (p_x_ctx->fh_cfg.ru_conf.compMeth != XRAN_COMPMETHOD_NONE);
/* Drop traffic until the PHY shared memory interface is ready. */
170 if(p_x_ctx->xran2phy_mem_ready == 0)
/* Parse eCPRI + radio-app headers and locate the IQ payload. */
173 num_bytes = xran_extract_iq_samples(pkt,
191 print_err("num_bytes is wrong [%d]\n", num_bytes);
195 valid_res = xran_pkt_validate(NULL,
213 print_dbg("valid_res is wrong [%d] ant %u (%u : %u : %u : %u) seq %u num_bytes %d\n", valid_res, Ant_ID, frame_id, subframe_id, slot_id, symb_id, seq.seq_id, num_bytes);
/* eAxC id at/above the SRS offset (and SRS enabled) => SRS stream. */
217 if (Ant_ID >= p_x_ctx->srs_cfg.eAxC_offset && p_x_ctx->fh_init.srsEnable) {
218 /* SRS packet has ruportid = 2*num_eAxc + ant_id */
219 Ant_ID -= p_x_ctx->srs_cfg.eAxC_offset;
220 symbol_total_bytes += num_bytes;
/* e_bit set marks the last (or only) fragment of this symbol. */
222 if (seq.e_bit == 1) {
223 print_dbg("SRS receiving symbol %d, size=%d bytes\n",
224 symb_id, symbol_total_bytes);
226 if (symbol_total_bytes) {
227 int16_t res = xran_process_srs_sym(NULL,
244 if(res == symbol_total_bytes) {
247 print_err("res != symbol_total_bytes\n");
249 pCnt->rx_srs_packets++;
251 symbol_total_bytes = 0;
254 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
257 } else if (Ant_ID >= p_x_ctx->PrachCPConfig.eAxC_offset && p_x_ctx->fh_init.prachEnable) {
258 /* PRACH packet has ruportid = num_eAxc + ant_id */
259 Ant_ID -= p_x_ctx->PrachCPConfig.eAxC_offset;
260 symbol_total_bytes += num_bytes;
261 if (seq.e_bit == 1) {
262 print_dbg("Completed receiving PRACH symbol %d, size=%d bytes\n",
265 if (symbol_total_bytes) {
266 int16_t res = xran_process_prach_sym(NULL,
282 if(res == symbol_total_bytes) {
285 print_err("res != symbol_total_bytes\n");
287 pCnt->rx_prach_packets[Ant_ID]++;
289 symbol_total_bytes = 0;
291 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
/* Default branch (elided 'else'): ordinary PUSCH/U-plane symbol. */
295 symbol_total_bytes += num_bytes;
297 if (seq.e_bit == 1) {
298 print_dbg("Completed receiving symbol %d, size=%d bytes\n",
299 symb_id, symbol_total_bytes);
301 if (symbol_total_bytes) {
302 int res = xran_process_rx_sym(NULL,
318 if(res == symbol_total_bytes) {
321 print_err("res != symbol_total_bytes\n");
323 pCnt->rx_pusch_packets[Ant_ID]++;
325 symbol_total_bytes = 0;
327 print_dbg("Transport layer fragmentation (eCPRI) is not supported\n");
/**
 * Encode the IQ bit width into udCompHdr: per the O-RAN U-plane convention
 * the maximum width (MAX_IQ_BIT_WIDTH, i.e. 16) is encoded as 0, any other
 * width is stored as-is.  The elided original line 338 is presumably the
 * 'else' of this two-way choice, and the return statement is also elided
 * in this view -- confirm.
 */
334 static int set_iq_bit_width(uint8_t iq_bit_width, struct data_section_compression_hdr *compr_hdr)
336 if (iq_bit_width == MAX_IQ_BIT_WIDTH)
337 compr_hdr->ud_comp_hdr.ud_iq_width = (uint8_t) 0;
339 compr_hdr->ud_comp_hdr.ud_iq_width = iq_bit_width;
345 /* Send a single 5G symbol over multiple packets */
/* NOTE(review): this comment mirrors the one above send_symbol_ex; this
 * function only *builds* the headers/payload for one symbol packet into an
 * mbuf -- the actual send happens in the caller. Consider rewording. */
346 int32_t prepare_symbol_ex(enum xran_pkt_dir direction,
352 const enum xran_input_byte_order iq_buf_byte_order,
/* Payload size: prb_num == 0 means full band (MAX_N_FULLBAND_SC PRBs). */
364 int32_t n_bytes = ((prb_num == 0) ? MAX_N_FULLBAND_SC : prb_num) * N_SC_PER_PRB * sizeof(struct rb_map);
/* Compressed size: (3*iqWidth + 1) bytes per PRB, i.e. 24*iqWidth bits of
 * IQ plus -- presumably -- one BFP exponent byte per PRB; confirm against
 * the compression method in use. */
366 n_bytes = ((iqWidth == 0) || (iqWidth == 16)) ? n_bytes : ((3 * iqWidth + 1 ) * prb_num);
370 int16_t nPktSize = sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr) +
371 sizeof(struct radio_app_common_hdr)+ sizeof(struct data_section_hdr) + n_bytes;
373 struct xran_up_pkt_gen_params xp = { 0 };
/* udCompHdr is present on the wire only when compression is enabled. */
375 if(compMeth != XRAN_COMPMETHOD_NONE)
376 nPktSize += sizeof(struct data_section_compression_hdr);
/* Clamp payload to the maximum mbuf capacity. */
378 n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
380 /* radio app header */
381 xp.app_params.data_direction = direction;
382 xp.app_params.payl_ver = 1;
383 xp.app_params.filter_id = 0;
384 xp.app_params.frame_id = frame_id;
385 xp.app_params.sf_slot_sym.subframe_id = subframe_id;
386 xp.app_params.sf_slot_sym.slot_id = xran_slotid_convert(slot_id, 0);
387 xp.app_params.sf_slot_sym.symb_id = symbol_no;
389 /* convert to network byte order */
390 xp.app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp.app_params.sf_slot_sym.value);
392 xp.sec_hdr.fields.sect_id = section_id;
/* NOTE(review): cast truncates prb_num > 255; in O-RAN numPrbu = 0 means
 * "all PRBs" -- confirm callers never pass 256..273 here. */
393 xp.sec_hdr.fields.num_prbu = (uint8_t)prb_num;
394 xp.sec_hdr.fields.start_prbu = (uint8_t)prb_start;
395 xp.sec_hdr.fields.sym_inc = 0;
396 xp.sec_hdr.fields.rb = 0;
398 xp.sec_hdr.udCompHdr = 0;
399 xp.sec_hdr.reserved = 0;
403 xp.compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
404 xp.compr_hdr_param.ud_comp_hdr.ud_iq_width = iqWidth;
405 xp.compr_hdr_param.rsrvd = 0;
407 /* network byte order */
408 xp.sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp.sec_hdr.fields.all_bits);
412 errx(1, "out of mbufs after %d packets", 1);
415 prep_bytes = xran_prepare_iq_symbol_portion(mb,
425 errx(1, "failed preparing symbol");
/* Commit the final frame length into the mbuf. */
427 rte_pktmbuf_pkt_len(mb) = nPktSize;
428 rte_pktmbuf_data_len(mb) = nPktSize;
431 printf("Symbol %2d prep_bytes (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
437 /* Send a single 5G symbol over multiple packets */
/* Allocates an mbuf, reserves headroom for ethernet + eCPRI + radio-app +
 * section headers plus the IQ payload, fills it via prepare_symbol_ex and
 * enqueues it to the per-VF TX ring.  Returns the sent length (return
 * statement elided in this view). */
438 int send_symbol_ex(enum xran_pkt_dir direction,
442 const enum xran_input_byte_order iq_buf_byte_order,
453 uint32_t do_copy = 0;
454 int32_t n_bytes = ((prb_num == 0) ? MAX_N_FULLBAND_SC : prb_num) * N_SC_PER_PRB * sizeof(struct rb_map);
455 struct xran_device_ctx *p_x_ctx = xran_dev_get_ctx();
456 struct xran_common_counters *pCnt = &p_x_ctx->fh_counters;
461 mb = xran_ethdi_mbuf_alloc(); /* will be freed by ETH */
464 errx(1, "out of mbufs after %d packets", 1);
466 pChar = rte_pktmbuf_append(mb, sizeof(struct xran_ecpri_hdr)+ sizeof(struct radio_app_common_hdr)+ sizeof(struct data_section_hdr) + n_bytes);
469 errx(1, "incorrect mbuf size %d packets", 1);
471 pChar = rte_pktmbuf_prepend(mb, sizeof(struct rte_ether_hdr));
474 errx(1, "incorrect mbuf size %d packets", 1);
476 do_copy = 1; /* new mbuf hence copy of IQs */
/* Elided 'else' branch presumably reuses an existing buffer; here the
 * refcount bump keeps the mbuf alive after the ETH layer frees it. */
478 rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
481 int32_t sent = prepare_symbol_ex(direction,
501 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mb);
502 p_x_ctx->send_upmbuf2ring(mb, ETHER_TYPE_ECPRI, xran_map_ecpriPcid_to_vf(direction, CC_ID, RU_Port_ID));
508 printf("Symbol %2d sent (%d packets, %d bytes)\n", symbol_no, i, n_bytes);
/**
 * Send an already-built C-plane message: prepend the ethernet header,
 * enqueue the mbuf to the per-VF C-plane TX ring, then record every section
 * of the message in the section database (used later to match U-plane
 * traffic against the C-plane sections).
 *
 * NOTE(review): the rte_pktmbuf_prepend() result 'h' is used without a
 * visible NULL check; the check may live in the elided lines -- confirm.
 */
514 int send_cpmsg(void *pHandle, struct rte_mbuf *mbuf,struct xran_cp_gen_params *params,
515 struct xran_section_gen_info *sect_geninfo, uint8_t cc_id, uint8_t ru_port_id, uint8_t seq_id)
517 int ret = 0, nsection, i;
518 uint8_t subframe_id = params->hdr.subframeId;
519 uint8_t slot_id = params->hdr.slotId;
520 uint8_t dir = params->dir;
521 struct xran_device_ctx *p_x_ctx = xran_dev_get_ctx();
522 struct xran_common_counters *pCnt = &p_x_ctx->fh_counters;
524 nsection = params->numSections;
526 /* add in the ethernet header */
527 struct rte_ether_hdr *const h = (void *)rte_pktmbuf_prepend(mbuf, sizeof(*h));
530 pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(mbuf);
531 p_x_ctx->send_cpmbuf2ring(mbuf, ETHER_TYPE_ECPRI, xran_map_ecpriRtcid_to_vf(dir, cc_id, ru_port_id));
/* Store each section under the (subframe, slot) context index. */
532 for(i=0; i<nsection; i++)
533 xran_cp_add_section_info(pHandle, dir, cc_id, ru_port_id,
534 (slot_id + subframe_id*SLOTNUM_PER_SUBFRAME)%XRAN_MAX_SECTIONDB_CTX,
535 &sect_geninfo[i].info);
/**
 * Build a section type 1 (DL/UL radio channel) C-plane message into 'mbuf':
 * fills the common header and one section descriptor from the arguments,
 * then serializes the packet via xran_prepare_ctrl_pkt().
 *
 * On packet-build failure the mbuf is freed here; otherwise ownership stays
 * with the caller (who is expected to send it, e.g. via send_cpmsg).
 */
540 int generate_cpmsg_dlul(void *pHandle, struct xran_cp_gen_params *params, struct xran_section_gen_info *sect_geninfo, struct rte_mbuf *mbuf,
541 enum xran_pkt_dir dir, uint8_t frame_id, uint8_t subframe_id, uint8_t slot_id,
542 uint8_t startsym, uint8_t numsym, uint16_t prb_start, uint16_t prb_num,int16_t iq_buffer_offset, int16_t iq_buffer_len,
543 uint16_t beam_id, uint8_t cc_id, uint8_t ru_port_id, uint8_t comp_method, uint8_t iqWidth, uint8_t seq_id, uint8_t symInc)
545 int ret = 0, nsection, loc_sym;
549 params->sectionType = XRAN_CP_SECTIONTYPE_1; // Most DL/UL Radio Channels
550 params->hdr.filterIdx = XRAN_FILTERINDEX_STANDARD;
551 params->hdr.frameId = frame_id;
552 params->hdr.subframeId = subframe_id;
553 params->hdr.slotId = slot_id;
554 params->hdr.startSymId = startsym; // start Symbol ID
555 params->hdr.iqWidth = iqWidth;
556 params->hdr.compMeth = comp_method;
/* 'nsection' is presumably initialized to 0 in the elided lines; only one
 * section is filled here. */
559 sect_geninfo[nsection].info.type = params->sectionType; // for database
560 sect_geninfo[nsection].info.startSymId = params->hdr.startSymId; // for database
561 sect_geninfo[nsection].info.iqWidth = params->hdr.iqWidth; // for database
562 sect_geninfo[nsection].info.compMeth = params->hdr.compMeth; // for database
563 sect_geninfo[nsection].info.id = xran_alloc_sectionid(pHandle, dir, cc_id, ru_port_id, slot_id);
564 sect_geninfo[nsection].info.rb = XRAN_RBIND_EVERY;
565 sect_geninfo[nsection].info.symInc = symInc;
566 sect_geninfo[nsection].info.startPrbc = prb_start;
567 sect_geninfo[nsection].info.numPrbc = prb_num;
568 sect_geninfo[nsection].info.numSymbol = numsym;
569 sect_geninfo[nsection].info.reMask = 0xfff;
570 sect_geninfo[nsection].info.beamId = beam_id;
/* NOTE(review): this loop indexes sect_geninfo[0] while everything else
 * uses [nsection]; consistent only if nsection == 0 -- confirm. */
572 for (loc_sym = 0; loc_sym < XRAN_NUM_OF_SYMBOL_PER_SLOT; loc_sym++) {
573 sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_offset = iq_buffer_offset;
574 sect_geninfo[0].info.sec_desc[loc_sym].iq_buffer_len = iq_buffer_len;
577 sect_geninfo[nsection].info.ef = 0;
578 sect_geninfo[nsection].exDataSize = 0;
579 // sect_geninfo[nsection].exData = NULL;
582 params->numSections = nsection;
583 params->sections = sect_geninfo;
585 if(unlikely(mbuf == NULL)) {
586 print_err("Alloc fail!\n");
590 ret = xran_prepare_ctrl_pkt(mbuf, params, cc_id, ru_port_id, seq_id);
592 print_err("Fail to build control plane packet - [%d:%d:%d] dir=%d\n",
593 frame_id, subframe_id, slot_id, dir);
594 rte_pktmbuf_free(mbuf);
/**
 * Build a section type 3 (PRACH / mixed-numerology) C-plane message:
 * derives the cyclic-prefix time offset from the PRACH configuration,
 * fills the type-3 header and one section, then serializes the packet via
 * xran_prepare_ctrl_pkt().  On failure the mbuf is freed here.
 */
600 int generate_cpmsg_prach(void *pHandle, struct xran_cp_gen_params *params, struct xran_section_gen_info *sect_geninfo, struct rte_mbuf *mbuf, struct xran_device_ctx *pxran_lib_ctx,
601 uint8_t frame_id, uint8_t subframe_id, uint8_t slot_id,
602 uint16_t beam_id, uint8_t cc_id, uint8_t prach_port_id, uint8_t seq_id)
605 struct xran_prach_cp_config *pPrachCPConfig = &(pxran_lib_ctx->PrachCPConfig);
607 uint16_t nNumerology = pxran_lib_ctx->fh_cfg.frame_conf.nNumerology;
609 if(unlikely(mbuf == NULL)) {
610 print_err("Alloc fail!\n");
614 printf("%d:%d:%d:%d - filter=%d, startSym=%d[%d:%d], numSym=%d, occasions=%d, freqOff=%d\n",
615 frame_id, subframe_id, slot_id, prach_port_id,
616 pPrachCPConfig->filterIdx,
617 pPrachCPConfig->startSymId,
618 pPrachCPConfig->startPrbc,
619 pPrachCPConfig->numPrbc,
620 pPrachCPConfig->numSymbol,
621 pPrachCPConfig->occassionsInPrachSlot,
622 pPrachCPConfig->freqOffset);
624 timeOffset = pPrachCPConfig->timeOffset; //this is the CP value per 38.211 tab 6.3.3.1-1&2
625 timeOffset = timeOffset >> nNumerology; //original number is Tc, convert to Ts based on mu
/* Account for the symbols preceding the PRACH start symbol (2048 Ts per
 * symbol, scaled by numerology). */
626 if (pPrachCPConfig->startSymId > 0)
628 timeOffset += (pPrachCPConfig->startSymId * 2048) >> nNumerology;
/* First slot of each half-subframe; the adjustment body is elided --
 * presumably it adds the longer CP of those slots. Confirm. */
629 if ((slot_id == 0) || (slot_id == (SLOTNUM_PER_SUBFRAME >> 1)))
632 params->dir = XRAN_DIR_UL;
633 params->sectionType = XRAN_CP_SECTIONTYPE_3;
634 params->hdr.filterIdx = pPrachCPConfig->filterIdx;
635 params->hdr.frameId = frame_id;
636 params->hdr.subframeId = subframe_id;
637 params->hdr.slotId = slot_id;
638 params->hdr.startSymId = pPrachCPConfig->startSymId;
639 params->hdr.iqWidth = xran_get_conf_iqwidth(pHandle);
640 params->hdr.compMeth = xran_get_conf_compmethod(pHandle);
641 /* use timeOffset field for the CP length value for prach sequence */
642 params->hdr.timeOffset = timeOffset;
643 params->hdr.fftSize = xran_get_conf_fftsize(pHandle);
644 params->hdr.scs = xran_get_conf_prach_scs(pHandle);
645 params->hdr.cpLength = 0;
648 sect_geninfo[nsection].info.type = params->sectionType; // for database
649 sect_geninfo[nsection].info.startSymId = params->hdr.startSymId; // for database
650 sect_geninfo[nsection].info.iqWidth = params->hdr.iqWidth; // for database
651 sect_geninfo[nsection].info.compMeth = params->hdr.compMeth; // for database
652 sect_geninfo[nsection].info.id = xran_alloc_sectionid(pHandle, XRAN_DIR_UL, cc_id, prach_port_id, slot_id);
653 sect_geninfo[nsection].info.rb = XRAN_RBIND_EVERY;
654 sect_geninfo[nsection].info.symInc = XRAN_SYMBOLNUMBER_NOTINC;
655 sect_geninfo[nsection].info.startPrbc = pPrachCPConfig->startPrbc;
656 sect_geninfo[nsection].info.numPrbc = pPrachCPConfig->numPrbc, /* NOTE(review): ',' is the comma operator here; both assignments still execute, but this should be ';' */
657 sect_geninfo[nsection].info.numSymbol = pPrachCPConfig->numSymbol*pPrachCPConfig->occassionsInPrachSlot;
658 sect_geninfo[nsection].info.reMask = 0xfff;
659 sect_geninfo[nsection].info.beamId = beam_id;
660 sect_geninfo[nsection].info.freqOffset = pPrachCPConfig->freqOffset;
/* Remember the last PRACH symbol per component carrier for later timing. */
662 pxran_lib_ctx->prach_last_symbol[cc_id] = sect_geninfo[nsection].info.startSymId + sect_geninfo[nsection].info.numSymbol - 1;
664 sect_geninfo[nsection].info.ef = 0;
665 sect_geninfo[nsection].exDataSize = 0;
666 // sect_geninfo[nsection].exData = NULL;
669 params->numSections = nsection;
670 params->sections = sect_geninfo;
672 ret = xran_prepare_ctrl_pkt(mbuf, params, cc_id, prach_port_id, seq_id);
674 print_err("Fail to build prach control packet - [%d:%d:%d]\n", frame_id, subframe_id, slot_id);
675 rte_pktmbuf_free(mbuf);
/**
 * Drain up to MBUFS_CNT packets from one RX ring and run each through the
 * ethdi packet filter; packets the filter marks MBUF_FREE are released
 * here, others are presumed to have been consumed/kept by the filter.
 * Return value and the use of 'remaining' are elided in this view.
 */
681 int process_ring(struct rte_ring *r)
685 struct rte_mbuf *mbufs[MBUFS_CNT];
689 const uint16_t dequeued = rte_ring_dequeue_burst(r, (void **)mbufs,
690 RTE_DIM(mbufs), &remaining);
696 for (i = 0; i < dequeued; ++i) {
697 if (xran_ethdi_filter_packet(mbufs[i], 0) == MBUF_FREE)
698 rte_pktmbuf_free(mbufs[i]);
700 MLogTask(PID_PROCESS_UP_PKT, t1, MLogTick());
/**
 * One iteration (or loop body -- surrounding loop elided) of the RX ring
 * processing: poll the BBDEV decoder/encoder if configured, then drain the
 * per-VF RX rings.  VFs are paired: even index carries U-plane, the
 * adjacent odd index carries C-plane, which is only processed on the O-RU
 * side.  Exits when the interface state becomes XRAN_STOPPED.
 */
705 int32_t ring_processing_func(void)
707 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
708 struct xran_device_ctx *const pxran_lib_ctx = xran_dev_get_ctx();
/* Poll UL decoder when a BBDEV callback is registered. */
715 if (pxran_lib_ctx->bbdev_dec) {
717 retPoll = pxran_lib_ctx->bbdev_dec();
721 MLogTask(PID_XRAN_BBDEV_UL_POLL + retPoll, t1, t2);
/* Poll DL encoder when a BBDEV callback is registered. */
725 if (pxran_lib_ctx->bbdev_enc) {
727 retPoll = pxran_lib_ctx->bbdev_enc();
731 MLogTask(PID_XRAN_BBDEV_DL_POLL + retPoll, t1, t2);
737 for (i = 0; i < ctx->io_cfg.num_vfs && i < (XRAN_VF_MAX - 1); i = i+2){
738 if (process_ring(ctx->rx_ring[i]))
742 if(ctx->io_cfg.id == O_RU) /* process CP only on O-RU */
743 if (process_ring(ctx->rx_ring[i+1]))
747 if (XRAN_STOPPED == xran_if_current_state)
/**
 * Entry point for the ring-processing worker (DPDK lcore thread): raises
 * itself to SCHED_FIFO at XRAN_THREAD_DEFAULT_PRIO (failure is only
 * logged), then loops calling ring_processing_func() until it signals
 * stop, optionally nanosleep()ing 1 us per iteration when io_sleep is
 * configured (kernel workaround, per original comment).  The loop
 * construct and return are elided in this view.
 */
753 int ring_processing_thread(void *args)
755 struct sched_param sched_param;
756 struct xran_device_ctx *const p_xran_dev_ctx = xran_dev_get_ctx();
759 memset(&sched_param, 0, sizeof(struct sched_param));
761 printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());
762 sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
763 if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param))){
764 printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);
768 if(ring_processing_func() != 0)
771 /* work around for some kernel */
772 if(p_xran_dev_ctx->fh_init.io_cfg.io_sleep)
773 nanosleep(&sleeptime,NULL);
776 puts("Pkt processing thread finished.");