1 /******************************************************************************
3 * Copyright (c) 2020 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief XRAN library device (O-RU or O-DU) specific context and coresponding methods
22 * @ingroup group_source_xran
23 * @author Intel Corporation
32 #include <sys/queue.h>
38 #include <immintrin.h>
39 #include <rte_common.h>
41 #include <rte_errno.h>
42 #include <rte_lcore.h>
43 #include <rte_cycles.h>
44 #include <rte_memory.h>
45 #include <rte_malloc.h>
46 #include <rte_memzone.h>
49 #include <rte_version.h>
51 #include "xran_fh_o_du.h"
54 #include "xran_printf.h"
56 static struct xran_device_ctx *g_xran_dev_ctx[XRAN_PORTS_NUM] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
59 xran_dev_create_ctx(uint32_t xran_ports_num)
62 struct xran_device_ctx * pCtx = NULL;
64 if (xran_ports_num > XRAN_PORTS_NUM)
67 pCtx = (struct xran_device_ctx *) _mm_malloc(sizeof(struct xran_device_ctx)*xran_ports_num, 64);
69 for(i = 0; i < xran_ports_num; i++){
70 g_xran_dev_ctx[i] = pCtx;
80 xran_dev_destroy_ctx(void)
82 if (g_xran_dev_ctx[0])
83 free(g_xran_dev_ctx[0]);
87 struct xran_device_ctx *xran_dev_get_ctx(void)
89 return g_xran_dev_ctx[0];
92 struct xran_device_ctx **xran_dev_get_ctx_addr(void)
94 return &g_xran_dev_ctx[0];
97 struct xran_device_ctx *xran_dev_get_ctx_by_id(uint32_t xran_port_id)
99 if (xran_port_id >= XRAN_PORTS_NUM)
102 return g_xran_dev_ctx[xran_port_id];
105 static inline struct xran_fh_config *xran_lib_get_ctx_fhcfg(void *pHandle)
107 struct xran_device_ctx * p_dev_ctx = (struct xran_device_ctx*)pHandle;
108 return (&(p_dev_ctx->fh_cfg));
113 * @brief Get the configuration of eAxC ID
115 * @return the pointer of configuration
117 struct xran_eaxcid_config *xran_get_conf_eAxC(void *pHandle)
119 struct xran_device_ctx * p_dev_ctx = pHandle;
120 if(p_dev_ctx == NULL)
121 p_dev_ctx = xran_dev_get_ctx();
123 if(p_dev_ctx == NULL)
125 return (&(p_dev_ctx->eAxc_id_cfg));
129 * @brief Get the configuration of the total number of beamforming weights on RU
131 * @return Configured the number of beamforming weights
133 uint8_t xran_get_conf_num_bfweights(void *pHandle)
135 struct xran_device_ctx * p_dev_ctx = pHandle;
136 if(p_dev_ctx == NULL)
137 p_dev_ctx = xran_dev_get_ctx();
139 if(p_dev_ctx == NULL)
142 return (p_dev_ctx->fh_init.totalBfWeights);
146 * @brief Get the configuration of subcarrier spacing for PRACH
148 * @return subcarrier spacing value for PRACH
150 uint8_t xran_get_conf_prach_scs(void *pHandle)
152 return (xran_lib_get_ctx_fhcfg(pHandle)->prach_conf.nPrachSubcSpacing);
156 * @brief Get the configuration of FFT size for RU
158 * @return FFT size value for RU
160 uint8_t xran_get_conf_fftsize(void *pHandle)
162 return (xran_lib_get_ctx_fhcfg(pHandle)->ru_conf.fftSize);
166 * @brief Get the configuration of nummerology
168 * @return Configured numerology
170 uint8_t xran_get_conf_numerology(void *pHandle)
172 return (xran_lib_get_ctx_fhcfg(pHandle)->frame_conf.nNumerology);
176 * @brief Get the configuration of IQ bit width for RU
178 * @return IQ bit width for RU
180 uint8_t xran_get_conf_iqwidth_prach(void *pHandle)
182 struct xran_fh_config *pFhCfg;
184 pFhCfg = xran_lib_get_ctx_fhcfg(pHandle);
185 return ((pFhCfg->ru_conf.iqWidth_PRACH==16)?0:pFhCfg->ru_conf.iqWidth_PRACH);
189 * @brief Get the configuration of compression method for RU
191 * @return Compression method for RU
193 uint8_t xran_get_conf_compmethod_prach(void *pHandle)
195 return (xran_lib_get_ctx_fhcfg(pHandle)->ru_conf.compMeth_PRACH);
200 * @brief Get the configuration of the number of component carriers
202 * @return Configured the number of component carriers
204 uint8_t xran_get_num_cc(void *pHandle)
206 return (xran_lib_get_ctx_fhcfg(pHandle)->nCC);
210 * @brief Get the configuration of the number of antenna for UL
212 * @return Configured the number of antenna
214 uint8_t xran_get_num_eAxc(void *pHandle)
216 return (xran_lib_get_ctx_fhcfg(pHandle)->neAxc);
220 * @brief Get configuration of O-RU (Cat A or Cat B)
222 * @return Configured the number of antenna
224 enum xran_category xran_get_ru_category(void *pHandle)
226 return (xran_lib_get_ctx_fhcfg(pHandle)->ru_conf.xranCat);
230 * @brief Get the configuration of the number of antenna
232 * @return Configured the number of antenna
234 uint8_t xran_get_num_eAxcUl(void *pHandle)
236 return (xran_lib_get_ctx_fhcfg(pHandle)->neAxcUl);
240 * @brief Get the configuration of the number of antenna elements
242 * @return Configured the number of antenna
244 uint8_t xran_get_num_ant_elm(void *pHandle)
246 return (xran_lib_get_ctx_fhcfg(pHandle)->nAntElmTRx);
/* Copy the fronthaul packet counters of every configured O-xU port into the
 * caller-provided pStats array and, when running as O-DU (io_cfg.id == 0)
 * with more than one RX queue, print per-VF per-queue receive counts to
 * stdout. Returns XRAN_STATUS_SUCCESS on success, XRAN_STATUS_INVALID_PARAM
 * otherwise.
 *
 * NOTE(review): this listing elides several lines (argument validation,
 * loop-variable declarations, closing braces, and presumably the advance to
 * the next port's context between iterations). Comments below describe only
 * what is visible; pStats is assumed to hold at least fh_init.xran_ports
 * entries -- confirm against callers. */
249 int32_t xran_get_common_counters(void *pXranLayerHandle, struct xran_common_counters *pStats)
252 int32_t xran_port_num = 0;
253 struct xran_device_ctx* pDev = (struct xran_device_ctx*)pXranLayerHandle;
254 struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
/* number of O-xU ports that actually carry counters */
258 xran_port_num = pDev->fh_init.xran_ports;
259 for(o_xu_id = 0; o_xu_id < XRAN_PORTS_NUM;o_xu_id++ ){
260 if(o_xu_id < xran_port_num ){
/* struct copy of the per-port counter block into the caller's array */
261 pStats[o_xu_id] = pDev->fh_counters;
/* O-DU side only: dump RX-queue packet counters for each VF */
265 if (ctx->io_cfg.id == 0 && ctx->io_cfg.num_rxq > 1) {
266 for (port = 0; port < ctx->io_cfg.num_vfs; port++) {
267 printf("vf %d: ", port);
268 for (qi = 0; qi < ctx->rxq_per_port[port]; qi++){
/* %6ld assumes rx_vf_queue_cnt elements are long -- confirm element type */
269 printf("%6ld ", ctx->rx_vf_queue_cnt[port][qi]);
275 return XRAN_STATUS_SUCCESS;
/* reached when the (elided) handle/stats validation fails */
277 return XRAN_STATUS_INVALID_PARAM;
/**
 * @brief Return the beam ID for the given stream and slot.
 *
 * Beamforming is not handled at this layer, so the beam ID is always 0.
 * All parameters are accepted for interface compatibility and ignored.
 *
 * @return 0 (no beamforming)
 */
uint16_t xran_get_beamid(void *pHandle, uint8_t dir, uint8_t cc_id, uint8_t ant_id, uint8_t slot_id)
{
    (void)pHandle;
    (void)dir;
    (void)cc_id;
    (void)ant_id;
    (void)slot_id;

    return 0; /* NO BEAMFORMING */
}
286 struct cb_elem_entry *xran_create_cb(XranSymCallbackFn cb_fn, void *cb_data, void* p_dev_ctx)
288 struct cb_elem_entry * cb_elm = (struct cb_elem_entry *)malloc(sizeof(struct cb_elem_entry));
290 cb_elm->pSymCallback = cb_fn;
291 cb_elm->pSymCallbackTag = cb_data;
292 cb_elm->p_dev_ctx = p_dev_ctx;
/* Releases a symbol-callback list element created by xran_create_cb().
 * NOTE(review): the return-type line and the function body are elided in
 * this listing -- presumably it frees cb_elm and returns a status; confirm
 * against the full source before relying on these semantics. */
299 xran_destroy_cb(struct cb_elem_entry * cb_elm)
307 xran_map_ecpriRtcid_to_vf(struct xran_device_ctx *p_dev_ctx, int32_t dir, int32_t cc_id, int32_t ru_port_id)
309 return (p_dev_ctx->map2vf[dir][cc_id][ru_port_id][XRAN_CP_VF]);
313 xran_map_ecpriPcid_to_vf(struct xran_device_ctx *p_dev_ctx, int32_t dir, int32_t cc_id, int32_t ru_port_id)
315 return (p_dev_ctx->map2vf[dir][cc_id][ru_port_id][XRAN_UP_VF]);
319 xran_set_map_ecpriRtcid_to_vf(struct xran_device_ctx *p_dev_ctx, int32_t dir, int32_t cc_id, int32_t ru_port_id, uint16_t vf_id)
321 p_dev_ctx->map2vf[dir][cc_id][ru_port_id][XRAN_CP_VF] = vf_id;
322 return XRAN_STATUS_SUCCESS;
326 xran_set_map_ecpriPcid_to_vf(struct xran_device_ctx *p_dev_ctx, int32_t dir, int32_t cc_id, int32_t ru_port_id, uint16_t vf_id)
328 p_dev_ctx->map2vf[dir][cc_id][ru_port_id][XRAN_UP_VF] = vf_id;
329 return XRAN_STATUS_SUCCESS;
/* Classify an eAxC (RU antenna) index for logging: indices
 * [0, neAxcUl) are uplink U-plane data streams, [neAxcUl, 2*neAxcUl) are
 * PRACH streams, and [2*neAxcUl, 2*neAxcUl + nAntElmTRx) are SRS streams
 * (this partitioning matches the stream layout built in
 * xran_init_vf_rxq_to_pcid_mapping()).
 * NOTE(review): the return-type line and the returned values are elided in
 * this listing -- presumably string literals naming each stream type;
 * confirm against the full source. */
333 xran_pcid_str_type(struct xran_device_ctx* p_dev, int ant)
/* uplink U-plane data stream */
335 if(ant < xran_get_num_eAxcUl(p_dev))
/* PRACH stream: second bank of neAxcUl indices */
337 else if (ant >= xran_get_num_eAxcUl(p_dev) && ant < 2*xran_get_num_eAxcUl(p_dev))
/* SRS stream: one index per antenna element after the two eAxC banks */
339 else if ( ant >= xran_get_num_eAxcUl(p_dev) * 2 && ant < 2*xran_get_num_eAxcUl(p_dev) + xran_get_num_ant_elm(p_dev))
/* Program DPDK rte_flow rules so that each eCPRI U-plane flow (PC_ID) of
 * this device lands on its own VF RX queue, and record the reverse
 * queue->PC_ID / queue->CID mappings in the ethdi context. Flows are
 * created for every component carrier and for 2*num_eAxc + nAntElmTRx
 * streams per CC (data + PRACH + SRS). Returns XRAN_STATUS_SUCCESS, or
 * XRAN_STATUS_FAIL for a NULL handle.
 *
 * NOTE(review): this listing elides lines (the return type, several local
 * declarations such as cc/ant/vf_id/num_cc, the per-VF rx_q increment, and
 * closing braces); comments below describe only what is visible. */
346 xran_init_vf_rxq_to_pcid_mapping(void *pHandle)
348 /* eCPRI flow supported with DPDK 21.02 or later */
349 #if (RTE_VER_YEAR >= 21) /* eCPRI flow supported with DPDK 21.02 or later */
350 struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
351 uint8_t xran_port_id = 0;
352 struct xran_device_ctx* p_dev = NULL;
353 struct rte_flow_error error;
/* PC_ID in network byte order, as composed by xran_compose_cid() */
357 uint16_t pc_id_be = 0;
/* next RX queue to use on each VF (queue 0 is reserved, see below) */
358 uint16_t rx_q[XRAN_VF_MAX] = { 0 };
360 int32_t dir = XRAN_DIR_UL;
361 uint8_t num_eAxc = 0;
365 p_dev = (struct xran_device_ctx* )pHandle;
366 xran_port_id = p_dev->xran_port_id;
/* else-branch of the (elided) NULL-handle check */
368 print_err("Invalid pHandle - %p", pHandle);
369 return (XRAN_STATUS_FAIL);
372 num_cc = xran_get_num_cc(p_dev);
/* CAT A uses the DL eAxC count as the base; CAT B the UL count */
374 if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_A)
375 num_eAxc = xran_get_num_eAxc(p_dev);
377 num_eAxc = xran_get_num_eAxcUl(p_dev);
379 num_eAxc *= 2; /* +PRACH */
380 num_eAxc += xran_get_num_ant_elm(p_dev); /* +SRS */
/* one rte_flow rule per (cc, ant) stream */
382 for(cc = 0; cc < num_cc; cc++) {
383 for(ant = 0; ant < num_eAxc; ant++) {
384 pc_id_be = xran_compose_cid(0, 0, cc, ant);
385 vf_id = xran_map_ecpriPcid_to_vf(p_dev, dir, cc, ant);
387 /* don't use queue 0 for eCpri Flows */
/* steer this PC_ID to queue rx_q[vf_id] on vf_id and keep the handle */
391 p_dev->p_iq_flow[p_dev->iq_flow_cnt] = generate_ecpri_flow(vf_id, rx_q[vf_id], pc_id_be, &error);
392 eth_ctx->vf_and_q2pc_id[vf_id][rx_q[vf_id]] = rte_be_to_cpu_16(pc_id_be);
/* NOTE(review): "ð_ctx" on the next lines is mojibake for "&eth_ctx"
 * (the HTML entity &eth; rendered as a character) -- repair in source */
394 xran_decompose_cid((uint16_t)pc_id_be, ð_ctx->vf_and_q2cid[vf_id][rx_q[vf_id]]);
/* overwrite decomposed fields with the VF/queue identity for lookups */
396 eth_ctx->vf_and_q2cid[vf_id][rx_q[vf_id]].bandSectorId = vf_id;
397 eth_ctx->vf_and_q2cid[vf_id][rx_q[vf_id]].cuPortId = rx_q[vf_id];
399 printf("%s: p %d vf %d qi %d 0x%016p UP: dir %d cc %d (%d) ant %d (%d) type %s", __FUNCTION__, xran_port_id, vf_id, rx_q[vf_id], ð_ctx->vf_and_q2cid[vf_id][rx_q[vf_id]], dir,
400 cc, eth_ctx->vf_and_q2cid[vf_id][rx_q[vf_id]].ccId, ant, eth_ctx->vf_and_q2cid[vf_id][rx_q[vf_id]].ruPortId, xran_pcid_str_type(p_dev, ant));
402 printf(" queue_id %d flow_id %d pc_id 0x%04x\n",rx_q[vf_id], p_dev->iq_flow_cnt, pc_id_be);
403 p_dev->iq_flow_cnt++;
/* record per-VF queue usage; abort if flows exceed the configured queues */
406 if(rx_q[vf_id] > eth_ctx->io_cfg.num_rxq)
407 rte_panic("Not enough RX Queues\n");
408 eth_ctx->rxq_per_port[vf_id] = rx_q[vf_id];
412 return XRAN_STATUS_SUCCESS;
/* Build the per-device map2vf[][] tables assigning a VF to each
 * (direction, CC, antenna) stream for both U-plane and C-plane traffic.
 * VFs owned by this xran_port are collected from eth_ctx->vf2xran_port.
 * With one_vf_cu_plane set and exactly two VFs, antennas are split
 * odd/even across the two VFs; otherwise U-plane uses the XRAN_UP_VF slot
 * and C-plane the XRAN_CP_VF slot (or the shared VF when U/C share one).
 *
 * NOTE(review): this listing elides lines (the return type, declarations
 * for i/dir/cc/ant/vf_id, the NULL-handle check, closing braces); comments
 * below describe only what is visible. */
416 xran_init_vfs_mapping(void *pHandle)
419 struct xran_device_ctx* p_dev = NULL;
420 uint8_t xran_port_id = 0;
422 uint16_t vf_id_cp = 0;
423 struct xran_ethdi_ctx *eth_ctx = xran_ethdi_get_ctx();
/* VFs assigned to this port, in discovery order */
424 uint16_t vf_id_all[XRAN_VF_MAX];
425 uint16_t total_vf_cnt = 0;
428 p_dev = (struct xran_device_ctx* )pHandle;
429 xran_port_id = p_dev->xran_port_id;
/* else-branch of the (elided) NULL-handle check */
431 print_err("Invalid pHandle - %p", pHandle);
432 return (XRAN_STATUS_FAIL);
435 memset(vf_id_all, 0, sizeof(vf_id_all));
/* collect every VF mapped to this xRAN port */
437 for(i = 0; i < XRAN_VF_MAX; i++){
438 if(eth_ctx->vf2xran_port[i] == xran_port_id){
439 vf_id_all[total_vf_cnt++] = i;
440 printf("%s: p %d vf %d\n", __FUNCTION__, xran_port_id, i);
444 print_dbg("total_vf_cnt %d\n", total_vf_cnt);
/* sanity: Ethernet lines per port must match the VF count, halved when
 * U-plane and C-plane share one VF (one_vf_cu_plane) */
446 if(eth_ctx->io_cfg.nEthLinePerPort != (total_vf_cnt >> (1 - eth_ctx->io_cfg.one_vf_cu_plane))) {
447 print_err("Invalid total_vf_cnt - %d [expected %d]", total_vf_cnt,
448 eth_ctx->io_cfg.nEthLinePerPort << (1 - eth_ctx->io_cfg.one_vf_cu_plane));
449 return (XRAN_STATUS_FAIL);
/* iterate both directions, all CCs, and data+PRACH (2x) + SRS antennas */
452 for(dir=0; dir < 2; dir++){
453 for(cc=0; cc < xran_get_num_cc(p_dev); cc++){
454 for(ant=0; ant < xran_get_num_eAxc(p_dev)*2 + xran_get_num_ant_elm(p_dev); ant++){
/* special case: two shared-plane VFs -> split antennas odd/even */
455 if((total_vf_cnt == 2) && eth_ctx->io_cfg.one_vf_cu_plane){
456 if(ant & 1) { /* split ant half and half on VFs */
457 vf_id = vf_id_all[XRAN_UP_VF+1];
458 xran_set_map_ecpriPcid_to_vf(p_dev, dir, cc, ant, vf_id);
459 vf_id_cp = vf_id_all[XRAN_UP_VF+1];
460 xran_set_map_ecpriRtcid_to_vf(p_dev, dir, cc, ant, vf_id_cp);
/* (elided else) even antennas go to the first shared VF */
462 vf_id = vf_id_all[XRAN_UP_VF];
463 xran_set_map_ecpriPcid_to_vf(p_dev, dir, cc, ant, vf_id);
464 vf_id_cp = vf_id_all[XRAN_UP_VF];
465 xran_set_map_ecpriRtcid_to_vf(p_dev, dir, cc, ant, vf_id_cp);
/* default: U-plane on XRAN_UP_VF; C-plane on XRAN_CP_VF, or on the
 * U-plane VF when both planes share one VF */
468 vf_id = vf_id_all[XRAN_UP_VF];
469 xran_set_map_ecpriPcid_to_vf(p_dev, dir, cc, ant, vf_id);
470 vf_id_cp = vf_id_all[(eth_ctx->io_cfg.one_vf_cu_plane ? XRAN_UP_VF : XRAN_CP_VF)];
471 xran_set_map_ecpriRtcid_to_vf(p_dev, dir, cc, ant, vf_id_cp);
473 print_dbg("%s: p %d vf %d UP: dir %d cc %d ant %d\n", __FUNCTION__, xran_port_id, vf_id, dir, cc, ant);
474 print_dbg("%s: p %d vf %d CP: dir %d cc %d ant %d\n", __FUNCTION__, xran_port_id, vf_id_cp, dir, cc, ant);
479 return (XRAN_STATUS_SUCCESS);