/******************************************************************************
*
*   Copyright (c) 2020 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/
/**
 * @brief This file has all definitions for the Ethernet Data Interface Layer
 * @ingroup group_lte_source_auxlib
 * @author Intel Corporation
 **/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <err.h>
#include <sys/queue.h>
#include <linux/limits.h>
#include <sys/types.h>

#include <immintrin.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_flow.h>
#include <rte_timer.h>
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif

/* Local layer headers (names assumed): ethdi context, ring and mbuf-pool declarations. */
#include "ethernet.h"
#include "ethdi.h"
#include "xran_fh_o_du.h"
#include "xran_mlog_lnx.h"
#include "xran_printf.h"
#include "xran_common.h"
#include "xran_lib_mlog_tasks_id.h"
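
/* Number of packets drained from a NIC RX queue per rte_eth_rx_burst() call
 * in the IO loops below. */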
#define BURST_RX_IO_SIZE 48

//#define ORAN_OWD_DEBUG_TX_LOOP

struct xran_ethdi_ctx g_ethdi_ctx = { 0 };
enum xran_if_state xran_if_current_state = XRAN_STOPPED;
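
/* Single process-wide ethdi context; xran_ethdi_get_ctx() is expected to
 * return a pointer to this object. xran_if_current_state gates the IO loops:
 * they bail out once it is set to XRAN_STOPPED. */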

struct rte_mbuf *xran_ethdi_mbuf_alloc(void)
{
    return rte_pktmbuf_alloc(_eth_mbuf_pool);
}

struct rte_mbuf *xran_ethdi_mbuf_indir_alloc(void)
{
    return rte_pktmbuf_alloc(socket_indirect_pool);
}
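
/* Both send helpers below stamp the egress DPDK port, prepend the Ethernet
 * (and, if configured, VLAN) header addressed to the O-RU peer, and enqueue
 * the mbuf on the per-VF TX ring that the IO loops drain. */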

int32_t xran_ethdi_mbuf_send(struct rte_mbuf *mb, uint16_t ethertype, uint16_t vf_id)
{
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    int res = 0;

    mb->port = ctx->io_cfg.port[vf_id];
    xran_add_eth_hdr_vlan(&ctx->entities[vf_id][ID_O_RU], ethertype, mb);

    res = xran_enqueue_mbuf(mb, ctx->tx_ring[vf_id]);

    return res;
}

int32_t xran_ethdi_mbuf_send_cp(struct rte_mbuf *mb, uint16_t ethertype, uint16_t vf_id)
{
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    int res = 0;

    mb->port = ctx->io_cfg.port[vf_id];
    xran_add_eth_hdr_vlan(&ctx->entities[vf_id][ID_O_RU], ethertype, mb);

    res = xran_enqueue_mbuf(mb, ctx->tx_ring[vf_id]);

    return res;
}

struct {
    uint16_t ethertype;
    ethertype_handler fn;
} xran_ethertype_handlers[] = {
    { ETHER_TYPE_ECPRI, NULL },
};

int32_t xran_register_ethertype_handler(uint16_t ethertype, ethertype_handler callback)
{
    uint32_t i;

    for (i = 0; i < RTE_DIM(xran_ethertype_handlers); ++i)
        if (xran_ethertype_handlers[i].ethertype == ethertype) {
            xran_ethertype_handlers[i].fn = callback;

            return 1;
        }

    print_err("support for ethertype %u not found", ethertype);

    return 0;
}

int xran_handle_ether(uint16_t ethertype, struct rte_mbuf *pkt_q[], uint16_t xport_id, struct xran_eaxc_info *p_cid, uint16_t num)
{
    uint32_t i;

    for (i = 0; i < RTE_DIM(xran_ethertype_handlers); ++i)
        if (xran_ethertype_handlers[i].ethertype == ethertype)
            if (xran_ethertype_handlers[i].fn) {
                // rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
                return xran_ethertype_handlers[i].fn(pkt_q, xport_id, p_cid, num);
            }

    print_err("Packet with unrecognized ethertype '%.4X' dropped", ethertype);

    return MBUF_FREE;
}

/* Process the VLAN tag, strip the Ethernet header and call the ethertype handlers. */
int xran_ethdi_filter_packet(struct rte_mbuf *pkt_q[], uint16_t vf_id, uint16_t q_id, uint16_t num)
{
    int ret = MBUF_FREE;
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    uint16_t port_id = ctx->vf2xran_port[vf_id];
    struct xran_eaxc_info *p_cid = &ctx->vf_and_q2cid[vf_id][q_id];

    ret = xran_handle_ether(ETHER_TYPE_ECPRI, pkt_q, port_id, p_cid, num);

    return ret;
}
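
/* The MBUF_FREE / MBUF_KEEP return convention defined by this layer tells
 * the caller whether it still owns the burst and must free the mbufs, or
 * whether the handler took ownership of them. */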

/* Check the link status of the port for up to 9s, and print it when done */
static void check_port_link_status(uint8_t portid)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
    uint8_t count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status portid [%d] ", portid);
    fflush(stdout);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        memset(&link, 0, sizeof(link));
        rte_eth_link_get_nowait(portid, &link);

        /* print link status if flag set */
        if (print_flag == 1) {
            if (link.link_status)
                printf("Port %d Link Up - speed %u Mbps - %s\n",
                       (uint8_t)portid, (unsigned)link.link_speed,
                       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                           ("full-duplex") : ("half-duplex"));
            else
                printf("Port %d Link Down\n", (uint8_t)portid);
        }

        /* clear all_ports_up flag if any link down */
        if (link.link_status == ETH_LINK_DOWN) {
            all_ports_up = 0;
        }

        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf(" ... done\n");
        }
    }
}

/**
 * Create a flow rule that sends packets with a matching pc_id
 * to the selected target queue.
 *
 * @param port_id
 *   The selected port.
 * @param rx_q
 *   The selected target queue.
 * @param pc_id_be
 *   The value to apply to the pc_id.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created, else NULL.
 */
static struct rte_flow *
generate_ecpri_flow(uint16_t port_id, uint16_t rx_q, uint16_t pc_id_be, struct rte_flow_error *error)
{
    struct rte_flow *flow = NULL;
#if (RTE_VER_YEAR >= 21)
#define MAX_PATTERN_NUM 3
#define MAX_ACTION_NUM 2
    struct rte_flow_attr attr;
    struct rte_flow_item pattern[MAX_PATTERN_NUM];
    struct rte_flow_action action[MAX_ACTION_NUM];
    struct rte_flow_action_queue queue = { .index = rx_q };
    struct rte_flow_item_ecpri ecpri_spec;
    struct rte_flow_item_ecpri ecpri_mask;
    int res;

    print_dbg("%s\n", __FUNCTION__);
    memset(pattern, 0, sizeof(pattern));
    memset(action, 0, sizeof(action));

    /*
     * set the rule attribute.
     * in this case only ingress packets will be checked.
     */
    memset(&attr, 0, sizeof(struct rte_flow_attr));
    attr.ingress = 1;

    /*
     * create the action sequence.
     * one action only: move the packet to the target queue.
     */
    action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
    action[0].conf = &queue;
    action[1].type = RTE_FLOW_ACTION_TYPE_END;

    /*
     * set the first level of the pattern (ETH).
     * since we only want to match eCPRI, this level allows all Ethernet headers.
     */
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

    memset(&ecpri_spec, 0, sizeof(struct rte_flow_item_ecpri));
    memset(&ecpri_mask, 0, sizeof(struct rte_flow_item_ecpri));

    ecpri_spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
    ecpri_spec.hdr.type0.pc_id = pc_id_be;

    ecpri_mask.hdr.common.type = 0xff;
    ecpri_mask.hdr.type0.pc_id = 0xffff;

    ecpri_spec.hdr.common.u32 = rte_cpu_to_be_32(ecpri_spec.hdr.common.u32);

    pattern[1].type = RTE_FLOW_ITEM_TYPE_ECPRI;
    pattern[1].spec = &ecpri_spec;
    pattern[1].mask = &ecpri_mask;

    struct rte_flow_item_ecpri *pecpri_spec = (struct rte_flow_item_ecpri *)pattern[1].spec;
    struct rte_flow_item_ecpri *pecpri_mask = (struct rte_flow_item_ecpri *)pattern[1].mask;
    print_dbg("RTE_FLOW_ITEM_TYPE_ECPRI\n");
    print_dbg("spec type %x pc_id %x\n", pecpri_spec->hdr.common.type, pecpri_spec->hdr.type0.pc_id);
    print_dbg("mask type %x pc_id %x\n", pecpri_mask->hdr.common.type, pecpri_mask->hdr.type0.pc_id);

    /* the final level must always be type END */
    pattern[2].type = RTE_FLOW_ITEM_TYPE_END;

    res = rte_flow_validate(port_id, &attr, pattern, action, error);
    if (!res)
        flow = rte_flow_create(port_id, &attr, pattern, action, error);
    else
        rte_panic("Flow can't be created %d message: %s\n",
                  res, error->message ? error->message : "(no stated reason)");
#endif
    return flow;
}
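
/*
 * Illustrative use (values hypothetical): steer eCPRI IQ-data packets whose
 * pc_id is 0x0001 to RX queue 2 of port 0; note that pc_id must be supplied
 * in network byte order:
 *
 *     struct rte_flow_error flow_error;
 *     generate_ecpri_flow(0, 2, rte_cpu_to_be_16(0x0001), &flow_error);
 */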

int32_t
xran_ethdi_init_dpdk_io(char *name, const struct xran_io_cfg *io_cfg,
                        int *lcore_id, struct rte_ether_addr *p_o_du_addr,
                        struct rte_ether_addr *p_ru_addr, uint32_t mtu)
{
    uint16_t port[XRAN_VF_MAX];
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    int i, ivf, qi;
    char core_mask[64];
    uint64_t c_mask = 0L;
    uint64_t c_mask_64_127 = 0L;
    uint64_t nWorkerCore = 1;
    uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
    char bbdev_wdev[32] = "";
    char bbdev_vdev[32] = "";
    char iova_mode[32] = "--iova-mode=pa";
    char socket_mem[32] = "--socket-mem=8192";
    char socket_limit[32] = "--socket-limit=8192";
    char ring_name[32] = "";
    int32_t xran_port = -1;
    uint16_t port_id;

    char *argv[] = { name, core_mask, "-n2", iova_mode, socket_mem, socket_limit, "--proc-type=auto",
        "--file-prefix", name, "-a0000:00:00.0", bbdev_wdev, bbdev_vdev };

    if (io_cfg->bbdev_mode != XRAN_BBDEV_NOT_USED) {
        printf("BBDEV_FEC_ACCL_NR5G\n");
        if (io_cfg->bbdev_mode == XRAN_BBDEV_MODE_HW_ON) {
            /* hw-accelerated bbdev */
            printf("hw-accelerated bbdev %s\n", io_cfg->bbdev_dev[0]);
            snprintf(bbdev_wdev, RTE_DIM(bbdev_wdev), "-a%s", io_cfg->bbdev_dev[0]);
        } else if (io_cfg->bbdev_mode == XRAN_BBDEV_MODE_HW_OFF) {
            /* software-only bbdev */
            snprintf(bbdev_wdev, RTE_DIM(bbdev_wdev), "%s", "--vdev=baseband_turbo_sw");
        } else if (io_cfg->bbdev_mode == XRAN_BBDEV_MODE_HW_SW) {
            /* software and hw-accelerated bbdev */
            printf("software and hw-accelerated bbdev %s\n", io_cfg->bbdev_dev[0]);
            snprintf(bbdev_wdev, RTE_DIM(bbdev_wdev), "-a%s", io_cfg->bbdev_dev[0]);
            snprintf(bbdev_vdev, RTE_DIM(bbdev_vdev), "%s", "--vdev=baseband_turbo_sw");
        } else {
            rte_panic("Cannot init DPDK incorrect [bbdev_mode %d]\n", io_cfg->bbdev_mode);
        }
    }

    if (io_cfg->dpdkIoVaMode == 1) {
        snprintf(iova_mode, RTE_DIM(iova_mode), "%s", "--iova-mode=va");
    }

    if (io_cfg->dpdkMemorySize) {
        snprintf(socket_mem, RTE_DIM(socket_mem), "--socket-mem=%d", io_cfg->dpdkMemorySize);
        snprintf(socket_limit, RTE_DIM(socket_limit), "--socket-limit=%d", io_cfg->dpdkMemorySize);
    }

    if (io_cfg->core < 64)
        c_mask |= (long)(1L << io_cfg->core);
    else
        c_mask_64_127 |= (long)(1L << (io_cfg->core - 64));

    if (io_cfg->system_core < 64)
        c_mask |= (long)(1L << io_cfg->system_core);
    else
        c_mask_64_127 |= (long)(1L << (io_cfg->system_core - 64));

    if (io_cfg->timing_core < 64)
        c_mask |= (long)(1L << io_cfg->timing_core);
    else
        c_mask_64_127 |= (long)(1L << (io_cfg->timing_core - 64));

    nWorkerCore = 1L;
    for (i = 0; i < coreNum && i < 64; i++) {
        if (nWorkerCore & (uint64_t)io_cfg->pkt_proc_core) {
            c_mask |= nWorkerCore;
        }
        nWorkerCore = nWorkerCore << 1;
    }

    nWorkerCore = 1L;
    for (i = 64; i < coreNum && i < 128; i++) {
        if (nWorkerCore & (uint64_t)io_cfg->pkt_proc_core_64_127) {
            c_mask_64_127 |= nWorkerCore;
        }
        nWorkerCore = nWorkerCore << 1;
    }

    printf("total cores %d c_mask 0x%lx%016lx core %d [id] system_core %d [id] pkt_proc_core 0x%lx%016lx [mask] pkt_aux_core %d [id] timing_core %d [id]\n",
           coreNum, c_mask_64_127, c_mask, io_cfg->core, io_cfg->system_core, io_cfg->pkt_proc_core_64_127, io_cfg->pkt_proc_core, io_cfg->pkt_aux_core, io_cfg->timing_core);

    snprintf(core_mask, sizeof(core_mask), "-c 0x%lx%016lx", c_mask_64_127, c_mask);
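
    /* Worked example (hypothetical config): system_core=0, core=2,
     * timing_core=3 and pkt_proc_core=0x30 (workers on cores 4 and 5) give
     * c_mask = 0x3d, so the EAL coremask argument becomes
     * "-c 0x0000000000000003d". */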

    ctx->io_cfg = *io_cfg;

    for (ivf = 0; ivf < XRAN_VF_MAX; ivf++) {
        for (i = 0; i < ID_MAX; i++)        /* Initialize all as broadcast */
            memset(&ctx->entities[ivf][i], 0xFF, sizeof(ctx->entities[0][0]));
    }

    printf("%s: Calling rte_eal_init:", __FUNCTION__);
    for (i = 0; i < RTE_DIM(argv); i++) {
        printf("%s ", argv[i]);
    }
    printf("\n");

    /* This will return on system_core, which is not necessarily the
     * one we're on right now. */
    if (rte_eal_init(RTE_DIM(argv), argv) < 0)
        rte_panic("Cannot init EAL: %s\n", rte_strerror(rte_errno));

    if (rte_eal_process_type() == RTE_PROC_SECONDARY)
        rte_exit(EXIT_FAILURE,
                 "Secondary process type not supported.\n");

    xran_init_mbuf_pool(mtu);

#ifdef RTE_LIBRTE_PDUMP
    /* initialize packet capture framework */
    rte_pdump_init();
#endif

    /* Timers. */
    rte_timer_subsystem_init();

    *lcore_id = rte_get_next_lcore(rte_lcore_id(), 0, 0);

    PANIC_ON(*lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()");

    for (i = 0; i < XRAN_VF_MAX; i++)
        port[i] = 0xffff;

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        for (i = 0; i < XRAN_VF_MAX && i < io_cfg->num_vfs; i++) {
            if (io_cfg->dpdk_dev[i]) {
                struct rte_dev_iterator iterator;

                if (rte_dev_probe(io_cfg->dpdk_dev[i]) != 0 ||
                    rte_eth_dev_count_avail() == 0) {
                    errx(1, "Network port doesn't exist\n");
                }
                RTE_ETH_FOREACH_MATCHING_DEV(port_id, io_cfg->dpdk_dev[i], &iterator) {
                    port[i] = port_id;
                }
                xran_init_port(port[i], io_cfg->num_rxq, mtu);

                if (!(i & 1) || io_cfg->one_vf_cu_plane) {
                    /* U-plane rings */
                    snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "tx_ring_up", i);
                    ctx->tx_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING_TRX,
                        rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
                    PANIC_ON(ctx->tx_ring[i] == NULL, "failed to allocate tx ring");
                    for (qi = 0; qi < io_cfg->num_rxq; qi++) {
                        snprintf(ring_name, RTE_DIM(ring_name), "%s_%d_%d", "rx_ring_up", i, qi);
                        ctx->rx_ring[i][qi] = rte_ring_create(ring_name, NUM_MBUFS_RING_TRX,
                            rte_lcore_to_socket_id(*lcore_id), RING_F_SP_ENQ);
                        PANIC_ON(ctx->rx_ring[i][qi] == NULL, "failed to allocate rx ring");
                    }
                } else {
                    /* C-plane rings */
                    snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "tx_ring_cp", i);
                    ctx->tx_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING_TRX,
                        rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
                    PANIC_ON(ctx->tx_ring[i] == NULL, "failed to allocate tx ring");
                    for (qi = 0; qi < io_cfg->num_rxq; qi++) {
                        snprintf(ring_name, RTE_DIM(ring_name), "%s_%d_%d", "rx_ring_cp", i, qi);
                        ctx->rx_ring[i][qi] = rte_ring_create(ring_name, NUM_MBUFS_RING_TRX,
                            rte_lcore_to_socket_id(*lcore_id), RING_F_SP_ENQ);
                        PANIC_ON(ctx->rx_ring[i][qi] == NULL, "failed to allocate rx ring");
                    }
                }
            } else {
                printf("no DPDK port provided\n");
                xran_init_port_mempool(i, mtu);
            }

            if (io_cfg->dpdk_dev[i]) {
                check_port_link_status(port[i]);
            }
        }
    } else {
        rte_panic("ethdi_dpdk_io_loop() failed to start with RTE_PROC_SECONDARY\n");
    }

    for (i = 0; i < XRAN_VF_MAX && i < io_cfg->num_vfs; i++) {
        ctx->io_cfg.port[i] = port[i];
        print_dbg("port_id 0x%04x\n", ctx->io_cfg.port[i]);
    }

    for (i = 0; i < XRAN_VF_MAX; i++) {
        ctx->vf2xran_port[i] = 0xFFFF;
        ctx->rxq_per_port[i] = 1;
        for (qi = 0; qi < XRAN_VF_QUEUE_MAX; qi++) {
            ctx->vf_and_q2pc_id[i][qi] = 0xFFFF;

            ctx->vf_and_q2cid[i][qi].cuPortId     = 0xFF;
            ctx->vf_and_q2cid[i][qi].bandSectorId = 0xFF;
            ctx->vf_and_q2cid[i][qi].ccId         = 0xFF;
            ctx->vf_and_q2cid[i][qi].ruPortId     = 0xFF;
        }
    }

    for (i = 0; i < XRAN_VF_MAX && i < io_cfg->num_vfs; i++) {
        if (io_cfg->dpdk_dev[i]) {
            struct rte_ether_addr *p_addr;

            if (i % (io_cfg->nEthLinePerPort * (2 - 1 * ctx->io_cfg.one_vf_cu_plane)) == 0) /* C-plane and U-plane VFs per line */
                xran_port += 1;

            rte_eth_macaddr_get(port[i], &ctx->entities[i][io_cfg->id]);

            p_addr = &ctx->entities[i][io_cfg->id];
            printf("[%2d] vf %2u local  SRC MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                   " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
                   xran_port, i,
                   p_addr->addr_bytes[0], p_addr->addr_bytes[1], p_addr->addr_bytes[2],
                   p_addr->addr_bytes[3], p_addr->addr_bytes[4], p_addr->addr_bytes[5]);

            p_addr = &p_ru_addr[i];
            printf("[%2d] vf %2u remote DST MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                   " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
                   xran_port, i,
                   p_addr->addr_bytes[0], p_addr->addr_bytes[1], p_addr->addr_bytes[2],
                   p_addr->addr_bytes[3], p_addr->addr_bytes[4], p_addr->addr_bytes[5]);

            rte_ether_addr_copy(&p_ru_addr[i], &ctx->entities[i][ID_O_RU]);
            ctx->vf2xran_port[i] = xran_port;
            ctx->rxq_per_port[i] = io_cfg->num_rxq;
        }
    }
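
    /* Example of the VF-to-xRAN-port mapping above (hypothetical config):
     * with nEthLinePerPort = 1 and one_vf_cu_plane = 0 the divisor is 2, so
     * VFs {0,1} (the C-plane and U-plane pair of line 0) map to xRAN port 0,
     * VFs {2,3} to port 1, and so on. */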

    for (i = 0; i < xran_port + 1 && i < XRAN_PORTS_NUM; i++) {
        snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "dl_gen_ring_up", i);
        ctx->up_dl_pkt_gen_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING,
            rte_lcore_to_socket_id(*lcore_id), /*RING_F_SC_DEQ*/ 0);
        PANIC_ON(ctx->up_dl_pkt_gen_ring[i] == NULL, "failed to allocate dl gen ring");
        printf("created %s\n", ring_name);
    }

    return 0;
}

static inline uint16_t xran_tx_from_ring(int port, struct rte_ring *r)
{
    struct rte_mbuf *mbufs[BURST_SIZE];
    uint16_t dequeued, sent = 0;
    uint32_t remaining;
    long t1 = MLogTick();

    dequeued = rte_ring_dequeue_burst(r, (void **)mbufs, BURST_SIZE, &remaining);
    if (!dequeued)
        return 0;   /* Nothing to send. */

    while (1) {     /* When the TX queue is full, keep retrying until everything is sent. */
        sent += rte_eth_tx_burst(port, 0, &mbufs[sent], dequeued - sent);
        if (sent == dequeued) {
            MLogTask(PID_RADIO_ETH_TX_BURST, t1, MLogTick());
            return remaining;
        }
    }
}
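
/* Design note: the retry loop above busy-waits until the NIC accepts the
 * whole burst. That is a reasonable trade-off here because the IO loops run
 * on dedicated polling cores and dropping fronthaul traffic would be worse
 * than briefly spinning. */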

int32_t process_dpdk_io(void *args)
{
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    struct xran_io_cfg *cfg = &(xran_ethdi_get_ctx()->io_cfg);
    int32_t *port = &cfg->port[0];
    int port_id = 0;
    int qi = 0;

    rte_timer_manage();

    for (port_id = 0; port_id < XRAN_VF_MAX && port_id < ctx->io_cfg.num_vfs; port_id++) {
        struct rte_mbuf *mbufs[BURST_RX_IO_SIZE];
        if (port[port_id] == 0xFF)
            return 0;

        /* RX */
        for (qi = 0; qi < ctx->rxq_per_port[port_id]; qi++) {
            const uint16_t rxed = rte_eth_rx_burst(port[port_id], qi, mbufs, BURST_RX_IO_SIZE);
            if (rxed != 0) {
                unsigned int enq_n = 0;
                long t1 = MLogTick();
                ctx->rx_vf_queue_cnt[port[port_id]][qi] += rxed;
                enq_n = rte_ring_enqueue_burst(ctx->rx_ring[port_id][qi], (void **)mbufs, rxed, NULL);
                if (rxed - enq_n)
                    rte_panic("error enq\n");
                MLogTask(PID_RADIO_RX_VALIDATE, t1, MLogTick());
            }
        }

        /* TX */
        const uint16_t sent = xran_tx_from_ring(port[port_id], ctx->tx_ring[port_id]);
        (void)sent;

        /* One way Delay Measurements */
        if ((cfg->eowd_cmn[cfg->id].owdm_enable != 0) && (cfg->eowd_cmn[cfg->id].measVf == port_id)) {
            if (!xran_ecpri_port_update_required(cfg, (uint16_t)port_id)) {
#ifdef ORAN_OWD_DEBUG_TX_LOOP
                printf("going to owd tx for port %d\n", port_id);
#endif
                if (xran_ecpri_one_way_delay_measurement_transmitter((uint16_t)port_id, (void *)xran_dev_get_ctx()) != OK) {
                    errx(1, "Exit pdio port_id %d", port_id);
                }
            }
        }

        if (XRAN_STOPPED == xran_if_current_state)
            return -1;
    }

    if (XRAN_STOPPED == xran_if_current_state)
        return -1;

    return 0;
}
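
/* process_dpdk_io() services both directions from one core; the _tx/_rx
 * variants below split the same work so transmit and receive can run on two
 * separate lcores. */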

int32_t process_dpdk_io_tx(void *args)
{
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    struct xran_io_cfg *cfg = &(xran_ethdi_get_ctx()->io_cfg);
    int32_t *port = &cfg->port[0];
    int port_id = 0;

    //rte_timer_manage();

    for (port_id = 0; port_id < XRAN_VF_MAX && port_id < ctx->io_cfg.num_vfs; port_id++) {
        if (port[port_id] == 0xFF)
            return 0;

        /* TX */
        const uint16_t sent = xran_tx_from_ring(port[port_id], ctx->tx_ring[port_id]);
        (void)sent;

        if (XRAN_STOPPED == xran_if_current_state)
            return -1;
    }

    if (XRAN_STOPPED == xran_if_current_state)
        return -1;

    return 0;
}

int32_t process_dpdk_io_rx(void *args)
{
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    struct xran_io_cfg *cfg = &(xran_ethdi_get_ctx()->io_cfg);
    int32_t *port = &cfg->port[0];
    int port_id = 0;
    int qi = 0;

    rte_timer_manage();

    for (port_id = 0; port_id < XRAN_VF_MAX && port_id < ctx->io_cfg.num_vfs; port_id++) {
        struct rte_mbuf *mbufs[BURST_RX_IO_SIZE];
        if (port[port_id] == 0xFF)
            return 0;

        /* RX */
        for (qi = 0; qi < ctx->rxq_per_port[port_id]; qi++) {
            const uint16_t rxed = rte_eth_rx_burst(port[port_id], qi, mbufs, BURST_RX_IO_SIZE);
            if (rxed != 0) {
                unsigned int enq_n = 0;
                long t1 = MLogTick();
                ctx->rx_vf_queue_cnt[port[port_id]][qi] += rxed;
                enq_n = rte_ring_enqueue_burst(ctx->rx_ring[port_id][qi], (void **)mbufs, rxed, NULL);
                if (rxed - enq_n)
                    rte_panic("error enq\n");
                MLogTask(PID_RADIO_RX_VALIDATE, t1, MLogTick());
            }
        }

        if (XRAN_STOPPED == xran_if_current_state)
            return -1;
    }

    if (XRAN_STOPPED == xran_if_current_state)
        return -1;

    return 0;
}