1 /******************************************************************************
3 * Copyright (c) 2019 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
20 * @brief This file has all definitions for the Ethernet Data Interface Layer
22 * @ingroup group_lte_source_auxlib
23 * @author Intel Corporation
31 #include <sys/queue.h>
34 #include <linux/limits.h>
35 #include <sys/types.h>
41 #include <rte_config.h>
42 #include <rte_common.h>
44 #include <rte_memory.h>
45 #include <rte_memcpy.h>
46 #include <rte_memzone.h>
48 #include <rte_per_lcore.h>
49 #include <rte_launch.h>
50 #include <rte_atomic.h>
51 #include <rte_cycles.h>
52 #include <rte_prefetch.h>
53 #include <rte_lcore.h>
54 #include <rte_per_lcore.h>
55 #include <rte_branch_prediction.h>
56 #include <rte_interrupts.h>
58 #include <rte_debug.h>
59 #include <rte_ethdev.h>
62 #include <rte_timer.h>
66 #include "xran_fh_o_du.h"
67 #include "xran_mlog_lnx.h"
68 #include "xran_printf.h"
70 #include "../src/xran_lib_mlog_tasks_id.h"
/* Max packets pulled per rte_eth_rx_burst() call in process_dpdk_io(). */
#define BURST_RX_IO_SIZE 48

/* Single global Ethernet-DIL context; zero-initialized here and populated
 * by xran_ethdi_init_dpdk_io(). Accessed via xran_ethdi_get_ctx(). */
struct xran_ethdi_ctx g_ethdi_ctx = { 0 };
/* Coarse interface run-state flag polled by the I/O loop (process_dpdk_io);
 * the interface starts in the stopped state. */
enum xran_if_state xran_if_current_state = XRAN_STOPPED;
77 struct rte_mbuf *xran_ethdi_mbuf_alloc(void)
79 return rte_pktmbuf_alloc(_eth_mbuf_pool);
82 int32_t xran_ethdi_mbuf_send(struct rte_mbuf *mb, uint16_t ethertype, uint16_t vf_id)
84 struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
87 mb->port = ctx->io_cfg.port[vf_id];
88 xran_add_eth_hdr_vlan(&ctx->entities[vf_id][ID_O_RU], ethertype, mb);
90 res = xran_enqueue_mbuf(mb, ctx->tx_ring[vf_id]);
94 int32_t xran_ethdi_mbuf_send_cp(struct rte_mbuf *mb, uint16_t ethertype, uint16_t vf_id)
96 struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
99 mb->port = ctx->io_cfg.port[vf_id];
100 xran_add_eth_hdr_vlan(&ctx->entities[vf_id][ID_O_RU], ethertype, mb);
102 res = xran_enqueue_mbuf(mb, ctx->tx_ring[vf_id]);
108 ethertype_handler fn;
109 } xran_ethertype_handlers[] = {
110 { ETHER_TYPE_ETHDI, NULL },
111 { ETHER_TYPE_ECPRI, NULL },
112 { ETHER_TYPE_START_TX, NULL }
117 int32_t xran_register_ethertype_handler(uint16_t ethertype, ethertype_handler callback)
121 for (i = 0; i < RTE_DIM(xran_ethertype_handlers); ++i)
122 if (xran_ethertype_handlers[i].ethertype == ethertype) {
123 xran_ethertype_handlers[i].fn = callback;
128 elog("support for ethertype %u not found", ethertype);
/**
 * Dispatch a received packet to the handler registered for its ethertype.
 * The Ethernet header has already been stripped by the caller
 * (xran_ethdi_filter_packet). A matching table entry with a NULL fn, or an
 * ethertype with no entry at all, falls through to the drop log below.
 * NOTE(review): fragment — the braces, the declaration of `i`, and the
 * final return value after the log are not visible here.
 */
int xran_handle_ether(uint16_t ethertype, struct rte_mbuf *pkt, uint64_t rx_time)
    for (i = 0; i < RTE_DIM(xran_ethertype_handlers); ++i)
        if (xran_ethertype_handlers[i].ethertype == ethertype)
            /* Entry may exist with no callback registered yet. */
            if (xran_ethertype_handlers[i].fn)
                return xran_ethertype_handlers[i].fn(pkt, rx_time);

    /* No handler matched: log and drop. */
    wlog("Packet with unrecognized ethertype '%.4X' dropped", ethertype);
/* Process vlan tag. Cut the ethernet header. Call the etherype handlers. */
/* NOTE(review): fragment — braces and several lines are missing; `ctx` is
 * declared but its uses (presumably the VLAN-tag handling promised by the
 * comment above) are in the missing lines. L82-83 also contain mis-encoded
 * characters (`ð_hdr` should read `&eth_hdr`) — fix when restoring. */
int xran_ethdi_filter_packet(struct rte_mbuf *pkt, uint64_t rx_time)
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    /* View the start of the mbuf data as an Ethernet header. */
    const struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, void *);

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    nlog("*** processing RX'ed packet of size %d ***",
         rte_pktmbuf_data_len(pkt));
    /* TODO: just dump ethernet header in readable format? */

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    char dst[ETHER_ADDR_FMT_SIZE] = "(empty)";
    char src[ETHER_ADDR_FMT_SIZE] = "(empty)";

    ether_format_addr(dst, sizeof(dst), ð_hdr->d_addr);
    ether_format_addr(src, sizeof(src), ð_hdr->s_addr);
    nlog("src: %s dst: %s ethertype: %.4X", dst, src,
         rte_be_to_cpu_16(eth_hdr->ether_type));

    /* Cut out the ethernet header. It's not needed anymore. */
    if (rte_pktmbuf_adj(pkt, sizeof(*eth_hdr)) == NULL) {
        wlog("Packet too short, dropping");

    /* eth_hdr still points at the (now trimmed-off) header bytes, which
     * remain readable within the same mbuf segment. */
    return xran_handle_ether(rte_be_to_cpu_16(eth_hdr->ether_type), pkt, rx_time);
/* Check the link status of all ports in up to 9s, and print them finally */
/* NOTE(review): fragment — `all_ports_up` is never visibly initialized and
 * several braces/`break` statements are in the missing lines; the visible
 * control flow is incomplete. Polls the port every CHECK_INTERVAL ms for at
 * most MAX_CHECK_TIME iterations and prints the final link state. */
static void check_port_link_status(uint8_t portid)
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    uint8_t count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status portid [%d] ", portid);

    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        memset(&link, 0, sizeof(link));
        /* Non-blocking query of the current link state. */
        rte_eth_link_get_nowait(portid, &link);

        /* print link status if flag set */
        if (print_flag == 1) {
            if (link.link_status)
                printf("Port %d Link Up - speed %u "
                    "Mbps - %s\n", (uint8_t)portid,
                    (unsigned)link.link_speed,
                    (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                    ("full-duplex") : ("half-duplex\n"));
                printf("Port %d Link Down\n",

        /* clear all_ports_up flag if any link down */
        if (link.link_status == ETH_LINK_DOWN) {

        /* after finally printing all link status, get out */
        if (all_ports_up == 0) {
            rte_delay_ms(CHECK_INTERVAL);

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            printf(" ... done\n");
/**
 * Bring up the DPDK environment and all Ethernet I/O resources for xRAN:
 * builds the EAL argument vector (core mask, IOVA mode, socket memory,
 * optional bbdev/FEC options), calls rte_eal_init(), creates the mbuf pool,
 * timers, per-VF TX/RX rings, probes/initializes the configured DPDK ports,
 * and records local (O-DU) and remote (O-RU) MAC addresses per VF.
 *
 * @param name        application name; becomes argv[0] and the EAL file-prefix
 * @param io_cfg      I/O configuration (cores, VF device names, bbdev mode, ...)
 * @param lcore_id    out: lcore chosen for the I/O loop
 * @param p_o_du_addr O-DU MAC addresses (per VF) — not used in the visible lines;
 *                    presumably consumed in the missing ones — TODO confirm
 * @param p_ru_addr   O-RU MAC addresses (per VF), copied into ctx->entities
 *
 * NOTE(review): fragment — the return type/value, several declarations
 * (`core_mask`, `c_mask`, `i`, `ivf`, `port_id`), many braces, and parts of
 * the ring-creation logic are in the missing lines.
 */
xran_ethdi_init_dpdk_io(char *name, const struct xran_io_cfg *io_cfg,
    int *lcore_id, struct rte_ether_addr *p_o_du_addr,
    struct rte_ether_addr *p_ru_addr)
    uint16_t port[XRAN_VF_MAX];
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    uint64_t nWorkerCore = 1;
    /* Number of configured CPUs (not necessarily all online). */
    uint32_t coreNum = sysconf(_SC_NPROCESSORS_CONF);
    char bbdev_wdev[32] = "";
    char bbdev_vdev[32] = "";
    /* Defaults; overridden below from io_cfg where applicable. */
    char iova_mode[32] = "--iova-mode=pa";
    char socket_mem[32] = "--socket-mem=8192";
    char socket_limit[32] = "--socket-limit=8192";
    char ring_name[32] = "";

    /* EAL argv; the bbdev_wdev/bbdev_vdev slots are filled in below. */
    char *argv[] = { name, core_mask, "-n2", iova_mode, socket_mem, socket_limit, "--proc-type=auto",
        "--file-prefix", name, "-w", "0000:00:00.0", bbdev_wdev, bbdev_vdev};

    /* Select bbdev (FEC acceleration) EAL options. */
    if(io_cfg->bbdev_mode != XRAN_BBDEV_NOT_USED){
        printf("BBDEV_FEC_ACCL_NR5G\n");
        if (io_cfg->bbdev_mode == XRAN_BBDEV_MODE_HW_ON){
            // hw-accelerated bbdev
            printf("hw-accelerated bbdev %s\n", io_cfg->bbdev_dev[0]);
            snprintf(bbdev_wdev, RTE_DIM(bbdev_wdev), "-w %s", io_cfg->bbdev_dev[0]);
        } else if (io_cfg->bbdev_mode == XRAN_BBDEV_MODE_HW_OFF){
            // hw-accelerated bbdev disable
            if(io_cfg->bbdev_dev[0]){
                printf("hw-accelerated bbdev disable %s\n", io_cfg->bbdev_dev[0]);
                snprintf(bbdev_wdev, RTE_DIM(bbdev_wdev), "-b %s", io_cfg->bbdev_dev[0]);
            /* NOTE(review): fragment — the branch structure around the two
             * lines below (software turbo vdev vs. invalid-mode panic) is in
             * the missing lines. */
            snprintf(bbdev_wdev, RTE_DIM(bbdev_wdev), "%s", "--vdev=baseband_turbo_sw");
            rte_panic("Cannot init DPDK incorrect [bbdev_mode %d]\n", io_cfg->bbdev_mode);

    if (io_cfg->dpdkIoVaMode == 1){
        snprintf(iova_mode, RTE_DIM(iova_mode), "%s", "--iova-mode=va");

    if (io_cfg->dpdkMemorySize){
        snprintf(socket_mem, RTE_DIM(socket_mem), "--socket-mem=%d", io_cfg->dpdkMemorySize);
        snprintf(socket_limit, RTE_DIM(socket_limit), "--socket-limit=%d", io_cfg->dpdkMemorySize);

    /* Core mask: the three dedicated cores... */
    c_mask = (long)(1L << io_cfg->core) |
        (long)(1L << io_cfg->system_core) |
        (long)(1L << io_cfg->timing_core);

    /* ...plus every worker core selected by the pkt_proc_core bitmask. */
    for (i = 0; i < coreNum; i++) {
        if (nWorkerCore & (uint64_t)io_cfg->pkt_proc_core) {
            c_mask |= nWorkerCore;
        nWorkerCore = nWorkerCore << 1;

    printf("total cores %d c_mask 0x%lx core %d [id] system_core %d [id] pkt_proc_core 0x%lx [mask] pkt_aux_core %d [id] timing_core %d [id]\n",
        coreNum, c_mask, io_cfg->core, io_cfg->system_core, io_cfg->pkt_proc_core, io_cfg->pkt_aux_core, io_cfg->timing_core);

    snprintf(core_mask, sizeof(core_mask), "-c 0x%lx", c_mask);

    /* Snapshot the caller's configuration into the global context. */
    ctx->io_cfg = *io_cfg;

    for (ivf = 0; ivf < XRAN_VF_MAX; ivf++){
        for (i = 0; i <= ID_BROADCAST; i++) /* Initialize all as broadcast */
            memset(&ctx->entities[ivf][i], 0xFF, sizeof(ctx->entities[0][0]));

    printf("%s: Calling rte_eal_init:", __FUNCTION__);
    for (i = 0; i < RTE_DIM(argv); i++)
        printf("%s ", argv[i]);

    /* This will return on system_core, which is not necessarily the
     * one we're on right now. */
    if (rte_eal_init(RTE_DIM(argv), argv) < 0)
        rte_panic("Cannot init EAL: %s\n", rte_strerror(rte_errno));

    xran_init_mbuf_pool();

#ifdef RTE_LIBRTE_PDUMP
    /* initialize packet capture framework */

    /* Timers used by the ethdi layer (ping/sync/tx bookkeeping). */
    rte_timer_subsystem_init();
    rte_timer_init(&ctx->timer_ping);
    rte_timer_init(&ctx->timer_sync);
    rte_timer_init(&ctx->timer_tx);

    /* Pick the next available lcore for the I/O loop. */
    *lcore_id = rte_get_next_lcore(rte_lcore_id(), 0, 0);

    PANIC_ON(*lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()");

    for (i = 0; i < XRAN_VF_MAX; i++)

    /* Only the primary DPDK process probes devices and creates rings. */
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        for (i = 0; i < XRAN_VF_MAX && i < io_cfg->num_vfs; i++){
            if(io_cfg->dpdk_dev[i]){
                struct rte_dev_iterator iterator;

                if (rte_dev_probe(io_cfg->dpdk_dev[i]) != 0 ||
                    rte_eth_dev_count_avail() == 0) {
                    errx(1, "Network port doesn't exist\n");

                RTE_ETH_FOREACH_MATCHING_DEV(port_id, io_cfg->dpdk_dev[i], &iterator){
                xran_init_port(port[i]);
                printf("no DPDK port provided\n");

            /* Per-VF user-plane and control-plane TX/RX rings.
             * NOTE(review): fragment — the visible cp-ring assignments reuse
             * the same tx_ring[i]/rx_ring[i] slots as the up-rings; the
             * distinguishing index arithmetic is in the missing lines. */
            snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "tx_ring_up", i);
            ctx->tx_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "rx_ring_up", i);
            ctx->rx_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);

            snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "tx_ring_cp", i);
            ctx->tx_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            snprintf(ring_name, RTE_DIM(ring_name), "%s_%d", "rx_ring_cp", i);
            ctx->rx_ring[i] = rte_ring_create(ring_name, NUM_MBUFS_RING,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);

            if(io_cfg->dpdk_dev[i]){
                check_port_link_status(port[i]);

        rte_panic("ethdi_dpdk_io_loop() failed to start with RTE_PROC_SECONDARY\n");

    PANIC_ON(ctx->tx_ring == NULL, "failed to allocate tx ring");
    PANIC_ON(ctx->rx_ring == NULL, "failed to allocate rx ring");
    PANIC_ON(ctx->pkt_dump_ring == NULL, "failed to allocate pkt dumping ring");
    /* Record the resolved DPDK port id for each active VF. */
    for (i = 0; i < XRAN_VF_MAX && i < io_cfg->num_vfs; i++){
        ctx->io_cfg.port[i] = port[i];
        print_dbg("port_id 0x%04x\n", ctx->io_cfg.port[i]);

    /* Learn local MACs from the NICs and copy the remote O-RU MACs in. */
    for (i = 0; i < XRAN_VF_MAX && i < io_cfg->num_vfs; i++){
        if(io_cfg->dpdk_dev[i]){
            struct rte_ether_addr *p_addr;

            rte_eth_macaddr_get(port[i], &ctx->entities[i][io_cfg->id]);

            p_addr = &ctx->entities[i][io_cfg->id];
            printf("vf %u local SRC MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
                p_addr->addr_bytes[0], p_addr->addr_bytes[1], p_addr->addr_bytes[2],
                p_addr->addr_bytes[3], p_addr->addr_bytes[4], p_addr->addr_bytes[5]);

            p_addr = &p_ru_addr[i];
            printf("vf %u remote DST MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
                p_addr->addr_bytes[0], p_addr->addr_bytes[1], p_addr->addr_bytes[2],
                p_addr->addr_bytes[3], p_addr->addr_bytes[4], p_addr->addr_bytes[5]);

            rte_ether_addr_copy(&p_ru_addr[i], &ctx->entities[i][ID_O_RU]);
/* Drain up to BURST_SIZE packets from ring `r` onto DPDK port `port`,
 * retrying rte_eth_tx_burst() until the whole dequeued batch is sent.
 * Returns the number of packets sent (0 when the ring was empty).
 * NOTE(review): fragment — the dequeue call's final argument, braces and
 * the loop exit are in the missing lines. */
static inline uint16_t xran_tx_from_ring(int port, struct rte_ring *r)
    struct rte_mbuf *mbufs[BURST_SIZE];
    uint16_t dequeued, sent = 0;
    long t1 = MLogTick();

    dequeued = rte_ring_dequeue_burst(r, (void **)mbufs, BURST_SIZE,
        return 0; /* Nothing to send. */

    while (1) { /* When tx queue is full it is trying again till succeed */
        sent += rte_eth_tx_burst(port, 0, &mbufs[sent], dequeued - sent);
        /* Log the TX burst duration for MLog profiling. */
        MLogTask(PID_RADIO_ETH_TX_BURST, t1, MLogTick());
        if (sent == dequeued)
/**
 * One pass of the DPDK I/O work: for every active VF, burst-receive packets
 * from the NIC into the per-VF RX ring and burst-transmit pending packets
 * from the per-VF TX ring, then check the global stop flag.
 * NOTE(review): fragment — braces, the `port_id`/`enq_n` declarations,
 * `continue`/`break` statements and the return value are in the missing
 * lines; the function body continues past the end of this view.
 */
int32_t process_dpdk_io(void)
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    struct xran_io_cfg * cfg = &(xran_ethdi_get_ctx()->io_cfg);
    int32_t* port = &cfg->port[0];

    for (port_id = 0; port_id < XRAN_VF_MAX && port_id < ctx->io_cfg.num_vfs; port_id++){
        struct rte_mbuf *mbufs[BURST_RX_IO_SIZE];
        /* Presumably 0xFF marks an unassigned VF slot — TODO confirm. */
        if(port[port_id] == 0xFF)

        /* RX: pull up to BURST_RX_IO_SIZE packets off the wire. */
        const uint16_t rxed = rte_eth_rx_burst(port[port_id], 0, mbufs, BURST_RX_IO_SIZE);
        long t1 = MLogTick();
        enq_n = rte_ring_enqueue_burst(ctx->rx_ring[port_id], (void*)mbufs, rxed, NULL);
        /* Ring overflow is fatal here rather than dropping packets. */
        rte_panic("error enq\n");
        MLogTask(PID_RADIO_RX_VALIDATE, t1, MLogTick());

        /* TX: flush this VF's pending TX ring onto the wire. */
        const uint16_t sent = xran_tx_from_ring(port[port_id], ctx->tx_ring[port_id]);

        if (XRAN_STOPPED == xran_if_current_state)

    if (XRAN_STOPPED == xran_if_current_state)