1 /******************************************************************************
3 * Copyright (c) 2019 Intel.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 *******************************************************************************/
/**
 * @brief This file has all definitions for the Ethernet Data Interface Layer
 * @ingroup group_lte_source_auxlib
 * @author Intel Corporation
 */
31 #include <sys/queue.h>
34 #include <linux/limits.h>
35 #include <sys/types.h>
41 #include <rte_config.h>
42 #include <rte_common.h>
44 #include <rte_memory.h>
45 #include <rte_memcpy.h>
46 #include <rte_memzone.h>
48 #include <rte_per_lcore.h>
49 #include <rte_launch.h>
50 #include <rte_atomic.h>
51 #include <rte_cycles.h>
52 #include <rte_prefetch.h>
53 #include <rte_lcore.h>
54 #include <rte_per_lcore.h>
55 #include <rte_branch_prediction.h>
56 #include <rte_interrupts.h>
58 #include <rte_debug.h>
59 #include <rte_ethdev.h>
62 #include <rte_timer.h>
67 #include "../src/mlog_lnx_xRAN.h"
72 #include "../src/xran_lib_mlog_tasks_id.h"
/* Global Ethernet DPDK interface context (singleton); zero-initialized at load. */
struct xran_ethdi_ctx g_ethdi_ctx = { 0 };
/* Current interface state; polled by the IO loop (see xran_ethdi_dpdk_io_loop),
 * which checks for XRAN_STOPPED to terminate. */
enum xran_if_state xran_if_current_state = XRAN_STOPPED;
77 struct rte_mbuf *xran_ethdi_mbuf_alloc(void)
79 return rte_pktmbuf_alloc(_eth_mbuf_pool);
/**
 * Send a packet on the user-plane (U-plane) VF.
 *
 * Stamps the egress DPDK port, prepends an Ethernet/VLAN header addressed
 * to the RU entity carrying the U-plane VLAN tag, then enqueues the packet
 * on the U-plane TX ring, from which the IO loop transmits it.
 *
 * @param mb        packet to send; handed off to the TX ring
 * @param ethertype ethertype to place in the new Ethernet header
 */
int xran_ethdi_mbuf_send(struct rte_mbuf *mb, uint16_t ethertype)
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();

    /* Egress through the U-plane virtual function. */
    mb->port = ctx->io_cfg.port[ETHDI_UP_VF];
    xran_add_eth_hdr_vlan(&ctx->entities[ID_RU], ethertype, mb, ctx->up_vtag);

    /* Hand off to the IO loop via the U-plane TX ring. */
    res = xran_enqueue_mbuf(mb, ctx->tx_ring[ETHDI_UP_VF]);
/**
 * Send a packet on the control-plane (C-plane) VF.
 *
 * Mirrors xran_ethdi_mbuf_send() but uses the C-plane port, C-plane VLAN
 * tag and C-plane TX ring.
 *
 * @param mb        packet to send; handed off to the TX ring
 * @param ethertype ethertype to place in the new Ethernet header
 */
int xran_ethdi_mbuf_send_cp(struct rte_mbuf *mb, uint16_t ethertype)
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();

    /* Egress through the C-plane virtual function. */
    mb->port = ctx->io_cfg.port[ETHDI_CP_VF];
    xran_add_eth_hdr_vlan(&ctx->entities[ID_RU], ethertype, mb, ctx->cp_vtag);

    /* Hand off to the IO loop via the C-plane TX ring. */
    res = xran_enqueue_mbuf(mb, ctx->tx_ring[ETHDI_CP_VF]);
106 void xran_ethdi_stop_tx()
108 struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
109 rte_timer_stop_sync(&ctx->timer_tx);
    /* Callback invoked by xran_handle_ether() for this ethertype; stays NULL
     * (packet dropped) until set via xran_register_ethertype_handler(). */
    ethertype_handler fn;
} xran_ethertype_handlers[] = {
    { ETHER_TYPE_ETHDI, NULL },
    { ETHER_TYPE_ECPRI, NULL },
    { ETHER_TYPE_START_TX, NULL }
/**
 * Register (or replace) the callback for one of the known ethertypes.
 *
 * Only ethertypes already present in the xran_ethertype_handlers table can
 * be registered; unknown values are rejected with an error log.
 *
 * @param ethertype ethertype to attach the handler to (host byte order)
 * @param callback  function xran_handle_ether() will invoke for matches
 */
int xran_register_ethertype_handler(uint16_t ethertype, ethertype_handler callback)
    /* Linear scan is fine: the handler table has only a few entries. */
    for (i = 0; i < RTE_DIM(xran_ethertype_handlers); ++i)
        if (xran_ethertype_handlers[i].ethertype == ethertype) {
            xran_ethertype_handlers[i].fn = callback;

    /* No matching table slot: report the unsupported ethertype. */
    elog("support for ethertype %u not found", ethertype);
/**
 * Dispatch a received packet to the handler registered for its ethertype.
 *
 * @param ethertype packet's ethertype (host byte order)
 * @param pkt       received packet (Ethernet header already stripped by the
 *                  caller, see xran_ethdi_filter_packet)
 * @param rx_time   receive timestamp forwarded to the handler
 */
int xran_handle_ether(uint16_t ethertype, struct rte_mbuf *pkt, uint64_t rx_time)
    for (i = 0; i < RTE_DIM(xran_ethertype_handlers); ++i)
        if (xran_ethertype_handlers[i].ethertype == ethertype)
            if (xran_ethertype_handlers[i].fn)
                /* Handler takes ownership of pkt from here on. */
                return xran_ethertype_handlers[i].fn(pkt, rx_time);

    /* No registered handler for this ethertype: log and drop. */
    wlog("Packet with unrecognized ethertype '%.4X' dropped", ethertype);
/* Process vlan tag. Cut the ethernet header. Call the ethertype handlers. */
/**
 * Pre-process one received packet: strip and validate the VLAN tag, cut the
 * Ethernet header, then dispatch to the per-ethertype handler.
 *
 * @param pkt     received packet, starting at the Ethernet header
 * @param rx_time receive timestamp, forwarded to the handler
 */
int xran_ethdi_filter_packet(struct rte_mbuf *pkt, uint64_t rx_time)
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();

    /* rte_vlan_strip() returns 0 when a VLAN tag was present and stripped;
     * the stripped TCI is left in pkt->vlan_tci. */
    if (rte_vlan_strip(pkt) == 0) {
        if (pkt->vlan_tci == ctx->cp_vtag) {
            dlog("VLAN tci matches %d", pkt->vlan_tci);
            /* NOTE(review): only cp_vtag is compared here; handling of
             * up_vtag-tagged traffic is not visible in this chunk — confirm. */
            wlog("packet with wrong VLAN tag %d, dropping",
        dlog("Packet not vlan tagged");

    const struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, void *);

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    nlog("*** processing RX'ed packet of size %d ***",
    rte_pktmbuf_data_len(pkt));
    /* TODO: just dump ethernet header in readable format? */

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    char dst[ETHER_ADDR_FMT_SIZE] = "(empty)";
    char src[ETHER_ADDR_FMT_SIZE] = "(empty)";

    /* NOTE(review): "ð_hdr" below looks like a text mis-encoding of
     * "&eth_hdr" (the "&eth" HTML entity) — restore before building. */
    ether_format_addr(dst, sizeof(dst), ð_hdr->d_addr);
    ether_format_addr(src, sizeof(src), ð_hdr->s_addr);
    nlog("src: %s dst: %s ethertype: %.4X", dst, src,
    rte_be_to_cpu_16(eth_hdr->ether_type));

    /* Cut out the ethernet header. It's not needed anymore. */
    if (rte_pktmbuf_adj(pkt, sizeof(*eth_hdr)) == NULL) {
        wlog("Packet too short, dropping");

    /* Dispatch on the ethertype, converted to host byte order. */
    return xran_handle_ether(rte_be_to_cpu_16(eth_hdr->ether_type), pkt, rx_time);
/**
 * One-time initialization of the DPDK IO layer: EAL startup, mbuf pool and
 * ring creation, port attach/setup, and launch of the IO loop thread.
 *
 * @param name          application name; reused as the EAL --file-prefix
 * @param io_cfg        IO loop configuration (cores, devices, id); copied
 *                      into the global ethdi context
 * @param lcore_id      out: lcore chosen to run xran_ethdi_dpdk_io_loop()
 * @param p_lls_cu_addr MAC address used when initializing the local port(s)
 * @param p_ru_addr     MAC address of the RU peer entity
 * @param cp_vlan       VLAN tag to use for C-plane traffic
 * @param up_vlan       VLAN tag to use for U-plane traffic
 */
int xran_ethdi_init_dpdk_io(char *name, const struct xran_io_loop_cfg *io_cfg,
    int *lcore_id, struct ether_addr *p_lls_cu_addr, struct ether_addr *p_ru_addr,
    uint16_t cp_vlan, uint16_t up_vlan)
    uint16_t port[2] = {0, 0};
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();

    /* EAL argv; "-w 0000:00:00.0" whitelists a placeholder PCI address —
     * presumably to keep EAL from probing real NICs on its own; confirm. */
    char *argv[] = { name, core_mask, "-m3072", "--proc-type=auto",
        "--file-prefix", name, "-w", "0000:00:00.0" };

    /* Build the EAL coremask from every configured core.
     * NOTE(review): (1 << core) is a 32-bit shift — undefined behavior for
     * core IDs >= 32; confirm the supported core ID range. */
    snprintf(core_mask, sizeof(core_mask), "-c%x",
        (1 << io_cfg->core) |
        (1 << io_cfg->system_core) |
        (1 << io_cfg->pkt_proc_core) |
        (1 << io_cfg->pkt_aux_core) |
        (1 << io_cfg->timing_core));

    /* Seed the context with the caller's configuration. */
    ctx->io_cfg = *io_cfg;
    ctx->ping_state = PING_IDLE;
    ctx->known_peers = 1;
    ctx->busy_poll_till = rte_rdtsc();
    ctx->cp_vtag = cp_vlan;
    ctx->up_vtag = up_vlan;

    for (i = 0; i <= ID_BROADCAST; i++) /* Initialize all as broadcast */
        memset(&ctx->entities[i], 0xFF, sizeof(ctx->entities[0]));

    /* This will return on system_core, which is not necessarily the
     * one we're on right now. */
    if (rte_eal_init(RTE_DIM(argv), argv) < 0)
        rte_panic("Cannot init EAL: %s\n", rte_strerror(rte_errno));

    xran_init_mbuf_pool();

    /* Timers used for ping/sync/tx housekeeping. */
    rte_timer_subsystem_init();
    rte_timer_init(&ctx->timer_ping);
    rte_timer_init(&ctx->timer_sync);
    rte_timer_init(&ctx->timer_tx);

    /* Pick the next available lcore to host the IO loop. */
    *lcore_id = rte_get_next_lcore(rte_lcore_id(), 0, 0);

    PANIC_ON(*lcore_id == RTE_MAX_LCORE, "out of lcores for io_loop()");

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        for (i = 0; i < ETHDI_VF_MAX; i ++){
            if (rte_eth_dev_attach(io_cfg->dpdk_dev[i], &port[i]) != 0 ||
                rte_eth_dev_count_avail() == 0)
                errx(1, "Network port doesn't exist.");
            xran_init_port(port[i], p_lls_cu_addr); /* we only have 1 port at this stage */

            /* NOTE(review): both the *_up and *_cp rings are created on every
             * iteration with fixed names; rte_ring_create() fails (returns
             * NULL) on a duplicate name, and each later assignment overwrites
             * the earlier handle. An if/else on i (UP VF vs CP VF) with
             * per-VF ring names was probably intended — confirm upstream. */
            ctx->tx_ring[i] = rte_ring_create("tx_ring_up", NUM_MBUFS,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            ctx->rx_ring[i] = rte_ring_create("rx_ring_up", NUM_MBUFS,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            ctx->pkt_dump_ring[i] = rte_ring_create("pkt_dump_ring_up", NUM_MBUFS,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            ctx->tx_ring[i] = rte_ring_create("tx_ring_cp", NUM_MBUFS,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            ctx->rx_ring[i] = rte_ring_create("rx_ring_cp", NUM_MBUFS,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);
            ctx->pkt_dump_ring[i] = rte_ring_create("pkt_dump_ring_cp", NUM_MBUFS,
                rte_lcore_to_socket_id(*lcore_id), RING_F_SC_DEQ);

        rte_panic("ethdi_dpdk_io_loop() failed to start with RTE_PROC_SECONDARY\n");

    /* NOTE(review): these conditions test the array addresses, which can
     * never be NULL; the per-element handles (e.g. ctx->tx_ring[i]) were
     * probably meant — as written these checks can never fire. */
    PANIC_ON(ctx->tx_ring == NULL, "failed to allocate tx ring");
    PANIC_ON(ctx->rx_ring == NULL, "failed to allocate rx ring");
    PANIC_ON(ctx->pkt_dump_ring == NULL, "failed to allocate pkt dumping ring");

    /* Publish the attached port IDs into the context copy of the config. */
    for (i = 0; i < ETHDI_VF_MAX; i++)
        ctx->io_cfg.port[i] = port[i];

    /* Local MAC for our own entity; peer MAC for the RU entity. */
    rte_eth_macaddr_get(port[ETHDI_UP_VF], &ctx->entities[io_cfg->id]);
    ether_addr_copy(p_ru_addr, &ctx->entities[ID_RU]);

    /* Start the actual IO thread */
    if (rte_eal_remote_launch(xran_ethdi_dpdk_io_loop, &ctx->io_cfg, *lcore_id))
        rte_panic("ethdi_dpdk_io_loop() failed to start\n");
/**
 * Dequeue a burst of packets from ring r and transmit them on the given
 * DPDK port (queue 0), retrying until the NIC accepts the whole burst.
 *
 * @param port DPDK port ID to transmit on
 * @param r    ring holding mbufs queued for transmission
 */
static inline uint16_t xran_tx_from_ring(int port, struct rte_ring *r)
    struct rte_mbuf *mbufs[BURST_SIZE];
    uint16_t dequeued, sent = 0;

    long t1 = MLogTick();

    dequeued = rte_ring_dequeue_burst(r, (void **)mbufs, BURST_SIZE,
        return 0; /* Nothing to send. */

    /* Retry until the NIC TX queue has accepted the whole burst. */
    while (1) {
        sent += rte_eth_tx_burst(port, 0, &mbufs[sent], dequeued - sent);
        MLogTask(PID_RADIO_ETH_TX_BURST, t1, MLogTick());

        if (sent == dequeued)
/**
 * This is the main DPDK-IO loop.
 * Burst-receives from both VF ports into the per-VF RX rings and drains the
 * per-VF TX rings; sleeps when there are no incoming packets and no queued
 * work (sleep length is defined in IDLE_SLEEP_MICROSECS). Runs until
 * xran_if_current_state becomes XRAN_STOPPED.
 *
 * @param io_loop_cfg pointer to the struct xran_io_loop_cfg for this loop
 */
int xran_ethdi_dpdk_io_loop(void *io_loop_cfg)
    struct xran_ethdi_ctx *ctx = xran_ethdi_get_ctx();
    const struct xran_io_loop_cfg *const cfg = io_loop_cfg;

    /* U-plane and C-plane DPDK port IDs served by this loop. */
    const int port[ETHDI_VF_MAX] = {cfg->port[ETHDI_UP_VF], cfg->port[ETHDI_CP_VF]};
    struct sched_param sched_param;

    printf("%s [PORT: %d %d] [CPU %2d] [PID: %6d]\n", __FUNCTION__, port[ETHDI_UP_VF], port[ETHDI_CP_VF] , rte_lcore_id(), getpid());
    printf("%s [CPU %2d] [PID: %6d]\n", __FUNCTION__, rte_lcore_id(), getpid());

    /* Elevate to real-time FIFO priority; failure is logged, not fatal. */
    sched_param.sched_priority = XRAN_THREAD_DEFAULT_PRIO;
    if ((res = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sched_param)))
        printf("priority is not changed: coreId = %d, result1 = %d\n",rte_lcore_id(), res);

    for (port_id = 0; port_id < ETHDI_VF_MAX; port_id++){
        struct rte_mbuf *mbufs[BURST_SIZE];

        /* RX: burst-read from the NIC and push onto this VF's RX ring. */
        const uint16_t rxed = rte_eth_rx_burst(port[port_id], 0, mbufs, BURST_SIZE);

        long t1 = MLogTick();
        rte_ring_enqueue_burst(ctx->rx_ring[port_id], (void*)mbufs, rxed, NULL);
        MLogTask(PID_RADIO_RX_VALIDATE, t1, MLogTick());

        /* TX: drain whatever was queued on this VF's TX ring. */
        const uint16_t sent = xran_tx_from_ring(port[port_id], ctx->tx_ring[port_id]);
        continue; /* more packets might be waiting in queues */

        rte_pause(); /* short pause, optimize memory access */
        if (XRAN_STOPPED == xran_if_current_state)

    if (XRAN_STOPPED == xran_if_current_state)

    puts("IO loop finished");

    //for (port_id = 0; port_id < ETHDI_VF_MAX; port_id++)
    //    xran_ethdi_port_stats(port[port_id]);