/******************************************************************************
*
*   Copyright (c) 2019 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/
/**
 * @brief This file has all definitions for the Ethernet Data Interface Layer
 * @ingroup group_lte_source_auxlib
 * @author Intel Corporation
 **/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <linux/limits.h>
#include <sys/types.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_pause.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

/* Project headers (assumed names from the xRAN build) providing NUM_MBUFS,
 * MBUF_CACHE, BURST_SIZE, the MBUF_POOL_* sizes, PANIC_ON and the
 * nlog/dlog/elog macros. */
#include "ethernet.h"
#include "ethdi.h"
struct rte_mempool *_eth_mbuf_pool          = NULL;
struct rte_mempool *_eth_mbuf_pool_indirect = NULL;
struct rte_mempool *_eth_mbuf_pool_rx       = NULL;
struct rte_mempool *_eth_mbuf_pool_small    = NULL;
struct rte_mempool *_eth_mbuf_pool_big      = NULL;

struct rte_mempool *socket_direct_pool   = NULL;
struct rte_mempool *socket_indirect_pool = NULL;
/*
 * Make sure the ring indexes are big enough to cover buf space x2.
 * This ring buffer maintains the invariant head - tail <= RINGSIZE.
 * head == tail: ring buffer empty
 * head - tail == RINGSIZE: ring buffer full
 */
typedef uint16_t ring_idx;

static struct {
    ring_idx head;        /* next free byte; producers advance it via CAS */
    ring_idx read_head;   /* everything below this index is fully written */
    ring_idx tail;        /* next byte to print; touched only by the reader */
    char buf[1024];       /* size must be a power of 2! */
} io_ring = { 0, 0, 0, {0} };

#define RINGSIZE sizeof(io_ring.buf)
#define RINGMASK (RINGSIZE - 1)
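
/*
 * Index arithmetic sketch (illustrative values, not from the original
 * source): ring_idx is an unsigned 16-bit type and RINGSIZE is a power
 * of 2, so the occupancy math survives index wrap-around:
 *
 *     tail = 65530, head = 4 (head has wrapped past 65535)
 *     (ring_idx)(head - tail) == 10    -> 10 bytes pending
 *     head & RINGMASK == 4             -> write offset inside buf[]
 *
 * This is why buf[] must be a power of 2 and why the indexes are only
 * reduced modulo RINGSIZE when a real buffer offset is needed.
 */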
int __xran_delayed_msg(const char *fmt, ...)
{
    va_list ap;
    char localbuf[RINGSIZE];
    int msg_len;
    int copy_len;   /* int, so a vsnprintf error (< 0) is caught below */
    ring_idx old_head, new_head;

    /* first prep a copy of the message on the local stack */
    va_start(ap, fmt);
    msg_len = vsnprintf(localbuf, RINGSIZE, fmt, ap);
    va_end(ap);

    /* atomically reserve space in the ring */
    for (;;) {
        old_head = io_ring.head;    /* snapshot head */
        /* free always within range of [0, RINGSIZE] - proof by induction */
        const ring_idx free = RINGSIZE - (old_head - io_ring.tail);

        copy_len = RTE_MIN(msg_len, free);
        if (copy_len <= 0)
            return 0;   /* vsnprintf error or ring buffer full; drop the log */

        new_head = old_head + copy_len;
        RTE_ASSERT((ring_idx)(new_head - io_ring.tail) <= RINGSIZE);

        if (likely(__atomic_compare_exchange_n(&io_ring.head, &old_head,
                new_head, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)))
            break;  /* space reserved; no other producer can claim it */
    }

    /* Now copy data in at ease. */
    const int copy_start = (old_head & RINGMASK);
    if (copy_start < (new_head & RINGMASK))     /* no wrap */
        memcpy(io_ring.buf + copy_start, localbuf, copy_len);
    else {                                      /* wrap-around */
        const int chunk_len = RINGSIZE - copy_start;

        memcpy(io_ring.buf + copy_start, localbuf, chunk_len);
        memcpy(io_ring.buf, localbuf + chunk_len, copy_len - chunk_len);
    }

    /* wait for previous writes to complete before updating read_head. */
    while (io_ring.read_head != old_head)
        rte_pause();
    io_ring.read_head = new_head;

    return copy_len;
}
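
/*
 * Usage sketch (hypothetical call site, not from this file): any lcore can
 * log from the fast path without touching stdio; a housekeeping core then
 * drains the ring via xran_show_delayed_message():
 *
 *     __xran_delayed_msg("pkt drop on port %d (tti %u)\n", port, tti);
 */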
/*
 * Display part of the message stored in the ring buffer.
 * Might require multiple calls to print the full message.
 * Will return 0 when nothing is left to print.
 */
int xran_show_delayed_message(void)
{
    ring_idx tail = io_ring.tail;
    ring_idx wlen = io_ring.read_head - tail; /* always within [0, RINGSIZE] */

    if (wlen == 0)
        return 0;       /* nothing to print */

    tail &= RINGMASK;   /* modulo the range down now that we have wlen */

    /* Make sure we're not going over the buffer end. Next call will wrap. */
    if (tail + wlen > RINGSIZE)
        wlen = RINGSIZE - tail;

    RTE_ASSERT(tail + wlen <= RINGSIZE);

    /* We use write() here to avoid recalculating the string length in fwrite(). */
    const ssize_t written = write(STDOUT_FILENO, io_ring.buf + tail, wlen);
    if (written <= 0)
        return 0;       /* to avoid moving tail the wrong way on error */

    /* Move tail up. Only we touch it. And we only print from one core. */
    io_ring.tail += written;

    return written;     /* next invocation will print the rest, if any */
}
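
/*
 * Drain-loop sketch (illustrative, not part of the original file): a single
 * non-time-critical core flushes everything queued so far by calling until
 * 0 is returned; each call prints at most one contiguous chunk:
 *
 *     while (xran_show_delayed_message() > 0)
 *         ;
 */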
void xran_init_mbuf_pool(void)
{
    /* Init the buffer pools: create them in the primary process,
     * look them up by name in secondary processes. */
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        _eth_mbuf_pool = rte_pktmbuf_pool_create("mempool", NUM_MBUFS,
                MBUF_CACHE, 0, MBUF_POOL_ELEMENT, rte_socket_id());
#ifdef XRAN_ATTACH_MBUF
        _eth_mbuf_pool_indirect = rte_pktmbuf_pool_create("mempool_indirect",
                NUM_MBUFS, MBUF_CACHE, 0, MBUF_POOL_ELEMENT, rte_socket_id());
#endif
        _eth_mbuf_pool_rx = rte_pktmbuf_pool_create("mempool_rx", NUM_MBUFS,
                MBUF_CACHE, 0, MBUF_POOL_ELEMENT, rte_socket_id());
        _eth_mbuf_pool_small = rte_pktmbuf_pool_create("mempool_small",
                NUM_MBUFS, MBUF_CACHE, 0, MBUF_POOL_ELM_SMALL, rte_socket_id());
        _eth_mbuf_pool_big = rte_pktmbuf_pool_create("mempool_big",
                NUM_MBUFS_BIG, 0, 0, MBUF_POOL_ELM_BIG, rte_socket_id());
    } else {
        _eth_mbuf_pool = rte_mempool_lookup("mempool");
        _eth_mbuf_pool_indirect = rte_mempool_lookup("mempool_indirect");
        _eth_mbuf_pool_rx = rte_mempool_lookup("mempool_rx");
        _eth_mbuf_pool_small = rte_mempool_lookup("mempool_small");
        _eth_mbuf_pool_big = rte_mempool_lookup("mempool_big");
    }

    if (_eth_mbuf_pool == NULL)
        rte_panic("Cannot create mbuf pool: %s\n", rte_strerror(rte_errno));
#ifdef XRAN_ATTACH_MBUF
    if (_eth_mbuf_pool_indirect == NULL)
        rte_panic("Cannot create indirect mbuf pool: %s\n", rte_strerror(rte_errno));
#endif
    if (_eth_mbuf_pool_rx == NULL)
        rte_panic("Cannot create RX mbuf pool: %s\n", rte_strerror(rte_errno));
    if (_eth_mbuf_pool_small == NULL)
        rte_panic("Cannot create small mbuf pool: %s\n", rte_strerror(rte_errno));
    if (_eth_mbuf_pool_big == NULL)
        rte_panic("Cannot create big mbuf pool: %s\n", rte_strerror(rte_errno));

    if (socket_direct_pool == NULL)
        socket_direct_pool = _eth_mbuf_pool;

    if (socket_indirect_pool == NULL)
        socket_indirect_pool = _eth_mbuf_pool_indirect;
}
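
/*
 * Process-model sketch (hypothetical launch commands; --proc-type is a
 * standard DPDK EAL flag): the pools above are created once by the primary
 * process and attached by name from secondaries:
 *
 *     ./xran_app                            # primary: creates "mempool", ...
 *     ./xran_tool --proc-type=secondary     # attaches via rte_mempool_lookup()
 */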
/* Init a NIC port, then start the port. */
void xran_init_port(int p_id, struct ether_addr *p_lls_cu_addr)
{
    static uint16_t nb_rxd = BURST_SIZE;
    static uint16_t nb_txd = BURST_SIZE;
    struct ether_addr addr;
    struct rte_eth_rxmode rxmode = {
            .split_hdr_size = 0,
            .max_rx_pkt_len = MAX_RX_LEN,
            .offloads = (DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_CRC_STRIP)
            };
    struct rte_eth_txmode txmode = {
            .mq_mode = ETH_MQ_TX_NONE
            };
    struct rte_eth_conf port_conf = {
            .rxmode = rxmode,
            .txmode = txmode
            };
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_txconf txq_conf;

    int ret;
    struct rte_eth_dev_info dev_info;
    const char *drv_name = "";
    int sock_id = rte_eth_dev_socket_id(p_id);

    rte_eth_dev_info_get(p_id, &dev_info);
    if (dev_info.driver_name)
        drv_name = dev_info.driver_name;
    printf("initializing port %d for TX, drv=%s\n", p_id, drv_name);

    rte_eth_macaddr_get(p_id, &addr);

    printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
           " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
           addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2],
           addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]);

    /* One RX queue and one TX queue per port. */
    ret = rte_eth_dev_configure(p_id, 1, 1, &port_conf);
    if (ret < 0)
        rte_panic("Cannot configure port %u (%d)\n", p_id, ret);

    ret = rte_eth_dev_adjust_nb_rx_tx_desc(p_id, &nb_rxd, &nb_txd);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Cannot adjust number of "
                "descriptors: err=%d, port=%d\n", ret, p_id);
    printf("Port %u: nb_rxd %d nb_txd %d\n", p_id, nb_rxd, nb_txd);

    rxq_conf = dev_info.default_rxconf;
    ret = rte_eth_rx_queue_setup(p_id, 0, nb_rxd,
            sock_id, &rxq_conf, _eth_mbuf_pool_rx);
    if (ret < 0)
        rte_panic("Cannot init RX for port %u (%d)\n", p_id, ret);

    txq_conf = dev_info.default_txconf;
    ret = rte_eth_tx_queue_setup(p_id, 0, nb_txd, sock_id, &txq_conf);
    if (ret < 0)
        rte_panic("Cannot init TX for port %u (%d)\n", p_id, ret);

    ret = rte_eth_dev_start(p_id);
    if (ret < 0)
        rte_panic("Cannot start port %u (%d)\n", p_id, ret);

    // rte_eth_promiscuous_enable(p_id);
}
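
/*
 * Bring-up sketch (hypothetical caller; remote_mac is a made-up variable):
 * the pools must exist before the port starts, because the RX queue above
 * is bound to _eth_mbuf_pool_rx:
 *
 *     xran_init_mbuf_pool();
 *     xran_init_port(0, &remote_mac);
 */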
void xran_memdump(void *addr, int len)
{
    int i;
    /* "XX " per byte, a newline every 16 bytes, and a NUL terminator;
     * the original buffer was sized len * 2 + len / 16 + 1, which is too
     * small for the 3-character "%.2X " format used below. */
    char tmp_buf[len * 3 + len / 16 + 1];
    char *p = tmp_buf;

    for (i = 0; i < len; ++i) {
        sprintf(p, "%.2X ", ((uint8_t *)addr)[i]);
        p += 3;
        if (i % 16 == 15)
            *p++ = '\n';
    }
    *p = '\0';
    nlog("%s", tmp_buf);
}
/* Prepend an ethernet header, and possibly a vlan tag. */
void xran_add_eth_hdr(struct ether_addr *dst, uint16_t ethertype, struct rte_mbuf *mb)
{
    /* add in the ethernet header */
    struct ether_hdr *const h = (void *)rte_pktmbuf_prepend(mb, sizeof(*h));

    PANIC_ON(h == NULL, "mbuf prepend of ether_hdr failed");

    /* Fill in the ethernet header. */
    rte_eth_macaddr_get(mb->port, &h->s_addr);      /* set source addr */
    h->d_addr = *dst;                               /* set dst addr */
    h->ether_type = rte_cpu_to_be_16(ethertype);    /* ethertype too */

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    {
        char dst[ETHER_ADDR_FMT_SIZE] = "(empty)";
        char src[ETHER_ADDR_FMT_SIZE] = "(empty)";

        nlog("*** packet for TX below (len %d) ***", rte_pktmbuf_pkt_len(mb));
        ether_format_addr(src, sizeof(src), &h->s_addr);
        ether_format_addr(dst, sizeof(dst), &h->d_addr);
        nlog("src: %s dst: %s ethertype: %.4X", src, dst, ethertype);
    }
#endif
#ifdef VLAN_SUPPORT
    mb->vlan_tci = FLEXRAN_UP_VLAN_TAG;
    dlog("Inserting vlan tag of %d", FLEXRAN_UP_VLAN_TAG);
    rte_vlan_insert(&mb);
#endif
}
int xran_send_mbuf(struct ether_addr *dst, struct rte_mbuf *mb)
{
    xran_add_eth_hdr(dst, ETHER_TYPE_ETHDI, mb);

    if (rte_eth_tx_burst(mb->port, 0, &mb, 1) == 1)
        return 1;   /* the single-packet burst went out */

    elog("packet sending failed on port %d", mb->port);
    rte_pktmbuf_free(mb);   /* TX failed, so we still own the mbuf */

    return 0;
}
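
/*
 * Send-path sketch (hypothetical caller; payload/peer_mac are made-up
 * names): allocate from the shared pool, append the data, then hand the
 * mbuf off; on success the NIC owns it, on failure xran_send_mbuf() frees it:
 *
 *     struct rte_mbuf *mb = rte_pktmbuf_alloc(_eth_mbuf_pool);
 *     char *p = rte_pktmbuf_append(mb, payload_len);
 *     rte_memcpy(p, payload, payload_len);
 *     xran_send_mbuf(&peer_mac, mb);
 */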
int xran_send_message_burst(int dst_id, int pkt_type, void *body, int len)
{
    struct rte_mbuf *mbufs[BURST_SIZE];
    int i;
    uint8_t *src = body;
    const struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();

    /* We're limited by the maximum mbuf size on the receive side.
     * We can change this, but it would be a bigger rework. */
    RTE_ASSERT(len < MBUF_POOL_ELM_BIG);

    /* Allocate the required number of mbufs. */
    const uint8_t count = ceilf((float)len / MAX_DATA_SIZE);
    if (rte_pktmbuf_alloc_bulk(_eth_mbuf_pool, mbufs, count) != 0)
        rte_panic("Failed to allocate %d mbufs\n", count);

    nlog("burst transfer with data size %lu", MAX_DATA_SIZE);
    for (i = 0; len > 0; ++i) {
        char *p;
        struct burst_hdr *bhdr;
        struct ethdi_hdr *edi_hdr;

        /* Setup the ethdi_hdr. */
        edi_hdr = (void *)rte_pktmbuf_append(mbufs[i], sizeof(*edi_hdr));
        if (edi_hdr == NULL)
            rte_panic("append of ethdi_hdr failed\n");
        edi_hdr->pkt_type = PKT_BURST;
        /* edi_hdr->source_id is set up in tx_from_ring */
        edi_hdr->dest_id = dst_id;

        /* Setup the burst header. */
        bhdr = (void *)rte_pktmbuf_append(mbufs[i], sizeof(*bhdr));
        if (bhdr == NULL)
            rte_panic("mbuf append of burst_hdr failed\n");
        bhdr->original_type = pkt_type;
        bhdr->pkt_idx = i;          /* save the index of the burst chunk */
        bhdr->total_pkts = count;

        /* now copy in the actual data */
        const int curr_data_len = RTE_MIN(len, MAX_TX_LEN -
                rte_pktmbuf_pkt_len(mbufs[i]) - sizeof(struct ether_hdr));
        p = (void *)rte_pktmbuf_append(mbufs[i], curr_data_len);
        if (p == NULL)
            rte_panic("mbuf append of %d data bytes failed\n", curr_data_len);
        /* This copy is unavoidable, as we're splitting one big buffer
         * into multiple mbufs. */
        rte_memcpy(p, src, curr_data_len);

        dlog("curr_data_len[%d] = %d", i, curr_data_len);
        dlog("packet %d size %d", i, rte_pktmbuf_pkt_len(mbufs[i]));

        /* Update our source data pointer and remaining length. */
        len -= curr_data_len;
        src += curr_data_len;
    }

    /* Now enqueue the fully prepared burst. */
    i = rte_ring_enqueue_bulk(ctx->tx_ring[0], (void **)mbufs, count, NULL);
    PANIC_ON(i != count, "failed to enqueue all mbufs: %d/%d", i, count);
    dlog("%d packets enqueued on port %d.", count, ctx->io_cfg.port);

    return 0;
}
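
/*
 * Chunking arithmetic (illustrative numbers; the real MAX_DATA_SIZE comes
 * from the project headers): len = 10000 bytes with MAX_DATA_SIZE == 3328
 * gives count = ceilf(10000 / 3328.0) = 4 mbufs, indexed 0..3 in
 * bhdr->pkt_idx so the receiver can reassemble the original buffer in order.
 */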
/* Fill in the ethernet header, and possibly a vlan tag. */
void xran_add_eth_hdr_vlan(struct ether_addr *dst, uint16_t ethertype, struct rte_mbuf *mb, uint16_t vlan_tci)
{
    /* The ethernet header is already at the start of the mbuf data
     * (unlike xran_add_eth_hdr(), nothing is prepended here). */
    struct ether_hdr *const h = rte_pktmbuf_mtod(mb, struct ether_hdr *);

    PANIC_ON(h == NULL, "no room for ether_hdr in mbuf");

    /* Fill in the ethernet header. */
    rte_eth_macaddr_get(mb->port, &h->s_addr);      /* set source addr */
    h->d_addr = *dst;                               /* set dst addr */
    h->ether_type = rte_cpu_to_be_16(ethertype);    /* ethertype too */

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    {
        char dst[ETHER_ADDR_FMT_SIZE] = "(empty)";
        char src[ETHER_ADDR_FMT_SIZE] = "(empty)";

        nlog("*** packet for TX below (len %d) ***", rte_pktmbuf_pkt_len(mb));
        ether_format_addr(src, sizeof(src), &h->s_addr);
        ether_format_addr(dst, sizeof(dst), &h->d_addr);
        nlog("src: %s dst: %s ethertype: %.4X", src, dst, ethertype);
    }
#endif

    mb->vlan_tci = vlan_tci;
    dlog("Inserting vlan tag of %d", vlan_tci);
    rte_vlan_insert(&mb);
}