#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <linux/limits.h>
#include <sys/types.h>

#include <rte_config.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_errno.h>

/* Lock-free ring used to defer log output off the data path.
 * The buffer size is an assumption (the declaration is elided in the
 * extract); it must be a power of two for RINGMASK below to work. */
static struct {
    char buf[4096];
    unsigned int head;      /* next byte writers will reserve  */
    unsigned int read_head; /* bytes published and readable    */
    unsigned int tail;      /* bytes already written to stdout */
} io_ring = { {0}, 0, 0 };
#define RINGSIZE sizeof(io_ring.buf)
#define RINGMASK (RINGSIZE - 1)

int __xran_delayed_msg(const char *fmt, ...)
{
    char localbuf[RINGSIZE];
    unsigned int old_head, new_head, copy_len;
    int msg_len;
    va_list ap;

    /* Format into a local buffer first; never block on I/O here. */
    va_start(ap, fmt);
    msg_len = vsnprintf(localbuf, RINGSIZE, fmt, ap);
    va_end(ap);
    if (msg_len > (int)RINGSIZE - 1)
        msg_len = RINGSIZE - 1; /* vsnprintf truncated the message */

    /* Reserve space by advancing head with a CAS; retry on contention.
     * Indexes run free and are masked on use, so head - tail is the
     * number of bytes currently in flight. */
    for (;;) {
        old_head = io_ring.head;
        const unsigned int free = RINGSIZE - (old_head - io_ring.tail);
        copy_len = RTE_MIN((unsigned int)msg_len, free);
        new_head = old_head + copy_len;

        if (likely(__atomic_compare_exchange_n(&io_ring.head, &old_head,
            new_head, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)))
            break;
    }
    if (unlikely(copy_len == 0))
        return 0;       /* ring full: drop the message */

    /* Copy into the reserved slice, split in two if it wraps. */
    const int copy_start = (old_head & RINGMASK);
    if (copy_start < (int)(new_head & RINGMASK)) {
        memcpy(io_ring.buf + copy_start, localbuf, copy_len);
    } else {
        const int chunk_len = RINGSIZE - copy_start;
        memcpy(io_ring.buf + copy_start, localbuf, chunk_len);
        memcpy(io_ring.buf, localbuf + chunk_len, copy_len - chunk_len);
    }

    /* Publish in reservation order: wait until all earlier writers have
     * published, then expose our bytes to the reader. */
    while (io_ring.read_head != old_head)
        rte_pause();
    io_ring.read_head = new_head;

    return copy_len;
}
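/*
 * Illustrative only (not part of the original file): because RINGMASK is
 * RINGSIZE - 1, the ring works only when sizeof(io_ring.buf) is a power
 * of two; the free-running head/tail indexes then wrap correctly through
 * unsigned arithmetic. A data-path caller never touches stdout:
 */
#if 0   /* usage sketch */
static void example_log_from_datapath(int port, int n_tx)
{
    /* formats into the lock-free ring; no syscall on the fast path */
    __xran_delayed_msg("port %d: sent burst of %d packets\n", port, n_tx);
}
#endif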
int xran_show_delayed_message(void)
{
    const unsigned int head = io_ring.read_head;
    unsigned int tail = io_ring.tail;

    if (head == tail)           /* nothing published yet */
        return 0;

    /* Assumption: the locals are elided in the extract; this follows the
     * assert below. Write only the contiguous chunk up to the end of the
     * buffer; a wrapped region is simply drained in two calls. */
    unsigned int wlen = head - tail;
    tail &= RINGMASK;
    if (tail + wlen > RINGSIZE)
        wlen = RINGSIZE - tail;
    RTE_ASSERT(tail + wlen <= RINGSIZE);

    const ssize_t written = write(STDOUT_FILENO, io_ring.buf + tail, wlen);
    if (written <= 0)
        return -1;

    io_ring.tail += written;    /* frees the space for writers */
    return written;
}
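/*
 * Illustrative only: a housekeeping lcore would drain the ring
 * periodically so that data-path cores never write to stdout themselves.
 */
#if 0   /* usage sketch */
static int example_logger_loop(__rte_unused void *arg)
{
    for (;;) {
        while (xran_show_delayed_message() > 0)
            ;                   /* drain everything published */
        usleep(1000);           /* then back off for 1 ms */
    }
    return 0;                   /* unreachable */
}
#endif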
void xran_init_mbuf_pool(void)
{
    /* Pools are created once by the primary process; secondary processes
     * attach to them by name (the creation calls are elided in the
     * extract). */
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        /* ... rte_pktmbuf_pool_create() for each pool ... */
    }

    if (_eth_mbuf_pool == NULL)
        rte_panic("Cannot create mbuf pool: %s\n", rte_strerror(rte_errno));
#ifdef XRAN_ATTACH_MBUF
    if (_eth_mbuf_pool_inderect == NULL)    /* [sic] */
        rte_panic("Cannot create mbuf pool: %s\n", rte_strerror(rte_errno));
#endif
    if (_eth_mbuf_pool_rx == NULL)
        rte_panic("Cannot create mbuf pool: %s\n", rte_strerror(rte_errno));
    if (_eth_mbuf_pool_small == NULL)
        rte_panic("Cannot create small mbuf pool: %s\n",
            rte_strerror(rte_errno));
    if (_eth_mbuf_pool_big == NULL)
        rte_panic("Cannot create big mbuf pool: %s\n",
            rte_strerror(rte_errno));
}
void xran_init_port(int p_id, struct ether_addr *p_lls_cu_addr)
{
    /* Initial descriptor counts are assumptions (elided in the extract);
     * they are adjusted to the device limits below anyway. */
    uint16_t nb_rxd = 1024;
    uint16_t nb_txd = 1024;
    struct ether_addr addr;
    struct rte_eth_rxmode rxmode = {
        .split_hdr_size = 0,
        .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_CRC_STRIP
    };
    struct rte_eth_txmode txmode = {
        .mq_mode = ETH_MQ_TX_NONE
    };
    struct rte_eth_conf port_conf = {
        .rxmode = rxmode,
        .txmode = txmode
    };
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_txconf txq_conf;
    struct rte_eth_dev_info dev_info;
    const char *drv_name = "";
    int sock_id = rte_eth_dev_socket_id(p_id);
    int ret;

    rte_eth_dev_info_get(p_id, &dev_info);
    if (dev_info.driver_name)
        drv_name = dev_info.driver_name;
    printf("initializing port %d for TX, drv=%s\n", p_id, drv_name);

    rte_eth_macaddr_get(p_id, &addr);
    printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
           " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
           p_id,
           addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2],
           addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]);

    ret = rte_eth_dev_configure(p_id, 1, 1, &port_conf);
    if (ret < 0)
        rte_panic("Cannot configure port %u (%d)\n", p_id, ret);

    ret = rte_eth_dev_adjust_nb_rx_tx_desc(p_id, &nb_rxd, &nb_txd);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Cannot adjust number of "
            "descriptors: err=%d, port=%d\n", ret, p_id);
    printf("Port %u: nb_rxd %d nb_txd %d\n", p_id, nb_rxd, nb_txd);

    /* one RX queue, fed from the RX mbuf pool (pool choice is an
     * assumption; the argument list is elided in the extract) */
    rxq_conf = dev_info.default_rxconf;
    ret = rte_eth_rx_queue_setup(p_id, 0, nb_rxd,
        sock_id, &rxq_conf, _eth_mbuf_pool_rx);
    if (ret < 0)
        rte_panic("Cannot init RX for port %u (%d)\n", p_id, ret);

    /* one TX queue */
    txq_conf = dev_info.default_txconf;
    ret = rte_eth_tx_queue_setup(p_id, 0, nb_txd, sock_id, &txq_conf);
    if (ret < 0)
        rte_panic("Cannot init TX for port %u (%d)\n", p_id, ret);

    ret = rte_eth_dev_start(p_id);
    if (ret < 0)
        rte_panic("Cannot start port %u (%d)\n", p_id, ret);
}
void xran_memdump(void *addr, int len)
{
    int i;
    /* "XX " per byte (3 chars), a newline every 16 bytes, plus NUL */
    char tmp_buf[len * 3 + len / 16 + 1];
    char *p = tmp_buf;

    for (i = 0; i < len; ++i) {
        p += sprintf(p, "%.2X ", ((uint8_t *)addr)[i]);
        if (i % 16 == 15)
            *p++ = '\n';
    }
    *p = '\0';
    nlog("%s", tmp_buf);
}
void xran_add_eth_hdr(struct ether_addr *dst, uint16_t ethertype,
    struct rte_mbuf *mb)
{
    /* prepend a fresh Ethernet header to the frame */
    struct ether_hdr *const h = (void *)rte_pktmbuf_prepend(mb, sizeof(*h));

    PANIC_ON(h == NULL, "mbuf prepend of ether_hdr failed");

    /* fill in the src and dst MAC addresses */
    ether_addr_copy(dst, &h->d_addr);
    rte_eth_macaddr_get(mb->port, &h->s_addr);
    h->ether_type = rte_cpu_to_be_16(ethertype);

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    {
        char dst[ETHER_ADDR_FMT_SIZE] = "(empty)";
        char src[ETHER_ADDR_FMT_SIZE] = "(empty)";

        nlog("*** packet for TX below (len %d) ***",
            rte_pktmbuf_pkt_len(mb));
        ether_format_addr(src, sizeof(src), &h->s_addr);
        ether_format_addr(dst, sizeof(dst), &h->d_addr);
        nlog("src: %s dst: %s ethertype: %.4X", src, dst, ethertype);
    }
#endif

    /* user-plane traffic carries a VLAN tag (setup partly elided) */
    mb->vlan_tci = FLEXRAN_UP_VLAN_TAG;
    rte_vlan_insert(&mb);
}
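/*
 * Note: rte_vlan_insert() takes struct rte_mbuf ** rather than a plain
 * pointer because it must prepend four bytes and, depending on the DPDK
 * version, may substitute a writable copy for a shared mbuf, updating
 * the caller's pointer in the process.
 */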
int xran_send_mbuf(struct ether_addr *dst, struct rte_mbuf *mb)
{
    /* (Ethernet header setup elided in the extract; see
     * xran_add_eth_hdr() above.) */

    if (rte_eth_tx_burst(mb->port, 0, &mb, 1) == 1)
        return 1;               /* the PMD now owns the mbuf */

    elog("packet sending failed on port %d", mb->port);
    rte_pktmbuf_free(mb);       /* not sent, so still ours to free */
    return 0;
}
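/*
 * Illustrative only (the egress port and payload handling are example
 * choices): allocate from the shared pool, append a payload, and hand
 * the frame off; xran_send_mbuf() frees it if the send fails.
 */
#if 0   /* usage sketch */
static int example_send(struct ether_addr *dst, const void *data, uint16_t n)
{
    struct rte_mbuf *mb = rte_pktmbuf_alloc(_eth_mbuf_pool);
    if (mb == NULL)
        return 0;
    mb->port = 0;               /* egress port used by tx_burst */

    void *p = rte_pktmbuf_append(mb, n);
    if (p == NULL) {
        rte_pktmbuf_free(mb);
        return 0;
    }
    rte_memcpy(p, data, n);
    return xran_send_mbuf(dst, mb);
}
#endif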
int xran_send_message_burst(int dst_id, int pkt_type, void *body, int len)
{
    /* Assumptions: the context getter, the per-mbuf payload math and the
     * VLA are reconstructions; the original declarations are elided in
     * the extract. */
    struct xran_ethdi_ctx *const ctx = xran_ethdi_get_ctx();
    uint8_t *src = body;
    const int max_data_len = MAX_TX_LEN - sizeof(struct ether_hdr)
        - sizeof(struct ethdi_hdr) - sizeof(struct burst_hdr);
    const int count = (len + max_data_len - 1) / max_data_len;
    struct rte_mbuf *mbufs[count];
    int i;

    if (rte_pktmbuf_alloc_bulk(_eth_mbuf_pool, mbufs, count) != 0)
        rte_panic("Failed to allocate %d mbufs\n", count);

    for (i = 0; len > 0; ++i) {
        struct ethdi_hdr *edi_hdr;
        struct burst_hdr *bhdr;
        char *p;

        /* transport header identifying the destination */
        edi_hdr = (void *)rte_pktmbuf_append(mbufs[i], sizeof(*edi_hdr));
        if (edi_hdr == NULL)
            rte_panic("append of ethdi_hdr failed\n");
        /* (ethdi_hdr field setup elided in the extract) */

        /* burst header so the receiver can reassemble the chunks */
        bhdr = (void *)rte_pktmbuf_append(mbufs[i], sizeof(*bhdr));
        if (bhdr == NULL)
            rte_panic("append of burst_hdr failed\n");
        /* (burst_hdr field setup elided in the extract) */

        /* copy whatever payload still fits, keeping room for the
         * Ethernet header that gets prepended at TX time */
        const int curr_data_len = RTE_MIN(len, MAX_TX_LEN -
            rte_pktmbuf_pkt_len(mbufs[i]) - (int)sizeof(struct ether_hdr));
        p = (void *)rte_pktmbuf_append(mbufs[i], curr_data_len);
        if (p == NULL)
            rte_panic("mbuf append of %d data bytes failed\n",
                curr_data_len);
        rte_memcpy(p, src, curr_data_len);

        dlog("curr_data_len[%d] = %d", i, curr_data_len);
        dlog("packet %d size %d", i, rte_pktmbuf_pkt_len(mbufs[i]));

        len -= curr_data_len;
        src += curr_data_len;
    }

    /* hand the whole burst to the TX ring in one bulk enqueue */
    i = rte_ring_enqueue_bulk(ctx->tx_ring[0], (void **)mbufs, count, NULL);
    PANIC_ON(i != count, "failed to enqueue all mbufs: %d/%d", i, count);

    return count;
}
void xran_add_eth_hdr_vlan(struct ether_addr *dst, uint16_t ethertype,
    struct rte_mbuf *mb, uint16_t vlan_tci)
{
    /* unlike xran_add_eth_hdr(), this variant fills in the header
     * already at the front of the mbuf instead of prepending one */
    struct ether_hdr *h = rte_pktmbuf_mtod(mb, struct ether_hdr *);

    PANIC_ON(h == NULL, "no ether_hdr at the start of the mbuf");

    /* fill in the src and dst MAC addresses */
    ether_addr_copy(dst, &h->d_addr);
    rte_eth_macaddr_get(mb->port, &h->s_addr);
    h->ether_type = rte_cpu_to_be_16(ethertype);

#if defined(DPDKIO_DEBUG) && DPDKIO_DEBUG > 1
    {
        char dst[ETHER_ADDR_FMT_SIZE] = "(empty)";
        char src[ETHER_ADDR_FMT_SIZE] = "(empty)";

        nlog("*** packet for TX below (len %d) ***",
            rte_pktmbuf_pkt_len(mb));
        ether_format_addr(src, sizeof(src), &h->s_addr);
        ether_format_addr(dst, sizeof(dst), &h->d_addr);
        nlog("src: %s dst: %s ethertype: %.4X", src, dst, ethertype);
    }
#endif

    mb->vlan_tci = vlan_tci;
    dlog("Inserting vlan tag of %d", vlan_tci);
    rte_vlan_insert(&mb);
}
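/*
 * Illustrative only (the ethertype and TCI are example values): tagging
 * a frame whose Ethernet header is already in place.
 */
#if 0   /* usage sketch */
static void example_tag(struct ether_addr *dst, struct rte_mbuf *mb)
{
    /* VLAN ID 100, default priority */
    xran_add_eth_hdr_vlan(dst, ETHER_TYPE_IPv4, mb, 100);
}
#endif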