+int __xran_delayed_msg(const char *fmt, ...)
+{
+#if 0
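+    /*
+     * Scheme implemented below: each writer (1) formats its message on the
+     * stack, (2) reserves a range of the ring by advancing head with a CAS,
+     * (3) copies the message into that range, and (4) publishes it by
+     * advancing read_head in reservation order.  A consumer is expected to
+     * read [tail, read_head) and advance tail to free space.
+     */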
+    va_list ap;
+    int msg_len;
+    char localbuf[RINGSIZE];
+    ring_idx old_head, new_head;
+    ring_idx copy_len;
+
+    /* First format the message into a local stack buffer, so the expensive
+     * vsnprintf() runs without touching any shared state. */
+    va_start(ap, fmt);
+    msg_len = vsnprintf(localbuf, RINGSIZE, fmt, ap);
+    va_end(ap);
+    if (msg_len <= 0)
+        return 0;    /* formatting error or empty message */
+    if (msg_len >= RINGSIZE)
+        msg_len = RINGSIZE - 1;    /* vsnprintf truncated; clamp to what was written */
+
+    /* atomically reserve space in the ring */
+    for (;;) {
+        old_head = io_ring.head;    /* snapshot head */
+        /* head - tail is the number of bytes currently reserved and never
+         * exceeds RINGSIZE (see the assert below), so free always lies
+         * within [0, RINGSIZE]. */
+        const ring_idx free = RINGSIZE - (old_head - io_ring.tail);
+
+        copy_len = RTE_MIN((ring_idx)msg_len, free);
+        if (copy_len == 0)
+            return 0;    /* ring buffer is full; drop the message */
+
+        new_head = old_head + copy_len;
+        RTE_ASSERT((ring_idx)(new_head - io_ring.tail) <= RINGSIZE);
+
+        /* strong CAS (weak == 0): on failure, loop back and take a fresh
+         * snapshot of head */
+        if (likely(__atomic_compare_exchange_n(&io_ring.head, &old_head,
+                new_head, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)))
+            break;
+    }
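+
+    /* A successful CAS gives this writer exclusive ownership of the byte
+     * range [old_head, new_head); concurrent writers hold disjoint ranges
+     * and can copy into the ring in parallel. */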
+
+    /* The range is ours; copy the message in without further synchronization. */
+    const int copy_start = (old_head & RINGMASK);
+    if (copy_start < (new_head & RINGMASK))    /* no wrap */
+        memcpy(io_ring.buf + copy_start, localbuf, copy_len);
+    else {    /* wrap-around: split the copy at the end of the buffer */
+        const int chunk_len = RINGSIZE - copy_start;
+
+        memcpy(io_ring.buf + copy_start, localbuf, chunk_len);
+        memcpy(io_ring.buf, localbuf + chunk_len, copy_len - chunk_len);
+    }
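+    /* The masking above assumes RINGSIZE is a power of two with
+     * RINGMASK == RINGSIZE - 1, and that ring_idx is an unsigned type so
+     * that head/tail arithmetic wraps modulo its width. */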
+
+    /* Writers may finish their copies out of order, but messages must be
+     * published in reservation order.  Spin until all earlier reservations
+     * are published, then release ours so the copied bytes are visible
+     * before read_head advances. */
+    while (__atomic_load_n(&io_ring.read_head, __ATOMIC_RELAXED) != old_head)
+        rte_pause();
+    __atomic_store_n(&io_ring.read_head, new_head, __ATOMIC_RELEASE);
+
+    return copy_len;
+#endif
+    return 0;
+}
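+
+#if 0
+/*
+ * Minimal reader-side sketch of the matching consumer -- an illustration,
+ * not code from this library: the name __xran_drain_msg is hypothetical,
+ * and it assumes a single consumer that is the sole writer of io_ring.tail.
+ * It copies published bytes ([tail, read_head)) out of the ring, then
+ * releases the consumed space back to the producers.
+ */
+static int __xran_drain_msg(char *out, int max_len)
+{
+    const ring_idx head =
+        __atomic_load_n(&io_ring.read_head, __ATOMIC_ACQUIRE);
+    const ring_idx tail = io_ring.tail;    /* only the consumer writes tail */
+    ring_idx avail = head - tail;
+    ring_idx i;
+
+    if (avail > (ring_idx)max_len)
+        avail = (ring_idx)max_len;
+    for (i = 0; i < avail; i++)
+        out[i] = io_ring.buf[(tail + i) & RINGMASK];
+
+    /* release the consumed space so producers see it as free */
+    __atomic_store_n(&io_ring.tail, tail + avail, __ATOMIC_RELEASE);
+    return (int)avail;
+}
+#endif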