/******************************************************************************
*
* Copyright (c) 2020 Intel.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
/**
 * @brief xRAN application fragmentation for U-plane packets
 *
 * @file xran_app_frag.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/
#include <immintrin.h>

#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_debug.h>

#include "xran_app_frag.h"
#include "xran_cp_api.h"
#include "xran_pkt_up.h"
#include "xran_printf.h"
#include "xran_common.h"
static inline void __fill_xranhdr_frag(struct xran_up_pkt_hdr *dst,
        const struct xran_up_pkt_hdr *src, uint16_t rblen_bytes,
        uint16_t rboff_bytes, uint16_t startPrbc, uint16_t numPrbc, uint32_t mf, uint8_t *seqid, uint8_t iqWidth)
{
    struct data_section_hdr loc_data_sec_hdr;
    struct xran_ecpri_hdr loc_ecpri_hdr;

    /* Copy the template header, then patch the per-fragment fields */
    rte_memcpy(dst, src, sizeof(*dst));

    dst->ecpri_hdr.ecpri_seq_id.bits.seq_id = (*seqid)++;

    print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n", startPrbc, numPrbc, dst->ecpri_hdr.ecpri_seq_id.bits.seq_id, mf, *seqid);

    loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);

    /* Convert the fragment byte offset/length into PRB units */
    loc_data_sec_hdr.fields.start_prbu = startPrbc + rboff_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
    loc_data_sec_hdr.fields.num_prbu   = rblen_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);

    print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n", startPrbc, numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
        rboff_bytes, rblen_bytes);

    dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);

    dst->ecpri_hdr.cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
                sizeof(struct data_section_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
}
static inline void __fill_xranhdr_frag_comp(struct xran_up_pkt_hdr_comp *dst,
        const struct xran_up_pkt_hdr_comp *src, uint16_t rblen_bytes,
        uint16_t rboff_bytes, uint16_t startPrbc, uint16_t numPrbc, uint32_t mf, uint8_t *seqid, uint8_t iqWidth)
{
    struct data_section_hdr loc_data_sec_hdr;
    struct xran_ecpri_hdr loc_ecpri_hdr;

    rte_memcpy(dst, src, sizeof(*dst));

    dst->ecpri_hdr.ecpri_seq_id.bits.seq_id = (*seqid)++;

    print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n", startPrbc, numPrbc, dst->ecpri_hdr.ecpri_seq_id.bits.seq_id, mf, *seqid);

    loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);

    /* Convert the fragment byte offset/length into PRB units */
    loc_data_sec_hdr.fields.start_prbu = startPrbc + rboff_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);
    loc_data_sec_hdr.fields.num_prbu   = rblen_bytes/XRAN_PAYLOAD_1_RB_SZ(iqWidth);

    print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n", startPrbc, numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
        rboff_bytes, rblen_bytes);

    dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);

    /* The eCPRI payload size also accounts for the compression header */
    dst->ecpri_hdr.cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
                sizeof(struct data_section_hdr) + sizeof(struct data_section_compression_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
}
static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
    uint32_t i;
    for (i = 0; i != num; i++)
        rte_pktmbuf_free(mb[i]);
}
/**
 * XRAN fragmentation.
 *
 * This function implements the application fragmentation of XRAN packets.
 *
 * @param pkts_out
 *   Array storing the output fragments.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing XRAN
 *   datagrams. This value includes the size of the XRAN headers.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * <errno>.
 */
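/*
 * Illustrative usage sketch (not part of the original source): one possible
 * way a transmit path could call xran_app_fragment_packet(). The names
 * example_send_fragments, MAX_UP_FRAGMENTS, up_mp_direct and up_mp_indirect
 * are hypothetical placeholders, and the argument order assumes the
 * parameter list as declared below.
 */
#if 0
static int32_t example_send_fragments(struct rte_mbuf *up_pkt,
        struct rte_mempool *up_mp_direct, struct rte_mempool *up_mp_indirect,
        uint16_t mtu, int16_t startRb, int16_t numRb, uint8_t iqWidth)
{
    struct rte_mbuf *frags[MAX_UP_FRAGMENTS];
    uint8_t seq_id = 0;
    int32_t nb, i;

    nb = xran_app_fragment_packet(up_pkt, frags, MAX_UP_FRAGMENTS, mtu,
                                  up_mp_direct, up_mp_indirect,
                                  startRb, numRb, &seq_id, iqWidth,
                                  0 /* no compression header */);
    if (nb < 0)
        return nb; /* (-1) * errno, e.g. -EINVAL or -ENOMEM */

    for (i = 0; i < nb; i++) {
        /* hand frags[i] to the Ethernet TX routine here */
    }
    return nb;
}
#endif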
int32_t
xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
    struct rte_mbuf **pkts_out,
    uint16_t nb_pkts_out,
    uint16_t mtu_size,
    struct rte_mempool *pool_direct,
    struct rte_mempool *pool_indirect,
    int16_t nRBStart,    /**< start RB of RB allocation */
    int16_t nRBSize,     /**< number of RBs used */
    uint8_t *seqid,      /**< eCPRI sequence id to use and update */
    uint8_t iqWidth,     /**< IQ bit width */
    uint8_t isUdCompHdr) /**< non-zero when the U-plane compression header is present */
{
    struct rte_mbuf *in_seg = NULL;
    uint32_t out_pkt_pos = 0, in_seg_data_pos = 0;
    uint32_t more_in_segs;
    uint16_t fragment_offset, frag_size;
    uint16_t frag_bytes_remaining;
    struct eth_xran_up_pkt_hdr *in_hdr;
    struct xran_up_pkt_hdr *in_hdr_xran;

    struct eth_xran_up_pkt_hdr_comp *in_hdr_comp = NULL;
    struct xran_up_pkt_hdr_comp *in_hdr_xran_comp = NULL;

    int32_t eth_xran_up_headers_sz = 0;
    eth_xran_up_headers_sz = sizeof(struct eth_xran_up_pkt_hdr);

    if (isUdCompHdr)
        eth_xran_up_headers_sz += sizeof(struct data_section_compression_hdr);
    /*
     * Ensure the XRAN payload length of all fragments is aligned to a whole
     * number of RBs, i.e. a multiple of XRAN_PAYLOAD_1_RB_SZ(iqWidth)
     * (48 bytes per RB for 16-bit IQ samples).
     */
    frag_size = ((mtu_size - eth_xran_up_headers_sz - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_1_RB_SZ(iqWidth))*XRAN_PAYLOAD_1_RB_SZ(iqWidth);
    print_dbg("frag_size %d\n", frag_size);
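    /*
     * Worked example (illustrative only; the MTU, header total and headroom
     * values below are assumptions, not values taken from this library):
     * with mtu_size = 1500, 16-bit IQ (XRAN_PAYLOAD_1_RB_SZ = 48 bytes/RB),
     * roughly 34 bytes of Ethernet/eCPRI/application headers and the default
     * RTE_PKTMBUF_HEADROOM of 128, frag_size = ((1500 - 34 - 128) / 48) * 48
     * = 27 * 48 = 1296 bytes, i.e. each fragment carries 27 whole RBs and
     * start_prbu advances by 27 from one fragment to the next.
     */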
    if (isUdCompHdr) {
        in_hdr_comp = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr_comp *);
        in_hdr_xran_comp = &in_hdr_comp->xran_hdr;
        /* Check that pkts_out is big enough to hold all fragments */
        if (unlikely(frag_size * nb_pkts_out <
            (uint16_t)(pkt_in->pkt_len - sizeof(struct xran_up_pkt_hdr_comp)))) {
            print_err("-EINVAL\n");
            return -EINVAL;
        }
        in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr_comp);
    } else {
        in_hdr = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr *);
        in_hdr_xran = &in_hdr->xran_hdr;
        /* Check that pkts_out is big enough to hold all fragments */
        if (unlikely(frag_size * nb_pkts_out <
            (uint16_t)(pkt_in->pkt_len - sizeof(struct xran_up_pkt_hdr)))) {
            print_err("-EINVAL\n");
            return -EINVAL;
        }
        in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr);
    }

    in_seg = pkt_in;
    fragment_offset = 0;
    more_in_segs = 1;
    while (likely(more_in_segs)) {
        struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
        uint32_t more_out_segs;
        struct xran_up_pkt_hdr *out_hdr;
        struct xran_up_pkt_hdr_comp *out_hdr_comp;

        /* Allocate direct buffer */
        out_pkt = rte_pktmbuf_alloc(pool_direct);
        if (unlikely(out_pkt == NULL)) {
            print_err("pool_direct -ENOMEM\n");
            __free_fragments(pkts_out, out_pkt_pos);
            return -ENOMEM;
        }

        print_dbg("[%d] out_pkt %p\n", more_in_segs, out_pkt);

        /* Reserve space for the XRAN header that will be built later */
        //out_pkt->data_len = sizeof(struct xran_up_pkt_hdr);
        //out_pkt->pkt_len = sizeof(struct xran_up_pkt_hdr);
        if (isUdCompHdr) {
            if (rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr_comp)) == NULL) {
                rte_panic("sizeof(struct xran_up_pkt_hdr_comp)");
            }
        } else {
            if (rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr)) == NULL) {
                rte_panic("sizeof(struct xran_up_pkt_hdr)");
            }
        }

        frag_bytes_remaining = frag_size;

        out_seg_prev = out_pkt;
        more_out_segs = 1;
        while (likely(more_out_segs && more_in_segs)) {
            uint32_t len;
#ifdef XRAN_ATTACH_MBUF
            struct rte_mbuf *out_seg = NULL;

            /* Allocate indirect buffer */
            print_dbg("Allocate indirect buffer \n");
            out_seg = rte_pktmbuf_alloc(pool_indirect);
            if (unlikely(out_seg == NULL)) {
                print_err("pool_indirect -ENOMEM\n");
                rte_pktmbuf_free(out_pkt);
                __free_fragments(pkts_out, out_pkt_pos);
                return -ENOMEM;
            }

            print_dbg("[%d %d] out_seg %p\n", more_out_segs, more_in_segs, out_seg);
            out_seg_prev->next = out_seg;
            out_seg_prev = out_seg;

            /* Prepare indirect buffer */
            rte_pktmbuf_attach(out_seg, in_seg);
#endif
            len = frag_bytes_remaining;
            if (len > (in_seg->data_len - in_seg_data_pos)) {
                len = in_seg->data_len - in_seg_data_pos;
            }

#ifdef XRAN_ATTACH_MBUF
            out_seg->data_off = in_seg->data_off + in_seg_data_pos;
            out_seg->data_len = (uint16_t)len;
            out_pkt->pkt_len = (uint16_t)(len + out_pkt->pkt_len);
            out_pkt->nb_segs += 1;
#else
            {
                /* Copy the IQ data from the input segment into the direct buffer */
                char *pChar  = rte_pktmbuf_mtod(in_seg, char *);
                void *iq_src = (pChar + in_seg_data_pos);
                void *iq_dst = rte_pktmbuf_append(out_pkt, len);

                print_dbg("rte_pktmbuf_attach\n");
                if (iq_src && iq_dst)
                    rte_memcpy(iq_dst, iq_src, len);
                else
                    print_err("iq_src %p iq_dst %p\n len %d room %d\n", iq_src, iq_dst, len, rte_pktmbuf_tailroom(out_pkt));
            }
#endif
            in_seg_data_pos += len;
            frag_bytes_remaining -= len;
            /* Current output packet (i.e. fragment) done ? */
            if (unlikely(frag_bytes_remaining == 0))
                more_out_segs = 0;
            /* Current input segment done ? */
            if (unlikely(in_seg_data_pos == in_seg->data_len)) {
                in_seg = in_seg->next;
                in_seg_data_pos = 0;
                if (unlikely(in_seg == NULL))
                    more_in_segs = 0;
            }
        }
        /* Build the XRAN header */
        print_dbg("Build the XRAN header\n");

        if (isUdCompHdr) {
            out_hdr_comp = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr_comp *);
            __fill_xranhdr_frag_comp(out_hdr_comp, in_hdr_xran_comp,
                (uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr_comp),
                fragment_offset, nRBStart, nRBSize, more_in_segs, seqid, iqWidth);

            fragment_offset = (uint16_t)(fragment_offset +
                out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr_comp));
        } else {
            out_hdr = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr *);
            __fill_xranhdr_frag(out_hdr, in_hdr_xran,
                (uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr),
                fragment_offset, nRBStart, nRBSize, more_in_segs, seqid, iqWidth);

            fragment_offset = (uint16_t)(fragment_offset +
                out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr));
        }

        //out_pkt->l3_len = sizeof(struct xran_up_pkt_hdr);

        /* Write the fragment to the output list */
        pkts_out[out_pkt_pos] = out_pkt;
        print_dbg("out_pkt_pos %d data_len %d pkt_len %d\n", out_pkt_pos, out_pkt->data_len, out_pkt->pkt_len);
        out_pkt_pos++;
        //rte_pktmbuf_dump(stdout, out_pkt, 96);