/******************************************************************************
*
*   Copyright (c) 2019 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/
\r
/**
 * @brief xRAN application fragmentation for U-plane packets
 *
 * @file xran_app_frag.c
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/
\r
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>

#include "xran_app_frag.h"
#include "xran_cp_api.h"
#include "xran_pkt_up.h"
#include "xran_printf.h"
#include "xran_common.h"
\r
42 /* Fragment alignment */
\r
43 #define XRAN_PAYLOAD_RB_ALIGN (N_SC_PER_PRB*(IQ_BITS/8)*2) /**< at least 12*4=48 bytes per one RB */
\r
45 static inline void __fill_xranhdr_frag(struct xran_up_pkt_hdr *dst,
\r
46 const struct xran_up_pkt_hdr *src, uint16_t rblen_bytes,
\r
47 uint16_t rboff_bytes, struct xran_section_info *sectinfo, uint32_t mf, uint8_t *seqid)
\r
49 struct data_section_hdr loc_data_sec_hdr;
\r
50 struct xran_ecpri_hdr loc_ecpri_hdr;
\r
52 rte_memcpy(dst, src, sizeof(*dst));
\r
54 dst->ecpri_hdr.ecpri_seq_id.seq_id = (*seqid)++;
\r
56 print_dbg("sec [%d %d] sec %d mf %d g_sec %d\n",sectinfo->startPrbc, sectinfo->numPrbc, dst->ecpri_hdr.ecpri_seq_id.seq_id, mf, *seqid);
\r
58 loc_data_sec_hdr.fields.all_bits = rte_be_to_cpu_32(dst->data_sec_hdr.fields.all_bits);
\r
61 loc_data_sec_hdr.fields.start_prbu = sectinfo->startPrbc + rboff_bytes/(N_SC_PER_PRB*(IQ_BITS/8*2));
\r
62 loc_data_sec_hdr.fields.num_prbu = rblen_bytes/(N_SC_PER_PRB*(IQ_BITS/8*2));
\r
64 print_dbg("sec [%d %d] pkt [%d %d] rboff_bytes %d rblen_bytes %d\n",sectinfo->startPrbc, sectinfo->numPrbc, loc_data_sec_hdr.fields.start_prbu, loc_data_sec_hdr.fields.num_prbu,
\r
65 rboff_bytes, rblen_bytes);
\r
67 dst->data_sec_hdr.fields.all_bits = rte_cpu_to_be_32(loc_data_sec_hdr.fields.all_bits);
\r
70 dst->ecpri_hdr.cmnhdr.ecpri_payl_size = rte_cpu_to_be_16(sizeof(struct radio_app_common_hdr) +
\r
71 sizeof(struct data_section_hdr) + rblen_bytes + xran_get_ecpri_hdr_size());
\r
75 static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
\r
78 for (i = 0; i != num; i++)
\r
79 rte_pktmbuf_free(mb[i]);
\r
83 * XRAN fragmentation.
\r
85 * This function implements the application fragmentation of XRAN packets.
\r
90 * Array storing the output fragments.
\r
92 * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing XRAN
\r
93 * datagrams. This value includes the size of the XRAN headers.
\r
94 * @param pool_direct
\r
95 * MBUF pool used for allocating direct buffers for the output fragments.
\r
96 * @param pool_indirect
\r
97 * MBUF pool used for allocating indirect buffers for the output fragments.
\r
99 * Upon successful completion - number of output fragments placed
\r
100 * in the pkts_out array.
\r
101 * Otherwise - (-1) * <errno>.
\r
104 xran_app_fragment_packet(struct rte_mbuf *pkt_in, /* eth hdr is prepended */
\r
105 struct rte_mbuf **pkts_out,
\r
106 uint16_t nb_pkts_out,
\r
108 struct rte_mempool *pool_direct,
\r
109 struct rte_mempool *pool_indirect,
\r
110 struct xran_section_info *sectinfo,
\r
113 struct rte_mbuf *in_seg = NULL;
\r
114 uint32_t out_pkt_pos = 0, in_seg_data_pos = 0;
\r
115 uint32_t more_in_segs;
\r
116 uint16_t fragment_offset, frag_size;
\r
117 uint16_t frag_bytes_remaining;
\r
118 struct eth_xran_up_pkt_hdr *in_hdr;
\r
119 struct xran_up_pkt_hdr *in_hdr_xran;
\r
122 * Ensure the XRAN payload length of all fragments is aligned to a
\r
123 * multiple of 48 bytes (1 RB with IQ of 16 bits each)
\r
125 frag_size = ((mtu_size - sizeof(struct eth_xran_up_pkt_hdr) - RTE_PKTMBUF_HEADROOM)/XRAN_PAYLOAD_RB_ALIGN)*XRAN_PAYLOAD_RB_ALIGN;
\r
128 print_dbg("frag_size %d\n",frag_size);
\r
130 in_hdr = rte_pktmbuf_mtod(pkt_in, struct eth_xran_up_pkt_hdr *);
\r
132 in_hdr_xran = &in_hdr->xran_hdr;
\r
134 /* Check that pkts_out is big enough to hold all fragments */
\r
135 if (unlikely(frag_size * nb_pkts_out <
\r
136 (uint16_t)(pkt_in->pkt_len - sizeof (struct xran_up_pkt_hdr)))){
\r
137 print_err("-EINVAL\n");
\r
142 in_seg_data_pos = sizeof(struct eth_xran_up_pkt_hdr);
\r
144 fragment_offset = 0;
\r
147 while (likely(more_in_segs)) {
\r
148 struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
\r
149 uint32_t more_out_segs;
\r
150 struct xran_up_pkt_hdr *out_hdr;
\r
152 /* Allocate direct buffer */
\r
153 out_pkt = rte_pktmbuf_alloc(pool_direct);
\r
154 if (unlikely(out_pkt == NULL)) {
\r
155 print_err("pool_direct -ENOMEM\n");
\r
156 __free_fragments(pkts_out, out_pkt_pos);
\r
160 print_dbg("[%d] out_pkt %p\n",more_in_segs, out_pkt);
\r
162 /* Reserve space for the XRAN header that will be built later */
\r
163 //out_pkt->data_len = sizeof(struct xran_up_pkt_hdr);
\r
164 //out_pkt->pkt_len = sizeof(struct xran_up_pkt_hdr);
\r
165 if(rte_pktmbuf_append(out_pkt, sizeof(struct xran_up_pkt_hdr)) ==NULL){
\r
166 rte_panic("sizeof(struct xran_up_pkt_hdr)");
\r
168 frag_bytes_remaining = frag_size;
\r
170 out_seg_prev = out_pkt;
\r
172 while (likely(more_out_segs && more_in_segs)) {
\r
174 #ifdef XRAN_ATTACH_MBUF
\r
175 struct rte_mbuf *out_seg = NULL;
\r
177 /* Allocate indirect buffer */
\r
178 print_dbg("Allocate indirect buffer \n");
\r
179 out_seg = rte_pktmbuf_alloc(pool_indirect);
\r
180 if (unlikely(out_seg == NULL)) {
\r
181 print_err("pool_indirect -ENOMEM\n");
\r
182 rte_pktmbuf_free(out_pkt);
\r
183 __free_fragments(pkts_out, out_pkt_pos);
\r
187 print_dbg("[%d %d] out_seg %p\n",more_out_segs, more_in_segs, out_seg);
\r
188 out_seg_prev->next = out_seg;
\r
189 out_seg_prev = out_seg;
\r
191 /* Prepare indirect buffer */
\r
192 rte_pktmbuf_attach(out_seg, in_seg);
\r
194 len = frag_bytes_remaining;
\r
195 if (len > (in_seg->data_len - in_seg_data_pos)) {
\r
196 len = in_seg->data_len - in_seg_data_pos;
\r
198 #ifdef XRAN_ATTACH_MBUF
\r
199 out_seg->data_off = in_seg->data_off + in_seg_data_pos;
\r
200 out_seg->data_len = (uint16_t)len;
\r
201 out_pkt->pkt_len = (uint16_t)(len +
\r
203 out_pkt->nb_segs += 1;
\r
206 char* pChar = rte_pktmbuf_mtod(in_seg, char*);
\r
207 void *iq_src = (pChar + in_seg_data_pos);
\r
208 void *iq_dst = rte_pktmbuf_append(out_pkt, len);
\r
210 print_dbg("rte_pktmbuf_attach\n");
\r
211 if(iq_src && iq_dst)
\r
212 rte_memcpy(iq_dst, iq_src, len);
\r
214 print_err("iq_src %p iq_dst %p\n len %d room %d\n", iq_src, iq_dst, len, rte_pktmbuf_tailroom(out_pkt));
\r
217 in_seg_data_pos += len;
\r
218 frag_bytes_remaining -= len;
\r
220 /* Current output packet (i.e. fragment) done ? */
\r
221 if (unlikely(frag_bytes_remaining == 0))
\r
224 /* Current input segment done ? */
\r
225 if (unlikely(in_seg_data_pos == in_seg->data_len)) {
\r
226 in_seg = in_seg->next;
\r
227 in_seg_data_pos = 0;
\r
229 if (unlikely(in_seg == NULL))
\r
234 /* Build the XRAN header */
\r
235 print_dbg("Build the XRAN header\n");
\r
236 out_hdr = rte_pktmbuf_mtod(out_pkt, struct xran_up_pkt_hdr *);
\r
238 __fill_xranhdr_frag(out_hdr, in_hdr_xran,
\r
239 (uint16_t)out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr),
\r
240 fragment_offset, sectinfo, more_in_segs, seqid);
\r
242 fragment_offset = (uint16_t)(fragment_offset +
\r
243 out_pkt->pkt_len - sizeof(struct xran_up_pkt_hdr));
\r
245 //out_pkt->l3_len = sizeof(struct xran_up_pkt_hdr);
\r
247 /* Write the fragment to the output list */
\r
248 pkts_out[out_pkt_pos] = out_pkt;
\r
249 print_dbg("out_pkt_pos %d data_len %d pkt_len %d\n", out_pkt_pos, out_pkt->data_len, out_pkt->pkt_len);
\r
251 //rte_pktmbuf_dump(stdout, out_pkt, 96);
\r
254 return out_pkt_pos;
\r