* INTC Contribution to the O-RAN F Release for O-DU Low
[o-du/phy.git] fhi_lib/lib/src/xran_tx_proc.c
index 3cfb2be..45a17a8 100644
@@ -46,6 +46,7 @@
 #include <rte_memzone.h>
 #include <rte_mbuf.h>
 #include <rte_ring.h>
+#include <rte_ethdev.h>
 
 #include "xran_fh_o_du.h"
 
@@ -61,7 +62,6 @@
 #include "xran_dev.h"
 #include "xran_frame_struct.h"
 #include "xran_printf.h"
-#include "xran_app_frag.h"
 #include "xran_tx_proc.h"
 #include "xran_cp_proc.h"
 
@@ -74,6 +74,7 @@ enum xran_in_period
      XRAN_IN_NEXT_PERIOD
 };
 
+extern int32_t first_call;
 
 struct rte_mbuf *
 xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_buff, uint16_t ext_buff_len,
@@ -177,8 +178,6 @@ static inline int32_t prepare_symbol_opt(enum xran_pkt_dir direction,
     int32_t n_bytes;
     int32_t prep_bytes;
     int16_t nPktSize;
-    uint32_t off;
-
 
     iqWidth = (iqWidth==0) ? 16 : iqWidth;
     switch(compMeth) {
@@ -242,7 +241,10 @@ static inline int32_t prepare_symbol_opt(enum xran_pkt_dir direction,
                                                 RU_Port_ID,
                                                 seq_id,
                                                 staticEn,
-                                                do_copy);
+                                                do_copy,
+                                                1,
+                                                section_id,
+                                                0);
     if (prep_bytes <= 0)
         errx(1, "failed preparing symbol");
 
@@ -264,21 +266,20 @@ int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti,
     char        *p_sec_iq = NULL;
     void        *mb  = NULL;
     void        *send_mb  = NULL;
-    int         prb_num = 0;
-    uint16_t    iq_sample_size_bits = 16;
-    uint16_t    vf_id = 0;
+    // int         prb_num = 0;
+    uint16_t    vf_id = 0 , num_sections = 0, curr_sect_id = 0 ;
 
     struct xran_prb_map *prb_map = NULL;
-    uint8_t  num_ant_elm  = 0;
+    //uint8_t  num_ant_elm  = 0;
 
     struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
     if (p_xran_dev_ctx == NULL)
         return retval;
     struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
-    struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
+    //struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+    //struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
 
-    num_ant_elm = xran_get_num_ant_elm(pHandle);
+    //num_ant_elm = xran_get_num_ant_elm(pHandle);
     enum xran_pkt_dir direction;
     enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;
 
@@ -298,10 +299,10 @@ int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti,
     struct rte_mbuf_ext_shared_info * p_share_data = NULL;
     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
         direction = XRAN_DIR_DL; /* O-DU */
-        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
+        //prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
     } else {
         direction = XRAN_DIR_UL; /* RU */
-        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
+        //prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
     }
 
     if(xran_fs_get_slot_type(PortId, cc_id, tti, ((p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)? XRAN_SLOT_TYPE_DL : XRAN_SLOT_TYPE_UL)) ==  1
@@ -321,16 +322,19 @@ int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti,
                 int32_t elmIdx = 0;
                 for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++){
                     //print_err("tti is %d, cc_id is %d, ant_id is %d, prb_map->nPrbElm id - %d", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, prb_map->nPrbElm);
-                    uint16_t sec_id  = elmIdx;
                     struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
                     struct xran_section_desc * p_sec_desc = NULL;
+                    uint16_t sec_id  = prb_map_elm->nSectId;
                     p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sec_id];
 
+                    if(unlikely(sym_id < prb_map_elm->nStartSymb || sym_id >= (prb_map_elm->nStartSymb + prb_map_elm->numSymb)))
+                          continue;
+
                     if(prb_map_elm == NULL){
                         rte_panic("p_sec_desc == NULL\n");
                     }
 
-                    p_sec_desc =  prb_map_elm->p_sec_desc[sym_id][0];
+                    p_sec_desc = &prb_map_elm->sec_desc[sym_id][0];
 
                     p_sec_iq     = ((char*)pos + p_sec_desc->iq_buffer_offset);
 
@@ -389,240 +393,307 @@ int32_t xran_process_tx_sym_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti,
                                           xran_get_upul_seqid(pHandle, cc_id, ant_id);
 
 
+                    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
+                                        && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B)
+                    {
+                        num_sections = (prb_map_elm->bf_weight.extType == 1) ? prb_map_elm->bf_weight.numSetBFWs : 1 ;
+                        if (prb_map_elm->bf_weight.extType != 1) 
+                            curr_sect_id = sec_id;
+                    }
+                    else
+                        num_sections = 1;
 
                     /* first all PRBs */
-                    int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
+                    prepare_symbol_ex(direction, curr_sect_id,
                                       send_mb,
                                       (uint8_t *)p_sec_iq,
                                       prb_map_elm->compMethod,
                                       prb_map_elm->iqWidth,
                                       p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
                                       frame_id, subframe_id, slot_id, sym_id,
-                                      prb_map_elm->nRBStart, prb_map_elm->nRBSize,
+                                      prb_map_elm->UP_nRBStart, prb_map_elm->UP_nRBSize,
                                       cc_id, ant_id,
                                       seq_id,
-                                          0,
-                                          staticEn);
+                                      0,
+                                      staticEn,
+                                      num_sections,
+                                      p_sec_desc->iq_buffer_offset);
+
+                    curr_sect_id += num_sections;
 
                     rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
                     pCnt->tx_counter++;
                     pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
                     p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
-                }
+                } /* for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) */
             } else {
                 printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_id, sym_id);
             }
 
-            if(p_xran_dev_ctx->enablePrach
-                && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU)) {   /* Only RU needs to send PRACH I/Q */
-                uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
-
-                if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
-                        && (is_prach_slot == 1)
-                        && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])
-                        && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) {
-                    int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;
-                    int compMethod, parm_size;
-                    uint8_t symb_id_offset = sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id];
-
-                    compMethod = p_xran_dev_ctx->fh_cfg.ru_conf.compMeth_PRACH;
-                    switch(compMethod) {
-                        case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
-                        case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
-                        default:
-                            parm_size = 0;
-                        }
-                    pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[symb_id_offset].pData;
-                    //pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;
-                    /*pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id])
-                            * (3*p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth + parm_size)
-                            * pPrachCPConfig->numPrbc;*/
-                    mb  = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
-
-                    send_symbol_ex(pHandle,
-                            direction,
-                            xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, slot_id),
-                            (struct rte_mbuf *)mb,
-                            (uint8_t *)pos,
-                            compMethod,
-                            p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth_PRACH,
-                            p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
-                            frame_id, subframe_id, slot_id, sym_id,
-                            pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
-                            cc_id, prach_port_id,
-                            xran_get_upul_seqid(pHandle, cc_id, prach_port_id));
-                    retval = 1;
-                }
-            } /* if(p_xran_dev_ctx->enablePrach ..... */
         } /* RU mode or C-Plane is not used */
     }
-
     return retval;
 }
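
The Category B O-RU branch added above derives the U-Plane section count per PRB element from the beamforming-weight extension: numSetBFWs sections when extType == 1, otherwise a single section anchored at the element's own nSectId, with curr_sect_id advanced by the count after each element. A minimal, self-contained sketch of that accounting (local stub structs stand in for xran_prb_elm; they are not the library's types):

    /* Simplified sketch of the per-element section accounting above.
     * Field names mirror the diff (nSectId, nStartSymb, numSymb,
     * extType, numSetBFWs); the structs are local stubs. */
    #include <stdint.h>
    #include <stdio.h>

    struct bfw_stub     { int extType; uint16_t numSetBFWs; };
    struct prb_elm_stub {
        uint16_t nSectId;
        uint8_t  nStartSymb;
        uint8_t  numSymb;
        struct bfw_stub bf_weight;
    };

    int main(void)
    {
        struct prb_elm_stub map[2] = {
            { .nSectId = 0, .nStartSymb = 0, .numSymb = 14,
              .bf_weight = { .extType = 1, .numSetBFWs = 4 } }, /* BFW extension: 4 sections */
            { .nSectId = 4, .nStartSymb = 0, .numSymb = 14,
              .bf_weight = { .extType = 0, .numSetBFWs = 0 } }, /* plain element: 1 section  */
        };
        uint16_t curr_sect_id = 0, num_sections;
        uint32_t sym_id = 3;

        for (int i = 0; i < 2; i++) {
            struct prb_elm_stub *e = &map[i];
            /* skip elements not scheduled on this symbol, as in the diff */
            if (sym_id < e->nStartSymb || sym_id >= (uint32_t)(e->nStartSymb + e->numSymb))
                continue;
            num_sections = (e->bf_weight.extType == 1) ? e->bf_weight.numSetBFWs : 1;
            if (e->bf_weight.extType != 1)
                curr_sect_id = e->nSectId;   /* non-extended elements keep their own id */
            printf("elm %d: sectionId %u, %u section(s)\n",
                   i, (unsigned)curr_sect_id, (unsigned)num_sections);
            curr_sect_id += num_sections;
        }
        return 0;
    }
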
-
-int32_t
-xran_process_tx_srs_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id)
+int32_t xran_process_tx_prach_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id)
 {
     int32_t     retval = 0;
     char        *pos = NULL;
-    char        *p_sec_iq = NULL;
     void        *mb  = NULL;
-    void        *send_mb  = NULL;
-    int         prb_num = 0;
-    uint16_t    iq_sample_size_bits = 16;
-
-    struct xran_prb_map *prb_map = NULL;
-    uint8_t  num_ant_elm  = 0;
 
     struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *)pHandle;
-    struct xran_common_counters * pCnt = &p_xran_dev_ctx->fh_counters;
-    struct xran_prach_cp_config *pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
-    struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
+    if (p_xran_dev_ctx == NULL)
+        return retval;
 
-    num_ant_elm = xran_get_num_ant_elm(pHandle);
-    enum xran_pkt_dir direction;
+    struct xran_prach_cp_config *pPrachCPConfig;
+    if(p_xran_dev_ctx->dssEnable){
+        int i = tti % p_xran_dev_ctx->dssPeriod;
+        if(p_xran_dev_ctx->technology[i]==1) {
+            pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+        }
+        else{
+            pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+        }
+    }
+    else{
+        pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+    }
 
-    struct rte_mbuf *eth_oran_hdr = NULL;
-    char        *ext_buff = NULL;
-    uint16_t    ext_buff_len = 0;
-    struct rte_mbuf *tmp = NULL;
-    rte_iova_t ext_buff_iova = 0;
-    int32_t ant_elm_eAxC_id = ant_id + p_srs_cfg->eAxC_offset;
-    uint32_t vf_id = 0;
-    enum xran_comp_hdr_type staticEn = XRAN_COMP_HDR_TYPE_DYNAMIC;
+    enum xran_pkt_dir direction = XRAN_DIR_UL;
+    uint8_t PortId = p_xran_dev_ctx->xran_port_id;
 
-    if (p_xran_dev_ctx != NULL)
-    {
 
-    if(p_xran_dev_ctx->xran_port_id >= XRAN_PORTS_NUM)
+    if(PortId >= XRAN_PORTS_NUM)
         rte_panic("incorrect PORT ID\n");
 
-    struct rte_mbuf_ext_shared_info * p_share_data = NULL;
 
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
-        direction = XRAN_DIR_DL; /* O-DU */
-        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
-        rte_panic("incorrect O_DU\n");
-    } else {
-        direction = XRAN_DIR_UL; /* RU */
-        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
-    }
+    if(p_xran_dev_ctx->enablePrach
+          && (p_xran_dev_ctx->fh_init.io_cfg.id == O_RU) && (ant_id < XRAN_MAX_PRACH_ANT_NUM)){
 
+        if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
+          || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD) {   /* Only RU needs to send PRACH I/Q */
 
-        staticEn = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
+          uint32_t is_prach_slot = xran_is_prach_slot(PortId, subframe_id, slot_id);
 
+            if(((frame_id % pPrachCPConfig->x) == pPrachCPConfig->y[0])
+                    && (is_prach_slot == 1)
+                    && (sym_id >= p_xran_dev_ctx->prach_start_symbol[cc_id])
+                    && (sym_id <= p_xran_dev_ctx->prach_last_symbol[cc_id])) {
+                int prach_port_id = ant_id + pPrachCPConfig->eAxC_offset;
+                int compMethod;
+                //int parm_size;
+                uint8_t symb_id_offset = sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id];
 
-#if 1
-    if (tti % 5 == 3) {
-        {
-#else
-    if(xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_UL) ==  1
-            || xran_fs_get_slot_type(cc_id, tti, XRAN_SLOT_TYPE_FDD) ==  1) {
-        if(xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
-           || xran_fs_get_symbol_type(cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD) {
+                compMethod = p_xran_dev_ctx->fh_cfg.ru_conf.compMeth_PRACH;
+#if 0
+                switch(compMethod) {
+                    case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
+                    case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
+                    default:
+                        parm_size = 0;
+                    }
 #endif
-            pos = (char*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
-            mb  = (void*) p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
-            prb_map  = (struct xran_prb_map *) p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
-            vf_id  = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_elm_eAxC_id);
-
-            if(prb_map) {
-                int32_t elmIdx = 0;
-                for (elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) {
-                    uint16_t sec_id  = elmIdx;
-                    struct xran_prb_elm * prb_map_elm = &prb_map->prbMap[elmIdx];
-                    struct xran_section_desc * p_sec_desc = NULL;
-
-                    if(prb_map_elm == NULL) {
-                        rte_panic("p_sec_desc == NULL\n");
+                pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[symb_id_offset].pData;
+                //pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id]) * pPrachCPConfig->numPrbc * N_SC_PER_PRB * 4;
+                /*pos += (sym_id - p_xran_dev_ctx->prach_start_symbol[cc_id])
+                        * (3*p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth + parm_size)
+                        * pPrachCPConfig->numPrbc;*/
+                mb  = NULL;//(void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
+
+                struct xran_prach_cp_config *pPrachCPConfig;
+                if(p_xran_dev_ctx->dssEnable){
+                    int i = tti % p_xran_dev_ctx->dssPeriod;
+                    if(p_xran_dev_ctx->technology[i]==1) {
+                        pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
                     }
+                    else{
+                        pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfigLTE);
+                    }
+                }
+                else{
+                    pPrachCPConfig = &(p_xran_dev_ctx->PrachCPConfig);
+                }
+                
+                
+                if (1500 == p_xran_dev_ctx->fh_init.mtu && pPrachCPConfig->filterIdx == XRAN_FILTERINDEX_PRACH_012)
+                {
+                    pos = (char*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pData;
+                    mb  = (void*) p_xran_dev_ctx->sFHPrachRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[0].pCtrl;
+                    /*one prach for more then one pkg*/
+                    send_symbol_mult_section_ex(pHandle,
+                        direction,
+                        xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, subframe_id, slot_id),
+                        (struct rte_mbuf *)mb,
+                        (uint8_t *)pos,
+                        compMethod,
+                        p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth,
+                        p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
+                        frame_id, subframe_id, slot_id, sym_id,
+                        pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
+                        cc_id, prach_port_id,
+                        0);
+                }
+                else{
+                    send_symbol_ex(pHandle,
+                        direction,
+                        xran_alloc_sectionid(pHandle, direction, cc_id, prach_port_id, subframe_id, slot_id),
+                        (struct rte_mbuf *)mb,
+                        (uint8_t *)pos,
+                        compMethod,
+                        p_xran_dev_ctx->fh_cfg.ru_conf.iqWidth_PRACH,
+                        p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
+                        frame_id, subframe_id, slot_id, sym_id,
+                        pPrachCPConfig->startPrbc, pPrachCPConfig->numPrbc,
+                        cc_id, prach_port_id,
+                        xran_get_upul_seqid(pHandle, cc_id, prach_port_id));
+                }
+                retval = 1;
+            }
+        } /* if(p_xran_dev_ctx->enablePrach ..... */
+    }
+  return retval;
+}
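
xran_process_tx_prach_cp_off selects its PRACH C-Plane parameters per slot when dynamic spectrum sharing is enabled: the slot's index within the DSS period indexes technology[], and a value of 1 selects the NR config, anything else the LTE config. A self-contained sketch of that selection, using stub types in place of struct xran_device_ctx and struct xran_prach_cp_config:

    /* Sketch of the DSS-based PRACH config selection used above.
     * Stub types only; the real fields live in struct xran_device_ctx. */
    #include <stdint.h>
    #include <stdio.h>

    struct prach_cfg_stub { const char *label; };

    struct dev_ctx_stub {
        int dssEnable;
        int dssPeriod;
        int technology[16];                      /* 1 = NR, 0 = LTE, per slot in the period */
        struct prach_cfg_stub PrachCPConfig;     /* NR PRACH parameters  */
        struct prach_cfg_stub PrachCPConfigLTE;  /* LTE PRACH parameters */
    };

    static const struct prach_cfg_stub *
    pick_prach_cfg(const struct dev_ctx_stub *ctx, uint32_t tti)
    {
        if (ctx->dssEnable) {
            int i = tti % ctx->dssPeriod;
            return (ctx->technology[i] == 1) ? &ctx->PrachCPConfig
                                             : &ctx->PrachCPConfigLTE;
        }
        return &ctx->PrachCPConfig;
    }

    int main(void)
    {
        struct dev_ctx_stub ctx = {
            .dssEnable = 1, .dssPeriod = 5,
            .technology = { 1, 1, 1, 0, 0 },     /* 3 NR slots, 2 LTE slots */
            .PrachCPConfig    = { "NR PRACH cfg"  },
            .PrachCPConfigLTE = { "LTE PRACH cfg" },
        };
        for (uint32_t tti = 0; tti < 5; tti++)
            printf("tti %u -> %s\n", tti, pick_prach_cfg(&ctx, tti)->label);
        return 0;
    }
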
+int32_t
+xran_process_tx_srs_cp_off(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id,
+            uint32_t frame_id, uint32_t subframe_id, uint32_t slot_id)
+{
+    int32_t     retval = 0;
+    char        *pos = NULL;
+    char        *p_sec_iq = NULL;
+    void        *mb  = NULL;
+    char        *ext_buff = NULL;
+    uint16_t    ext_buff_len = 0 , num_sections=0 , section_id=0;
+    int32_t     antElm_eAxC_id;
+    uint32_t    vf_id = 0;
+    int32_t     elmIdx;
+    uint32_t    sym_id;
+    enum xran_pkt_dir direction;
+    enum xran_comp_hdr_type staticEn;
 
-                    /* skip, if not scheduled */
-                    if(sym_id < prb_map_elm->nStartSymb || sym_id >= prb_map_elm->nStartSymb + prb_map_elm->numSymb)
-                        return 0;
+    rte_iova_t ext_buff_iova = 0;
+    struct rte_mbuf *tmp = NULL;
+    struct xran_prb_map *prb_map = NULL;
+    struct xran_device_ctx * p_xran_dev_ctx;
+    struct xran_common_counters *pCnt;
+    //struct xran_prach_cp_config *pPrachCPConfig;
+    struct xran_srs_config *p_srs_cfg;
+    struct rte_mbuf *eth_oran_hdr = NULL;
+    struct rte_mbuf_ext_shared_info *p_share_data = NULL;
 
-                    p_share_data = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
-                    p_sec_desc =  prb_map_elm->p_sec_desc[sym_id][0];
-                    p_sec_iq     = ((char*)pos + p_sec_desc->iq_buffer_offset);
 
-                        /* calculate offset for external buffer */
-                    ext_buff_len = p_sec_desc->iq_buffer_len;
-                    ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
-                                    sizeof (struct xran_ecpri_hdr) +
-                                    sizeof (struct radio_app_common_hdr) +
-                                    sizeof(struct data_section_hdr));
+    p_xran_dev_ctx  = (struct xran_device_ctx *)pHandle;
+    if(p_xran_dev_ctx == NULL)
+    {
+        print_err("dev_ctx is NULL. ctx_id=%d, tti=%d, cc_id=%d, ant_id=%d, frame_id=%d, subframe_id=%d, slot_id=%d\n",
+                    ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id);
+        return 0;
+    }
 
-                    ext_buff_len += RTE_PKTMBUF_HEADROOM +
-                                    sizeof (struct xran_ecpri_hdr) +
-                                    sizeof (struct radio_app_common_hdr) +
-                                    sizeof(struct data_section_hdr) + 18;
+    if(p_xran_dev_ctx->xran_port_id >= XRAN_PORTS_NUM)
+        rte_panic("incorrect PORT ID\n");
 
-                        if ((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE)&&(staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC)){
-                        ext_buff     -= sizeof (struct data_section_compression_hdr);
-                        ext_buff_len += sizeof (struct data_section_compression_hdr);
-                    }
+    pCnt            = &p_xran_dev_ctx->fh_counters;
+    //pPrachCPConfig  = &(p_xran_dev_ctx->PrachCPConfig);
+    p_srs_cfg       = &(p_xran_dev_ctx->srs_cfg);
 
-//                    eth_oran_hdr =  rte_pktmbuf_alloc(_eth_mbuf_pool_small);
-                    eth_oran_hdr = xran_ethdi_mbuf_indir_alloc();
+    /* Only O-RU sends SRS U-Plane */
+    direction   = XRAN_DIR_UL;
+    staticEn    = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
+    antElm_eAxC_id  = ant_id + p_srs_cfg->eAxC_offset;
 
-                    if (unlikely (( eth_oran_hdr) == NULL)) {
-                        rte_panic("Failed rte_pktmbuf_alloc\n");
-                    }
+    prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers->pData;
+    if(prb_map)
+    {
+        for(elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++)
+        {
+            struct xran_prb_elm *prb_map_elm = &prb_map->prbMap[elmIdx];
+            struct xran_section_desc * p_sec_desc = NULL;
 
-                    p_share_data->free_cb = extbuf_free_callback;
-                    p_share_data->fcb_opaque = NULL;
-                    rte_mbuf_ext_refcnt_set(p_share_data, 1);
+            if(prb_map_elm == NULL)
+                rte_panic("p_sec_desc == NULL\n");
 
-                    ext_buff_iova = rte_mempool_virt2iova(mb);
-                    if (unlikely (( ext_buff_iova) == 0)) {
-                        rte_panic("Failed rte_mem_virt2iova \n");
-                    }
+            sym_id  = prb_map->prbMap[elmIdx].nStartSymb;
+            pos     = (char*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
+            mb      = (void*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
 
-                    if (unlikely (( (rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA)) {
-                        rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
-                    }
 
-                    rte_pktmbuf_attach_extbuf(eth_oran_hdr,
-                                              ext_buff,
-                                              ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
-                                              ext_buff_len,
-                                              p_share_data);
+            p_share_data    = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
+            p_sec_desc      = &prb_map_elm->sec_desc[sym_id][0];
+            p_sec_iq        = ((char*)pos + p_sec_desc->iq_buffer_offset);
 
-                    rte_pktmbuf_reset_headroom(eth_oran_hdr);
+            /* calculate offset for external buffer */
+            ext_buff_len = p_sec_desc->iq_buffer_len;
 
-                    tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
-                    if (unlikely (( tmp) == NULL)) {
-                        rte_panic("Failed rte_pktmbuf_prepend \n");
-                    }
-                    send_mb = eth_oran_hdr;
+            ext_buff = p_sec_iq - (RTE_PKTMBUF_HEADROOM +
+                            sizeof (struct xran_ecpri_hdr) +
+                            sizeof (struct radio_app_common_hdr) +
+                            sizeof(struct data_section_hdr));
 
-                    uint8_t seq_id = (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) ?
-                                          xran_get_updl_seqid(pHandle, cc_id, ant_elm_eAxC_id) :
-                                          xran_get_upul_seqid(pHandle, cc_id, ant_elm_eAxC_id);
-                    /* first all PRBs */
-                    int32_t num_bytes = prepare_symbol_ex(direction, sec_id,
-                                      send_mb,
-                                      (uint8_t *)p_sec_iq,
-                                      prb_map_elm->compMethod,
-                                      prb_map_elm->iqWidth,
-                                      p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
-                                      frame_id, subframe_id, slot_id, sym_id,
-                                      prb_map_elm->nRBStart, prb_map_elm->nRBSize,
-                                      cc_id, ant_elm_eAxC_id,
-                                      seq_id,
-                                          0,
-                                          staticEn);
+            ext_buff_len += RTE_PKTMBUF_HEADROOM +
+                            sizeof (struct xran_ecpri_hdr) +
+                            sizeof (struct radio_app_common_hdr) +
+                            sizeof(struct data_section_hdr) + 18;
 
-                    rte_mbuf_sanity_check((struct rte_mbuf *)send_mb, 0);
-                    pCnt->tx_counter++;
-                    pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len((struct rte_mbuf *)send_mb);
-                    p_xran_dev_ctx->send_upmbuf2ring((struct rte_mbuf *)send_mb, ETHER_TYPE_ECPRI, vf_id);
-                }
-            } else {
-                printf("(%d %d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, ant_elm_eAxC_id, sym_id);
+            if((prb_map_elm->compMethod != XRAN_COMPMETHOD_NONE)
+                && (staticEn == XRAN_COMP_HDR_TYPE_DYNAMIC))
+            {
+                ext_buff     -= sizeof (struct data_section_compression_hdr);
+                ext_buff_len += sizeof (struct data_section_compression_hdr);
             }
-        }
-    }
+
+            eth_oran_hdr = xran_ethdi_mbuf_indir_alloc();
+            if(unlikely(eth_oran_hdr == NULL))
+                rte_panic("Failed rte_pktmbuf_alloc\n");
+
+            p_share_data->free_cb = extbuf_free_callback;
+            p_share_data->fcb_opaque = NULL;
+            rte_mbuf_ext_refcnt_set(p_share_data, 1);
+
+            ext_buff_iova = rte_mempool_virt2iova(mb);
+            if(unlikely(ext_buff_iova == 0 || ext_buff_iova == RTE_BAD_IOVA))
+                rte_panic("Failed rte_mem_virt2iova : %lu\n", ext_buff_iova);
+
+            rte_pktmbuf_attach_extbuf(eth_oran_hdr,
+                                      ext_buff,
+                                      ext_buff_iova + RTE_PTR_DIFF(ext_buff , mb),
+                                      ext_buff_len,
+                                      p_share_data);
+
+            rte_pktmbuf_reset_headroom(eth_oran_hdr);
+
+            tmp = (struct rte_mbuf *)rte_pktmbuf_prepend(eth_oran_hdr, sizeof(struct rte_ether_hdr));
+            if(unlikely(tmp == NULL))
+                rte_panic("Failed rte_pktmbuf_prepend \n");
+
+            uint8_t seq_id = xran_get_upul_seqid(pHandle, cc_id, antElm_eAxC_id);
+
+            num_sections = (prb_map_elm->bf_weight.extType == 1) ? prb_map_elm->bf_weight.numSetBFWs : 1 ;
+
+            prepare_symbol_ex(direction, prb_map_elm->nSectId,
+                              (void *)eth_oran_hdr, (uint8_t *)p_sec_iq,
+                              prb_map_elm->compMethod, prb_map_elm->iqWidth,
+                              p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
+                              frame_id, subframe_id, slot_id, sym_id,
+                              prb_map_elm->UP_nRBStart, prb_map_elm->UP_nRBSize,
+                              cc_id, antElm_eAxC_id,
+                              seq_id,
+                              0,
+                              staticEn,
+                              num_sections,
+                              0);
+
+            section_id += num_sections;
+
+            rte_mbuf_sanity_check(eth_oran_hdr, 0);
+
+            vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, antElm_eAxC_id);
+            pCnt->tx_counter++;
+            pCnt->tx_bytes_counter += rte_pktmbuf_pkt_len(eth_oran_hdr);
+            p_xran_dev_ctx->send_upmbuf2ring(eth_oran_hdr, ETHER_TYPE_ECPRI, vf_id);
+        } /* for(elmIdx = 0; elmIdx < prb_map->nPrbElm && elmIdx < XRAN_MAX_SECTIONS_PER_SLOT; elmIdx++) */
+    } /* if(prb_map) */
+    else
+    {
+        printf("(%d %d %d) prb_map == NULL\n", tti % XRAN_N_FE_BUF_LEN, cc_id, antElm_eAxC_id);
     }
 
     return retval;
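
The SRS path above attaches the IQ buffer as an external mbuf segment and walks the payload pointer back over the eCPRI, radio-app and data-section headers (plus the compression header when compression is enabled and the header type is dynamic) so they can be written in place in front of the IQ data. A rough sketch of that headroom arithmetic; the header sizes are illustrative stand-ins, not the library's structs:

    /* Sketch of the external-buffer headroom arithmetic used above. */
    #include <stdint.h>
    #include <stdio.h>

    #define PKTMBUF_HEADROOM_EX      128u  /* stands in for RTE_PKTMBUF_HEADROOM          */
    #define ECPRI_HDR_SIZE_EX          8u  /* stands in for struct xran_ecpri_hdr         */
    #define RADIO_APP_HDR_SIZE_EX      4u  /* stands in for struct radio_app_common_hdr   */
    #define DATA_SECTION_HDR_SIZE_EX   4u  /* stands in for struct data_section_hdr       */
    #define UDCOMP_HDR_SIZE_EX         2u  /* struct data_section_compression_hdr         */

    int main(void)
    {
        int dynamic_comp_hdr = 1;   /* compMethod != NONE and dynamic compression header  */
        uint32_t iq_len = 3264;     /* example IQ payload length in bytes                  */

        uint32_t hdr_space = PKTMBUF_HEADROOM_EX + ECPRI_HDR_SIZE_EX +
                             RADIO_APP_HDR_SIZE_EX + DATA_SECTION_HDR_SIZE_EX;
        if (dynamic_comp_hdr)
            hdr_space += UDCOMP_HDR_SIZE_EX;

        /* ext_buff starts hdr_space bytes before the IQ data; its length covers the
         * headers, the IQ payload, and the fixed 18-byte allowance added in the code. */
        uint32_t ext_buff_len = hdr_space + iq_len + 18u;
        printf("headers reserved: %u bytes, ext buffer length: %u bytes\n",
               hdr_space, ext_buff_len);
        return 0;
    }
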
@@ -685,7 +756,7 @@ xran_attach_up_ext_buf(uint16_t vf_id, int8_t* p_ext_buff_start, int8_t* p_ext_b
     return mb_oran_hdr_ext;
 }
 
-int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t num_cc, int32_t num_ant, uint32_t frame_id,
+int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id,
     uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
     uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
 {
@@ -699,14 +770,16 @@ int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, ui
         p_desc->pHandle     = pHandle;
         p_desc->ctx_id      = ctx_id;
         p_desc->tti         = tti;
-        p_desc->cc_id       = num_cc;
-        p_desc->ant_id      = num_ant;
+        p_desc->start_cc    = start_cc;
+        p_desc->cc_num      = num_cc;
+        p_desc->start_ant   = start_ant;
+        p_desc->ant_num     = num_ant;
         p_desc->frame_id    = frame_id;
         p_desc->subframe_id = subframe_id;
         p_desc->slot_id     = slot_id;
         p_desc->sym_id      = sym_id;
         p_desc->compType    = (uint32_t)compType;
-        p_desc->direction    = (uint32_t)direction;
+        p_desc->direction   = (uint32_t)direction;
         p_desc->xran_port_id    = xran_port_id;
         p_desc->p_sec_db = (void*)p_sec_db;
 
@@ -726,7 +799,7 @@ int32_t xran_process_tx_sym_cp_on_dispatch_opt(void* pHandle, uint8_t ctx_id, ui
 }
 
 int32_t
-xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
+xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant, int32_t num_ant, uint32_t frame_id, uint32_t subframe_id,
                                    uint32_t slot_id, uint32_t sym_id)
 {
     int32_t     retval = 0;
@@ -739,8 +812,10 @@ xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti,
         p_desc->pHandle     = pHandle;
         p_desc->ctx_id      = ctx_id;
         p_desc->tti         = tti;
-        p_desc->cc_id       = cc_id;
-        p_desc->ant_id      = ant_id;
+        p_desc->start_cc    = start_cc;
+        p_desc->cc_num       = num_cc;
+        p_desc->start_ant    = start_ant;
+        p_desc->ant_num      = num_ant;
         p_desc->frame_id    = frame_id;
         p_desc->subframe_id = subframe_id;
         p_desc->slot_id     = slot_id;
@@ -762,29 +837,22 @@ xran_process_tx_sym_cp_on_dispatch(void *pHandle, uint8_t ctx_id, uint32_t tti,
 }
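
Both dispatch wrappers now fill the work descriptor with (start_cc, cc_num) and (start_ant, ant_num) ranges instead of a single carrier/antenna pair, so one enqueued job can cover a block of component carriers and eAxC ports. A sketch of how such a range descriptor might be consumed (stub struct; the real descriptor type behind p_desc is not shown in these hunks):

    /* Sketch of a range-based TX-symbol descriptor and its consumer loop. */
    #include <stdint.h>
    #include <stdio.h>

    struct tx_sym_desc_stub {
        uint32_t tti;
        int32_t  start_cc, cc_num;
        int32_t  start_ant, ant_num;
        uint32_t sym_id;
    };

    static void process_desc(const struct tx_sym_desc_stub *d)
    {
        for (int32_t cc = d->start_cc; cc < d->start_cc + d->cc_num; cc++)
            for (int32_t ant = d->start_ant; ant < d->start_ant + d->ant_num; ant++)
                printf("tti %u sym %u: cc %d ant %d\n", d->tti, d->sym_id, cc, ant);
    }

    int main(void)
    {
        struct tx_sym_desc_stub d = {
            .tti = 42, .sym_id = 3,
            .start_cc = 0, .cc_num = 2,    /* two component carriers */
            .start_ant = 4, .ant_num = 4,  /* eAxC ports 4..7        */
        };
        process_desc(&d);
        return 0;
    }
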
 
 int32_t
-xran_process_tx_sym_cp_on(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t cc_id, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
+xran_process_tx_sym_cp_on(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t cc_id, int32_t start_ant, int32_t ant_id, uint32_t frame_id, uint32_t subframe_id,
     uint32_t slot_id, uint32_t sym_id)
 {
     int32_t     retval = 0;
-
-    struct rte_mbuf *eth_oran_hdr = NULL;
-    char        *ext_buff = NULL;
     uint16_t    ext_buff_len = 0;
-    struct rte_mbuf *tmp = NULL;
-    rte_iova_t ext_buff_iova = 0;
     char        *pos      = NULL;
     char        *p_sec_iq = NULL;
     void        *mb  = NULL;
     struct rte_mbuf *to_free_mbuf =  NULL;
-    int         prb_num = 0;
+    //int         prb_num = 0;
     uint16_t    iq_sample_size_bits = 16;
     uint32_t    next = 0;
     int32_t     num_sections = 0;
     uint16_t    len  = 0;
     int16_t     len2 = 0;
     uint16_t    i    = 0;
-
-    uint64_t    t1;
     struct mbuf_table  loc_tx_mbufs;
     struct xran_up_pkt_gen_params loc_xp;
 
@@ -800,513 +868,818 @@ xran_process_tx_sym_cp_on(void *pHandle, uint8_t ctx_id, uint32_t tti, int32_t c
     {
         compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
 
+        if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
+            direction = XRAN_DIR_DL; /* O-DU */
+            //prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
+        } else {
+            direction = XRAN_DIR_UL; /* RU */
+            //prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
+        }
 
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
-        direction = XRAN_DIR_DL; /* O-DU */
-        prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
-    } else {
-        direction = XRAN_DIR_UL; /* RU */
-        prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
-    }
-
-    vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
-    next = 0;
-    num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);
-    /* iterate C-Plane configuration to generate corresponding U-Plane */
-    if(num_sections)
-        prepare_sf_slot_sym(direction, frame_id, subframe_id, slot_id, sym_id, &loc_xp);
+        vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, ant_id);
+        next = 0;
+        num_sections = xran_cp_getsize_section_info(pHandle, direction, cc_id, ant_id, ctx_id);
+        /* iterate C-Plane configuration to generate corresponding U-Plane */
+        if(num_sections)
+            prepare_sf_slot_sym(direction, frame_id, subframe_id, slot_id, sym_id, &loc_xp);
 
-    loc_tx_mbufs.len = 0;
-    while(next < num_sections) {
-        sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);
+        loc_tx_mbufs.len = 0;
+        while(next < num_sections) {
+            sectinfo = xran_cp_iterate_section_info(pHandle, direction, cc_id, ant_id, ctx_id, &next);
 
-        if(sectinfo == NULL)
-            break;
+            if(sectinfo == NULL)
+                break;
 
-        if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) {   /* only supports type 1 */
-            print_err("Invalid section type in section DB - %d", sectinfo->type);
-            continue;
-        }
+            if(sectinfo->type != XRAN_CP_SECTIONTYPE_1) {   /* only supports type 1 */
+                print_err("Invalid section type in section DB - %d", sectinfo->type);
+                continue;
+            }
 
-        /* skip, if not scheduled */
-        if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)
-            continue;
+            /* skip, if not scheduled */
+            if(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol)
+                continue;
 
 
-        if(sectinfo->compMeth)
-            iq_sample_size_bits = sectinfo->iqWidth;
+            if(sectinfo->compMeth)
+                iq_sample_size_bits = sectinfo->iqWidth;
 
-        print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
-                    sectinfo->type, sectinfo->id, sectinfo->startPrbc,
-                    sectinfo->numPrbc,sectinfo->startSymId, sectinfo->numSymbol);
+            print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
+                        sectinfo->type, sectinfo->id, sectinfo->startPrbc,
+                        sectinfo->numPrbc,sectinfo->startSymId, sectinfo->numSymbol);
 
-        p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sectinfo->id];
+            p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sectinfo->id];
 
-        len  = loc_tx_mbufs.len;
-        len2 = 0;
-        i    = 0;
+            len  = loc_tx_mbufs.len;
+            len2 = 0;
+            i    = 0;
 
-        //Added for Klocworks
-        if (len >= MBUF_TABLE_SIZE) {
-            len = MBUF_TABLE_SIZE - 1;
-            rte_panic("len >= MBUF_TABLE_SIZE\n");
-        }
+            //Added for Klocworks
+            if (len >= MBUF_TABLE_SIZE) {
+                len = MBUF_TABLE_SIZE - 1;
+                rte_panic("len >= MBUF_TABLE_SIZE\n");
+            }
 
-        to_free_mbuf  = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id];
-        pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
-        mb  = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
+            to_free_mbuf  = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id];
+            pos = (char*) p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
+            mb  = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
 
-        if(mb == NULL) {
-            rte_panic("mb == NULL\n");
-        }
+            if(mb == NULL) {
+                rte_panic("mb == NULL\n");
+            }
 
-        p_sec_iq     = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
-        ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
+            p_sec_iq     = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
+            ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
 
-        mb = xran_attach_up_ext_buf(vf_id, (int8_t *)mb, (int8_t *) p_sec_iq,
-                            (uint16_t) ext_buff_len,
-                                p_share_data, (enum xran_compression_method) sectinfo->compMeth, compType);
-        p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id] =  mb;
-        rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
+            mb = xran_attach_up_ext_buf(vf_id, (int8_t *)mb, (int8_t *) p_sec_iq,
+                                (uint16_t) ext_buff_len,
+                                    p_share_data, (enum xran_compression_method) sectinfo->compMeth, compType);
+            p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][sectinfo->id] =  mb;
+            rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
 
-        if(to_free_mbuf) {
-            rte_pktmbuf_free(to_free_mbuf);
-        }
+            if(to_free_mbuf) {
+                rte_pktmbuf_free(to_free_mbuf);
+            }
 
-        /* first all PRBs */
-        prepare_symbol_opt(direction, sectinfo->id,
-                          mb,
-                          (struct rb_map *)p_sec_iq,
-                          sectinfo->compMeth,
-                          sectinfo->iqWidth,
-                          p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
-                              sectinfo->startPrbc,
-                              sectinfo->numPrbc,
-                              cc_id,
-                              ant_id,
-                          xran_get_updl_seqid(pHandle, cc_id, ant_id),
-                          0,
-                              &loc_xp,
-                              compType);
-
-        /* if we don't need to do any fragmentation */
-        if (likely (p_xran_dev_ctx->fh_init.mtu >=
-                        sectinfo->numPrbc * (3*iq_sample_size_bits + 1))) {
-            /* no fragmentation */
-            loc_tx_mbufs.m_table[len] = mb;
-            len2 = 1;
-        } else {
-            /* fragmentation */
-            uint8_t * seq_num = xran_get_updl_seqid_addr(pHandle, cc_id, ant_id);
-            if(seq_num)
-                (*seq_num)--;
-            else
-                rte_panic("pointer to seq number is NULL [CC %d Ant %d]\n", cc_id, ant_id);
-
-            len2 = xran_app_fragment_packet(mb,
-                            &loc_tx_mbufs.m_table[len],
-                            (uint16_t)(MBUF_TABLE_SIZE - len),
-                            p_xran_dev_ctx->fh_init.mtu,
-                            p_xran_dev_ctx->direct_pool,
-                            p_xran_dev_ctx->indirect_pool,
-                            sectinfo->startPrbc,
-                            sectinfo->numPrbc,
-                            seq_num,
+            /* first all PRBs */
+            prepare_symbol_opt(direction, sectinfo->id,
+                            mb,
+                            (struct rb_map *)p_sec_iq,
+                            sectinfo->compMeth,
                             sectinfo->iqWidth,
-                            ((sectinfo->iqWidth == 16)||(compType==XRAN_COMP_HDR_TYPE_STATIC)) ? 0 : 1);
-
-            /* Free input packet */
-            rte_pktmbuf_free(mb);
-
-            /* If we fail to fragment the packet */
-            if (unlikely (len2 < 0)){
-                print_err("len2= %d\n", len2);
+                            p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder,
+                                sectinfo->startPrbc,
+                                sectinfo->numPrbc,
+                                cc_id,
+                                ant_id,
+                            xran_get_updl_seqid(pHandle, cc_id, ant_id),
+                            0,
+                                &loc_xp,
+                                compType);
+
+            /* if we don't need to do any fragmentation */
+            if (likely (p_xran_dev_ctx->fh_init.mtu >=
+                            sectinfo->numPrbc * (3*iq_sample_size_bits + 1))) {
+                /* no fragmentation */
+                loc_tx_mbufs.m_table[len] = mb;
+                len2 = 1;
+            } else {
+                /* current code should not go to fragmentation as it should be taken care of by section allocation already */
+                print_err("should not go to fragmentation mtu %d packet size %d\n", p_xran_dev_ctx->fh_init.mtu, sectinfo->numPrbc * (3*iq_sample_size_bits + 1));
                 return 0;
             }
-        }
-        if(len2 > 1){
-            for (i = len; i < len + len2; i ++) {
-                struct rte_mbuf *m;
-                m = loc_tx_mbufs.m_table[i];
-                struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
-                    rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
-                if (eth_hdr == NULL) {
-                    rte_panic("No headroom in mbuf.\n");
+            if(len2 > 1){
+                for (i = len; i < len + len2; i ++) {
+                    struct rte_mbuf *m;
+                    m = loc_tx_mbufs.m_table[i];
+                    struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
+                        rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
+                    if (eth_hdr == NULL) {
+                        rte_panic("No headroom in mbuf.\n");
+                    }
                 }
             }
-        }
 
-        len += len2;
-        if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {
-              rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
-        }
-        loc_tx_mbufs.len = len;
-    } /* while(section) */
+            len += len2;
+            if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM)) {
+                rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
+            }
+            loc_tx_mbufs.len = len;
+        } /* while(section) */
 
-    /* Transmit packets */
-    xran_send_burst(p_xran_dev_ctx, &loc_tx_mbufs, vf_id);
-    loc_tx_mbufs.len = 0;
-    retval = 1;
+        /* Transmit packets */
+        xran_send_burst(p_xran_dev_ctx, &loc_tx_mbufs, vf_id);
+        loc_tx_mbufs.len = 0;
+        retval = 1;
     }
 
     return retval;
 }
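
With xran_app_frag.h dropped (see the removed include at the top of the diff) and the fragmentation branch reduced to an error path, each U-Plane section is expected to fit the configured MTU already; the surviving check compares the MTU against numPrbc * (3*iq_sample_size_bits + 1) bytes. A worked example of what that bound allows per section, with illustrative MTU and IQ-width values:

    /* Worked example of the no-fragmentation bound kept above:
     * 3*iqWidth bytes of IQ per PRB (12 REs x 2 components) plus one
     * extra byte, matching the check in the code. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_prbs_per_section(uint32_t mtu, uint32_t iq_width_bits)
    {
        uint32_t bytes_per_prb = 3u * iq_width_bits + 1u;
        return mtu / bytes_per_prb;
    }

    int main(void)
    {
        printf("MTU 1500, 16-bit IQ : %u PRBs per section\n", max_prbs_per_section(1500, 16));
        printf("MTU 1500,  9-bit BFP: %u PRBs per section\n", max_prbs_per_section(1500, 9));
        printf("MTU 9600, 16-bit IQ : %u PRBs per section\n", max_prbs_per_section(9600, 16));
        return 0;
    }
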
 
-//#define TRANSMIT_BURST
-//#define ENABLE_DEBUG_COREDUMP
-
-#define ETHER_TYPE_ECPRI_BE (0xFEAE)
-
-int32_t xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t num_cc, int32_t num_ant, uint32_t frame_id,
-    uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
-    uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
+int32_t
+xran_prepare_up_dl_sym(uint16_t xran_port_id, uint32_t nSlotIdx,  uint32_t nCcStart, uint32_t nCcNum, uint32_t nSymMask, uint32_t nAntStart,
+                            uint32_t nAntNum, uint32_t nSymStart, uint32_t nSymNum)
 {
-    uint8_t seq_id = 0;
-    int32_t cc_id = 0, ant_id = 0;
-    char* ext_buff = NULL;
-    uint16_t ext_buff_len = 0;
-    rte_iova_t ext_buff_iova = 0;
-    char* pos = NULL;
-    char* p_sec_iq = NULL;
-    void* mb = NULL, *mb_base = NULL;
-    struct rte_mbuf* to_free_mbuf = NULL;
-    uint16_t iq_sample_size_bits = 16;
-    uint32_t next = 0;
-    int32_t num_sections = 0, total_sections = 0;
-    uint16_t len = 0, len2 = 0, len_frag = 0;
-    char* pStart = 0;
-    uint16_t cid = 0;
-    uint8_t compMeth = 0;
-    uint8_t iqWidth = 0;
-    int parm_size = 0;
-    int32_t n_bytes = 0, elm_bytes = 0;
-    uint16_t section_id;
-    uint16_t prb_num = 0;
-    uint16_t prb_start = 0;
-    int16_t nPktSize = 0;
-    uint16_t ecpri_payl_size = 0;
-#ifdef TRANSMIT_BURST
-    struct mbuf_table  loc_tx_mbufs;
+    int32_t     retval = 0;
+    uint32_t    tti=0;
+    uint32_t    numSlotMu1 = 5;
+#if XRAN_MLOG_VAR
+    uint32_t    mlogVar[15];
+    uint32_t    mlogVarCnt = 0;
 #endif
-    struct mbuf_table  loc_tx_mbufs_fragmented;
-    struct xran_up_pkt_gen_params xp;
-    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
-    struct xran_section_info* sectinfo = NULL;
-    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
-    uint16_t vf_id = 0;
-    struct rte_mbuf_ext_shared_info* p_share_data = NULL;
-    struct xran_sectioninfo_db* ptr_sect_elm = NULL;
-    struct rte_mbuf* mb_oran_hdr_ext = NULL;
-    struct rte_mempool_objhdr* iova_hdr = NULL;
-    struct xran_eaxcid_config* conf = &(p_xran_dev_ctx->eAxc_id_cfg);
-    struct rte_ether_hdr* ether_hdr = NULL;
-    struct xran_ecpri_hdr* ecpri_hdr = NULL;
-    struct radio_app_common_hdr* app_hdr = NULL;
-    struct data_section_hdr* section_hdr = NULL;
-    struct data_section_compression_hdr* compression_hdr = NULL;
-    const int16_t ccid_pos = conf->bit_ccId;
-    const int16_t ccid_mask = conf->mask_ccId;
-    const int16_t antid_pos = conf->bit_ruPortId;
-    const int16_t antid_mask = conf->mask_ruPortId;
-
-    const int16_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
-    const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
-    uint16_t comp_head_upd = 0;
+    unsigned long t1 = MLogXRANTick();
 
-    const int16_t total_header_size = (RTE_PKTMBUF_HEADROOM +
-        sizeof(struct xran_ecpri_hdr) +
-        sizeof(struct radio_app_common_hdr) +
-        sizeof(struct data_section_hdr));
+    void        *pHandle = NULL;
+    int32_t     ant_id   = 0;
+    int32_t     cc_id    = 0;
+    uint8_t     num_eAxc = 0;
+    uint8_t     num_eAxc_prach = 0;
+    uint8_t     num_eAxAntElm = 0;
+    uint8_t     num_CCPorts = 0;
+    uint32_t    frame_id    = 0;
+    uint32_t    subframe_id = 0;
+    uint32_t    slot_id     = 0;
+    uint32_t    sym_id      = 0;
+    uint32_t    sym_idx_to_send  = 0;
+    uint32_t    idxSym;
+    uint8_t     ctx_id;
+    enum xran_in_period inPeriod;
+    uint32_t interval;
+    uint8_t PortId;
+    struct xran_device_ctx * p_xran_dev_ctx = NULL;
 
-    uint16_t* __restrict pSrc = NULL;
-    uint16_t* __restrict pDst = NULL;
+    p_xran_dev_ctx = xran_dev_get_ctx_by_id(xran_port_id);
 
-    const enum xran_input_byte_order iq_buf_byte_order = p_xran_dev_ctx->fh_cfg.ru_conf.byteOrder;
+    if(p_xran_dev_ctx == NULL)
+        return 0;
 
-    /* radio app header */
-    xp.app_params.data_feature.value = 0x10;
-    xp.app_params.data_feature.data_direction = direction;
-    xp.app_params.frame_id = frame_id;
-    xp.app_params.sf_slot_sym.subframe_id = subframe_id;
-    xp.app_params.sf_slot_sym.slot_id = slot_id;
-    xp.app_params.sf_slot_sym.symb_id = sym_id;
-    /* convert to network byte order */
-    xp.app_params.sf_slot_sym.value = rte_cpu_to_be_16(xp.app_params.sf_slot_sym.value);
+    if(p_xran_dev_ctx->xran2phy_mem_ready == 0)
+        return 0;
 
+    interval = p_xran_dev_ctx->interval_us_local;
+    PortId = p_xran_dev_ctx->xran_port_id;
 
-    for (cc_id = 0; cc_id < num_cc; cc_id++)
-    {
-        for (ant_id = 0; ant_id < num_ant; ant_id++)
-        {
-            ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][ant_id];
-            if (unlikely(ptr_sect_elm == NULL))
-                return (0);
-            num_sections = ptr_sect_elm->cur_index;
+    pHandle =  p_xran_dev_ctx;
 
-            /* iterate C-Plane configuration to generate corresponding U-Plane */
-            vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
-            pos = (char*)p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData;
-            mb_base = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
-            if (unlikely(mb_base == NULL))
+    for (idxSym = nSymStart; idxSym < (nSymStart + nSymNum) && idxSym < XRAN_NUM_OF_SYMBOL_PER_SLOT; idxSym++) {
+        t1 = MLogXRANTick();
+        if(((1 << idxSym) & nSymMask) ) {
+            sym_idx_to_send = nSlotIdx*XRAN_NUM_OF_SYMBOL_PER_SLOT + idxSym;
+            XranOffsetSym(p_xran_dev_ctx->sym_up, sym_idx_to_send, XRAN_NUM_OF_SYMBOL_PER_SLOT*SLOTNUM_PER_SUBFRAME(interval)*1000, &inPeriod);
+            tti         = XranGetTtiNum(sym_idx_to_send, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+            slot_id     = XranGetSlotNum(tti, SLOTNUM_PER_SUBFRAME(interval));
+            subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
+
+            uint16_t sfnSecStart = xran_getSfnSecStart();
+            if(unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
             {
-                rte_panic("mb == NULL\n");
+                // For DU
+                sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
             }
-
-            cid = ((cc_id << ccid_pos) & ccid_mask) | ((ant_id << antid_pos) & antid_mask);
-            cid = rte_cpu_to_be_16(cid);
-            iq_sample_size_bits = 16;
-
-#ifdef TRANSMIT_BURST
-            loc_tx_mbufs.len = 0;
-#endif
-            loc_tx_mbufs_fragmented.len = 0;
-            len_frag = 0;
-#pragma loop_count min=1, max=16
-            for (next=0; next< num_sections; next++)
+            else if(unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
             {
-                sectinfo = &ptr_sect_elm->list[next];
-
-                if (unlikely(sectinfo == NULL))
-                    break;
-                if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
-                {   /* only supports type 1 */
-                    print_err("Invalid section type in section DB - %d", sectinfo->type);
-                    continue;
+                // For RU
+                if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
+                {
+                    sfnSecStart -= NUM_OF_FRAMES_PER_SECOND;
+                }
+                else
+                {
+                    sfnSecStart += NUM_OF_FRAMES_PER_SFN_PERIOD - NUM_OF_FRAMES_PER_SECOND;
                 }
-                /* skip, if not scheduled */
-                if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
-                    continue;
+            }
+            frame_id    = XranGetFrameNum(tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+            // ORAN frameId, 8 bits, [0, 255]
+            frame_id = (frame_id & 0xff);
 
-                compMeth = sectinfo->compMeth;
-                iqWidth = sectinfo->iqWidth;
-                section_id = sectinfo->id;
-                prb_start = sectinfo->startPrbc;
-                prb_num = sectinfo->numPrbc;
-                seq_id = xran_updl_seq_id_num[xran_port_id][cc_id][ant_id]++;
-                len2 = 0;
+            sym_id      = XranGetSymNum(sym_idx_to_send, XRAN_NUM_OF_SYMBOL_PER_SLOT);
+            ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
 
-                if (compMeth)
-                    iq_sample_size_bits = iqWidth;
+            print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
 
-                comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
+#if XRAN_MLOG_VAR
+            mlogVarCnt = 0;
+            mlogVar[mlogVarCnt++] = 0xAAAAAAAA;
+            mlogVar[mlogVarCnt++] = xran_lib_ota_sym_idx[PortId];
+            mlogVar[mlogVarCnt++] = idxSym;
+            mlogVar[mlogVarCnt++] = abs(p_xran_dev_ctx->sym_up);
+            mlogVar[mlogVarCnt++] = tti;
+            mlogVar[mlogVarCnt++] = frame_id;
+            mlogVar[mlogVarCnt++] = subframe_id;
+            mlogVar[mlogVarCnt++] = slot_id;
+            mlogVar[mlogVarCnt++] = sym_id;
+            mlogVar[mlogVarCnt++] = PortId;
+            MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
+#endif
+            if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
+                    && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B)
+            {
+                    num_eAxc    = xran_get_num_eAxcUl(pHandle);
+            }
+            else
+            {
+                    num_eAxc    = xran_get_num_eAxc(pHandle);
+            }
 
-                print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
-                    sectinfo->type, sectinfo->id, sectinfo->startPrbc,
-                    sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
+            num_eAxc_prach = ((num_eAxc > XRAN_MAX_PRACH_ANT_NUM)? XRAN_MAX_PRACH_ANT_NUM : num_eAxc);
+            num_CCPorts = xran_get_num_cc(pHandle);
 
-                p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][section_id];
-                p_share_data->free_cb = extbuf_free_callback;
-                p_share_data->fcb_opaque = NULL;
-                rte_mbuf_ext_refcnt_set(p_share_data, 1);
+            /* U-Plane */
+            if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP) {
+                enum xran_comp_hdr_type compType;
+                enum xran_pkt_dir direction;
+                //uint32_t prb_num;
+                uint32_t loc_ret = 1;
+                uint16_t xran_port_id;
+                PSECTION_DB_TYPE p_sec_db = NULL;
 
-#ifdef TRANSMIT_BURST
-                len = loc_tx_mbufs.len;
-                //Added for Klocworks
-                if (unlikely(len >= MBUF_TABLE_SIZE))
+                compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
+
+                if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)
                 {
-                    len = MBUF_TABLE_SIZE - 1;
-                    rte_panic("len >= MBUF_TABLE_SIZE\n");
+                    direction = XRAN_DIR_DL; /* O-DU */
+                    //prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
+                }
+                else
+                {
+                    direction = XRAN_DIR_UL; /* RU */
+                    //prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
                 }
-#endif
-                p_sec_iq = ((char*)pos + sectinfo->sec_desc[sym_id].iq_buffer_offset);
-                ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
 
-                ext_buff = p_sec_iq - total_header_size;
-                ext_buff_len += (total_header_size + 18);
+                if(unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM))
+                {
+                    print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
+                    loc_ret = 0;
+                }
 
-                if (comp_head_upd)
+                if(unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX))
                 {
-                    ext_buff -= sizeof(struct data_section_compression_hdr);
-                    ext_buff_len += sizeof(struct data_section_compression_hdr);
+                    print_err("Invalid Context id - %d", ctx_id);
+                    loc_ret = 0;
                 }
 
-                mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
-                if (unlikely((mb_oran_hdr_ext) == NULL))
+                if(unlikely(direction > XRAN_DIR_MAX))
                 {
-                    rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
+                    print_err("Invalid direction - %d", direction);
+                    loc_ret = 0;
                 }
 
-                iova_hdr = (struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size);
-                ext_buff_iova = iova_hdr->iova;
+                if(unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX))
+                {
+                    print_err("Invalid CC id - %d", num_CCPorts);
+                    loc_ret = 0;
+                }
 
-#ifdef ENABLE_DEBUG_COREDUMP
-                if (unlikely(ext_buff_iova == 0))
+                if(unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR)))
                 {
-                    rte_panic("Failed rte_mem_virt2iova\n");
+                    print_err("Invalid eAxC id - %d", num_eAxc);
+                    loc_ret = 0;
                 }
-                if (unlikely(((rte_iova_t)ext_buff_iova) == RTE_BAD_IOVA))
+
+                xran_port_id = p_xran_dev_ctx->xran_port_id;
+                p_sec_db = p_sectiondb[p_xran_dev_ctx->xran_port_id];
+                if(unlikely(p_sec_db == NULL))
                 {
-                    rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
+                    print_err("p_sec_db == NULL\n");
+                    loc_ret = 0;
+                }
+
+                if (loc_ret) {
+                    retval = xran_process_tx_sym_cp_on_opt(pHandle, ctx_id, tti,
+                                        nCcStart, nCcNum, nAntStart, nAntNum, frame_id, subframe_id, slot_id, idxSym,
+                                        compType, direction, xran_port_id, p_sec_db);
+                } else {
+                    print_err("loc_ret %d\n", loc_ret);
+                    retval = 0;
                 }
+            } else {
+                for (ant_id = 0; ant_id < num_eAxc; ant_id++) {
+                    for (cc_id = 0; cc_id < num_CCPorts; cc_id++) {
+                        //struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
+                        if(p_xran_dev_ctx->puschMaskEnable)
+                        {
+                            if((tti % numSlotMu1) != p_xran_dev_ctx->puschMaskSlot)
+                                retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
+                        }
+                        else
+                            retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
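+                        /* When puschMaskEnable is set, U-Plane transmission for this
+                           eAxC/CC is suppressed on the masked slot
+                           (tti % numSlotMu1 == puschMaskSlot) and sent on all other slots. */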
+
+                        if(p_xran_dev_ctx->enablePrach && (ant_id < num_eAxc_prach) )
+                        {
+                            retval = xran_process_tx_prach_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
+                        }
+                    }
+                }
+            }
+
+            /* SRS U-Plane, only for O-RU emulation with Cat B */
+            if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
+                    && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B
+                    && p_xran_dev_ctx->enableSrs
+                    && ((p_xran_dev_ctx->srs_cfg.symbMask >> idxSym)&1))
+            {
+                struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+
+                for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                {
+                    /* check for special or UL slot */
+                    if((xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1)
+                        || (xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) ==  1))
+                    {
+                        if(((tti % p_xran_dev_ctx->fh_cfg.frame_conf.nTddPeriod) == pSrsCfg->slot)
+                            && (p_xran_dev_ctx->ndm_srs_scheduled == 0))
+                        {
+                            int elmIdx;
+                            struct xran_prb_map *prb_map;
+                            prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
+
+                            /* if a PRB map is present for the first antenna, assume SRS might be scheduled */
+                            if(prb_map && prb_map->nPrbElm)
+                            {
+                                /* NDM U-Plane is not enabled */
+                                if(pSrsCfg->ndm_offset == 0)
+                                {
+
+                                    if (prb_map->nPrbElm > 0)
+                                    {
+                                        /* Check symbol range in PRB Map */
+                                        if(sym_id >= prb_map->prbMap[0].nStartSymb
+                                            && sym_id < (prb_map->prbMap[0].nStartSymb + prb_map->prbMap[0].numSymb))
+                                            for(ant_id=0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
+                                                xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id);
+                                    }
+
+                                }
+                                /* NDM U-Plane is enabled, SRS U-Planes will be transmitted after ndm_offset (in slots) */
+                                else
+                                {
+                                    p_xran_dev_ctx->ndm_srs_scheduled   = 1;
+                                    p_xran_dev_ctx->ndm_srs_tti         = tti;
+                                    p_xran_dev_ctx->ndm_srs_txtti       = (tti + pSrsCfg->ndm_offset)%2000;
+                                    p_xran_dev_ctx->ndm_srs_schedperiod = pSrsCfg->slot;
+                                }
+                            }
+                        }
+                    }
+                    /* in non-special slots, check whether a scheduled SRS NDM U-Plane transmission is due */
+                    else if(p_xran_dev_ctx->ndm_srs_scheduled
+                            && p_xran_dev_ctx->ndm_srs_txtti == tti)
+                    {
+                        int ndm_step;
+                        uint32_t srs_tti, srsFrame, srsSubframe, srsSlot;
+                        uint8_t  srsCtx;
+
+                        srs_tti     = p_xran_dev_ctx->ndm_srs_tti;
+                        num_eAxAntElm = xran_get_num_ant_elm(pHandle);
+                        ndm_step    = num_eAxAntElm / pSrsCfg->ndm_txduration;
+
+                        srsSlot     = XranGetSlotNum(srs_tti, SLOTNUM_PER_SUBFRAME(interval));
+                        srsSubframe = XranGetSubFrameNum(srs_tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
+                        srsFrame    = XranGetFrameNum(srs_tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+                        srsFrame    = (srsFrame & 0xff);
+                        srsCtx      =  srs_tti % XRAN_MAX_SECTIONDB_CTX;
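+                        /* NDM spreading, with illustrative numbers (not taken from config):
+                           64 antenna elements over ndm_txduration = 4 symbols gives
+                           ndm_step = 16, so symbol s carries SRS U-Plane for antenna
+                           elements [16*s, 16*(s+1)); once sym_id reaches ndm_txduration
+                           the bookkeeping below is simply cleared. */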
+
+                        if(sym_id < pSrsCfg->ndm_txduration)
+                        {
+                            for(ant_id=sym_id*ndm_step; ant_id < (sym_id+1)*ndm_step; ant_id++)
+                                xran_process_tx_srs_cp_off(pHandle, srsCtx, srs_tti, cc_id, ant_id, srsFrame, srsSubframe, srsSlot);
+                        }
+                        else
+                        {
+                            p_xran_dev_ctx->ndm_srs_scheduled   = 0;
+                            p_xran_dev_ctx->ndm_srs_tti         = 0;
+                            p_xran_dev_ctx->ndm_srs_txtti       = 0;
+                            p_xran_dev_ctx->ndm_srs_schedperiod = 0;
+                        }
+                    }
+                }
+            }
+        }
+        MLogXRANTask(PID_DISPATCH_TX_SYM, t1, MLogXRANTick());
+    }
+
+    return retval;
+}
+
+
+static inline uint16_t
+xran_tx_sym_from_ring(struct xran_device_ctx* p_xran_dev_ctx, struct rte_ring *r, uint16_t vf_id)
+{
+    struct rte_mbuf *mbufs[XRAN_MAX_MEM_IF_RING_SIZE];
+    uint16_t dequeued, sent = 0;
+    uint32_t remaining;
+    //long t1 = MLogXRANTick();
+
+    dequeued = rte_ring_dequeue_burst(r, (void **)mbufs, XRAN_MAX_MEM_IF_RING_SIZE,
+            &remaining);
+    if (!dequeued)
+        return 0;   /* Nothing to send. */
+
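+    /* rte_eth_tx_burst() may accept fewer mbufs than offered under back-pressure,
+       so keep retrying with the unsent tail until everything dequeued has been
+       handed to the NIC; the caller gets back the count still left in the ring. */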
+    while (1) {
+        //sent += p_xran_dev_ctx->send_upmbuf2ring(mbufs[sent], ETHER_TYPE_ECPRI, vf_id);
+        sent += rte_eth_tx_burst(vf_id, 0, &mbufs[sent], dequeued - sent);
+        if (sent == dequeued){
+            // MLogXRANTask(PID_REQUEUE_TX_SYM, t1, MLogXRANTick());
+            return remaining;
+        }
+    }
+}
+
+int32_t
+xran_process_tx_sym_cp_on_ring(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant,  int32_t num_ant, uint32_t frame_id,
+    uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
+    uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
+{
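+    /* bbu_offload path: drain the per-symbol / per-eAxC rings that
+       xran_process_tx_sym_cp_on_opt() fills and push the prepared U-Plane
+       mbufs straight out of the corresponding VF. */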
+    struct rte_ring *ring = NULL;
+    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
+    int32_t cc_id  = 0;
+    int32_t ant_id = 0;
+    uint16_t vf_id = 0;
+
+    for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++) {
+        for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++) {
+            vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
+            ring    = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pRing;
+            xran_tx_sym_from_ring(p_xran_dev_ctx, ring, vf_id);
+        }
+    }
+    return 0;
+}
+
+//#define TRANSMIT_BURST
+//#define ENABLE_DEBUG_COREDUMP
+
+#define ETHER_TYPE_ECPRI_BE (0xFEAE)
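+/* 0xAEFE is the IEEE-assigned EtherType for eCPRI; the constant is stored
+   byte-swapped (0xFEAE) so it can be written directly into the big-endian
+   ether_type field on a little-endian host. */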
+
+int32_t
+xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant,  int32_t num_ant, uint32_t frame_id,
+    uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
+    uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
+{
+    struct xran_up_pkt_gen_params *pxp;
+    struct data_section_hdr *pDataSec;
+    char* ext_buff;
+    void  *mb_base;
+    struct rte_ring *ring;
+    char* pStart;
+    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
+    struct xran_section_info* sectinfo;
+    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
+    struct rte_mbuf_ext_shared_info* p_share_data;
+    struct xran_sectioninfo_db* ptr_sect_elm = NULL;
+    struct rte_mbuf* mb_oran_hdr_ext = NULL;
+    struct xran_ecpri_hdr* ecpri_hdr = NULL;
+    //uint16_t* __restrict pSrc = NULL;
+    uint16_t* __restrict pDst = NULL;
+
+    uint16_t next;
+    uint16_t ext_buff_len = 0;
+    uint16_t iq_sample_size_bytes=0;
+    uint16_t num_sections = 0, total_sections = 0;
+    uint16_t n_bytes;
+    uint16_t elm_bytes = 0;
+    uint16_t section_id;
+    uint16_t nPktSize=0;
+    uint16_t cid;
+    uint16_t vf_id;
+    const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
+    uint8_t seq_id = 0;
+    uint8_t cc_id, ant_id;
+
+#ifdef TRANSMIT_BURST
+    uint16_t len = 0;
+#endif
+    //uint16_t len2 = 0, len_frag = 0;
+    uint8_t compMeth;
+    uint8_t iqWidth;
+    uint8_t parm_size;
+#ifdef TRANSMIT_BURST
+    struct mbuf_table  loc_tx_mbufs;
+    struct mbuf_table  loc_tx_mbufs_fragmented = {0};
 #endif
-                mb_oran_hdr_ext->buf_addr = ext_buff;
-                mb_oran_hdr_ext->buf_iova = ext_buff_iova + RTE_PTR_DIFF(ext_buff, mb_base);
-                mb_oran_hdr_ext->buf_len = ext_buff_len;
-                mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
-                mb_oran_hdr_ext->shinfo = p_share_data;
-                mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
-                mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
-                mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
-                mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
-
-                mb = (void*)mb_oran_hdr_ext;
-
-                to_free_mbuf = p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id];
-                p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id] = mb;
-                rte_pktmbuf_refcnt_update(mb, 1); /* make sure eth won't free our mbuf */
-                if (to_free_mbuf)
+    uint8_t fragNeeded=0;
+
+    const uint8_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
+    uint8_t comp_head_upd = 0;
+
+    const uint8_t total_header_size = (RTE_PKTMBUF_HEADROOM +
+        sizeof(struct xran_ecpri_hdr) +
+        sizeof(struct radio_app_common_hdr) +
+        sizeof(struct data_section_hdr));
+
+
+    for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++)
+    {
+        for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++)
+        {
+            ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][ant_id];
+            if (unlikely(ptr_sect_elm == NULL)){
+                rte_panic("ptr_sect_elm == NULL\n");
+                return (0);
+            }
+
+            if(0!=ptr_sect_elm->cur_index)
+            {
+                num_sections = ptr_sect_elm->cur_index;
+                /* iterate C-Plane configuration to generate corresponding U-Plane */
+                vf_id = p_xran_dev_ctx->map2vf[direction][cc_id][ant_id][XRAN_UP_VF];
+                mb_base = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
+                ring    = p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pRing;
+                if (unlikely(mb_base == NULL))
                 {
-                    rte_pktmbuf_free(to_free_mbuf);
+                    rte_panic("mb == NULL\n");
                 }
+                cid = ((cc_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ccId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ccId) | ((ant_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ruPortId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ruPortId);
+                cid = rte_cpu_to_be_16(cid);
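+                /* cid is written into ecpri_xtc_id (the eAxC identifier): cc_id and
+                   ant_id are shifted into their configured bit positions, masked and
+                   converted to network byte order.  Purely illustrative values: with
+                   bit_ccId = 4, mask_ccId = 0x00f0, bit_ruPortId = 0, mask_ruPortId = 0x000f,
+                   cc_id = 1 and ant_id = 2 give 0x0012, i.e. bytes 0x00 0x12 on the wire. */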
+
+#ifdef TRANSMIT_BURST
+                loc_tx_mbufs.len = 0;
+#endif
+                //len_frag = 0;
+#pragma loop_count min=1, max=16
+                for (next=0; next< num_sections; next++)
+                {
+                    sectinfo = &ptr_sect_elm->list[next];
 
-                pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
+                    if (unlikely(sectinfo == NULL)) {
+                        print_err("sectinfo == NULL\n");
+                        break;
+                    }
+                    if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
+                    {   /* only supports type 1 */
+                        print_err("Invalid section type in section DB - %d", sectinfo->type);
+                        continue;
+                    }
+                    /* skip, if not scheduled */
+                    if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
+                        continue;
+
+                    compMeth = sectinfo->compMeth;
+                    iqWidth = sectinfo->iqWidth;
+                    section_id = sectinfo->id;
+
+                    comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
+
+                    if(sectinfo->prbElemBegin || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable)
+                    {
+                        if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU)
+                            seq_id = xran_updl_seq_id_num[xran_port_id][cc_id][ant_id]++;
+                        else
+                            seq_id = xran_upul_seq_id_num[xran_port_id][cc_id][ant_id]++;
+                        iq_sample_size_bytes = 18 +   sizeof(struct xran_ecpri_hdr) +
+                                sizeof(struct radio_app_common_hdr);
+                    }
+
+
+                    if (compMeth)
+                    {
+                        iq_sample_size_bytes += sizeof(struct data_section_hdr) ;
+
+                        if (comp_head_upd)
+                        {
+                            iq_sample_size_bytes += sizeof(struct data_section_compression_hdr);
+                        }
+
+                        iq_sample_size_bytes += sectinfo->numPrbc*(iqWidth*3 + 1);
+                    }
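+                    /* iq_sample_size_bytes accumulates the projected on-wire packet size:
+                       a fixed L2 allowance of 18 bytes (presumably Ethernet header plus
+                       margin) + eCPRI + radio app header at prbElemBegin, then a section
+                       header, optional compression header and (3*iqWidth + 1) bytes per
+                       compressed PRB per section; it is compared against fh_init.mtu at
+                       prbElemEnd before the packet is queued. */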
 
-                ether_hdr = (struct rte_ether_hdr*)pStart;
+                    print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
+                            sectinfo->type, sectinfo->id, sectinfo->startPrbc,
+                            sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
 
-                /* Fill in the ethernet header. */
+
+#ifdef TRANSMIT_BURST
+                    len = loc_tx_mbufs.len;
+                    /* added for Klocwork */
+                    if (unlikely(len >= MBUF_TABLE_SIZE))
+                    {
+                        len = MBUF_TABLE_SIZE - 1;
+                        rte_panic("len >= MBUF_TABLE_SIZE\n");
+                    }
+#endif
+                    if(sectinfo->prbElemBegin || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable)
+                    {
+                        p_share_data = &p_xran_dev_ctx->share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][section_id];
+                        p_share_data->free_cb = extbuf_free_callback;
+                        p_share_data->fcb_opaque = NULL;
+                        rte_mbuf_ext_refcnt_set(p_share_data, 1);
+
+                        /* Create ethernet + eCPRI + radio app header */
+                        ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
+
+                        ext_buff = ((char*)p_xran_dev_ctx->sFrontHaulTxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData + sectinfo->sec_desc[sym_id].iq_buffer_offset) - total_header_size;
+                        ext_buff_len += (total_header_size + 18);
+
+                        if (comp_head_upd)
+                        {
+                            ext_buff -= sizeof(struct data_section_compression_hdr);
+                            ext_buff_len += sizeof(struct data_section_compression_hdr);
+                        }
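+                        /* Zero-copy layout: ext_buff points total_header_size bytes
+                           (mbuf headroom + eCPRI + radio app + section header, plus the
+                           optional compression header) ahead of this symbol's IQ samples
+                           in the BBU buffer, so all transport headers are written in
+                           place and no IQ data is copied. */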
+
+                        mb_oran_hdr_ext = rte_pktmbuf_alloc(_eth_mbuf_pool_vf_small[vf_id]);
+                        if (unlikely((mb_oran_hdr_ext) == NULL))
+                        {
+                            rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
+                        }
+
+#ifdef ENABLE_DEBUG_COREDUMP
+                        if (unlikely(((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova == 0))
+                        {
+                            rte_panic("Failed rte_mem_virt2iova\n");
+                        }
+                        if (unlikely(((rte_iova_t)((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova) == RTE_BAD_IOVA))
+                        {
+                            rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
+                        }
+#endif
+                        mb_oran_hdr_ext->buf_addr = ext_buff;
+                        mb_oran_hdr_ext->buf_iova = ((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova + RTE_PTR_DIFF(ext_buff, mb_base);
+                        mb_oran_hdr_ext->buf_len = ext_buff_len;
+                        mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
+                        mb_oran_hdr_ext->shinfo = p_share_data;
+                        mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
+                        mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
+                        mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
+                        mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
+
+                        /* release the mbuf parked here on the previous use of this slot,
+                           then park the new one and bump its refcnt */
+                        if (p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id])
+                        {
+                            rte_pktmbuf_free(p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id]);
+                        }
+                        p_xran_dev_ctx->to_free_mbuf[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id][sym_id][section_id] = (void*)mb_oran_hdr_ext;
+                        rte_pktmbuf_refcnt_update((void*)mb_oran_hdr_ext, 1); /* make sure eth won't free our mbuf */
+
+                        pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
+
+                        /* Fill in the ethernet header. */
 #ifndef TRANSMIT_BURST
-                rte_eth_macaddr_get(mb_oran_hdr_ext->port, &ether_hdr->s_addr);         /* set source addr */
-                ether_hdr->d_addr = eth_ctx->entities[vf_id][ID_O_RU];                  /* set dst addr */
-                ether_hdr->ether_type = ETHER_TYPE_ECPRI_BE;                            /* ethertype */
+                        rte_eth_macaddr_get(mb_oran_hdr_ext->port, &((struct rte_ether_hdr*)pStart)->s_addr);         /* set source addr */
+                        ((struct rte_ether_hdr*)pStart)->d_addr = eth_ctx->entities[vf_id][ID_O_RU];                  /* set dst addr */
+                        ((struct rte_ether_hdr*)pStart)->ether_type = ETHER_TYPE_ECPRI_BE;                            /* ethertype */
 #endif
-                iqWidth = (iqWidth == 0) ? 16 : iqWidth;
-                switch (compMeth)
-                {
+                        nPktSize = sizeof(struct rte_ether_hdr)
+                                                + sizeof(struct xran_ecpri_hdr)
+                                                + sizeof(struct radio_app_common_hdr) ;
+
+                        ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
+
+                        ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
+                        ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
+                        ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
+
+                        /* one to one lls-CU to RU only and band sector is the same */
+                        ecpri_hdr->ecpri_xtc_id = cid;
+
+                        /* no transport layer fragmentation supported */
+                        ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
+                        ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
+                        ecpri_hdr->cmnhdr.bits.ecpri_payl_size = sizeof(struct radio_app_common_hdr) + XRAN_ECPRI_HDR_SZ; /* xran_get_ecpri_hdr_size() */
+
+                    } /* if(sectinfo->prbElemBegin) */
+
+                    /* Prepare U-Plane section hdr */
+                    iqWidth = (iqWidth == 0) ? 16 : iqWidth;
+                    switch (compMeth)
+                    {
                     case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
                     case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
                     default:
                         parm_size = 0;
-                }
-                n_bytes = (3 * iqWidth + parm_size) * prb_num;
-                n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
-
-                nPktSize = sizeof(struct rte_ether_hdr)
-                        + sizeof(struct xran_ecpri_hdr)
-                        + sizeof(struct radio_app_common_hdr)
-                        + sizeof(struct data_section_hdr)
-                        + n_bytes;
-
-                if (comp_head_upd)
-                    nPktSize += sizeof(struct data_section_compression_hdr);
-
-                xp.sec_hdr.fields.sect_id = section_id;
-                xp.sec_hdr.fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(prb_num);
-                xp.sec_hdr.fields.start_prbu = (uint8_t)prb_start;
-                xp.sec_hdr.fields.sym_inc = 0;
-                xp.sec_hdr.fields.rb = 0;
-                    /* network byte order */
-                xp.sec_hdr.fields.all_bits = rte_cpu_to_be_32(xp.sec_hdr.fields.all_bits);
+                    }
 
-                    /* compression */
-                xp.compr_hdr_param.ud_comp_hdr.ud_comp_meth = compMeth;
-                xp.compr_hdr_param.ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
-                xp.compr_hdr_param.rsrvd = 0;
-
-                ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
-
-                ecpri_payl_size = n_bytes
-                    + sizeof(struct data_section_hdr)
-                    + sizeof(struct radio_app_common_hdr)
-                    + XRAN_ECPRI_HDR_SZ; //xran_get_ecpri_hdr_size();
-
-                if (comp_head_upd)
-                    ecpri_payl_size += sizeof(struct data_section_compression_hdr);
-
-                ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
-                ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
-                ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
-                ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_payl_size);
-
-                    /* one to one lls-CU to RU only and band sector is the same */
-                ecpri_hdr->ecpri_xtc_id = cid;
-
-                    /* no transport layer fragmentation supported */
-                ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
-                ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
-
-                pSrc = (uint16_t*)&(xp.app_params);
-                pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
-                *pDst++ = *pSrc++;
-                *pDst++ = *pSrc++;
-                *pDst++ = *pSrc++;
-                *pDst++ = *pSrc++;
-                if (comp_head_upd)
-                {
-                    *pDst++ = *pSrc++;
-                }
+                    /* IQ payload per PRB: 12 REs * 2 (I/Q) * iqWidth bits = 3*iqWidth bytes, plus parm_size byte(s) of per-PRB compression parameter */
+                    n_bytes = (3 * iqWidth + parm_size) * sectinfo->numPrbc;
+                    n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
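+                    /* Worked example with illustrative values only: 9-bit BFP gives
+                       3*9 + 1 = 28 bytes per PRB, so a 64-PRB section carries
+                       28 * 64 = 1792 bytes of IQ payload. */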
 
-                rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
-                rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
+                    /* Ethernet & eCPRI added already */
+                    nPktSize += sizeof(struct data_section_hdr) + n_bytes;
 
-                elm_bytes += nPktSize;
+                    if (comp_head_upd)
+                        nPktSize += sizeof(struct data_section_compression_hdr);
 
-                /* Restore fragmentation support in this code version */
-                /* if we don't need to do any fragmentation */
-                if (likely(p_xran_dev_ctx->fh_init.mtu >= sectinfo->numPrbc * (3 * iq_sample_size_bits + 1)))
-                {
-                    /* no fragmentation */
-                    len2 = 1;
-#ifdef TRANSMIT_BURST
-                    loc_tx_mbufs.m_table[len++] = mb;
-                    if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM))
+                    if(likely((ecpri_hdr!=NULL)))
                     {
-                        rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
+                        ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_hdr) + n_bytes ;
+
+                        if (comp_head_upd)
+                            ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_compression_hdr);
                     }
-                    loc_tx_mbufs.len = len;
-#else
-                    xran_enqueue_mbuf(mb_oran_hdr_ext, eth_ctx->tx_ring[vf_id]);
-#endif
-                }
-                else
-                {
-                    /* fragmentation */
-                    /* only burst transmission mode is supported for fragmented packets*/
-                    uint8_t* p_seq_num = &xran_updl_seq_id_num[xran_port_id][cc_id][ant_id];
-                    (*p_seq_num)--;
-
-                    len2 = xran_app_fragment_packet(mb_oran_hdr_ext,
-                        &loc_tx_mbufs_fragmented.m_table[len_frag],
-                        (uint16_t)(MBUF_TABLE_SIZE - len_frag),
-                        p_xran_dev_ctx->fh_init.mtu,
-                        p_xran_dev_ctx->direct_pool,
-                        p_xran_dev_ctx->indirect_pool,
-                        prb_start,
-                        prb_num,
-                        p_seq_num,
-                        iqWidth,
-                        ((iqWidth == 16) || (compType == XRAN_COMP_HDR_TYPE_STATIC)) ? 0 : 1);
-
-                    /* Free input packet */
-                    rte_pktmbuf_free(mb_oran_hdr_ext);
-
-                    /* If we fail to fragment the packet */
-                    if (unlikely(len2 < 0))
+                    else
                     {
-                        print_err("len2= %d\n", len2);
-                        continue;
+                        print_err("ecpri_hdr should not be NULL\n");
+                    }
+                    //ecpri_hdr->cmnhdr.bits.ecpri_payl_size += ecpri_payl_size;
+
+                    /* compression */
+
+                    if(sectinfo->prbElemBegin || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable)
+                    {
+                        pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
+                        pxp = (struct xran_up_pkt_gen_params *)pDst;
+                        /* radio app header */
+                        pxp->app_params.data_feature.value = 0x10;
+                        pxp->app_params.data_feature.data_direction = direction;
+                        pxp->app_params.frame_id = frame_id;
+                        pxp->app_params.sf_slot_sym.subframe_id = subframe_id;
+                        pxp->app_params.sf_slot_sym.slot_id = slot_id;
+                        pxp->app_params.sf_slot_sym.symb_id = sym_id;
+                        /* convert to network byte order */
+                        pxp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(pxp->app_params.sf_slot_sym.value);
+                        pDst += 2;
+                    }
+
+                    pDataSec = (struct data_section_hdr *)pDst;
+                    if(pDataSec){
+                        pDataSec->fields.sect_id = section_id;
+                        pDataSec->fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(sectinfo->numPrbc);
+                        pDataSec->fields.start_prbu = (sectinfo->startPrbc & 0x03ff);
+                        pDataSec->fields.sym_inc = 0;
+                        pDataSec->fields.rb = 0;
+                        /* network byte order */
+                        pDataSec->fields.all_bits = rte_cpu_to_be_32(pDataSec->fields.all_bits);
+                        pDst += 2;
                     }
-                    if (unlikely(len2 > 1))
+                    else
+                    {
+                        print_err("pDataSec is NULL idx = %u num_sections = %u\n", next, num_sections);
+                        // return 0;
+                    }
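+                    /* The 32-bit data section header packs sectionId (12 bits), rb (1),
+                       symInc (1), startPrbu (10) and numPrbu (8), hence the 0x03ff mask
+                       on startPrbc and the uint8_t cast on numPrbu above. */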
+
+                    if (comp_head_upd)
+                    {
+                        if(pDst == NULL){
+                            print_err("pDst == NULL\n");
+                            return 0;
+                        }
+                        ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_comp_meth = compMeth;
+                        ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
+                        ((struct data_section_compression_hdr *)pDst)->rsrvd = 0;
+                        pDst++;
+                    }
+
+                    //Increment by IQ data len
+                    pDst = (uint16_t *)((uint8_t *)pDst + n_bytes) ;
+                    if(mb_oran_hdr_ext){
+                        rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
+                        rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
+                    }
+
+                    if(sectinfo->prbElemEnd || p_xran_dev_ctx->RunSlotPrbMapBySymbolEnable) /* Transmit the packet */
                     {
-                        for (int32_t i = len_frag; i < len_frag + len2; i++)
+                        if(likely((ecpri_hdr!=NULL)))
+                            ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_hdr->cmnhdr.bits.ecpri_payl_size);
+                        else
+                            print_err("ecpri_hdr should not be NULL\n");
+                        /* if we don't need to do any fragmentation */
+                        if (likely(p_xran_dev_ctx->fh_init.mtu >= (iq_sample_size_bytes)))
                         {
-                            struct rte_mbuf* m;
-                            m = loc_tx_mbufs_fragmented.m_table[i];
-                            struct rte_ether_hdr* eth_hdr = (struct rte_ether_hdr*)
-                                rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct rte_ether_hdr));
-                            if (eth_hdr == NULL)
+                            /* no fragmentation */
+                            //len2 = 1;
+#ifdef TRANSMIT_BURST
+                            loc_tx_mbufs.m_table[len++] = (void*)mb_oran_hdr_ext;
+                            if (unlikely(len > XRAN_MAX_PKT_BURST_PER_SYM))
                             {
-                                rte_panic("No headroom in mbuf.\n");
+                                rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
+                            }
+                            loc_tx_mbufs.len = len;
+#else
+
+                            if(p_xran_dev_ctx->fh_init.io_cfg.bbu_offload){
+                                rte_ring_enqueue(ring, mb_oran_hdr_ext);
+                            } else {
+                                xran_enqueue_mbuf(mb_oran_hdr_ext, eth_ctx->tx_ring[vf_id]);
                             }
+#endif
                         }
-                    }
+                        else
+                        {
+                            /* this path should not need fragmentation: section allocation already splits PRB elements so each packet fits the MTU */
+                            // print_err("should not go into fragmentation mtu %d packet size %d\n", p_xran_dev_ctx->fh_init.mtu, sectinfo->numPrbc * (3*iq_sample_size_bits + 1));
+                            return 0;
+                        }
+                        elm_bytes += nPktSize;
+                    } /* if(prbElemEnd) */
+                }/* section loop */
+            } /* if ptr_sect_elm->cur_index */
 
-                    len_frag += len2;
-                    if (unlikely(len_frag > XRAN_MAX_PKT_BURST_PER_SYM)) {
-                        rte_panic("XRAN_MAX_PKT_BURST_PER_SYM\n");
-                    }
-                    loc_tx_mbufs_fragmented.len = len_frag;
-                }
-            } /* section loop */
             total_sections += num_sections;
 
             /* Transmit packets */
@@ -1315,19 +1688,28 @@ int32_t xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tt
             {
                 for (int32_t i = 0; i < loc_tx_mbufs.len; i++)
                 {
-                    p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs.m_table[i], ETHER_TYPE_ECPRI, vf_id);
+                    if(p_xran_dev_ctx->fh_init.io_cfg.bbu_offload){
+                        rte_ring_enqueue(ring, loc_tx_mbufs.m_table[i]);
+                    } else {
+                        p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs.m_table[i], ETHER_TYPE_ECPRI, vf_id);
+                    }
                 }
                 loc_tx_mbufs.len = 0;
             }
 #endif
             /* Transmit fragmented packets */
-            if (unlikely(loc_tx_mbufs_fragmented.len))
+            if (unlikely(fragNeeded))
             {
+#if 0   /* there is no logic populating loc_tx_mbufs_fragmented, so this code is disabled */
                 for (int32_t i = 0; i < loc_tx_mbufs_fragmented.len; i++)
                 {
-                    p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs_fragmented.m_table[i], ETHER_TYPE_ECPRI, vf_id);
+                    if(p_xran_dev_ctx->fh_init.io_cfg.bbu_offload){
+                        rte_ring_enqueue(ring, loc_tx_mbufs_fragmented.m_table[i]);
+                    } else {
+                        p_xran_dev_ctx->send_upmbuf2ring(loc_tx_mbufs_fragmented.m_table[i], ETHER_TYPE_ECPRI, vf_id);
+                    }
                 }
-                loc_tx_mbufs_fragmented.len = 0;
+#endif
             }
         } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
     } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
@@ -1339,6 +1721,309 @@ int32_t xran_process_tx_sym_cp_on_opt(void* pHandle, uint8_t ctx_id, uint32_t tt
     return 1;
 }
 
+int32_t
+xran_process_tx_srs_cp_on(void* pHandle, uint8_t ctx_id, uint32_t tti, int32_t start_cc, int32_t num_cc, int32_t start_ant,  int32_t num_ant, uint32_t frame_id,
+    uint32_t subframe_id, uint32_t slot_id, uint32_t sym_id, enum xran_comp_hdr_type compType, enum xran_pkt_dir direction,
+    uint16_t xran_port_id, PSECTION_DB_TYPE p_sec_db)
+{
+    struct xran_up_pkt_gen_params *pxp;
+    struct data_section_hdr *pDataSec;
+    int32_t antElm_eAxC_id = 0;//  = ant_id + p_srs_cfg->eAxC_offset;
+
+    struct xran_srs_config *p_srs_cfg;
+
+    char* ext_buff;
+    void  *mb_base;
+    char* pStart;
+    struct xran_ethdi_ctx* eth_ctx = xran_ethdi_get_ctx();
+    struct xran_section_info* sectinfo;
+    struct xran_device_ctx* p_xran_dev_ctx = (struct xran_device_ctx*)pHandle;
+    p_srs_cfg       = &(p_xran_dev_ctx->srs_cfg);
+    struct rte_mbuf_ext_shared_info* p_share_data;
+    struct xran_sectioninfo_db* ptr_sect_elm = NULL;
+    struct rte_mbuf* mb_oran_hdr_ext = NULL;
+    struct xran_ecpri_hdr* ecpri_hdr = NULL;
+    uint16_t* __restrict pDst = NULL;
+
+    uint16_t next;
+    uint16_t ext_buff_len = 0;
+    uint16_t iq_sample_size_bytes=0;
+    uint16_t num_sections = 0, total_sections = 0;
+    uint16_t n_bytes;
+    uint16_t elm_bytes = 0;
+    uint16_t section_id;
+    uint16_t nPktSize=0;
+    uint16_t cid;
+    uint16_t vf_id;
+    const int16_t rte_mempool_objhdr_size = sizeof(struct rte_mempool_objhdr);
+    uint8_t seq_id = 0;
+    uint8_t cc_id, ant_id;
+    uint8_t compMeth;
+    uint8_t iqWidth;
+    uint8_t parm_size;
+
+    const uint8_t rte_ether_hdr_size = sizeof(struct rte_ether_hdr);
+    uint8_t comp_head_upd = 0;
+
+    const uint8_t total_header_size = (RTE_PKTMBUF_HEADROOM +
+        sizeof(struct xran_ecpri_hdr) +
+        sizeof(struct radio_app_common_hdr) +
+        sizeof(struct data_section_hdr));
+
+    for (cc_id = start_cc; cc_id < (start_cc + num_cc); cc_id++)
+    {
+        for (ant_id = start_ant; ant_id < (start_ant + num_ant); ant_id++)
+        {
+            antElm_eAxC_id  = ant_id + p_srs_cfg->eAxC_offset;
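+            /* SRS streams use a dedicated eAxC range: the antenna-element index is
+               offset by srs_cfg.eAxC_offset to form the SRS eAxC id used for the
+               section DB lookup, VF mapping and ecpri_xtc_id below. */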
+            ptr_sect_elm = p_sec_db->p_sectiondb_elm[ctx_id][direction][cc_id][antElm_eAxC_id];
+
+            if (unlikely(ptr_sect_elm == NULL)){
+                printf("ant_id = %d ctx_id = %d,start_ant = %d, num_ant = %d, antElm_eAxC_id = %d\n",ant_id,ctx_id,start_ant,num_ant,antElm_eAxC_id);
+                rte_panic("ptr_sect_elm == NULL\n");
+                return (0);
+            }
+            if(0!=ptr_sect_elm->cur_index)
+            {
+                num_sections = ptr_sect_elm->cur_index;
+                /* iterate C-Plane configuration to generate corresponding U-Plane */
+                vf_id = xran_map_ecpriPcid_to_vf(p_xran_dev_ctx, direction, cc_id, antElm_eAxC_id);//p_xran_dev_ctx->map2vf[direction][cc_id][antElm_eAxC_id][XRAN_UP_VF];
+                mb_base = p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pCtrl;
+                if (unlikely(mb_base == NULL))
+                {
+                    rte_panic("mb == NULL\n");
+                }
+                cid = ((cc_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ccId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ccId) | ((antElm_eAxC_id << p_xran_dev_ctx->eAxc_id_cfg.bit_ruPortId) & p_xran_dev_ctx->eAxc_id_cfg.mask_ruPortId);
+                cid = rte_cpu_to_be_16(cid);
+#pragma loop_count min=1, max=16
+                for (next=0; next< num_sections; next++)
+                {
+                    sectinfo = &ptr_sect_elm->list[next];
+
+                    if (unlikely(sectinfo == NULL)) {
+                        print_err("sectinfo == NULL\n");
+                        break;
+                    }
+                    if (unlikely(sectinfo->type != XRAN_CP_SECTIONTYPE_1))
+                    {   /* only supports type 1 */
+                        print_err("Invalid section type in section DB - %d", sectinfo->type);
+                        continue;
+                    }
+                    /* skip, if not scheduled */
+                    if (unlikely(sym_id < sectinfo->startSymId || sym_id >= sectinfo->startSymId + sectinfo->numSymbol))
+                        continue;
+                    compMeth = sectinfo->compMeth;
+                    iqWidth = sectinfo->iqWidth;
+                    section_id = sectinfo->id;
+
+                    comp_head_upd = ((compMeth != XRAN_COMPMETHOD_NONE) && (compType == XRAN_COMP_HDR_TYPE_DYNAMIC));
+
+                    if(sectinfo->prbElemBegin)
+                    {
+                        seq_id = xran_get_upul_seqid(pHandle, cc_id, antElm_eAxC_id);
+                        iq_sample_size_bytes = 18 +   sizeof(struct xran_ecpri_hdr) +
+                                sizeof(struct radio_app_common_hdr);
+                    }
+
+                    if (compMeth)
+                    {
+                        iq_sample_size_bytes += sizeof(struct data_section_hdr) ;
+
+                        if (comp_head_upd)
+                        {
+                            iq_sample_size_bytes += sizeof(struct data_section_compression_hdr);
+                        }
+
+                        iq_sample_size_bytes += sectinfo->numPrbc*(iqWidth*3 + 1);
+                    }
+
+                    print_dbg(">>> sym %2d [%d] type%d id %d startPrbc=%d numPrbc=%d startSymId=%d numSymbol=%d\n", sym_id, next,
+                            sectinfo->type, sectinfo->id, sectinfo->startPrbc,
+                            sectinfo->numPrbc, sectinfo->startSymId, sectinfo->numSymbol);
+
+                    if(sectinfo->prbElemBegin)
+                    {
+                        p_share_data    = &p_xran_dev_ctx->srs_share_data.sh_data[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id];
+                        p_share_data->free_cb = extbuf_free_callback;
+                        p_share_data->fcb_opaque = NULL;
+                        rte_mbuf_ext_refcnt_set(p_share_data, 1);
+
+                        /* Create ethernet + eCPRI + radio app header */
+                        ext_buff_len = sectinfo->sec_desc[sym_id].iq_buffer_len;
+
+                        ext_buff = ((char*)p_xran_dev_ctx->sFHSrsRxBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][ant_id].sBufferList.pBuffers[sym_id].pData + sectinfo->sec_desc[sym_id].iq_buffer_offset) - total_header_size;
+                        ext_buff_len += (total_header_size + 18);
+
+                        if (comp_head_upd)
+                        {
+                            ext_buff -= sizeof(struct data_section_compression_hdr);
+                            ext_buff_len += sizeof(struct data_section_compression_hdr);
+                        }
+
+                        mb_oran_hdr_ext = xran_ethdi_mbuf_indir_alloc();
+                        if (unlikely((mb_oran_hdr_ext) == NULL))
+                        {
+                            rte_panic("[core %d]Failed rte_pktmbuf_alloc on vf %d\n", rte_lcore_id(), vf_id);
+                        }
+
+#ifdef ENABLE_DEBUG_COREDUMP
+                        if (unlikely(((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova == 0))
+                        {
+                            rte_panic("Failed rte_mem_virt2iova\n");
+                        }
+                        if (unlikely(((rte_iova_t)((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova) == RTE_BAD_IOVA))
+                        {
+                            rte_panic("Failed rte_mem_virt2iova RTE_BAD_IOVA \n");
+                        }
+#endif
+                        mb_oran_hdr_ext->buf_addr = ext_buff;
+                        mb_oran_hdr_ext->buf_iova = ((struct rte_mempool_objhdr*)RTE_PTR_SUB(mb_base, rte_mempool_objhdr_size))->iova + RTE_PTR_DIFF(ext_buff, mb_base);
+                        mb_oran_hdr_ext->buf_len = ext_buff_len;
+                        mb_oran_hdr_ext->ol_flags |= EXT_ATTACHED_MBUF;
+                        mb_oran_hdr_ext->shinfo = p_share_data;
+                        mb_oran_hdr_ext->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, (uint16_t)mb_oran_hdr_ext->buf_len) - rte_ether_hdr_size;
+                        mb_oran_hdr_ext->data_len = (uint16_t)(mb_oran_hdr_ext->data_len + rte_ether_hdr_size);
+                        mb_oran_hdr_ext->pkt_len = mb_oran_hdr_ext->pkt_len + rte_ether_hdr_size;
+                        mb_oran_hdr_ext->port = eth_ctx->io_cfg.port[vf_id];
+                        pStart = (char*)((char*)mb_oran_hdr_ext->buf_addr + mb_oran_hdr_ext->data_off);
+
+                        /* Fill in the ethernet header. */
+                        rte_eth_macaddr_get(mb_oran_hdr_ext->port, &((struct rte_ether_hdr*)pStart)->s_addr);         /* set source addr */
+                        ((struct rte_ether_hdr*)pStart)->d_addr = eth_ctx->entities[vf_id][ID_O_RU];                  /* set dst addr */
+                        ((struct rte_ether_hdr*)pStart)->ether_type = ETHER_TYPE_ECPRI_BE;                            /* ethertype */
+
+                        nPktSize = sizeof(struct rte_ether_hdr)
+                                                + sizeof(struct xran_ecpri_hdr)
+                                                + sizeof(struct radio_app_common_hdr) ;
+
+                        ecpri_hdr = (struct xran_ecpri_hdr*)(pStart + sizeof(struct rte_ether_hdr));
+
+                        ecpri_hdr->cmnhdr.data.data_num_1 = 0x0;
+                        ecpri_hdr->cmnhdr.bits.ecpri_ver = XRAN_ECPRI_VER;
+                        ecpri_hdr->cmnhdr.bits.ecpri_mesg_type = ECPRI_IQ_DATA;
+
+                        /* one to one lls-CU to RU only and band sector is the same */
+                        ecpri_hdr->ecpri_xtc_id = cid;
+
+                        /* no transport layer fragmentation supported */
+                        ecpri_hdr->ecpri_seq_id.data.data_num_1 = 0x8000;
+                        ecpri_hdr->ecpri_seq_id.bits.seq_id = seq_id;
+                        ecpri_hdr->cmnhdr.bits.ecpri_payl_size = sizeof(struct radio_app_common_hdr) + XRAN_ECPRI_HDR_SZ; /* xran_get_ecpri_hdr_size() */
+
+                    } /* if(sectinfo->prbElemBegin) */
+
+                    /* Prepare U-Plane section hdr */
+                    iqWidth = (iqWidth == 0) ? 16 : iqWidth;
+                    switch (compMeth)
+                    {
+                    case XRAN_COMPMETHOD_BLKFLOAT:      parm_size = 1; break;
+                    case XRAN_COMPMETHOD_MODULATION:    parm_size = 0; break;
+                    default:
+                        parm_size = 0;
+                    }
+
+                    n_bytes = (3 * iqWidth + parm_size) * sectinfo->numPrbc;
+                    n_bytes = RTE_MIN(n_bytes, XRAN_MAX_MBUF_LEN);
+
+                    /* Ethernet & eCPRI added already */
+                    nPktSize += sizeof(struct data_section_hdr) + n_bytes;
+
+                    if (comp_head_upd)
+                        nPktSize += sizeof(struct data_section_compression_hdr);
+
+                    if(likely((ecpri_hdr!=NULL)))
+                    {
+                        ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_hdr) + n_bytes ;
+
+                        if (comp_head_upd)
+                            ecpri_hdr->cmnhdr.bits.ecpri_payl_size += sizeof(struct data_section_compression_hdr);
+                    }
+                    else
+                    {
+                        print_err("ecpri_hdr should not be NULL\n");
+                    }
+
+                    if(sectinfo->prbElemBegin)
+                    {
+                        pDst = (uint16_t*)(pStart + sizeof(struct rte_ether_hdr) + sizeof(struct xran_ecpri_hdr));
+                        pxp = (struct xran_up_pkt_gen_params *)pDst;
+                        /* radio app header */
+                        pxp->app_params.data_feature.value = 0x10;
+                        pxp->app_params.data_feature.data_direction = direction;
+                        pxp->app_params.frame_id = frame_id;
+                        pxp->app_params.sf_slot_sym.subframe_id = subframe_id;
+                        pxp->app_params.sf_slot_sym.slot_id = slot_id;
+                        pxp->app_params.sf_slot_sym.symb_id = sym_id;
+                        /* convert to network byte order */
+                        pxp->app_params.sf_slot_sym.value = rte_cpu_to_be_16(pxp->app_params.sf_slot_sym.value);
+                        pDst += 2;
+                    }
+
+                    pDataSec = (struct data_section_hdr *)pDst;
+                    if(pDataSec){
+                        pDataSec->fields.sect_id = section_id;
+                        pDataSec->fields.num_prbu = (uint8_t)XRAN_CONVERT_NUMPRBC(sectinfo->numPrbc);
+                        pDataSec->fields.start_prbu = (sectinfo->startPrbc & 0x03ff);
+                        pDataSec->fields.sym_inc = 0;
+                        pDataSec->fields.rb = 0;
+                        /* network byte order */
+                        pDataSec->fields.all_bits = rte_cpu_to_be_32(pDataSec->fields.all_bits);
+                        pDst += 2;
+                    }
+                    else
+                    {
+                        print_err("pDataSec is NULL idx = %u num_sections = %u\n", next, num_sections);
+                        // return 0;
+                    }
+
+                    if (comp_head_upd)
+                    {
+                        if(pDst == NULL){
+                            print_err("pDst == NULL\n");
+                            return 0;
+                        }
+                        ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_comp_meth = compMeth;
+                        ((struct data_section_compression_hdr *)pDst)->ud_comp_hdr.ud_iq_width = XRAN_CONVERT_IQWIDTH(iqWidth);
+                        ((struct data_section_compression_hdr *)pDst)->rsrvd = 0;
+                        pDst++;
+                    }
+
+                    //Increment by IQ data len
+                    pDst = (uint16_t *)((uint8_t *)pDst + n_bytes) ;
+                    if(mb_oran_hdr_ext){
+                        rte_pktmbuf_pkt_len(mb_oran_hdr_ext) = nPktSize;
+                        rte_pktmbuf_data_len(mb_oran_hdr_ext) = nPktSize;
+                    }
+
+                    if(sectinfo->prbElemEnd) /* Transmit the packet */
+                    {
+                        if(likely((ecpri_hdr!=NULL)))
+                            ecpri_hdr->cmnhdr.bits.ecpri_payl_size = rte_cpu_to_be_16(ecpri_hdr->cmnhdr.bits.ecpri_payl_size);
+                        else
+                            print_err("ecpri_hdr should not be NULL\n");
+                        /* if we don't need to do any fragmentation */
+                        if (likely(p_xran_dev_ctx->fh_init.mtu >= (iq_sample_size_bytes)))
+                        {
+                            p_xran_dev_ctx->send_upmbuf2ring(mb_oran_hdr_ext, ETHER_TYPE_ECPRI, vf_id);
+                        }
+                        else
+                        {
+                            return 0;
+                        }
+                        elm_bytes += nPktSize;
+                    } /* if(prbElemEnd) */
+                }/* section loop */
+            } /* if ptr_sect_elm->cur_index */
+            total_sections += num_sections;
+        } /* for(cc_id = 0; cc_id < num_CCPorts; cc_id++) */
+    } /* for(ant_id = 0; ant_id < num_eAxc; ant_id++) */
+
+    struct xran_common_counters* pCnt = &p_xran_dev_ctx->fh_counters;
+    pCnt->tx_counter += total_sections;
+    pCnt->tx_bytes_counter += elm_bytes;
+    return 1;
+}
+
+
 
 int32_t xran_process_tx_sym(void *arg)
 {
@@ -1349,12 +2034,13 @@ int32_t xran_process_tx_sym(void *arg)
     uint32_t    mlogVar[15];
     uint32_t    mlogVarCnt = 0;
 #endif
-    unsigned long t1 = MLogTick();
+    unsigned long t1 = MLogXRANTick();
 
     void        *pHandle = NULL;
     int32_t     ant_id   = 0;
     int32_t     cc_id    = 0;
     uint8_t     num_eAxc = 0;
+    uint8_t     num_eAxc_prach = 0;
     uint8_t     num_eAxAntElm = 0;
     uint8_t     num_CCPorts = 0;
     uint32_t    frame_id    = 0;
@@ -1362,7 +2048,6 @@ int32_t xran_process_tx_sym(void *arg)
     uint32_t    slot_id     = 0;
     uint32_t    sym_id      = 0;
     uint32_t    sym_idx     = 0;
-
     uint8_t     ctx_id;
     struct xran_device_ctx * p_xran_dev_ctx = (struct xran_device_ctx *) arg;
     enum xran_in_period inPeriod;
@@ -1383,12 +2068,12 @@ int32_t xran_process_tx_sym(void *arg)
     subframe_id = XranGetSubFrameNum(tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
 
     uint16_t sfnSecStart = xran_getSfnSecStart();
-    if (unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
+    if(unlikely(inPeriod == XRAN_IN_NEXT_PERIOD))
     {
         // For DU
         sfnSecStart = (sfnSecStart + NUM_OF_FRAMES_PER_SECOND) & 0x3ff;
     }
-    else if (unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
+    else if(unlikely(inPeriod == XRAN_IN_PREV_PERIOD))
     {
         // For RU
         if (sfnSecStart >= NUM_OF_FRAMES_PER_SECOND)
@@ -1405,7 +2090,7 @@ int32_t xran_process_tx_sym(void *arg)
     frame_id = (frame_id & 0xff);
 
     sym_id      = XranGetSymNum(sym_idx, XRAN_NUM_OF_SYMBOL_PER_SLOT);
-    ctx_id      = XranGetSlotNum(tti, SLOTS_PER_SYSTEMFRAME(interval)) % XRAN_MAX_SECTIONDB_CTX;
+    ctx_id      = tti % XRAN_MAX_SECTIONDB_CTX;
 
     print_dbg("[%d]SFN %d sf %d slot %d\n", tti, frame_id, subframe_id, slot_id);
 
@@ -1423,67 +2108,62 @@ int32_t xran_process_tx_sym(void *arg)
     MLogAddVariables(mlogVarCnt, mlogVar, MLogTick());
 #endif
 
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
+    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU
+            && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B)
+    {
             num_eAxc    = xran_get_num_eAxcUl(pHandle);
-    } else {
+    }
+    else
+    {
             num_eAxc    = xran_get_num_eAxc(pHandle);
     }
 
+    num_eAxc_prach = ((num_eAxc > XRAN_MAX_PRACH_ANT_NUM)? XRAN_MAX_PRACH_ANT_NUM : num_eAxc);
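+    /* PRACH U-Plane is generated on at most XRAN_MAX_PRACH_ANT_NUM eAxCs */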
     num_CCPorts = xran_get_num_cc(pHandle);
 
     /* U-Plane */
     if(p_xran_dev_ctx->fh_init.io_cfg.id == O_DU && p_xran_dev_ctx->enableCP)
     {
-        if(p_xran_dev_ctx->tx_sym_gen_func) {
+        if(p_xran_dev_ctx->tx_sym_gen_func)
+        {
             enum xran_comp_hdr_type compType;
-            enum xran_pkt_dir direction;
-            uint32_t prb_num, loc_ret = 1;
+            uint8_t loc_ret = 1;
             uint16_t xran_port_id;
             PSECTION_DB_TYPE p_sec_db = NULL;
 
             compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
 
-            if (p_xran_dev_ctx->fh_init.io_cfg.id == O_DU) {
-                direction = XRAN_DIR_DL; /* O-DU */
-                prb_num = p_xran_dev_ctx->fh_cfg.nDLRBs;
-            }
-            else {
-                direction = XRAN_DIR_UL; /* RU */
-                prb_num = p_xran_dev_ctx->fh_cfg.nULRBs;
-            }
-
-            if (unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM)) {
+            if(unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM))
+            {
                 print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
                 loc_ret = 0;
             }
 
-            if (unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX)) {
+            if(unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX))
+            {
                 print_err("Invalid Context id - %d", ctx_id);
                 loc_ret = 0;
             }
 
-            if (unlikely(direction > XRAN_DIR_MAX)) {
-                print_err("Invalid direction - %d", direction);
-                loc_ret = 0;
-            }
-
-            if (unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX)) {
+            if(unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX))
+            {
                 print_err("Invalid CC id - %d", num_CCPorts);
                 loc_ret = 0;
             }
 
-            if (unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR))) {
+            if(unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR)))
+            {
                 print_err("Invalid eAxC id - %d", num_eAxc);
                 loc_ret = 0;
             }
 
             xran_port_id = p_xran_dev_ctx->xran_port_id;
-            p_sec_db = p_sectiondb[p_xran_dev_ctx->xran_port_id];
+            p_sec_db = p_sectiondb[xran_port_id];
 
-            if (loc_ret)
-            {
-                retval = p_xran_dev_ctx->tx_sym_gen_func(pHandle, ctx_id, tti, num_CCPorts, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
-                    compType, direction, xran_port_id, p_sec_db);
+            if (loc_ret) {
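+                /* invoke the configured U-Plane generation callback for all CCs and eAxCs of this port */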
+                p_xran_dev_ctx->tx_sym_gen_func(pHandle, ctx_id, tti,
+                                    0, num_CCPorts, 0, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
+                                    compType, XRAN_DIR_DL, xran_port_id, p_sec_db);
             }
             else
             {
@@ -1495,45 +2175,244 @@ int32_t xran_process_tx_sym(void *arg)
             rte_panic("p_xran_dev_ctx->tx_sym_gen_func== NULL\n");
          }
     }
-    else
+    else if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && p_xran_dev_ctx->enableCP)
     {
-        for (ant_id = 0; ant_id < num_eAxc; ant_id++)
-        {
-            for (cc_id = 0; cc_id < num_CCPorts; cc_id++)
-            {
-                struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
+        if(first_call) {
+            enum xran_comp_hdr_type compType;
+            uint16_t xran_port_id;
+            PSECTION_DB_TYPE p_sec_db = NULL;
 
-                if(p_xran_dev_ctx->puschMaskEnable)
-                {
-                    if((tti % numSlotMu1 == p_xran_dev_ctx->puschMaskSlot))
-                        ;
+            if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) ==  1
+                || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1
+                || xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_FDD) ==  1){
+
+                if(xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_UL
+                    || xran_fs_get_symbol_type(PortId, cc_id, tti, sym_id) == XRAN_SYMBOL_TYPE_FDD){
+
+                    uint8_t loc_ret = 1;
+                    compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
+                    if(unlikely(p_xran_dev_ctx->xran_port_id > XRAN_PORTS_NUM))
+                    {
+                        print_err("Invalid Port id - %d", p_xran_dev_ctx->xran_port_id);
+                        loc_ret = 0;
+                    }
+
+                    if(unlikely(ctx_id > XRAN_MAX_SECTIONDB_CTX))
+                    {
+                        print_err("Invalid Context id - %d", ctx_id);
+                        loc_ret = 0;
+                    }
+
+                    if(unlikely(num_CCPorts > XRAN_COMPONENT_CARRIERS_MAX))
+                    {
+                        print_err("Invalid CC id - %d", num_CCPorts);
+                        loc_ret = 0;
+                    }
+
+                    if(unlikely(num_eAxc > (XRAN_MAX_ANTENNA_NR * 2 + XRAN_MAX_ANT_ARRAY_ELM_NR)))
+                    {
+                        print_err("Invalid eAxC id - %d", num_eAxc);
+                        loc_ret = 0;
+                    }
+
+                    xran_port_id = p_xran_dev_ctx->xran_port_id;
+                    p_sec_db = p_sectiondb[xran_port_id];
+
+                    if (loc_ret) {
+                        xran_process_tx_sym_cp_on_opt(pHandle, ctx_id, tti,
+                                            0, num_CCPorts, 0, num_eAxc, frame_id, subframe_id, slot_id, sym_id,
+                                            compType, XRAN_DIR_UL, xran_port_id, p_sec_db);
+                    }
                     else
-                        retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
+                    {
+                        retval = 0;
+                    }
                 }
-                else
-                    retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
+            }
+
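+            /* Category B O-RU with SRS enabled: handle SRS U-Plane only on symbols selected by srs_cfg.symbMask */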
+            if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_B
+                    && p_xran_dev_ctx->enableSrs
+                    && ((p_xran_dev_ctx->srs_cfg.symbMask >> sym_id)&1))
+            {
+                xran_port_id = p_xran_dev_ctx->xran_port_id;
+                p_sec_db = p_sectiondb[xran_port_id];
+                compType = p_xran_dev_ctx->fh_cfg.ru_conf.xranCompHdrType;
+                struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+                struct xran_prb_map *prb_map;
+                /* check for special slot */
+                if(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1)
+                {
+                    if(((tti % p_xran_dev_ctx->fh_cfg.frame_conf.nTddPeriod) == pSrsCfg->slot)
+                        && (p_xran_dev_ctx->ndm_srs_scheduled == 0))
+                    {
 
-                if(p_xran_dev_ctx->enableSrs && (p_srs_cfg->symbMask & (1 << sym_id)))
+                        prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
+                        /* NDM U-Plane is not enabled */
+                        if(pSrsCfg->ndm_offset == 0)
+                        {
+                            retval = xran_process_tx_srs_cp_on(pHandle, ctx_id, tti,
+                                        0, num_CCPorts, 0, xran_get_num_ant_elm(pHandle), frame_id, subframe_id, slot_id, sym_id,
+                                        compType, XRAN_DIR_UL, xran_port_id, p_sec_db);
+                        }
+                        /* NDM U-Plane is enabled, SRS U-Planes will be transmitted after ndm_offset (in slots) */
+                        else
+                        {
+                            p_xran_dev_ctx->ndm_srs_scheduled   = 1;
+                            p_xran_dev_ctx->ndm_srs_tti         = tti;
+                            p_xran_dev_ctx->ndm_srs_txtti       = (tti + pSrsCfg->ndm_offset)%2000;
+                            p_xran_dev_ctx->ndm_srs_schedperiod = pSrsCfg->slot;
+                        }
+                    }
+                }
+                /* check whether an SRS NDM U-Plane transmission was scheduled and is now due (non-special slot) */
+                else if(p_xran_dev_ctx->ndm_srs_scheduled
+                        && p_xran_dev_ctx->ndm_srs_txtti == tti)
                 {
-                    retval = xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
+                    prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
+                    xran_port_id = p_xran_dev_ctx->xran_port_id;
+                    p_sec_db = p_sectiondb[xran_port_id];
+                    int ndm_step;
+                    uint32_t srs_tti, srsFrame, srsSubframe, srsSlot, srs_sym;
+                    uint8_t  srsCtx;
+                    if(prb_map && prb_map->nPrbElm)
+                    {
+                        srs_sym = prb_map->prbMap[0].nStartSymb;
+
+                        srs_tti     = p_xran_dev_ctx->ndm_srs_tti;
+                        num_eAxAntElm = xran_get_num_ant_elm(pHandle);
+                        ndm_step    = num_eAxAntElm / pSrsCfg->ndm_txduration;
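+                        /* antenna elements whose SRS is sent per symbol when spreading the transmission over ndm_txduration symbols */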
+
+                        srsSlot     = XranGetSlotNum(srs_tti, SLOTNUM_PER_SUBFRAME(interval));
+                        srsSubframe = XranGetSubFrameNum(srs_tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
+                        srsFrame    = XranGetFrameNum(srs_tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+                        srsFrame    = (srsFrame & 0xff);
+                        srsCtx      = srs_tti % XRAN_MAX_SECTIONDB_CTX;
+
+                        if(sym_id < pSrsCfg->ndm_txduration)
+                        {
+                            retval = xran_process_tx_srs_cp_on(pHandle, srsCtx, srs_tti,
+                                    0, num_CCPorts, sym_id*ndm_step, ndm_step, srsFrame, srsSubframe, srsSlot, srs_sym,
+                                    compType, XRAN_DIR_UL, xran_port_id, p_sec_db);
+                        }
+                        else
+                        {
+                            p_xran_dev_ctx->ndm_srs_scheduled   = 0;
+                            p_xran_dev_ctx->ndm_srs_tti         = 0;
+                            p_xran_dev_ctx->ndm_srs_txtti       = 0;
+                            p_xran_dev_ctx->ndm_srs_schedperiod = 0;
+                        }
+                    }
                 }
             }
         }
     }
+    else {
+        if(first_call) {
+            for (ant_id = 0; ant_id < num_eAxc; ant_id++)
+            {
+                for (cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                {
+                    //struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
+                    if(p_xran_dev_ctx->puschMaskEnable)
+                    {
+                        if((tti % numSlotMu1) != p_xran_dev_ctx->puschMaskSlot)
+                            retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
+                    }
+                    else
+                        retval = xran_process_tx_sym_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id, 0);
+
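+                    /* PRACH U-Plane is sent only on the first num_eAxc_prach eAxCs */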
+                    if(p_xran_dev_ctx->enablePrach && (ant_id < num_eAxc_prach) )
+                    {
+                        retval = xran_process_tx_prach_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
+                    }
+                }
+            }
+
+            if(xran_get_ru_category(pHandle) == XRAN_CATEGORY_B
+                    && p_xran_dev_ctx->enableSrs
+                    && ((p_xran_dev_ctx->srs_cfg.symbMask >> sym_id)&1))
+            {
+                struct xran_srs_config *pSrsCfg = &(p_xran_dev_ctx->srs_cfg);
+
+                for(cc_id = 0; cc_id < num_CCPorts; cc_id++)
+                {
+                    /* check for special or UL slot */
+                    if((xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_SP) ==  1)
+                        ||(xran_fs_get_slot_type(PortId, cc_id, tti, XRAN_SLOT_TYPE_UL) ==  1))
+                    {
+                        if(((tti % p_xran_dev_ctx->fh_cfg.frame_conf.nTddPeriod) == pSrsCfg->slot)
+                            && (p_xran_dev_ctx->ndm_srs_scheduled == 0))
+                        {
+                            int elmIdx;
+                            struct xran_prb_map *prb_map;
+                            prb_map = (struct xran_prb_map *)p_xran_dev_ctx->sFHSrsRxPrbMapBbuIoBufCtrl[tti % XRAN_N_FE_BUF_LEN][cc_id][0].sBufferList.pBuffers->pData;
 
-    if(p_xran_dev_ctx->fh_init.io_cfg.id == O_RU && p_xran_dev_ctx->enableSrs && xran_get_ru_category(pHandle) == XRAN_CATEGORY_B) {
-        num_eAxAntElm = xran_get_num_ant_elm(pHandle);
-        struct xran_srs_config *p_srs_cfg = &(p_xran_dev_ctx->srs_cfg);
-        for(num_eAxc = 0; ant_id < num_eAxAntElm; ant_id++) {
-            for(cc_id = 0; cc_id < num_CCPorts; cc_id++) {
-                if( p_srs_cfg->symbMask & (1 << sym_id)) {
-                    retval = xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id, sym_id);
+                            /* if a PRB map is present for the first antenna, assume SRS may be scheduled */
+                            if(prb_map && prb_map->nPrbElm)
+                            {
+                                /* NDM U-Plane is not enabled */
+                                if(pSrsCfg->ndm_offset == 0)
+                                {
+
+                                    if (prb_map->nPrbElm > 0)
+                                    {
+                                        if(sym_id >= prb_map->prbMap[0].nStartSymb
+                                                && sym_id < (prb_map->prbMap[0].nStartSymb + prb_map->prbMap[0].numSymb))
+                                            for(ant_id=0; ant_id < xran_get_num_ant_elm(pHandle); ant_id++)
+                                                xran_process_tx_srs_cp_off(pHandle, ctx_id, tti, cc_id, ant_id, frame_id, subframe_id, slot_id);
+                                    }
+
+                                }
+                                /* NDM U-Plane is enabled, SRS U-Planes will be transmitted after ndm_offset (in slots) */
+                                else
+                                {
+                                    p_xran_dev_ctx->ndm_srs_scheduled   = 1;
+                                    p_xran_dev_ctx->ndm_srs_tti         = tti;
+                                    p_xran_dev_ctx->ndm_srs_txtti       = (tti + pSrsCfg->ndm_offset)%2000;
+                                    p_xran_dev_ctx->ndm_srs_schedperiod = pSrsCfg->slot;
+                                }
+                            }
+                        }
+                    }
+                    /* check whether an SRS NDM U-Plane transmission was scheduled and is now due (non-special slot) */
+                    /* NDM (Non Delay Measurement): SRS U-Plane packets are spread over several symbols,
+                       since SRS, unlike PDSCH/PUSCH, is not used for delay measurement */
+                    else if(p_xran_dev_ctx->ndm_srs_scheduled
+                            && p_xran_dev_ctx->ndm_srs_txtti == tti)
+                    {
+                        int ndm_step;
+                        uint32_t srs_tti, srsFrame, srsSubframe, srsSlot;
+                        uint8_t  srsCtx;
+
+                        srs_tti     = p_xran_dev_ctx->ndm_srs_tti;
+                        num_eAxAntElm = xran_get_num_ant_elm(pHandle);
+                        ndm_step    = num_eAxAntElm / pSrsCfg->ndm_txduration;
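+                        /* antenna elements whose SRS is sent per symbol when spreading the transmission over ndm_txduration symbols */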
+
+                        srsSlot     = XranGetSlotNum(srs_tti, SLOTNUM_PER_SUBFRAME(interval));
+                        srsSubframe = XranGetSubFrameNum(srs_tti,SLOTNUM_PER_SUBFRAME(interval),  SUBFRAMES_PER_SYSTEMFRAME);
+                        srsFrame    = XranGetFrameNum(srs_tti,sfnSecStart,SUBFRAMES_PER_SYSTEMFRAME, SLOTNUM_PER_SUBFRAME(interval));
+                        srsFrame    = (srsFrame & 0xff);
+                        srsCtx      = srs_tti % XRAN_MAX_SECTIONDB_CTX;
+
+                        if(sym_id < pSrsCfg->ndm_txduration)
+                        {
+                            for(ant_id=sym_id*ndm_step; ant_id < (sym_id+1)*ndm_step; ant_id++)
+                                xran_process_tx_srs_cp_off(pHandle, srsCtx, srs_tti, cc_id, ant_id, srsFrame, srsSubframe, srsSlot);
+                        }
+                        else
+                        {
+                            p_xran_dev_ctx->ndm_srs_scheduled   = 0;
+                            p_xran_dev_ctx->ndm_srs_tti         = 0;
+                            p_xran_dev_ctx->ndm_srs_txtti       = 0;
+                            p_xran_dev_ctx->ndm_srs_schedperiod = 0;
+                        }
+                    }
                 }
             }
         }
     }
 
-    MLogTask(PID_DISPATCH_TX_SYM, t1, MLogTick());
+    MLogXRANTask(PID_DISPATCH_TX_SYM, t1, MLogXRANTick());
     return retval;
 }