/******************************************************************************
*
-* Copyright (c) 2019 Intel.
+* Copyright (c) 2021 Intel.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* @defgroup nr5g_fapi_source_framework_wls_fapi2phy_group
**/
+#include "nr5g_mac_phy_api.h"
#include "nr5g_fapi_std.h"
#include "nr5g_fapi_common_types.h"
+#include "nr5g_fapi_internal.h"
#include "nr5g_fapi_wls.h"
#include "nr5g_fapi_fapi2phy_wls.h"
#include "nr5g_fapi_log.h"
+#include "nr5g_fapi_framework.h"
+
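+/* Deferred-free bookkeeping: lists exchanged with L1 are parked in the
+ * rings below and released only when the rotating index wraps back to
+ * their slot, TO_FREE_SIZE (or TO_FREE_SIZE_URLLC) TTIs later, giving L1
+ * time to finish with the buffers before they are recycled. */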
+static uint32_t g_to_free_send_list_cnt[TO_FREE_SIZE] = { 0 };
+static uint64_t g_to_free_send_list[TO_FREE_SIZE][TOTAL_FREE_BLOCKS] = { {0L} };
+static uint32_t g_to_free_recv_list_cnt[TO_FREE_SIZE] = { 0 };
+static uint64_t g_to_free_recv_list[TO_FREE_SIZE][TOTAL_FREE_BLOCKS] = { {0L} };
+
+static uint32_t g_to_free_send_list_cnt_urllc[TO_FREE_SIZE_URLLC] = { 0 };
+static uint64_t g_to_free_send_list_urllc[TO_FREE_SIZE_URLLC][TOTAL_FREE_BLOCKS] = { {0L} };
+
+static uint32_t g_free_recv_idx = 0;
+static uint32_t g_free_send_idx = 0;
+static uint32_t g_free_send_idx_urllc = 0;
+
+uint64_t *nr5g_fapi_fapi2phy_wls_get(
+ uint32_t * const msg_size,
+ uint16_t * const msg_type,
+ uint16_t * const flags);
+
+uint8_t nr5g_fapi_fapi2phy_wls_put(
+ uint64_t p_msg,
+ uint32_t msg_size,
+ uint16_t msg_type,
+ uint16_t flags);
//------------------------------------------------------------------------------
/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
 *
 * @param[out] msg_size Size of the message retrieved from L1
 * @param[out] msg_type Type of the message retrieved from L1
 * @param[out] flags WLS flags carried with the message
 *
 * @return Pointer to the retrieved message, or NULL if none is pending
 **/
//----------------------------------------------------------------------------------
inline uint64_t *nr5g_fapi_fapi2phy_wls_get(
- uint32_t * msg_size,
- uint16_t * msg_type,
- uint16_t * flags)
+ uint32_t * const msg_size,
+ uint16_t * const msg_type,
+ uint16_t * const flags)
{
    uint64_t *data = NULL;
    WLS_HANDLE h_wls;
    uint32_t ms = 0;
    uint16_t mt = 0, f = 0;

    h_wls = nr5g_fapi_fapi2phy_wls_instance();
    data = (uint64_t *) WLS_Get(h_wls, &ms, &mt, &f);
    *msg_size = ms, *msg_type = mt, *flags = f;
- NR5G_FAPI_LOG(TRACE_LOG, ("[NR5G_FAPI][FAPI2PHY WLS][GET] %p size: %d "
+ NR5G_FAPI_LOG(TRACE_LOG, ("[FAPI2PHY WLS][GET] %p size: %d "
"type: %x flags: %x", data, *msg_size, *msg_type, *flags));
    return data;
}

//------------------------------------------------------------------------------
/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
 *
 * @param[in] p_msg Virtual address of the message to send
 * @param[in] msg_size Size of the message
 * @param[in] msg_type Type of the message
 * @param[in] flags WLS flags to send with the message
 *
 * @return 0 if SUCCESS
 **/
//------------------------------------------------------------------------------
inline uint8_t nr5g_fapi_fapi2phy_wls_put(
    uint64_t p_msg,
    uint32_t msg_size,
    uint16_t msg_type,
    uint16_t flags)
{
    int ret = SUCCESS;
    WLS_HANDLE h_phy_wls = nr5g_fapi_fapi2phy_wls_instance();
uint64_t pa = nr5g_fapi_wls_va_to_pa(h_phy_wls, (void *)p_msg);
- NR5G_FAPI_LOG(TRACE_LOG, ("[NR5G_FAPI][FAPI2PHY WLS][PUT] %ld size: %d "
+ NR5G_FAPI_LOG(TRACE_LOG, ("[FAPI2PHY WLS][PUT] %ld size: %d "
"type: %x flags: %x", pa, msg_size, msg_type, flags));
ret = WLS_Put(h_phy_wls, pa, msg_size, msg_type, flags);
return ret;
}
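+
+/* A minimal sketch of the get/put contract (illustrative): the sender
+ * converts a virtual address to its physical counterpart with
+ * nr5g_fapi_wls_va_to_pa() before WLS_Put(), and the receiver maps the
+ * address returned by WLS_Get() back with nr5g_fapi_wls_pa_to_va()
+ * before dereferencing it. */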
-//----------------------------------------------------------------------------------
-/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
- *
- * @param void
- *
- * @return 0 if SUCCESS
- *
- * @description
- * This function is called at WLS init and waits in an infinite for L1 to respond back with some information
- * needed by the L2
- *
-**/
-//----------------------------------------------------------------------------------
-inline uint8_t nr5g_fapi_fapi2phy_wls_ready(
- )
-{
- int ret = SUCCESS;
- //NR5G_FAPI_LOG(TRACE_LOG, ("Waiting for L1 to respond in WLS Ready"));
- ret = WLS_Ready(nr5g_fapi_fapi2phy_wls_instance());
- return ret;
-}
-
//----------------------------------------------------------------------------------
/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
 *
 * @param void
 *
 * @return Number of blocks pending from L1
 *
 * @description
 *     This function blocks until L1 posts at least one message to the
 *     FAPI2PHY WLS instance.
 **/
//----------------------------------------------------------------------------------
-inline uint8_t nr5g_fapi_fapi2phy_wls_wait(
+inline uint32_t nr5g_fapi_fapi2phy_wls_wait(
)
{
    int ret = SUCCESS;

    ret = WLS_Wait(nr5g_fapi_fapi2phy_wls_instance());
    return ret;
}

//----------------------------------------------------------------------------------
/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
 *
 * @param[in] flags WLS flags returned with the last message
 *
 * @return 1 if more messages are pending, 0 otherwise
 **/
//----------------------------------------------------------------------------------
static inline uint8_t is_msg_present(
    uint16_t flags)
{
    return (!((flags & WLS_TF_FIN) || (flags == 0)));
}
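+
+//----------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
+ *
+ * @param[in] p_qelm_list Pointer to the list of APIs received from L1
+ *
+ * @return None
+ *
+ * @description This function parks a received API list in the deferred-free
+ * ring, advances the ring index, releases the slot that has
+ * aged out, and replenishes the UL buffer pool.
+**/
+//----------------------------------------------------------------------------------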
+void nr5g_fapi_transfer_to_free_recv_list(
+    PMAC2PHY_QUEUE_EL p_qelm_list)
+{
+    wls_fapi_add_recv_apis_to_free(p_qelm_list, g_free_recv_idx);
+    g_free_recv_idx++;
+    if (g_free_recv_idx >= TO_FREE_SIZE) {
+        g_free_recv_idx = 0;
+    }
+    // Free a few TTIs later, once the ring index wraps back to this slot
+    wls_fapi_free_recv_free_list(g_free_recv_idx);
+
+    wls_fapi_add_blocks_to_ul();
+}
+
//----------------------------------------------------------------------------------
/** @ingroup nr5g_fapi_source_framework_wls_fapi2phy_group
 *
 * @return Pointer to the head of the API list received from L1, or NULL
 *
 * @description
 *     This function receives a linked list of APIs from L1 over WLS.
 **/
//----------------------------------------------------------------------------------
PMAC2PHY_QUEUE_EL nr5g_fapi_fapi2phy_wls_recv(
    )
{
    uint16_t msg_type = 0;
    uint16_t flags = 0;
    uint32_t msg_size = 0;
    uint32_t num_elms = 0;
    uint64_t *p_msg = NULL;
- static uint32_t g_free_recv_idx = 0;
- PMAC2PHY_QUEUE_EL p_qelm_list = NULL;
+ PMAC2PHY_QUEUE_EL p_qelm_list = NULL, p_urllc_qelm_list = NULL;
PMAC2PHY_QUEUE_EL p_qelm = NULL;
- PMAC2PHY_QUEUE_EL p_tail_qelm = NULL;
+ PMAC2PHY_QUEUE_EL p_tail_qelm = NULL, p_urllc_tail_qelm = NULL;
+ uint64_t start_tick = 0;
num_elms = nr5g_fapi_fapi2phy_wls_wait();
if (!num_elms)
return p_qelm_list;
+ start_tick = __rdtsc();
+
do {
p_msg = nr5g_fapi_fapi2phy_wls_get(&msg_size, &msg_type, &flags);
if (p_msg) {
continue;
}
p_qelm->pNext = NULL;
+
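+            // Chain URLLC messages on their own list so they can be handed
+            // to the URLLC thread callback ahead of the regular list.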
+            if (flags & WLS_TF_URLLC) {
+ if (p_urllc_qelm_list) {
+ p_urllc_tail_qelm->pNext = p_qelm;
+ p_urllc_tail_qelm = p_qelm;
+ } else {
+ p_urllc_qelm_list = p_qelm;
+ p_urllc_tail_qelm = p_qelm;
+ }
+ } else {
                if (p_qelm_list) {
                    p_tail_qelm->pNext = p_qelm;
                    p_tail_qelm = p_qelm;
                } else {
                    p_qelm_list = p_qelm;
                    p_tail_qelm = p_qelm;
                }
}
+
+ }
num_elms--;
} while (num_elms && is_msg_present(flags));
- if (p_qelm_list) {
- wls_fapi_add_recv_apis_to_free(p_qelm_list, g_free_recv_idx);
- g_free_recv_idx++;
- if (g_free_recv_idx >= TO_FREE_SIZE) {
- g_free_recv_idx = 0;
- }
- // Free 10 TTIs Later
- wls_fapi_free_recv_free_list(g_free_recv_idx);
+ if (p_urllc_qelm_list) {
+        nr5g_fapi_transfer_to_free_recv_list(p_urllc_qelm_list);
+ nr5g_fapi_urllc_thread_callback((void *) p_urllc_qelm_list,
+ &nr5g_fapi_get_nr5g_fapi_phy_ctx()->urllc_phy2mac_params);
+ }
- wls_fapi_add_blocks_to_ul();
+ if (p_qelm_list) {
+        nr5g_fapi_transfer_to_free_recv_list(p_qelm_list);
}
+ tick_total_wls_get_per_tti_ul += __rdtsc() - start_tick;
return p_qelm_list;
}
PDLPDUDataStruct p_dl_pdu_data = (PDLPDUDataStruct) (p_dl_sdu_req + 1);
uint32_t i, j, is_last, is_last1, msg_type;
uint16_t list_flags = flags;
+ uint16_t flags_urllc = (flags & WLS_TF_URLLC) ? WLS_TF_URLLC : 0;
for (i = 0; i < p_dl_sdu_req->nPDU; i++) {
is_last = (i == (p_dl_sdu_req->nPDU - 1));
is_last1 = (((j == 0) && (p_dl_pdu_data->pPayload2 == 0)) ||
(j == (MAX_DL_PER_UE_CODEWORD_NUM - 1)));
if ((list_flags & WLS_TF_FIN) && is_last && is_last1) {
- flags = WLS_SG_LAST;
+ flags = WLS_SG_LAST | flags_urllc;
} else {
- flags = WLS_SG_NEXT;
+ flags = WLS_SG_NEXT | flags_urllc;
}
WLS_HANDLE h_phy_wls = nr5g_fapi_fapi2phy_wls_instance();
**/
//------------------------------------------------------------------------------
uint8_t nr5g_fapi_fapi2phy_wls_send(
- void *data)
+ void *data,
+ bool is_urllc)
{
p_nr5g_fapi_wls_context_t p_wls_ctx = nr5g_fapi_wls_context();
PMAC2PHY_QUEUE_EL p_curr_msg = NULL;
PL1L2MessageHdr p_msg_header;
uint16_t flags = 0;
+ uint16_t flags_urllc = (is_urllc ? WLS_TF_URLLC : 0);
uint8_t ret = SUCCESS;
int n_zbc_blocks = 0, is_zbc = 0, count = 0;
- static uint32_t g_free_send_idx = 0;
p_curr_msg = (PMAC2PHY_QUEUE_EL) data;
- wls_fapi_add_send_apis_to_free(p_curr_msg, g_free_send_idx);
+    if (is_urllc)
+        wls_fapi_add_send_apis_to_free_urllc(p_curr_msg, g_free_send_idx_urllc);
+    else
+        wls_fapi_add_send_apis_to_free(p_curr_msg, g_free_send_idx);
if (pthread_mutex_lock((pthread_mutex_t *) & p_wls_ctx->fapi2phy_lock_send)) {
NR5G_FAPI_LOG(ERROR_LOG, ("unable to lock send pthread mutex"));
}
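+
+    // WLS_TF_URLLC is OR'ed into the flags of every fragment below so that
+    // the receiver can steer the whole message onto its URLLC path.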
if (p_curr_msg->pNext) {
- flags = WLS_SG_FIRST;
+ flags = WLS_SG_FIRST | flags_urllc;
while (p_curr_msg) {
// only batch mode
count++;
if (SUCCESS != nr5g_fapi_fapi2phy_wls_put((uint64_t) p_curr_msg,
p_curr_msg->nMessageLen + sizeof(MAC2PHY_QUEUE_EL),
p_msg_header->nMessageType, flags)) {
- if (pthread_mutex_unlock((pthread_mutex_t *) & p_wls_ctx->
- fapi2phy_lock_send)) {
+ if (pthread_mutex_unlock((pthread_mutex_t *) &
+ p_wls_ctx->fapi2phy_lock_send)) {
NR5G_FAPI_LOG(ERROR_LOG,
("unable to unlock send pthread mutex"));
}
p_curr_msg = p_curr_msg->pNext;
} else { /* p_curr_msg->Next */
// LAST
- flags = WLS_SG_LAST;
+ flags = WLS_SG_LAST | flags_urllc;
is_zbc = 0;
if (nr5g_fapi_fapi2phy_is_sdu_zbc_block(p_msg_header,
&n_zbc_blocks)) {
- flags = WLS_SG_NEXT;
+ flags = WLS_SG_NEXT | flags_urllc;
is_zbc = 1;
}
if (nr5g_fapi_fapi2phy_wls_put((uint64_t) p_curr_msg,
p_curr_msg->nMessageLen + sizeof(MAC2PHY_QUEUE_EL),
p_msg_header->nMessageType, flags) != SUCCESS) {
printf("Error\n");
- if (pthread_mutex_unlock((pthread_mutex_t *) & p_wls_ctx->
- fapi2phy_lock_send)) {
+ if (pthread_mutex_unlock((pthread_mutex_t *) &
+ p_wls_ctx->fapi2phy_lock_send)) {
NR5G_FAPI_LOG(ERROR_LOG,
("unable to unlock send pthread mutex"));
}
if (is_zbc) { // ZBC blocks
if (nr5g_fapi_fapi2phy_send_zbc_blocks(p_msg_header,
- WLS_SG_LAST) != SUCCESS) {
+ WLS_SG_LAST | flags_urllc) != SUCCESS) {
printf("Error\n");
if (pthread_mutex_unlock((pthread_mutex_t *) &
p_wls_ctx->fapi2phy_lock_send)) {
}
p_curr_msg = NULL;
} /* p_curr_msg->Next */
- flags = WLS_SG_NEXT;
+ flags = WLS_SG_NEXT | flags_urllc;
}
- } else { // one block
+ } else { // one block
count++;
if (nr5g_fapi_fapi2phy_is_sdu_zbc_block(p_curr_msg, &n_zbc_blocks)) {
printf("Error ZBC block cannot be only one in the list\n");
- if (pthread_mutex_unlock((pthread_mutex_t *) & p_wls_ctx->
- fapi2phy_lock_send)) {
+ if (pthread_mutex_unlock((pthread_mutex_t *) &
+ p_wls_ctx->fapi2phy_lock_send)) {
NR5G_FAPI_LOG(ERROR_LOG,
("unable to unlock send pthread mutex"));
}
p_curr_msg->nMessageLen + sizeof(MAC2PHY_QUEUE_EL),
p_curr_msg->nMessageType, flags)) {
printf("Error\n");
- if (pthread_mutex_unlock((pthread_mutex_t *) & p_wls_ctx->
- fapi2phy_lock_send)) {
+ if (pthread_mutex_unlock((pthread_mutex_t *) &
+ p_wls_ctx->fapi2phy_lock_send)) {
NR5G_FAPI_LOG(ERROR_LOG,
("unable to unlock send pthread mutex"));
}
}
if (count > 1) {
+        if (is_urllc) {
+            g_free_send_idx_urllc++;
+            if (g_free_send_idx_urllc >= TO_FREE_SIZE_URLLC)
+                g_free_send_idx_urllc = 0;
+        } else {
-        g_free_send_idx++;
-        if (g_free_send_idx >= TO_FREE_SIZE)
-            g_free_send_idx = 0;
+            g_free_send_idx++;
+            if (g_free_send_idx >= TO_FREE_SIZE)
+                g_free_send_idx = 0;
+        }
- // Free 10 TTIs Later
- wls_fapi_free_send_free_list(g_free_send_idx);
+        // Free a few TTIs later
+        if (is_urllc)
+            wls_fapi_free_send_free_list_urllc();
+        else
+            wls_fapi_free_send_free_list();
}
- if (pthread_mutex_unlock((pthread_mutex_t *) & p_wls_ctx->
- fapi2phy_lock_send)) {
+ if (pthread_mutex_unlock((pthread_mutex_t *) &
+ p_wls_ctx->fapi2phy_lock_send)) {
NR5G_FAPI_LOG(ERROR_LOG, ("unable to unlock send pthread mutex"));
return FAILURE;
}
return ret;
}
+
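+/* Usage sketch (illustrative only): a caller that has assembled a
+ * MAC2PHY_QUEUE_EL list p_list would push it to L1 with
+ *
+ *     if (nr5g_fapi_fapi2phy_wls_send((void *) p_list, false) != SUCCESS) {
+ *         NR5G_FAPI_LOG(ERROR_LOG, ("fapi2phy send failed"));
+ *     }
+ *
+ * Passing true instead tags every WLS fragment with WLS_TF_URLLC and uses
+ * the URLLC deferred-free ring. */
+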
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @param[in] msg_type Message type of an L1/L2 API
+ *
+ * @return Memory statistics location for the given message type
+ *
+ * @description This function maps a message type to the location used to
+ * track memory statistics when the corresponding buffer is
+ * freed.
+**/
+//------------------------------------------------------------------------------
+uint8_t get_stats_location(
+ uint8_t msg_type)
+{
+ uint8_t loc;
+ switch (msg_type) {
+ case MSG_TYPE_PHY_CONFIG_REQ:
+ loc = MEM_STAT_CONFIG_REQ;
+ break;
+ case MSG_TYPE_PHY_START_REQ:
+ loc = MEM_STAT_START_REQ;
+ break;
+ case MSG_TYPE_PHY_STOP_REQ:
+ loc = MEM_STAT_STOP_REQ;
+ break;
+ case MSG_TYPE_PHY_SHUTDOWN_REQ:
+ loc = MEM_STAT_SHUTDOWN_REQ;
+ break;
+ case MSG_TYPE_PHY_DL_CONFIG_REQ:
+ loc = MEM_STAT_DL_CONFIG_REQ;
+ break;
+ case MSG_TYPE_PHY_UL_CONFIG_REQ:
+ loc = MEM_STAT_UL_CONFIG_REQ;
+ break;
+ case MSG_TYPE_PHY_UL_DCI_REQ:
+ loc = MEM_STAT_UL_DCI_REQ;
+ break;
+ case MSG_TYPE_PHY_TX_REQ:
+ loc = MEM_STAT_TX_REQ;
+ break;
+ case MSG_TYPE_PHY_DL_IQ_SAMPLES:
+ loc = MEM_STAT_DL_IQ_SAMPLES;
+ break;
+ case MSG_TYPE_PHY_UL_IQ_SAMPLES:
+ loc = MEM_STAT_UL_IQ_SAMPLES;
+ break;
+ default:
+ loc = MEM_STAT_DEFAULT;
+ }
+
+ return loc;
+}
+
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @param[in] pListElem Pointer to List element header
+ * @param[in] idx Index into the deferred-free ring
+ *
+ * @return None
+ *
+ * @description This function stores all the blocks of a List Element Linked
+ * List coming from L1 into an array so that they can be freed
+ * at a later point in time.
+**/
+//------------------------------------------------------------------------------
+void wls_fapi_add_recv_apis_to_free(
+ PMAC2PHY_QUEUE_EL pListElem,
+ uint32_t idx)
+{
+ PMAC2PHY_QUEUE_EL pNextMsg = NULL;
+ L1L2MessageHdr *p_msg_header = NULL;
+ PRXULSCHIndicationStruct p_phy_rx_ulsch_ind = NULL;
+ PULSCHPDUDataStruct p_ulsch_pdu = NULL;
+ uint8_t *ptr = NULL;
+ uint32_t count;
+ uint8_t i;
+
+ WLS_HANDLE h_wls;
+ p_nr5g_fapi_wls_context_t p_wls_ctx = nr5g_fapi_wls_context();
+ h_wls = p_wls_ctx->h_wls[NR5G_FAPI2PHY_WLS_INST];
+
+ count = g_to_free_recv_list_cnt[idx];
+ pNextMsg = pListElem;
+ while (pNextMsg) {
+ if (count >= TOTAL_FREE_BLOCKS) {
+ NR5G_FAPI_LOG(ERROR_LOG, ("%s: Reached max capacity of free list.\n"
+ "\t\t\t\tlist index: %d list count: %d max list count: %d",
+ __func__, idx, count, TOTAL_FREE_BLOCKS));
+ return;
+ }
+
+ g_to_free_recv_list[idx][count++] = (uint64_t) pNextMsg;
+ p_msg_header = (PL1L2MessageHdr) (pNextMsg + 1);
+ if (p_msg_header->nMessageType == MSG_TYPE_PHY_RX_ULSCH_IND) {
+ p_phy_rx_ulsch_ind = (PRXULSCHIndicationStruct) p_msg_header;
+ for (i = 0u; i < p_phy_rx_ulsch_ind->nUlsch; i++) {
+ p_ulsch_pdu = &(p_phy_rx_ulsch_ind->sULSCHPDUDataStruct[i]);
+            if (p_ulsch_pdu->nPduLen > 0) {
+ ptr = (uint8_t *) nr5g_fapi_wls_pa_to_va(h_wls,
+ (uint64_t) p_ulsch_pdu->pPayload);
+
+                if (ptr && count < TOTAL_FREE_BLOCKS) {
+                    g_to_free_recv_list[idx][count++] = (uint64_t) ptr;
+                } else if (ptr) {
+                    NR5G_FAPI_LOG(ERROR_LOG, ("%s: free list %u full, "
+                        "cannot defer ULSCH payload free", __func__, idx));
+                }
+ } else {
+                NR5G_FAPI_LOG(DEBUG_LOG, ("%s: Payload for "
+                    "MSG_TYPE_PHY_RX_ULSCH_IND ulsch pdu (%u/%u) is NULL. "
+                    "Skip adding to free list.",
+                    __func__, i, p_phy_rx_ulsch_ind->nUlsch));
+ }
+ }
+ }
+ pNextMsg = pNextMsg->pNext;
+ }
+
+ g_to_free_recv_list[idx][count] = 0L;
+ g_to_free_recv_list_cnt[idx] = count;
+
+ NR5G_FAPI_LOG(DEBUG_LOG, ("To Free %d\n", count));
+}
+
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @param[in] idx Index into the deferred-free ring
+ *
+ * @return None
+ *
+ * @description This function frees all blocks that have been added to the
+ * free array at the given ring index
+**/
+//------------------------------------------------------------------------------
+void wls_fapi_free_recv_free_list(
+ uint32_t idx)
+{
+ PMAC2PHY_QUEUE_EL pNextMsg = NULL;
+ int count = 0;
+
+ if (idx >= TO_FREE_SIZE) {
+        NR5G_FAPI_LOG(ERROR_LOG, ("%s: Invalid list index: %u\n", __func__, idx));
+ return;
+ }
+
+ pNextMsg = (PMAC2PHY_QUEUE_EL) g_to_free_recv_list[idx][count];
+ while (pNextMsg) {
+ wls_fapi_free_buffer(pNextMsg, MIN_UL_BUF_LOCATIONS);
+ g_to_free_recv_list[idx][count++] = 0L;
+ if (g_to_free_recv_list[idx][count])
+ pNextMsg = (PMAC2PHY_QUEUE_EL) g_to_free_recv_list[idx][count];
+ else
+ pNextMsg = 0L;
+ }
+
+ NR5G_FAPI_LOG(DEBUG_LOG, ("Free %d\n", count));
+ g_to_free_recv_list_cnt[idx] = 0;
+
+ return;
+}
+
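+/* Worked example (with a hypothetical ring depth TO_FREE_SIZE of 10): a
+ * list parked at ring index 3 during TTI n is released when the rotating
+ * index wraps back to slot 3, i.e. in TTI n+10. */
+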
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @param[in] pListElem Pointer to List element header
+ * @param[in] idx Index into the deferred-free ring
+ *
+ * @return None
+ *
+ * @description This function stores all the blocks of a List Element Linked
+ * List being sent to L1 into an array so that they can be
+ * freed at a later point in time.
+**/
+//------------------------------------------------------------------------------
+void wls_fapi_add_send_apis_to_free(
+ PMAC2PHY_QUEUE_EL pListElem,
+ uint32_t idx)
+{
+ PMAC2PHY_QUEUE_EL pNextMsg = NULL;
+ uint32_t count;
+
+ count = g_to_free_send_list_cnt[idx];
+ pNextMsg = pListElem;
+ while (pNextMsg) {
+ if (count >= TOTAL_FREE_BLOCKS) {
+ NR5G_FAPI_LOG(ERROR_LOG, ("%s: Reached max capacity of free list.\n"
+ "\t\t\t\tlist index: %d list count: %d max list count: %d",
+ __func__, idx, count, TOTAL_FREE_BLOCKS));
+ return;
+ }
+
+ g_to_free_send_list[idx][count++] = (uint64_t) pNextMsg;
+ pNextMsg = pNextMsg->pNext;
+ }
+
+ g_to_free_send_list[idx][count] = 0L;
+ g_to_free_send_list_cnt[idx] = count;
+
+ NR5G_FAPI_LOG(DEBUG_LOG, ("To Free %d\n", count));
+}
+
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @return None
+ *
+ * @description This function frees all blocks that have been added to the
+ * free array at the current send ring index (g_free_send_idx)
+**/
+//------------------------------------------------------------------------------
+void wls_fapi_free_send_free_list(void)
+{
+ PMAC2PHY_QUEUE_EL pNextMsg = NULL;
+ L1L2MessageHdr *p_msg_header = NULL;
+ int count = 0, loc = 0;
+
+ if (g_free_send_idx >= TO_FREE_SIZE) {
+        NR5G_FAPI_LOG(ERROR_LOG, ("%s: Invalid list index: %u\n", __func__, g_free_send_idx));
+ return;
+ }
+
+ pNextMsg = (PMAC2PHY_QUEUE_EL) g_to_free_send_list[g_free_send_idx][count];
+ while (pNextMsg) {
+ p_msg_header = (PL1L2MessageHdr) (pNextMsg + 1);
+ loc = get_stats_location(p_msg_header->nMessageType);
+ wls_fapi_free_buffer(pNextMsg, loc);
+ g_to_free_send_list[g_free_send_idx][count++] = 0L;
+ if (g_to_free_send_list[g_free_send_idx][count])
+ pNextMsg = (PMAC2PHY_QUEUE_EL) g_to_free_send_list[g_free_send_idx][count];
+ else
+ pNextMsg = 0L;
+ }
+
+ NR5G_FAPI_LOG(DEBUG_LOG, ("Free %d\n", count));
+ g_to_free_send_list_cnt[g_free_send_idx] = 0;
+
+ return;
+}
+
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @param[in] pListElem Pointer to List element header
+ * @param[in] idx Index into the URLLC deferred-free ring
+ *
+ * @return None
+ *
+ * @description This function stores all the blocks of a List Element Linked
+ * List being sent to L1 into an array so that they can be
+ * freed at a later point in time. Used by the URLLC thread.
+**/
+//------------------------------------------------------------------------------
+void wls_fapi_add_send_apis_to_free_urllc(
+ PMAC2PHY_QUEUE_EL pListElem,
+ uint32_t idx)
+{
+ PMAC2PHY_QUEUE_EL pNextMsg = NULL;
+ uint32_t count;
+
+ count = g_to_free_send_list_cnt_urllc[idx];
+ pNextMsg = pListElem;
+ while (pNextMsg) {
+ if (count >= TOTAL_FREE_BLOCKS) {
+ NR5G_FAPI_LOG(ERROR_LOG, ("%s: Reached max capacity of free list.\n"
+ "\t\t\t\tlist index: %d list count: %d max list count: %d",
+ __func__, idx, count, TOTAL_FREE_BLOCKS));
+ return;
+ }
+
+ g_to_free_send_list_urllc[idx][count++] = (uint64_t) pNextMsg;
+ pNextMsg = pNextMsg->pNext;
+ }
+
+ g_to_free_send_list_urllc[idx][count] = 0L;
+ g_to_free_send_list_cnt_urllc[idx] = count;
+
+ NR5G_FAPI_LOG(DEBUG_LOG, ("To Free %d\n", count));
+}
+
+//------------------------------------------------------------------------------
+/** @ingroup nr5g_fapi_source_framework_wls_lib_group
+ *
+ * @return None
+ *
+ * @description This function frees all blocks that have been added to the
+ * free array at the current URLLC send ring index
+ * (g_free_send_idx_urllc). Used by the URLLC thread.
+**/
+//------------------------------------------------------------------------------
+void wls_fapi_free_send_free_list_urllc(void)
+{
+ PMAC2PHY_QUEUE_EL pNextMsg = NULL;
+ L1L2MessageHdr *p_msg_header = NULL;
+ int count = 0, loc = 0;
+
+ if (g_free_send_idx_urllc >= TO_FREE_SIZE_URLLC) {
+        NR5G_FAPI_LOG(ERROR_LOG, ("%s: Invalid list index: %u\n", __func__, g_free_send_idx_urllc));
+ return;
+ }
+
+ pNextMsg = (PMAC2PHY_QUEUE_EL) g_to_free_send_list_urllc[g_free_send_idx_urllc][count];
+ while (pNextMsg) {
+ p_msg_header = (PL1L2MessageHdr) (pNextMsg + 1);
+ loc = get_stats_location(p_msg_header->nMessageType);
+ wls_fapi_free_buffer(pNextMsg, loc);
+ g_to_free_send_list_urllc[g_free_send_idx_urllc][count++] = 0L;
+ if (g_to_free_send_list_urllc[g_free_send_idx_urllc][count])
+ pNextMsg = (PMAC2PHY_QUEUE_EL) g_to_free_send_list_urllc[g_free_send_idx_urllc][count];
+ else
+ pNextMsg = 0L;
+ }
+
+ NR5G_FAPI_LOG(DEBUG_LOG, ("Free %d\n", count));
+ g_to_free_send_list_cnt_urllc[g_free_send_idx_urllc] = 0;
+
+ return;
+}