1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /************************************************************************
Desc: C source code for Entry point functions
29 **********************************************************************/
31 /** @file rg_sch_cmn.c
32 @brief This file implements the schedulers main access to MAC layer code.
36 /* header include files -- defines (.h) */
37 #include "common_def.h"
43 #include "rg_sch_err.h"
44 #include "rg_sch_inf.h"
46 #include "rg_sch_cmn.h"
47 #include "rl_interface.h"
48 #include "rl_common.h"
50 /* header/extern include files (.x) */
51 #include "tfu.x" /* TFU types */
52 #include "lrg.x" /* layer management typedefs for MAC */
53 #include "rgr.x" /* layer management typedefs for MAC */
54 #include "rgm.x" /* layer management typedefs for MAC */
55 #include "rg_sch_inf.x" /* typedefs for Scheduler */
56 #include "rg_sch.x" /* typedefs for Scheduler */
57 #include "rg_sch_cmn.x" /* typedefs for Scheduler */
59 #include "lrg.x" /* Stats Structures */
60 #endif /* MAC_SCH_STATS */
63 #endif /* __cplusplus */
66 uint32_t emtcStatsUlTomSrInd;
67 uint32_t emtcStatsUlBsrTmrTxp;
/* Absolute difference between two iTBS values.
 * NOTE: each argument is evaluated twice -- do not pass expressions
 * with side effects. */
#define RG_ITBS_DIFF(_x, _y) (((_x) < (_y)) ? ((_y) - (_x)) : ((_x) - (_y)))
71 Void rgSCHSc1UlInit ARGS((RgUlSchdApis *apis));
72 #ifdef RG_PHASE2_SCHED
73 Void rgSCHRrUlInit ARGS((RgUlSchdApis *apis));
75 Void rgSCHEmtcHqInfoFree ARGS((RgSchCellCb *cell, RgSchDlHqProcCb *hqP));
76 Void rgSCHEmtcRrUlInit ARGS((RgUlSchdApis *apis));
77 Void rgSCHEmtcCmnDlInit ARGS((Void));
78 Void rgSCHEmtcCmnUlInit ARGS((Void));
79 Void rgSCHEmtcCmnUeNbReset ARGS((RgSchUeCb *ueCb));
80 RgSchCmnCqiToTbs *rgSchEmtcCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
82 Void rgSCHMaxciUlInit ARGS((RgUlSchdApis *apis));
83 Void rgSCHPfsUlInit ARGS((RgUlSchdApis *apis));
85 Void rgSCHSc1DlInit ARGS((RgDlSchdApis *apis));
86 #ifdef RG_PHASE2_SCHED
87 Void rgSCHRrDlInit ARGS((RgDlSchdApis *apis));
89 Void rgSCHEmtcRrDlInit ARGS((RgDlEmtcSchdApis *apis));
91 Void rgSCHMaxciDlInit ARGS((RgDlSchdApis *apis));
92 Void rgSCHPfsDlInit ARGS((RgDlSchdApis *apis));
94 Void rgSCHDlfsInit ARGS((RgDlfsSchdApis *apis));
98 Void rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl ARGS((RgSchCellCb *cell));
99 Void rgSCHCmnGetEmtcDciFrmtSizes ARGS((RgSchCellCb *cell));
100 Void rgSCHEmtcRrUlProcRmvFrmRetx ARGS((RgSchCellCb *cell, RgSchUlHqProcCb *proc));
101 S16 rgSCHCmnPrecompEmtcMsg3Vars
103 RgSchCmnUlCell *cellUl,
109 Void rgSCHEmtcCmnUeCcchSduDel
114 Void rgSCHEmtcRmvFrmTaLst
116 RgSchCmnDlCell *cellDl,
119 Void rgSCHEmtcInitTaLst
121 RgSchCmnDlCell *cellDl
123 Void rgSCHEmtcAddToTaLst
125 RgSchCmnDlCell *cellDl,
132 static Void rgSCHDlSiSched ARGS((RgSchCellCb *cell,
133 RgSchCmnDlRbAllocInfo *allocInfo,
134 RgInfSfAlloc *subfrmAlloc));
135 static Void rgSCHChkNUpdSiCfg ARGS((RgSchCellCb *cell));
136 static Void rgSCHSelectSi ARGS((RgSchCellCb *cell));
137 #endif /*RGR_SI_SCH*/
138 /* LTE_ADV_FLAG_REMOVED_START */
141 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
149 static S16 rgSCHCmnBuildRntpInfo (
157 static Void rgSCHCmnNonDlfsType0Alloc
161 RgSchDlRbAlloc *allocInfo,
/* Maps UL redundancy-version index (0..3) to the I-MCS value used when
 * signalling a retransmission on PUSCH (presumably per 36.213 sec 8.6.1;
 * NOTE(review): entry 0 is 32, which looks like a sentinel -- confirm
 * against the callers). */
static uint8_t rgSchCmnUlRvIdxToIMcsTbl[4] = {32, 30, 31, 29};
165 static Void rgSCHCmnUlNonadapRetx ARGS((
166 RgSchCmnUlCell *cellUl,
170 static Void rgSCHCmnUlSfRlsRetxProcs ARGS((
176 static S16 rgSCHCmnUlMdfyGrntForCqi ARGS((
183 uint32_t stepDownItbs,
187 static Void rgSCHCmnFillHqPPdcchDciFrmt1 ARGS((
189 RgSchDlRbAlloc *rbAllocInfo,
190 RgSchDlHqProcCb *hqP,
194 static Void rgSCHCmnFillHqPPdcchDciFrmt1A ARGS((
196 RgSchDlRbAlloc *rbAllocInfo,
197 RgSchDlHqProcCb *hqP,
201 static Void rgSCHCmnFillHqPPdcchDciFrmt1B ARGS((
203 RgSchDlRbAlloc *rbAllocInfo,
204 RgSchDlHqProcCb *hqP,
208 static Void rgSCHCmnFillHqPPdcchDciFrmt2 ARGS((
210 RgSchDlRbAlloc *rbAllocInfo,
211 RgSchDlHqProcCb *hqP,
215 static Void rgSCHCmnFillHqPPdcchDciFrmt2A ARGS((
217 RgSchDlRbAlloc *rbAllocInfo,
218 RgSchDlHqProcCb *hqP,
225 Void rgSCHCmnDlSpsSch
229 /* LTE_ADV_FLAG_REMOVED_END */
231 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc ARGS((
233 RgSchCmnDlRbAllocInfo *allocInfo
235 static Void rgSCHBcchPcchDlRbAlloc ARGS((
237 RgSchCmnDlRbAllocInfo *allocInfo
239 static Void rgSCHCmnDlBcchPcchAlloc ARGS((
243 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
246 TfuDlCqiPucch *pucchCqi,
247 RgrUeCqiRept *ueCqiRept,
249 Bool *is2ndCwCqiAvail
251 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
254 TfuDlCqiPusch *puschCqi,
255 RgrUeCqiRept *ueCqiRept,
257 Bool *is2ndCwCqiAvail
260 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
263 TfuDlCqiPucch *pucchCqi
265 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
268 TfuDlCqiPusch *puschCqi
271 /* ccpu00117452 - MOD - Changed macro name from
272 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
274 static S16 rgSCHCmnUeDlPwrCtColltCqiRept ARGS((
277 RgrUeCqiRept *ueCqiRept));
278 #endif /* End of RGR_CQI_REPT */
279 /* Fix: syed align multiple UEs to refresh at same time */
280 static Void rgSCHCmnGetRefreshPer ARGS((
284 static S16 rgSCHCmnApplyUeRefresh ARGS((
288 Void rgSCHCmnDlSetUeAllocLmtLa ARGS
293 static Void rgSCHCheckAndSetTxScheme ARGS
301 static uint32_t rgSCHCmnCalcDwPtsTbSz ARGS
311 static Void rgSCHCmnCalcDwPtsTbSz2Cw ARGS
327 static Void rgSCHCmnInitRbAlloc ARGS
333 #endif /* __cplusplus */
337 RgSchdApis rgSchCmnApis;
338 static RgUlSchdApis rgSchUlSchdTbl[RGSCH_NUM_SCHEDULERS];
339 static RgDlSchdApis rgSchDlSchdTbl[RGSCH_NUM_SCHEDULERS];
341 static RgUlSchdApis rgSchEmtcUlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
342 static RgDlEmtcSchdApis rgSchEmtcDlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
344 #ifdef RG_PHASE2_SCHED
345 static RgDlfsSchdApis rgSchDlfsSchdTbl[RGSCH_NUM_DLFS_SCHEDULERS];
347 RgUlSchdInits rgSchUlSchdInits = RGSCH_ULSCHED_INITS;
348 RgDlSchdInits rgSchDlSchdInits = RGSCH_DLSCHED_INITS;
350 static RgEmtcUlSchdInits rgSchEmtcUlSchdInits = RGSCH_EMTC_ULSCHED_INITS;
351 static RgEmtcDlSchdInits rgSchEmtcDlSchdInits = RGSCH_EMTC_DLSCHED_INITS;
353 #if (defined (RG_PHASE2_SCHED) && defined (TFU_UPGRADE))
354 static RgDlfsSchdInits rgSchDlfsSchdInits = RGSCH_DLFSSCHED_INITS;
/* Function-pointer type: per-transmission-mode DL RB allocator. Judging
 * from the parameter names, it allocates RBs in subFrm for ue against
 * pending buffer occupancy bo, reporting the allocated amount via effBo
 * and recording results in proc/cellWdAllocInfo -- confirm at call sites. */
typedef Void (*RgSchCmnDlAllocRbFunc) ARGS((RgSchCellCb *cell, RgSchDlSf *subFrm,
RgSchUeCb *ue, uint32_t bo, uint32_t *effBo, RgSchDlHqProcCb *proc,
RgSchCmnDlRbAllocInfo *cellWdAllocInfo));
/* Function-pointer type: returns the precoding information value for the
 * given layer count / codeword configuration (TM3/TM4 variants exist). */
typedef uint8_t (*RgSchCmnDlGetPrecInfFunc) ARGS((RgSchCellCb *cell, RgSchUeCb *ue,
uint8_t numLyrs, Bool bothCwEnbld));
362 static Void rgSCHCmnDlAllocTxRbTM1 ARGS((
368 RgSchDlHqProcCb *proc,
369 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
371 static Void rgSCHCmnDlAllocTxRbTM2 ARGS((
377 RgSchDlHqProcCb *proc,
378 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
380 static Void rgSCHCmnDlAllocTxRbTM3 ARGS((
386 RgSchDlHqProcCb *proc,
387 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
389 static Void rgSCHCmnDlAllocTxRbTM4 ARGS((
395 RgSchDlHqProcCb *proc,
396 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
399 static Void rgSCHCmnDlAllocTxRbTM5 ARGS((
405 RgSchDlHqProcCb *proc,
406 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
409 static Void rgSCHCmnDlAllocTxRbTM6 ARGS((
415 RgSchDlHqProcCb *proc,
416 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
418 static Void rgSCHCmnDlAllocTxRbTM7 ARGS((
424 RgSchDlHqProcCb *proc,
425 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
427 static Void rgSCHCmnDlAllocRetxRbTM1 ARGS((
433 RgSchDlHqProcCb *proc,
434 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
436 static Void rgSCHCmnDlAllocRetxRbTM2 ARGS((
442 RgSchDlHqProcCb *proc,
443 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
445 static Void rgSCHCmnDlAllocRetxRbTM3 ARGS((
451 RgSchDlHqProcCb *proc,
452 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
454 static Void rgSCHCmnDlAllocRetxRbTM4 ARGS((
460 RgSchDlHqProcCb *proc,
461 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
464 static Void rgSCHCmnDlAllocRetxRbTM5 ARGS((
470 RgSchDlHqProcCb *proc,
471 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
474 static Void rgSCHCmnDlAllocRetxRbTM6 ARGS((
480 RgSchDlHqProcCb *proc,
481 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
483 static Void rgSCHCmnDlAllocRetxRbTM7 ARGS((
489 RgSchDlHqProcCb *proc,
490 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
494 static uint8_t rgSchGetN1ResCount ARGS ((
498 Bool rgSchCmnChkDataOnlyOnPcell
504 uint8_t rgSCHCmnCalcPcqiBitSz
/* Functions specific to each transmission mode for DL Tx RB Allocation.
 * Indexed by (transmission mode - 1); a NULLP entry (TM5 here) means no
 * allocator is wired in for that mode in this build. */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[7] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7};
/* Functions specific to each transmission mode for DL Retx RB Allocation.
 * Indexed by (transmission mode - 1); a NULLP entry (TM5 here) means no
 * retransmission allocator is wired in for that mode in this build. */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[7] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7};
/* Functions specific to each transmission mode for DL Tx RB Allocation.
 * 9-entry variant -- presumably selected by a compile-time guard for a
 * TM8/TM9-capable build (guard lines not visible here; confirm). Indexed
 * by (transmission mode - 1); NULLP = mode not supported (TM5, TM8, TM9). */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[9] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7, NULLP, NULLP};
/* Functions specific to each transmission mode for DL Retx RB Allocation.
 * 9-entry variant -- presumably selected by a compile-time guard for a
 * TM8/TM9-capable build (guard lines not visible here; confirm). Indexed
 * by (transmission mode - 1); NULLP = mode not supported (TM5, TM8, TM9). */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[9] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7, NULLP, NULLP};
534 static uint8_t rgSCHCmnDlTM3PrecInf2 ARGS((
540 static uint8_t rgSCHCmnDlTM3PrecInf4 ARGS((
546 static uint8_t rgSCHCmnDlTM4PrecInf2 ARGS((
552 static uint8_t rgSCHCmnDlTM4PrecInf4 ARGS((
558 /* Functions specific to each transmission mode for DL RB Allocation*/
559 RgSchCmnDlGetPrecInfFunc getPrecInfoFunc[2][2] = {
560 {rgSCHCmnDlTM3PrecInf2, rgSCHCmnDlTM3PrecInf4},
561 {rgSCHCmnDlTM4PrecInf2, rgSCHCmnDlTM4PrecInf4}
564 static S16 rgSCHCmnDlAlloc1CwRetxRb ARGS((
568 RgSchDlHqTbCb *tbInfo,
573 static S16 rgSCHCmnDlAlloc2CwRetxRb ARGS((
577 RgSchDlHqProcCb *proc,
582 static Void rgSCHCmnDlTM3TxTx ARGS((
588 RgSchDlHqProcCb *proc,
589 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
591 static Void rgSCHCmnDlTM3TxRetx ARGS((
597 RgSchDlHqProcCb *proc,
598 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
600 static Void rgSCHCmnDlTM3RetxRetx ARGS((
606 RgSchDlHqProcCb *proc,
607 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
610 static Void rgSCHCmnNonDlfsUpdTyp2Alloc ARGS((
616 /* LTE_ADV_FLAG_REMOVED_START */
618 static Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc ARGS((
625 /* LTE_ADV_FLAG_REMOVED_END */
626 static Void rgSCHCmnDlRbInfoAddUeTx ARGS((
628 RgSchCmnDlRbAllocInfo *allocInfo,
630 RgSchDlHqProcCb *proc
632 static Void rgSCHCmnDlRbInfoAddUeRetx ARGS((
634 RgSchCmnDlRbAllocInfo *allocInfo,
638 static Void rgSCHCmnDlAdd2NonSchdRetxLst ARGS((
639 RgSchCmnDlRbAllocInfo *allocInfo,
641 RgSchDlHqProcCb *proc
643 static S16 rgSCHCmnDlAlloc2CwTxRetxRb ARGS((
647 RgSchDlHqTbCb *reTxTb,
652 static S16 rgSCHCmnDlAlloc2CwTxRb ARGS((
656 RgSchDlHqProcCb *proc,
661 static S16 rgSCHCmnDlAlloc1CwTxRb ARGS((
665 RgSchDlHqTbCb *tbInfo,
671 static Void rgSCHCmnFillHqPTb ARGS((
673 RgSchDlRbAlloc *rbAllocInfo,
679 static Void rgSCHCmnDlGetBestFitHole ARGS((
682 uint32_t *crntAllocMask,
685 uint8_t *allocNumRbs,
688 #ifdef RGSCH_SPS_UNUSED
689 static uint32_t rgSCHCmnGetRaType1Mask ARGS((
695 static uint32_t rgSCHCmnGetRaType0Mask ARGS((
699 static uint32_t rgSCHCmnGetRaType2Mask ARGS((
705 Bool rgSCHCmnRetxAllocAvoid ARGS((
708 RgSchDlHqProcCb *proc
711 uint16_t rgSCHCmnGetSiSetId ARGS((
714 uint16_t minPeriodicity
//TODO_SID: Currently table is only for 100 Prbs. Need to modify wrt VRBG table 8.1.5.2.1-1 V5G_213
/* 5GTF transport-block sizes indexed by MCS (0..MAX_5GTF_MCS-1);
 * presumably in bits -- confirm against users of this table. */
uint32_t rgSch5gtfTbSzTbl[MAX_5GTF_MCS] =
{1864, 5256, 8776, 13176, 17576, 21976, 26376, 31656, 35176, 39576, 43976, 47496, 52776, 59376, 66392};
/* 5GTF debug/statistics counters. Each is incremented along the UL/DL
 * scheduling code path its name describes (elsewhere in the scheduler)
 * and is read only for diagnostics. */
uint32_t g5gtfTtiCnt = 0;
uint32_t gUl5gtfSrRecv = 0;
uint32_t gUl5gtfBsrRecv = 0;
uint32_t gUl5gtfUeSchPick = 0;
uint32_t gUl5gtfPdcchSchd = 0;
uint32_t gUl5gtfAllocAllocated = 0;
uint32_t gUl5gtfUeRbAllocDone = 0;
uint32_t gUl5gtfUeRmvFnlzZeroBo = 0;
uint32_t gUl5gtfUeFnlzReAdd = 0;
uint32_t gUl5gtfPdcchSend = 0;
uint32_t gUl5gtfRbAllocFail = 0;
uint32_t ul5gtfsidUlMarkUl = 0;
uint32_t ul5gtfsidDlSchdPass = 0;
uint32_t ul5gtfsidDlAlreadyMarkUl = 0;
uint32_t ul5gtfTotSchdCnt = 0;
/* CQI Offset Index to Beta CQI Offset value mapping,
 * stored as parts per 1000. Reserved is set to 0
 * (indices 0 and 1 here).
 * Refer 36.213 sec 8.6.3 Tbl 8.6.3-3 */
uint32_t rgSchCmnBetaCqiOffstTbl[16] = {0, 0, 1125,
1250, 1375, 1625, 1750, 2000, 2250, 2500, 2875,
3125, 3500, 4000, 5000, 6250};
/* HARQ-ACK offset index to beta HARQ-ACK offset value mapping, stored as
 * parts per 1000; the last index is reserved (0). Presumably 36.213
 * sec 8.6.3 Tbl 8.6.3-1 -- confirm table number. */
uint32_t rgSchCmnBetaHqOffstTbl[16] = {2000, 2500, 3125,
4000, 5000, 6250, 8000,10000, 12625, 15875, 20000,
31000, 50000,80000,126000,0};
748 uint32_t rgSchCmnBetaRiOffstTbl[16] = {1250, 1625, 2000,
749 2500, 3125, 4000, 5000, 6250, 8000, 10000,12625,
/* 3-bit differential CQI offset level to signed CQI delta; indices 4..7
 * decode to negative offsets (two's-complement-style encoding). */
S8 rgSchCmnDlCqiDiffOfst[8] = {0, 1, 2, 3, -4, -3, -2, -1};
/* Include CRS REs while calculating Efficiency */
/* Both tables appear to be indexed by antenna-port count, with valid
 * entries only at 1, 2 and 4 (entries 0 and 3 are zero placeholders)
 * -- confirm at call sites. */
const static uint8_t rgSchCmnAntIdx[5] = {0,0,1,0,2};
const static uint8_t rgSchCmnNumResForCrs[5] = {0,6,12,0,16};
/* Diagnostic counter of CFI switch events (zero-initialized BSS;
 * updated elsewhere in the scheduler). */
uint32_t cfiSwitchCnt ;
/* Aperiodic CQI delta tables -- names suggest UE-selected vs
 * eNB-configured subband reporting modes; TODO confirm against the
 * users of these tables. */
S8 rgSchCmnApUeSelDiffCqi[4] = {1, 2, 3, 4};
S8 rgSchCmnApEnbConfDiffCqi[4] = {0, 1, 2, -1};
766 typedef struct rgSchCmnDlUeDciFrmtOptns
768 TfuDciFormat spfcDciFrmt; /* TM(Transmission Mode) specific DCI format.
769 * Search space : UE Specific by C-RNTI only. */
770 uint8_t spfcDciRAType; /* Resource Alloctn(RA) type for spfcDciFrmt */
771 TfuDciFormat prfrdDciFrmt; /* Preferred DCI format among the available
772 * options for TD (Transmit Diversity) */
773 uint8_t prfrdDciRAType; /* Resource Alloctn(RA) type for prfrdDciFrmt */
774 }RgSchCmnDlUeDciFrmtOptns;
777 /* DCI Format options for each Transmission Mode */
778 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[7] = {
779 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
780 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
781 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
782 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
783 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
784 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
785 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
789 /* DCI Format options for each Transmission Mode */
790 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[9] = {
791 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
792 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
793 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
794 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
795 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
796 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
797 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
802 typedef struct rgSchCmnDlImcsTbl
804 uint8_t modOdr; /* Modulation Order */
805 uint8_t iTbs; /* ITBS */
806 }RgSchCmnDlImcsTbl[29];
808 const struct rgSchCmnMult235Info
810 uint8_t match; /* Closest number satisfying 2^a.3^b.5^c, with a bias
811 * towards the smaller number */
812 uint8_t prvMatch; /* Closest number not greater than array index
813 * satisfying 2^a.3^b.5^c */
814 } rgSchCmnMult235Tbl[110+1] = {
816 {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {6, 6}, {8, 8},
817 {9, 9}, {10, 10}, {10, 10}, {12, 12}, {12, 12}, {15, 12}, {15, 15},
818 {16, 16}, {16, 16}, {18, 18}, {18, 18}, {20, 20}, {20, 20}, {20, 20},
819 {24, 20}, {24, 24}, {25, 25}, {25, 25}, {27, 27}, {27, 27}, {30, 27},
820 {30, 30}, {30, 30}, {32, 32}, {32, 32}, {32, 32}, {36, 32}, {36, 36},
821 {36, 36}, {36, 36}, {40, 36}, {40, 40}, {40, 40}, {40, 40}, {45, 40},
822 {45, 40}, {45, 45}, {45, 45}, {48, 45}, {48, 48}, {48, 48}, {50, 50},
823 {50, 50}, {50, 50}, {54, 50}, {54, 54}, {54, 54}, {54, 54}, {54, 54},
824 {60, 54}, {60, 54}, {60, 60}, {60, 60}, {60, 60}, {64, 60}, {64, 64},
825 {64, 64}, {64, 64}, {64, 64}, {64, 64}, {72, 64}, {72, 64}, {72, 64},
826 {72, 72}, {72, 72}, {75, 72}, {75, 75}, {75, 75}, {75, 75}, {80, 75},
827 {80, 75}, {80, 80}, {81, 81}, {81, 81}, {81, 81}, {81, 81}, {81, 81},
828 {90, 81}, {90, 81}, {90, 81}, {90, 81}, {90, 90}, {90, 90}, {90, 90},
829 {90, 90}, {96, 90}, {96, 90}, {96, 96}, {96, 96}, {96, 96}, {100, 96},
830 {100, 100}, {100, 100}, {100, 100}, {100, 100}, {100, 100}, {108, 100},
831 {108, 100}, {108, 100}, {108, 108}, {108, 108}, {108, 108}
/* BI table from 36.321 Table 7.2.1: Backoff Indicator index to backoff
 * value (presumably milliseconds -- confirm); RG_SCH_CMN_NUM_BI_VAL
 * entries. */
const static S16 rgSchCmnBiTbl[RG_SCH_CMN_NUM_BI_VAL] = {
0, 10, 20, 30,40,60,80,120,160,240,320,480,960};
838 RgSchCmnUlCqiInfo rgSchCmnUlCqiTbl[RG_SCH_CMN_UL_NUM_CQI] = {
840 {RGSCH_CMN_QM_CQI_1,RGSCH_CMN_UL_EFF_CQI_1 },
841 {RGSCH_CMN_QM_CQI_2,RGSCH_CMN_UL_EFF_CQI_2 },
842 {RGSCH_CMN_QM_CQI_3,RGSCH_CMN_UL_EFF_CQI_3 },
843 {RGSCH_CMN_QM_CQI_4,RGSCH_CMN_UL_EFF_CQI_4 },
844 {RGSCH_CMN_QM_CQI_5,RGSCH_CMN_UL_EFF_CQI_5 },
845 {RGSCH_CMN_QM_CQI_6,RGSCH_CMN_UL_EFF_CQI_6 },
846 {RGSCH_CMN_QM_CQI_7,RGSCH_CMN_UL_EFF_CQI_7 },
847 {RGSCH_CMN_QM_CQI_8,RGSCH_CMN_UL_EFF_CQI_8 },
848 {RGSCH_CMN_QM_CQI_9,RGSCH_CMN_UL_EFF_CQI_9 },
849 {RGSCH_CMN_QM_CQI_10,RGSCH_CMN_UL_EFF_CQI_10 },
850 {RGSCH_CMN_QM_CQI_11,RGSCH_CMN_UL_EFF_CQI_11 },
851 {RGSCH_CMN_QM_CQI_12,RGSCH_CMN_UL_EFF_CQI_12 },
852 {RGSCH_CMN_QM_CQI_13,RGSCH_CMN_UL_EFF_CQI_13 },
853 {RGSCH_CMN_QM_CQI_14,RGSCH_CMN_UL_EFF_CQI_14 },
854 {RGSCH_CMN_QM_CQI_15,RGSCH_CMN_UL_EFF_CQI_15 },
858 /* This table maps a (delta_offset * 2 + 2) to a (beta * 8)
859 * where beta is 10^-(delta_offset/10) rounded off to nearest 1/8
861 static uint16_t rgSchCmnUlBeta8Tbl[29] = {
862 6, RG_SCH_CMN_UL_INVALID_BETA8, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23,
863 25, 28, 32, RG_SCH_CMN_UL_INVALID_BETA8, 40, RG_SCH_CMN_UL_INVALID_BETA8,
864 50, RG_SCH_CMN_UL_INVALID_BETA8, 64, RG_SCH_CMN_UL_INVALID_BETA8, 80,
865 RG_SCH_CMN_UL_INVALID_BETA8, 101, RG_SCH_CMN_UL_INVALID_BETA8, 127,
866 RG_SCH_CMN_UL_INVALID_BETA8, 160
/* QCI (QoS Class Identifier) to service priority mapping; the array
 * index is the QCI value itself. */
static uint8_t rgSchCmnDlQciPrio[RG_SCH_CMN_MAX_QCI] = RG_SCH_CMN_QCI_TO_PRIO;
/* The configuration is efficiency measured per 1024 REs. */
/* The first element stands for when CQI is not known */
/* This table is used to translate CQI to its corresponding */
/* allocation parameters. These are currently from 36.213 */
/* Just this table needs to be edited for modifying */
/* the resource allocation behaviour */
/* ADD CQI to MCS mapping correction
 * single dimensional array is replaced by 2 dimensions for different CFI*/
/* Rows: CFI 0..3; columns: CQI 0..15 (column 0 = CQI unknown). */
static uint16_t rgSchCmnCqiPdschEff[4][16] = {RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI2,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI3};
/* Two-layer (spatial multiplexing) variant of the CQI-to-PDSCH
 * efficiency table; rows: CFI 0..3, columns: CQI 0..15. */
static uint16_t rgSchCmn2LyrCqiPdschEff[4][16] = {RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI2, RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI3};
/* This configuration determines the translation of a UE's CQI to its */
/* PDCCH coding efficiency. This may be edited based on the installation */
/* DL redundancy-version sequence, indexed by (re)transmission number. */
static uint8_t rgSchCmnDlRvTbl[4] = {0, 2, 3, 1}; /* RVIdx sequence is corrected*/
/* Indexed by [DciFrmt].
 * Considering the following definition in determining the dciFrmt index.
 */
/* DCI payload sizes, one entry per DCI format index; zero-initialized
 * here and presumably computed at cell configuration -- confirm. */
static uint16_t rgSchCmnDciFrmtSizes[10];
/* CQI to PDCCH coding-efficiency mapping (index = CQI 0..15;
 * presumably per 1024 REs like the PDSCH tables -- confirm). */
static uint16_t rgSchCmnCqiPdcchEff[16] = RG_SCH_CMN_CQI_TO_PDCCH_EFF;
914 RgSchTddUlDlSubfrmTbl rgSchTddUlDlSubfrmTbl = {
915 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME},
916 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
917 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
918 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
919 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
920 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
921 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME}
926 uint8_t rgSchTddSpsDlMaxRetxTbl[RGSCH_MAX_TDD_UL_DL_CFG] = {
938 /* Special Subframes in OFDM symbols */
939 /* ccpu00134197-MOD-Correct the number of symbols */
940 RgSchTddSplSubfrmInfoTbl rgSchTddSplSubfrmInfoTbl = {
944 {11, 1, 1, 10, 1, 1},
952 /* PHICH 'm' value Table */
953 RgSchTddPhichMValTbl rgSchTddPhichMValTbl = {
954 {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
955 {0, 1, 0, 0, 1, 0, 1, 0, 0, 1},
956 {0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
957 {1, 0, 0, 0, 0, 0, 0, 0, 1, 1},
958 {0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
959 {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
960 {1, 1, 0, 0, 0, 1, 1, 0, 0, 1}
963 /* PHICH 'K' value Table */
964 RgSchTddKPhichTbl rgSchTddKPhichTbl = {
965 {0, 0, 4, 7, 6, 0, 0, 4, 7, 6},
966 {0, 0, 4, 6, 0, 0, 0, 4, 6, 0},
967 {0, 0, 6, 0, 0, 0, 0, 6, 0, 0},
968 {0, 0, 6, 6, 6, 0, 0, 0, 0, 0},
969 {0, 0, 6, 6, 0, 0, 0, 0, 0, 0},
970 {0, 0, 6, 0, 0, 0, 0, 0, 0, 0},
971 {0, 0, 4, 6, 6, 0, 0, 4, 7, 0}
974 /* Uplink association index 'K' value Table */
975 RgSchTddUlAscIdxKDashTbl rgSchTddUlAscIdxKDashTbl = {
976 {0, 0, 6, 4, 0, 0, 0, 6, 4, 0},
977 {0, 0, 4, 0, 0, 0, 0, 4, 0, 0},
978 {0, 0, 4, 4, 4, 0, 0, 0, 0, 0},
979 {0, 0, 4, 4, 0, 0, 0, 0, 0, 0},
980 {0, 0, 4, 0, 0, 0, 0, 0, 0, 0},
981 {0, 0, 7, 7, 5, 0, 0, 7, 7, 0}
985 /* PUSCH 'K' value Table */
986 RgSchTddPuschTxKTbl rgSchTddPuschTxKTbl = {
987 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
988 {0, 6, 0, 0, 4, 0, 6, 0, 0, 4},
989 {0, 0, 0, 4, 0, 0, 0, 0, 4, 0},
990 {4, 0, 0, 0, 0, 0, 0, 0, 4, 4},
991 {0, 0, 0, 0, 0, 0, 0, 0, 4, 4},
992 {0, 0, 0, 0, 0, 0, 0, 0, 4, 0},
993 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
996 /* PDSCH to PUCCH Table for DL Harq Feed back. Based on the
997 Downlink association set index 'K' table */
998 uint8_t rgSchTddPucchTxTbl[7][10] = {
999 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1000 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1001 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1002 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1003 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1004 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1005 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1008 /* Table to fetch the next DL sf idx for applying the
1009 new CFI. The next Dl sf Idx at which the new CFI
1010 is applied is always the starting Sf of the next ACK/NACK
1013 Ex: In Cfg-2, sf4 and sf9 are the only subframes at which
1014 a new ACK/NACK bundle of DL subframes can start
1016 D S U D D D S U D D D S U D D D S U D D
1019 dlSf Array for Cfg-2:
1020 sfNum: 0 1 3 4 5 6 8 9 0 1 3 4 5 6 8 9
sfIdx: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1023 If CFI changes at sf0, nearest DL SF bundle >= 4 TTI is sf4
1024 So at sf4 the new CFI can be applied. To arrive at sf4 from
1025 sf0, the sfIdx has to be increased by 3 */
1027 uint8_t rgSchTddPdcchSfIncTbl[7][10] = {
1028 /* A/N Bundl: 0,1,5,6*/ {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
1029 /* A/N Bundl: 0,4,5,9*/ {2, 2, 0, 0, 3, 2, 2, 0, 0, 3},
1030 /* A/N Bundl: 4,9*/ {3, 6, 0, 5, 4, 3, 6, 0, 5, 4},
1031 /* A/N Bundl: 1,7,9*/ {4, 3, 0, 0, 0, 4, 5, 4, 6, 5},
1032 /* A/N Bundl: 0,6*/ {4, 3, 0, 0, 6, 5, 4, 7, 6, 5},
1033 /* A/N Bundl: 9*/ {8, 7, 0, 6, 5, 4, 12, 11, 10, 9},
1034 /* A/N Bundl: 0,1,5,6,9*/ {2, 1, 0, 0, 0, 2, 2, 0, 0, 3}
1038 /* combine compilation fixes */
1040 /* subframe offset values to be used when twoIntervalsConfig is enabled in UL
1042 RgSchTddSfOffTbl rgSchTddSfOffTbl = {
1043 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1044 {0, 0, 1, -1, 0, 0, 0, 1, -1, 0},
1045 {0, 0, 5, 0, 0, 0, 0, -5, 0, 0},
1046 {0, 0, 1, 1, -2, 0, 0, 0, 0, 0},
1047 {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
1048 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1049 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1053 /* Table to determine when uplink SPS configured grants should
* explicitly be reserved in a subframe. When entries are same
1055 * as that of Msg3SubfrmTbl, indicates competition with msg3.
1056 * As of now, this is same as Msg3SubfrmTbl (leaving out uldlcfg 2),
1057 * except that all 255s are now zeros. */
1058 RgSchTddSpsUlRsrvTbl rgSchTddSpsUlRsrvTbl = {
1059 {0, 0, 0, 6, 8, 0, 0, 0, 6, 8},
1060 {0, 0, 6, 9, 0, 0, 0, 6, 9, 0},
1061 {0, 0, 10, 0, 0, 0, 0, 10, 0, 0},
1062 {0, 0, 0, 0, 8, 0, 7, 7, 14, 0},
1063 {0, 0, 0, 9, 0, 0, 7, 15, 0, 0},
1064 {0, 0, 10, 0, 0, 0, 16, 0, 0, 0},
1065 {0, 0, 0, 0, 8, 0, 0, 0, 9, 0}
1068 /* Inverse DL Assoc Set index Table */
1069 RgSchTddInvDlAscSetIdxTbl rgSchTddInvDlAscSetIdxTbl = {
1070 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1071 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1072 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1073 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1074 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1075 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1076 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1079 #endif /* (LTEMAC_SPS ) */
/* Number of Uplink subframes Table: UL subframes per 10 ms radio frame,
 * indexed by TDD UL/DL configuration 0..6. */
static uint8_t rgSchTddNumUlSf[] = {6, 4, 2, 3, 2, 1, 5};
/* Uplink HARQ processes Table, indexed by TDD UL/DL configuration 0..6.
 * NOTE(review): the original comment said "Downlink", but the table name
 * and contents are for UL -- label corrected. */
RgSchTddUlNumHarqProcTbl rgSchTddUlNumHarqProcTbl = { 7, 4, 2, 3, 2, 1, 6};
/* Downlink HARQ processes Table, indexed by TDD UL/DL configuration 0..6.
 * NOTE(review): the original comment said "Uplink", but the table name
 * and contents are for DL -- label corrected. */
RgSchTddDlNumHarqProcTbl rgSchTddDlNumHarqProcTbl = { 4, 7, 10, 9, 12, 15, 6};
1090 /* Downlink association index set 'K' value Table */
1091 RgSchTddDlAscSetIdxKTbl rgSchTddDlAscSetIdxKTbl = {
1092 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1094 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1096 { {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}} },
1098 { {0, {0}}, {0, {0}}, {3, {7, 6, 11}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1100 { {0, {0}}, {0, {0}}, {4, {12, 8, 7, 11}}, {4, {6, 5, 4, 7}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1102 { {0, {0}}, {0, {0}}, {9, {13, 12, 9, 8, 7, 5, 4, 11, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1104 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1107 /* ccpu132282-ADD-the table rgSchTddDlAscSetIdxKTbl is rearranged in
1108 * decreasing order of Km, this is used to calculate the NCE used for
1109 * calculating N1Pucch Resource for Harq*/
1110 RgSchTddDlAscSetIdxKTbl rgSchTddDlHqPucchResCalTbl = {
1111 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1113 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1115 { {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}} },
1117 { {0, {0}}, {0, {0}}, {3, {11, 7, 6}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1119 { {0, {0}}, {0, {0}}, {4, {12, 11, 8, 7}}, {4, {7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1121 { {0, {0}}, {0, {0}}, {9, {13, 12, 11, 9, 8, 7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1123 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
/* Minimum number of Ack/Nack feedback information to be
 stored for each UL-DL configuration (indexed by TDD cfg 0..6) */
RgSchTddANFdbkMapTbl rgSchTddANFdbkMapTbl = {4, 4, 2, 3, 2, 1, 5};
1130 /* Uplink switch points and number of UL subframes Table */
1131 RgSchTddMaxUlSubfrmTbl rgSchTddMaxUlSubfrmTbl = {
1132 {2,3,3}, {2,2,2}, {2,1,1}, {1,3,0}, {1,2,0}, {1,1,0}, {2,3,2}
1135 /* Uplink switch points and number of DL subframes Table */
1136 RgSchTddMaxDlSubfrmTbl rgSchTddMaxDlSubfrmTbl = {
1137 {2,2,2}, {2,3,3}, {2,4,4}, {1,7,0}, {1,8,0}, {1,9,0}, {2,2,3}
1140 /* Number of UL subframes present before a particular subframe */
1141 RgSchTddNumUlSubfrmTbl rgSchTddNumUlSubfrmTbl = {
1142 {0, 0, 1, 2, 3, 3, 3, 4, 5, 6},
1143 {0, 0, 1, 2, 2, 2, 2, 3, 4, 4},
1144 {0, 0, 1, 1, 1, 1, 1, 2, 2, 2},
1145 {0, 0, 1, 2, 3, 3, 3, 3, 3, 3},
1146 {0, 0, 1, 2, 2, 2, 2, 2, 2, 2},
1147 {0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
1148 {0, 0, 1, 2, 3, 3, 3, 4, 5, 5}
1151 /* Number of DL subframes present till a particular subframe */
1152 RgSchTddNumDlSubfrmTbl rgSchTddNumDlSubfrmTbl = {
1153 {1, 2, 2, 2, 2, 3, 4, 4, 4, 4},
1154 {1, 2, 2, 2, 3, 4, 5, 5, 5, 6},
1155 {1, 2, 2, 3, 4, 5, 6, 6, 7, 8},
1156 {1, 2, 2, 2, 2, 3, 4, 5, 6, 7},
1157 {1, 2, 2, 2, 3, 4, 5, 6, 7, 8},
1158 {1, 2, 2, 3, 4, 5, 6, 7, 8, 9},
1159 {1, 2, 2, 2, 2, 3, 4, 4, 4, 5}
1163 /* Nearest possible UL subframe Index from UL subframe
1164 * DL Index < UL Index */
1165 RgSchTddLowDlSubfrmIdxTbl rgSchTddLowDlSubfrmIdxTbl = {
1166 {0, 1, 1, 1, 1, 5, 6, 6, 6, 6},
1167 {0, 1, 1, 1, 4, 5, 6, 6, 6, 9},
1168 {0, 1, 1, 3, 4, 5, 6, 6, 8, 9},
1169 {0, 1, 1, 1, 1, 5, 6, 7, 8, 9},
1170 {0, 1, 1, 1, 4, 5, 6, 7, 8, 9},
1171 {0, 1, 1, 3, 4, 5, 6, 7, 8, 9},
1172 {0, 1, 1, 1, 1, 5, 6, 6, 6, 9}
1175 /* Nearest possible DL subframe Index from UL subframe
1176 * DL Index > UL Index
1177 * 10 represents Next SFN low DL Idx */
1178 RgSchTddHighDlSubfrmIdxTbl rgSchTddHighDlSubfrmIdxTbl = {
1179 {0, 1, 5, 5, 5, 5, 6, 10, 10, 10},
1180 {0, 1, 4, 4, 4, 5, 6, 9, 9, 9},
1181 {0, 1, 3, 3, 4, 5, 6, 8, 8, 9},
1182 {0, 1, 5, 5, 5, 5, 6, 7, 8, 9},
1183 {0, 1, 4, 4, 4, 5, 6, 7, 8, 9},
1184 {0, 1, 3, 3, 4, 5, 6, 7, 8, 9},
1185 {0, 1, 5, 5, 5, 5, 6, 9, 9, 9}
1188 /* RACH Message3 related information */
1189 RgSchTddMsg3SubfrmTbl rgSchTddMsg3SubfrmTbl = {
1190 {7, 6, 255, 255, 255, 7, 6, 255, 255, 255},
1191 {7, 6, 255, 255, 8, 7, 6, 255, 255, 8},
1192 {7, 6, 255, 9, 8, 7, 6, 255, 9, 8},
1193 {12, 11, 255, 255, 255, 7, 6, 6, 6, 13},
1194 {12, 11, 255, 255, 8, 7, 6, 6, 14, 13},
1195 {12, 11, 255, 9, 8, 7, 6, 15, 14, 13},
1196 {7, 6, 255, 255, 255, 7, 6, 255, 255, 8}
1199 /* ccpu00132341-DEL Removed rgSchTddRlsDlSubfrmTbl and used Kset table for
1200 * releasing DL HARQs */
1202 /* DwPTS Scheduling Changes Start */
1203 /* Provides the number of Cell Reference Signals in DwPTS
/* Number of Cell-specific Reference Signal (CRS) resource elements in
 * the DwPTS region, indexed as [special-subframe-cfg group][antenna
 * port index] (per the inline comments: row 0 covers special subframe
 * configs 1,2,3,6,7,8; row 1 covers config 4).
 * NOTE(review): the column values double (4/8/16, 6/12/20 nearly so),
 * suggesting columns correspond to 1, 2 and 4 antenna ports -- confirm
 * at the point of use. */
1205 static uint8_t rgSchCmnDwptsCrs[2][3] = {/* [Spl Sf cfg][Ant Port] */
1206 {4, 8, 16}, /* Spl Sf cfg 1,2,3,6,7,8 */
1207 {6, 12, 20}, /* Spl Sf cfg 4 */
/* Signed per-entry ITBS adjustment (delta) applied when scheduling in
 * the DwPTS special subframe; the 9 values come from the
 * RG_SCH_DWPTS_ITBS_ADJ macro defined elsewhere (not visible here). */
1210 static S8 rgSchCmnSplSfDeltaItbs[9] = RG_SCH_DWPTS_ITBS_ADJ;
1211 /* DwPTS Scheduling Changes End */
/* Maps the 6-bit Buffer Status Report (BSR) index (0..63) to a buffer
 * size in bytes. The values match 3GPP TS 36.321 Table 6.1.3.1-1
 * (Buffer size levels for BSR), where index 63 means "> 150000 bytes";
 * this implementation represents that open-ended bucket with the
 * chosen cap 220000 -- an implementation decision, not a spec value. */
1215 static uint32_t rgSchCmnBsrTbl[64] = {
1216 0, 10, 12, 14, 17, 19, 22, 26,
1217 31, 36, 42, 49, 57, 67, 78, 91,
1218 107, 125, 146, 171, 200, 234, 274, 321,
1219 376, 440, 515, 603, 706, 826, 967, 1132,
1220 1326, 1552, 1817, 2127, 2490, 2915, 3413, 3995,
1221 4677, 5476, 6411, 7505, 8787, 10287, 12043, 14099,
1222 16507, 19325, 22624, 26487, 31009, 36304, 42502, 49759,
1223 58255, 68201, 79846, 93479, 109439, 128125, 150000, 220000
/* Extended BSR index (0..63) to buffer size in bytes, matching 3GPP TS
 * 36.321 Table 6.1.3.1-2 (extended buffer size levels). Index 63 means
 * "> 3000000 bytes" in the spec; this implementation represents that
 * open-ended bucket with the chosen cap 3100000. */
1226 static uint32_t rgSchCmnExtBsrTbl[64] = {
1227 0, 10, 13, 16, 19, 23, 29, 35,
1228 43, 53, 65, 80, 98, 120, 147, 181,
1229 223, 274, 337, 414, 509, 625, 769, 945,
1230 1162, 1429, 1757, 2161, 2657, 3267, 4017, 4940,
1231 6074, 7469, 9185, 11294, 13888, 17077, 20999, 25822,
1232 31752, 39045, 48012, 59039, 72598, 89272, 109774, 134986,
1233 165989, 204111, 250990, 308634, 379519, 466683, 573866, 705666,
1234 867737, 1067031, 1312097, 1613447, 1984009, 2439678, 3000000, 3100000
/* UL CQI -> TBS-index mapping, dimensioned by cyclic prefix type and
 * UL CQI value (per the macro names). Declared without an initializer:
 * it is populated at runtime elsewhere (not visible in this chunk). */
1237 uint8_t rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_MAX_CP][RG_SCH_CMN_UL_NUM_CQI];
1239 RgSchTbSzTbl rgTbSzTbl = {
1241 {16, 32, 56, 88, 120, 152, 176, 208, 224, 256, 288, 328, 344, 376, 392, 424, 456, 488, 504, 536, 568, 600, 616, 648, 680, 712, 744, 776, 776, 808, 840, 872, 904, 936, 968, 1000, 1032, 1032, 1064, 1096, 1128, 1160, 1192, 1224, 1256, 1256, 1288, 1320, 1352, 1384, 1416, 1416, 1480, 1480, 1544, 1544, 1608, 1608, 1608, 1672, 1672, 1736, 1736, 1800, 1800, 1800, 1864, 1864, 1928, 1928, 1992, 1992, 2024, 2088, 2088, 2088, 2152, 2152, 2216, 2216, 2280, 2280, 2280, 2344, 2344, 2408, 2408, 2472, 2472, 2536, 2536, 2536, 2600, 2600, 2664, 2664, 2728, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 2984, 2984, 3112},
1242 {24, 56, 88, 144, 176, 208, 224, 256, 328, 344, 376, 424, 456, 488, 520, 568, 600, 632, 680, 712, 744, 776, 808, 872, 904, 936, 968, 1000, 1032, 1064, 1128, 1160, 1192, 1224, 1256, 1288, 1352, 1384, 1416, 1416, 1480, 1544, 1544, 1608, 1608, 1672, 1736, 1736, 1800, 1800, 1864, 1864, 1928, 1992, 1992, 2024, 2088, 2088, 2152, 2152, 2216, 2280, 2280, 2344, 2344, 2408, 2472, 2472, 2536, 2536, 2600, 2600, 2664, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008},
1243 {32, 72, 144, 176, 208, 256, 296, 328, 376, 424, 472, 520, 568, 616, 648, 696, 744, 776, 840, 872, 936, 968, 1000, 1064, 1096, 1160, 1192, 1256, 1288, 1320, 1384, 1416, 1480, 1544, 1544, 1608, 1672, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2024, 2088, 2088, 2152, 2216, 2216, 2280, 2344, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2664, 2728, 2792, 2856, 2856, 2856, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968},
1244 {40, 104, 176, 208, 256, 328, 392, 440, 504, 568, 616, 680, 744, 808, 872, 904, 968, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 1992, 2024, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6200, 6456, 6456},
1245 {56, 120, 208, 256, 328, 408, 488, 552, 632, 696, 776, 840, 904, 1000, 1064, 1128, 1192, 1288, 1352, 1416, 1480, 1544, 1608, 1736, 1800, 1864, 1928, 1992, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2600, 2664, 2728, 2792, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992},
1246 {72, 144, 224, 328, 424, 504, 600, 680, 776, 872, 968, 1032, 1128, 1224, 1320, 1384, 1480, 1544, 1672, 1736, 1864, 1928, 2024, 2088, 2216, 2280, 2344, 2472, 2536, 2664, 2728, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4264, 4392, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9528},
1247 {328, 176, 256, 392, 504, 600, 712, 808, 936, 1032, 1128, 1224, 1352, 1480, 1544, 1672, 1736, 1864, 1992, 2088, 2216, 2280, 2408, 2472, 2600, 2728, 2792, 2984, 2984, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3880, 4008, 4136, 4136, 4264, 4392, 4584, 4584, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448},
1248 {104, 224, 328, 472, 584, 712, 840, 968, 1096, 1224, 1320, 1480, 1608, 1672, 1800, 1928, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536},
1249 {120, 256, 392, 536, 680, 808, 968, 1096, 1256, 1384, 1544, 1672, 1800, 1928, 2088, 2216, 2344, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15264},
1250 {136, 296, 456, 616, 776, 936, 1096, 1256, 1416, 1544, 1736, 1864, 2024, 2216, 2344, 2536, 2664, 2856, 2984, 3112, 3368, 3496, 3624, 3752, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 5160, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568},
1251 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080},
1252 {176, 376, 584, 776, 1000, 1192, 1384, 1608, 1800, 2024, 2216, 2408, 2600, 2792, 2984, 3240, 3496, 3624, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152},
1253 {208, 440, 680, 904, 1128, 1352, 1608, 1800, 2024, 2280, 2472, 2728, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 23688, 24496, 24496, 24496, 24496, 25456},
1254 {224, 488, 744, 1000, 1256, 1544, 1800, 2024, 2280, 2536, 2856, 3112, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 26416, 27376, 27376, 27376, 27376, 28336, 28336},
1255 {256, 552, 840, 1128, 1416, 1736, 1992, 2280, 2600, 2856, 3112, 3496, 3752, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 5992, 6200, 6456, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704},
1256 {280, 600, 904, 1224, 1544, 1800, 2152, 2472, 2728, 3112, 3368, 3624, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 6200, 6456, 6712, 6968, 7224, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008},
1257 {328, 632, 968, 1288, 1608, 1928, 2280, 2600, 2984, 3240, 3624, 3880, 4264, 4584, 4968, 5160, 5544, 5992, 6200, 6456, 6712, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 35160},
1258 {336, 696, 1064, 1416, 1800, 2152, 2536, 2856, 3240, 3624, 4008, 4392, 4776, 5160, 5352, 5736, 6200, 6456, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 39232},
1259 {376, 776, 1160, 1544, 1992, 2344, 2792, 3112, 3624, 4008, 4392, 4776, 5160, 5544, 5992, 6200, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14112, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816},
1260 {408, 840, 1288, 1736, 2152, 2600, 2984, 3496, 3880, 4264, 4776, 5160, 5544, 5992, 6456, 6968, 7224, 7736, 8248, 8504, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19080, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888},
1261 {440, 904, 1384, 1864, 2344, 2792, 3240, 3752, 4136, 4584, 5160, 5544, 5992, 6456, 6968, 7480, 7992, 8248, 8760, 9144, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 20616, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 48936, 51024, 51024, 51024},
1262 {488, 1000, 1480, 1992, 2472, 2984, 3496, 4008, 4584, 4968, 5544, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 9912, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056},
1263 {520, 1064, 1608, 2152, 2664, 3240, 3752, 4264, 4776, 5352, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 10296, 10680, 11448, 11832, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 19080, 19080, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256},
1264 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776},
1265 {584, 1192, 1800, 2408, 2984, 3624, 4264, 4968, 5544, 5992, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 22920, 23688, 24496, 25456, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592},
1266 {616, 1256, 1864, 2536, 3112, 3752, 4392, 5160, 5736, 6200, 6968, 7480, 8248, 8760, 9528, 10296, 10680, 11448, 12216, 12576, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592, 68808, 68808, 68808, 71112},
1267 {712, 1480, 2216, 2984, 3752, 4392, 5160, 5992, 6712, 7480, 8248, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 14688, 15264, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376}
1270 {32, 88, 152, 208, 256, 328, 376, 424, 488, 536, 600, 648, 712, 776, 808, 872, 936, 1000, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2088, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 5992, 6200},
1271 {56, 144, 208, 256, 344, 424, 488, 568, 632, 712, 776, 872, 936, 1000, 1064, 1160, 1224, 1288, 1384, 1416, 1544, 1608, 1672, 1736, 1800, 1864, 1992, 2024, 2088, 2152, 2280, 2344, 2408, 2472, 2536, 2600, 2728, 2792, 2856, 2856, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5352, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992},
1272 {72, 176, 256, 328, 424, 520, 616, 696, 776, 872, 968, 1064, 1160, 1256, 1320, 1416, 1544, 1608, 1672, 1800, 1864, 1992, 2088, 2152, 2216, 2344, 2408, 2536, 2600, 2664, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3368, 3496, 3624, 3624, 3752, 3880, 4008, 4008, 4136, 4264, 4264, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912},
1273 {104, 208, 328, 440, 568, 680, 808, 904, 1032, 1160, 1256, 1384, 1480, 1608, 1736, 1864, 1992, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2856, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4392, 4584, 4776, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12576, 12960, 12960},
1274 {120, 256, 408, 552, 696, 840, 1000, 1128, 1288, 1416, 1544, 1736, 1864, 1992, 2152, 2280, 2408, 2600, 2728, 2856, 2984, 3112, 3240, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840},
1275 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19080},
1276 {176, 392, 600, 808, 1032, 1224, 1480, 1672, 1864, 2088, 2280, 2472, 2728, 2984, 3112, 3368, 3496, 3752, 4008, 4136, 4392, 4584, 4776, 4968, 5160, 5352, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7992, 8248, 8248, 8504, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920},
1277 {224, 472, 712, 968, 1224, 1480, 1672, 1928, 2216, 2472, 2664, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376},
1278 {256, 536, 808, 1096, 1384, 1672, 1928, 2216, 2536, 2792, 3112, 3368, 3624, 3880, 4264, 4584, 4776, 4968, 5352, 5544, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576},
1279 {296, 616, 936, 1256, 1544, 1864, 2216, 2536, 2856, 3112, 3496, 3752, 4136, 4392, 4776, 5160, 5352, 5736, 5992, 6200, 6712, 6968, 7224, 7480, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160},
1280 {328, 680, 1032, 1384, 1736, 2088, 2472, 2792, 3112, 3496, 3880, 4264, 4584, 4968, 5352, 5736, 5992, 6200, 6712, 6968, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 21384, 21384, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888},
1281 {376, 776, 1192, 1608, 2024, 2408, 2792, 3240, 3624, 4008, 4392, 4776, 5352, 5736, 5992, 6456, 6968, 7224, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816},
1282 {440, 904, 1352, 1800, 2280, 2728, 3240, 3624, 4136, 4584, 4968, 5544, 5992, 6456, 6712, 7224, 7736, 8248, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 17568, 17568, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024},
1283 {488, 1000, 1544, 2024, 2536, 3112, 3624, 4136, 4584, 5160, 5736, 6200, 6712, 7224, 7736, 8248, 8760, 9144, 9912, 10296, 10680, 11448, 11832, 12216, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336},
1284 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776},
1285 {600, 1224, 1800, 2472, 3112, 3624, 4264, 4968, 5544, 6200, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 23688, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808},
1286 {632, 1288, 1928, 2600, 3240, 3880, 4584, 5160, 5992, 6456, 7224, 7736, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 68808, 71112, 71112, 71112, 71112},
1287 {696, 1416, 2152, 2856, 3624, 4392, 5160, 5736, 6456, 7224, 7992, 8760, 9528, 10296, 10680, 11448, 12216, 12960, 13536, 14688, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 71112, 73712, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 78704},
1288 {776, 1544, 2344, 3112, 4008, 4776, 5544, 6200, 7224, 7992, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 27376, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936},
1289 {840, 1736, 2600, 3496, 4264, 5160, 5992, 6968, 7736, 8504, 9528, 10296, 11064, 12216, 12960, 13536, 14688, 15264, 16416, 16992, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800},
1290 {904, 1864, 2792, 3752, 4584, 5544, 6456, 7480, 8248, 9144, 10296, 11064, 12216, 12960, 14112, 14688, 15840, 16992, 17568, 18336, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 42368, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 97896, 101840, 101840, 101840},
1291 {1000, 1992, 2984, 4008, 4968, 5992, 6968, 7992, 9144, 9912, 11064, 12216, 12960, 14112, 15264, 15840, 16992, 18336, 19080, 19848, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 31704, 32856, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136},
1292 {1064, 2152, 3240, 4264, 5352, 6456, 7480, 8504, 9528, 10680, 11832, 12960, 14112, 15264, 16416, 16992, 18336, 19080, 20616, 21384, 22920, 23688, 24496, 25456, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816},
1293 {1128, 2280, 3496, 4584, 5736, 6968, 7992, 9144, 10296, 11448, 12576, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 45352, 45352, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840,101840,101840,101840,105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496},
1294 {1192, 2408, 3624, 4968, 5992, 7224, 8504, 9912, 11064, 12216, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 42368, 43816, 45352, 46888, 46888, 48936, 51024, 51024, 52752, 52752, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208},
1295 {1256, 2536, 3752, 5160, 6200, 7480, 8760, 10296, 11448, 12576, 14112, 15264, 16416, 17568, 19080, 20616, 21384, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 43816, 43816, 45352, 46888, 48936, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040,115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208, 137792, 137792, 137792, 142248},
1296 {1480, 2984, 4392, 5992, 7480, 8760, 10296, 11832, 13536, 14688, 16416, 17568, 19080, 20616, 22152, 23688, 25456, 26416, 28336, 29296, 30576, 32856, 34008, 35160, 36696, 37888, 40576, 40576, 42368, 43816, 45352, 46888, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 63776, 63776, 66592, 68808, 68808, 71112, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 81176, 84760, 84760, 87936, 87936, 90816, 90816, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 128496, 128496, 128496, 133208, 133208, 133208, 137792, 137792, 137792, 142248, 142248, 142248, 146856, 146856,149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776}
/* UL I-MCS table: each entry maps an uplink MCS index (in table order) to
 * {modulation order, I-TBS}. Modulation orders 2/4/6 = QPSK/16QAM/64QAM;
 * the values appear to follow 3GPP TS 36.213 Table 8.6.1-1 -- TODO confirm
 * against the spec version this build targets. */
1299 RgSchUlIMcsTbl rgUlIMcsTbl = {
1300 {2, 0}, {2, 1}, {2, 2}, {2, 3}, {2, 4}, {2, 5},
1301 {2, 6}, {2, 7}, {2, 8}, {2, 9}, {2, 10},
1302 {4, 10}, {4, 11}, {4, 12}, {4, 13}, {4, 14},
1303 {4, 15}, {4, 16}, {4, 17}, {4, 18}, {4, 19},
1304 {6, 19}, {6, 20}, {6, 21}, {6, 22}, {6, 23},
1305 {6, 24}, {6, 25}, {6, 26}
/* UE category capability table. Rows are presumably indexed by UE category
 * (1..8) in order -- verify against 3GPP TS 36.306. Column meanings below. */
1307 RgSchUeCatTbl rgUeCatTbl = {
1308 /*Column1:Maximum number of bits of an UL-SCH
1309 transport block transmitted within a TTI
1311 Column2:Maximum number of bits of a DL-SCH
1312 transport block received within a TTI
1314 Column3:Total number of soft channel bits
1316 Column4:Support for 64QAM in UL
1318 Column5:Maximum number of DL-SCH transport
1319 block bits received within a TTI
1321 Column6:Maximum number of supported layers for
1322 spatial multiplexing in DL
1324 {5160, {10296,0}, 250368, FALSE, 10296, 1},
1325 {25456, {51024,0}, 1237248, FALSE, 51024, 2},
1326 {51024, {75376,0}, 1237248, FALSE, 102048, 2},
1327 {51024, {75376,0}, 1827072, FALSE, 150752, 2},
1328 {75376, {149776,0}, 3667200, TRUE, 299552, 4},
1329 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1330 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1331 {149776,{299856,0}, 35982720,TRUE, 2998560, 8}
1334 /* [ccpu00138532]-ADD-The below table stores the min HARQ RTT time
1335 in Downlink (in subframes) for TDD and FDD. Indices 0 to 6 map to TDD UL/DL config 0-6.
1336 Index 7 maps to FDD */
1337 uint8_t rgSchCmnHarqRtt[8] = {4,7,10,9,12,15,6,8};
1338 /* CFI switchover window lengths: one entry per TDD UL/DL config (indexes 0-6) plus 1 FDD entry (index 7) */
1339 uint8_t rgSchCfiSwitchOvrWinLen[] = {7, 4, 2, 3, 2, 1, 6, 8};
1341 /* EffTbl is calculated for single layer and two layers.
1342 * CqiToTbs is calculated for single layer and two layers */
/* Per-CFI DL efficiency tables for normal CP; element [0]/[1] hold the
 * single-layer / two-layer values per codeword (see rgSCHCmnDlInit, which
 * wires and fills these). */
1343 RgSchCmnTbSzEff rgSchCmnNorCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1344 RgSchCmnTbSzEff rgSchCmnNorCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1345 /* UL efficiency values for normal and extended CP (single layer only) */
1346 RgSchCmnTbSzEff rgSchCmnNorUlEff[1],rgSchCmnExtUlEff[1];
/* Per-CFI CQI -> TBS-index mapping tables for normal CP */
1347 RgSchCmnCqiToTbs rgSchCmnNorCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1348 RgSchCmnCqiToTbs rgSchCmnNorCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
/* Lookup: [layer idx][CP][CFI] -> CQI-to-TBS table (populated in rgSCHCmnDlInit) */
1349 RgSchCmnCqiToTbs *rgSchCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
/* Extended-CP counterparts of the efficiency and CQI-to-TBS tables above */
1350 RgSchCmnTbSzEff rgSchCmnExtCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1351 RgSchCmnTbSzEff rgSchCmnExtCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1352 RgSchCmnCqiToTbs rgSchCmnExtCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1353 RgSchCmnCqiToTbs rgSchCmnExtCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1354 /* CRS REs are included while calculating efficiency.
 * Lookup: [layer idx][CP][antenna config][CFI] -> efficiency table */
1355 RgSchCmnTbSzEff *rgSchCmnEffTbl[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_ANT_CONF][RG_SCH_CMN_MAX_CFI];
1356 RgSchCmnTbSzEff *rgSchCmnUlEffTbl[RG_SCH_CMN_MAX_CP];
/* NOTE(review): 'rgRaPrmblToRaFrmTbl' is defined twice below with different
 * initializers; the two definitions are presumably under mutually exclusive
 * compile-time guards (e.g. TDD vs FDD) not visible in this excerpt -- verify
 * before touching either initializer. */
1358 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3, 1};
1360 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA sub-frames from RA preamble format */
1361 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3};
/* Scheduler entry-point tables (UL/DL, DLFS, and eMTC variants); presumably
 * populated during scheduler initialization -- confirm against the init code. */
1364 RgUlSchdInits rgSchUlSchdInits;
1365 RgDlSchdInits rgSchDlSchdInits;
1366 RgDlfsSchdInits rgSchDlfsSchdInits;
1368 RgEmtcUlSchdInits rgSchEmtcUlSchdInits;
1369 RgEmtcDlSchdInits rgSchEmtcDlSchdInits;
1373 static S16 rgSCHCmnUeIdleExdThrsld ARGS((
1377 RgSchUeCb* rgSCHCmnGetHoUe ARGS((
1381 static Void rgSCHCmnDelDedPreamble ARGS((
1385 RgSchUeCb* rgSCHCmnGetPoUe ARGS((
1388 CmLteTimingInfo timingInfo
1390 static Void rgSCHCmnDelRachInfo ARGS((
1394 static S16 rgSCHCmnUlRbAllocForPoHoUe ARGS((
1400 static Void rgSCHCmnHdlHoPo ARGS((
1402 CmLListCp *raRspLst,
1403 RgSchRaReqInfo *raReq
1405 static Void rgSCHCmnAllocPoHoGrnt ARGS((
1407 CmLListCp *raRspLst,
1409 RgSchRaReqInfo *raReq
1411 static Void rgSCHCmnFillPdcchOdr2Sf ARGS((
1418 static Void rgSCHCmnDlAdd2PdcchOdrQ ARGS((
1422 static Void rgSCHCmnDlRmvFrmPdcchOdrQ ARGS((
1426 static Void rgSCHCmnUpdNxtPrchMskIdx ARGS((
1429 static Void rgSCHCmnUpdRachParam ARGS((
1432 static S16 rgSCHCmnAllocPOParam ARGS((
1438 uint8_t *prachMskIdx
1440 static Void rgSCHCmnGenPdcchOrder ARGS((
1444 static Void rgSCHCmnCfgRachDedPrm ARGS((
1449 static Void rgSCHCmnHdlUlInactUes ARGS((
1452 static Void rgSCHCmnHdlDlInactUes ARGS((
1455 static Void rgSCHCmnUlInit ARGS((Void
1457 static Void rgSCHCmnDlInit ARGS((Void
1459 static Void rgSCHCmnInitDlRbAllocInfo ARGS((
1460 RgSchCmnDlRbAllocInfo *allocInfo
1462 static Void rgSCHCmnUpdUlCompEffBsr ARGS((
1466 static Void rgSCHCmnUlSetAllUnSched ARGS((
1467 RgSchCmnUlRbAllocInfo *allocInfo
1469 static Void rgSCHCmnUlUpdSf ARGS((
1471 RgSchCmnUlRbAllocInfo *allocInfo,
1474 static Void rgSCHCmnUlHndlAllocRetx ARGS((
1476 RgSchCmnUlRbAllocInfo *allocInfo,
1481 static Void rgSCHCmnGrpPwrCntrlPucch ARGS((
1485 static Void rgSCHCmnGrpPwrCntrlPusch ARGS((
1489 static Void rgSCHCmnDelUeFrmRefreshQ ARGS((
1493 static S16 rgSCHCmnTmrExpiry ARGS((
1494 PTR cb, /* Pointer to timer control block */
1495 S16 tmrEvnt /* Timer Event */
1497 static S16 rgSCHCmnTmrProc ARGS((
1500 static Void rgSCHCmnAddUeToRefreshQ ARGS((
1505 static Void rgSCHCmnDlCcchRetx ARGS((
1507 RgSchCmnDlRbAllocInfo *allocInfo
1509 static Void rgSCHCmnUpdUeMimoInfo ARGS((
1513 RgSchCmnCell *cellSchd
1515 static Void rgSCHCmnUpdUeUlCqiInfo ARGS((
1519 RgSchCmnUe *ueSchCmn,
1520 RgSchCmnCell *cellSchd,
1524 static Void rgSCHCmnDlCcchSduRetx ARGS((
1526 RgSchCmnDlRbAllocInfo *allocInfo
1528 static Void rgSCHCmnDlCcchSduTx ARGS((
1530 RgSchCmnDlRbAllocInfo *allocInfo
1532 static S16 rgSCHCmnCcchSduAlloc ARGS((
1535 RgSchCmnDlRbAllocInfo *allocInfo
1537 static S16 rgSCHCmnCcchSduDedAlloc ARGS((
1541 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc ARGS((
1547 static Void rgSCHCmnInitVars ARGS((
1551 /*ccpu00117180 - DEL - Moved rgSCHCmnUpdVars to .x as its access is now */
1552 static Void rgSCHCmnUlRbAllocForLst ARGS((
1558 CmLListCp *nonSchdLst,
1561 static S16 rgSCHCmnUlRbAllocForUe ARGS((
1568 static Void rgSCHCmnMsg3GrntReq ARGS((
1572 RgSchUlHqProcCb *hqProc,
1573 RgSchUlAlloc **ulAllocRef,
1574 uint8_t *hqProcIdRef
1576 static Void rgSCHCmnDlCcchRarAlloc ARGS((
1579 static Void rgSCHCmnDlCcchTx ARGS((
1581 RgSchCmnDlRbAllocInfo *allocInfo
1583 static Void rgSCHCmnDlBcchPcch ARGS((
1585 RgSchCmnDlRbAllocInfo *allocInfo,
1586 RgInfSfAlloc *subfrmAlloc
1588 Bool rgSCHCmnChkInWin ARGS((
1589 CmLteTimingInfo frm,
1590 CmLteTimingInfo start,
1593 Bool rgSCHCmnChkPastWin ARGS((
1594 CmLteTimingInfo frm,
1597 static Void rgSCHCmnClcAlloc ARGS((
1600 RgSchClcDlLcCb *lch,
1602 RgSchCmnDlRbAllocInfo *allocInfo
1605 static Void rgSCHCmnClcRbAlloc ARGS((
1616 static S16 rgSCHCmnMsg4Alloc ARGS((
1619 RgSchCmnDlRbAllocInfo *allocInfo
1621 static S16 rgSCHCmnMsg4DedAlloc ARGS((
1625 static Void rgSCHCmnDlRaRsp ARGS((
1627 RgSchCmnDlRbAllocInfo *allocInfo
1629 static S16 rgSCHCmnRaRspAlloc ARGS((
1635 RgSchCmnDlRbAllocInfo *allocInfo
1637 static Void rgSCHCmnUlUeDelAllocs ARGS((
1641 static Void rgSCHCmnDlSetUeAllocLmt ARGS((
1646 static S16 rgSCHCmnDlRgrCellCfg ARGS((
1651 static Void rgSCHCmnUlAdapRetx ARGS((
1652 RgSchUlAlloc *alloc,
1653 RgSchUlHqProcCb *proc
1655 static Void rgSCHCmnUlUpdAllocRetx ARGS((
1659 static Void rgSCHCmnUlSfReTxAllocs ARGS((
1663 /* Fix: syed Adaptive Msg3 Retx crash. */
1665 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1669 RgrUeRecfg *ueRecfg,
1673 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1683 * DL RB allocation specific functions
1686 static Void rgSCHCmnDlRbAlloc ARGS((
1688 RgSchCmnDlRbAllocInfo *allocInfo
1690 static Void rgSCHCmnNonDlfsRbAlloc ARGS((
1692 RgSchCmnDlRbAllocInfo *allocInfo
1694 static S16 rgSCHCmnNonDlfsCmnRbAlloc ARGS((
1696 RgSchDlRbAlloc *cmnAllocInfo));
1699 static Void rgSCHCmnNonDlfsPbchRbAllocAdj ARGS((
1701 RgSchDlRbAlloc *cmnAllocInfo,
1702 uint8_t pbchSsRsSym,
1705 /* Added function to adjust TBSize*/
1706 static Void rgSCHCmnNonDlfsPbchTbSizeAdj ARGS((
1707 RgSchDlRbAlloc *allocInfo,
1708 uint8_t numOvrlapgPbchRb,
1709 uint8_t pbchSsRsSym,
1714 /* Added function to find num of overlapping PBCH rb*/
1715 static Void rgSCHCmnFindNumPbchOvrlapRbs ARGS((
1718 RgSchDlRbAlloc *allocInfo,
1719 uint8_t *numOvrlapgPbchRb
1722 static uint8_t rgSCHCmnFindNumAddtlRbsAvl ARGS((
1725 RgSchDlRbAlloc *allocInfo
1729 static Void rgSCHCmnFindCodeRate ARGS((
1732 RgSchDlRbAlloc *allocInfo,
1738 static Void rgSCHCmnNonDlfsMsg4Alloc ARGS((
1740 RgSchCmnMsg4RbAlloc *msg4AllocInfo,
1743 static S16 rgSCHCmnNonDlfsMsg4RbAlloc ARGS((
1749 static S16 rgSCHCmnNonDlfsUeRbAlloc ARGS((
1753 uint8_t *isDlBwAvail
1756 static uint32_t rgSCHCmnCalcRiv ARGS(( uint8_t bw,
1762 static Void rgSCHCmnUpdHqAndDai ARGS((
1763 RgSchDlHqProcCb *hqP,
1765 RgSchDlHqTbCb *tbCb,
1768 static S16 rgSCHCmnUlCalcAvailBw ARGS((
1770 RgrCellCfg *cellCfg,
1772 uint8_t *rbStartRef,
1775 static S16 rgSCHCmnDlKdashUlAscInit ARGS((
1778 static S16 rgSCHCmnDlANFdbkInit ARGS((
1781 static S16 rgSCHCmnDlNpValInit ARGS((
1784 static S16 rgSCHCmnDlCreateRachPrmLst ARGS((
1787 static S16 rgSCHCmnDlCpyRachInfo ARGS((
1789 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
1792 static S16 rgSCHCmnDlRachInfoInit ARGS((
1795 static S16 rgSCHCmnDlPhichOffsetInit ARGS((
1800 static Void rgSCHCmnFindUlCqiUlTxAnt ARGS
1806 static RgSchCmnRank rgSCHCmnComputeRank ARGS
1809 uint32_t *pmiBitMap,
1813 static RgSchCmnRank rgSCHCmnComp2TxMode3 ARGS
1818 static RgSchCmnRank rgSCHCmnComp4TxMode3 ARGS
1823 static RgSchCmnRank rgSCHCmnComp2TxMode4 ARGS
1828 static RgSchCmnRank rgSCHCmnComp4TxMode4 ARGS
1833 static uint8_t rgSCHCmnCalcWcqiFrmSnr ARGS
1840 /* comcodsepa : start */
1843 * @brief This function computes efficiency and stores in a table.
1847 * Function: rgSCHCmnCompEff
1848 * Purpose: this function computes the efficiency as number of
1849 * bytes per 1024 symbols. The CFI table is also filled
1850 * with the same information such that comparison is valid
1852 * Invoked by: Scheduler
1854 * @param[in] uint8_t noPdcchSym
1855 * @param[in] uint8_t cpType
1856 * @param[in] uint8_t txAntIdx
1857 * @param[in] RgSchCmnTbSzEff* effTbl
/* Computes the DL efficiency table for one (PDCCH symbols, CP type, antenna
 * config) combination: for each TB-size index, the average TB-size units
 * carried per 1024 REs, averaged over all RB allocation sizes, written into
 * *effTbl. Single-layer variant (uses rgTbSzTbl[0]). */
1861 static Void rgSCHCmnCompEff(uint8_t noPdcchSym,uint8_t cpType,uint8_t txAntIdx,RgSchCmnTbSzEff *effTbl)
1865 uint8_t resOfCrs; /* Effective REs occupied by CRS */
/* Symbols per RB follow from the cyclic-prefix type */
1871 case RG_SCH_CMN_NOR_CP:
1874 case RG_SCH_CMN_EXT_CP:
1878 /* Generate a log error. This case should never be executed */
1882 /* Depending on the Tx Antenna Index, deduct the
1883 * Resource elements for the CRS */
1887 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
1890 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
1893 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
1896 /* Generate a log error. This case should never be executed */
/* Usable (non-PDCCH, non-CRS) REs per RB */
1899 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
1900 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1903 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1905 /* This line computes the coding efficiency per 1024 REs */
/* NOTE(review): integer division truncates each per-RB term before the
 * averaging below -- a small systematic underestimate; confirm acceptable. */
1906 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1908 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1913 * @brief This function computes efficiency and stores in a table.
1917 * Function: rgSCHCmnCompUlEff
1918 * Purpose: this function computes the efficiency as number of
1919 * bytes per 1024 symbols. The CFI table is also filled
1920 * with the same information such that comparison is valid
1922 * Invoked by: Scheduler
1924 * @param[in] uint8_t noUlRsSym
1925 * @param[in] uint8_t cpType
1927 * @param[out] RgSchCmnTbSzEff* effTbl
/* Computes the UL efficiency table for the given CP type: for each TB-size
 * index, the average TB-size units carried per 1024 REs after excluding the
 * UL reference-signal symbols (noUlRsSym). Unlike the DL variant, no CRS
 * deduction applies here. */
1931 static Void rgSCHCmnCompUlEff(uint8_t noUlRsSym,uint8_t cpType,RgSchCmnTbSzEff *effTbl)
/* Symbols per RB follow from the cyclic-prefix type */
1940 case RG_SCH_CMN_NOR_CP:
1943 case RG_SCH_CMN_EXT_CP:
1947 /* Generate a log error. This case should never be executed */
/* Usable REs per RB after removing the RS symbols */
1951 noResPerRb = ((noSymPerRb - noUlRsSym) * RB_SCH_CMN_NUM_SCS_PER_RB);
1952 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1955 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1957 /* This line computes the coding efficiency per 1024 REs */
1958 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1960 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1966 * @brief This function computes efficiency for 2 layers and stores in a table.
1970 * Function: rgSCHCmn2LyrCompEff
1971 * Purpose: this function computes the efficiency as number of
1972 * bytes per 1024 symbols. The CFI table is also filled
1973 * with the same information such that comparison is valid
1975 * Invoked by: Scheduler
1977 * @param[in] uint8_t noPdcchSym
1978 * @param[in] uint8_t cpType
1979 * @param[in] uint8_t txAntIdx
1980 * @param[in] RgSchCmnTbSzEff* effTbl2Lyr
/* Two-layer counterpart of rgSCHCmnCompEff(): computes the average per-TBS
 * efficiency (TB-size units per 1024 REs) using the two-layer TB size table
 * rgTbSzTbl[1], and writes it into *effTbl2Lyr. */
1984 static Void rgSCHCmn2LyrCompEff(uint8_t noPdcchSym,uint8_t cpType,uint8_t txAntIdx,RgSchCmnTbSzEff *effTbl2Lyr)
1988 uint8_t resOfCrs; /* Effective REs occupied by CRS */
/* Symbols per RB follow from the cyclic-prefix type */
1994 case RG_SCH_CMN_NOR_CP:
1997 case RG_SCH_CMN_EXT_CP:
2001 /* Generate a log error. This case should never be executed */
2005 /* Depending on the Tx Antenna Index, deduct the
2006 * Resource elements for the CRS */
2010 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
2013 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
2016 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
2019 /* Generate a log error. This case should never be executed */
/* Usable (non-PDCCH, non-CRS) REs per RB */
2023 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
2024 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
2026 (*effTbl2Lyr)[i] = 0;
2027 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
2029 /* This line computes the coding efficiency per 1024 REs */
2030 (*effTbl2Lyr)[i] += (rgTbSzTbl[1][i][j] * 1024) / (noResPerRb * (j+1));
2032 (*effTbl2Lyr)[i] /= RG_SCH_CMN_NUM_RBS;
2039 * @brief This function initializes the rgSchCmnDciFrmtSizes table.
2043 * Function: rgSCHCmnGetDciFrmtSizes
2044 * Purpose: This function determines the sizes of all
2045 * the available DCI Formats. The order of
2046 * bits addition for each format is inaccordance
2048 * Invoked by: rgSCHCmnRgrCellCfg
/* Determines the payload size, as a sum of bit-field widths, of each DCI
 * format for this cell's bandwidth / RBG size / antenna-port configuration,
 * storing the results in rgSchCmnDciFrmtSizes[0..9]. Field ordering is
 * presumably per TS 36.212 5.3.3.1 -- verify against the spec. */
2053 static Void rgSCHCmnGetDciFrmtSizes(RgSchCellCb *cell)
2057 /* DCI Format 0 size determination */
2058 rgSchCmnDciFrmtSizes[0] = 1 +
2060 rgSCHUtlLog32bitNbase2((cell->bwCfg.ulTotalBw * \
2061 (cell->bwCfg.ulTotalBw + 1))/2) +
2071 /* DCI Format 1 size determination */
2072 rgSchCmnDciFrmtSizes[1] = 1 +
2073 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2078 4 + 2 + /* HqProc Id and DAI */
2084 /* DCI Format 1A size determination */
2085 rgSchCmnDciFrmtSizes[2] = 1 + /* Flag for format0/format1a differentiation */
2086 1 + /* Localized/distributed VRB assignment flag */
/* 3-bit vs 4-bit HARQ process Id below -- presumably FDD vs TDD variants
 * under conditional compilation; verify the guards. */
2089 3 + /* Harq process Id */
2091 4 + /* Harq process Id */
2092 2 + /* UL Index or DAI */
2094 1 + /* New Data Indicator */
2097 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2098 (cell->bwCfg.dlTotalBw + 1))/2);
2099 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
2100 Since VRB is local */
2102 /* DCI Format 1B size determination */
2103 rgSchCmnDciFrmtSizes[3] = 1 +
2104 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2105 (cell->bwCfg.dlTotalBw + 1))/2) +
2115 ((cell->numTxAntPorts == 4)? 4:2) +
2118 /* DCI Format 1C size determination */
2119 /* Approximation: NDLVrbGap1 ~= Nprb for DL */
/* NOTE(review): conditional-operator precedence hazard below.
 * 'A? 0:1 + A? X : Y' parses as 'A ? 0 : ((1 + A) ? X : Y)', NOT as
 * '(A?0:1) + (A?X:Y)', so the intended extra gap-indication bit for
 * dlTotalBw >= 50 is never added as written. Needs parenthesization;
 * fix alongside a recheck of the full (partially conditional) expression. */
2120 rgSchCmnDciFrmtSizes[4] = (cell->bwCfg.dlTotalBw < 50)? 0:1 +
2121 (cell->bwCfg.dlTotalBw < 50)?
2122 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/2 * \
2123 (cell->bwCfg.dlTotalBw/2 + 1))/2)) :
2124 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/4 * \
2125 (cell->bwCfg.dlTotalBw/4 + 1))/2)) +
2128 /* DCI Format 1D size determination */
2129 rgSchCmnDciFrmtSizes[5] = 1 +
2130 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2131 (cell->bwCfg.dlTotalBw + 1))/2) +
2140 ((cell->numTxAntPorts == 4)? 4:2) +
2143 /* DCI Format 2 size determination */
2144 rgSchCmnDciFrmtSizes[6] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2145 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2153 ((cell->numTxAntPorts == 4)? 6:3);
2155 /* DCI Format 2A size determination */
2156 rgSchCmnDciFrmtSizes[7] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2157 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2165 ((cell->numTxAntPorts == 4)? 2:0);
2167 /* DCI Format 3 size determination: same size as Format 0 */
2168 rgSchCmnDciFrmtSizes[8] = rgSchCmnDciFrmtSizes[0];
2170 /* DCI Format 3A size determination: same size as Format 0 */
2171 rgSchCmnDciFrmtSizes[9] = rgSchCmnDciFrmtSizes[0];
2178 * @brief This function initializes the cmnCell->dciAggrLvl table.
2182 * Function: rgSCHCmnGetCqiDciFrmt2AggrLvl
2183 * Purpose: This function determines the Aggregation level
2184 * for each CQI level against each DCI format.
2185 * Invoked by: rgSCHCmnRgrCellCfg
/* For each CQI level and each of the 10 DCI formats sized by
 * rgSCHCmnGetDciFrmtSizes(), selects the PDCCH aggregation level (2/4/8/16)
 * needed to carry the format at that CQI's PDCCH efficiency, storing the
 * result in cellSch->dciAggrLvl[cqi][format]. */
2190 static Void rgSCHCmnGetCqiDciFrmt2AggrLvl(RgSchCellCb *cell)
2192 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2197 for (i = 0; i < RG_SCH_CMN_MAX_CQI; i++)
/* 10 == number of entries in rgSchCmnDciFrmtSizes */
2199 for (j = 0; j < 10; j++)
2201 uint32_t pdcchBits; /* Actual number of phy bits needed for a given DCI Format
2202 * for a given CQI Level */
/* NOTE(review): assumes rgSchCmnCqiPdcchEff[i] != 0 -- verify table init */
2203 pdcchBits = (rgSchCmnDciFrmtSizes[j] * 1024)/rgSchCmnCqiPdcchEff[i];
/* Thresholds pick the smallest aggregation level able to hold pdcchBits */
2205 if (pdcchBits < 192)
2207 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL2;
2210 if (pdcchBits < 384)
2212 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL4;
2215 if (pdcchBits < 768)
2217 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL8;
2220 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL16;
2227 * @brief This function initializes all the data for the scheduler.
2231 * Function: rgSCHCmnDlInit
2232 * Purpose: This function initializes the following information:
2233 * 1. Efficiency table
2234 * 2. CQI to table index - It is one row for upto 3 RBs
2235 * and another row for greater than 3 RBs
2236 * currently extended prefix is compiled out.
2237 * Invoked by: MAC intialization code..may be ActvInit
2242 static Void rgSCHCmnDlInit()
2248 RgSchCmnTbSzEff *effTbl;
2249 RgSchCmnCqiToTbs *tbsTbl;
2252 /* 0 corresponds to Single layer case, 1 corresponds to 2 layers case*/
2253 /* Init Efficiency table for normal cyclic prefix */
2254 /*Initialize Efficiency table for Layer Index 0 */
2255 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2256 /*Initialize Efficiency table for each of the CFI indices. The
2257 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2258 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[0];
2259 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[0];
2260 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[0];
2261 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[0];
2262 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2263 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[0];
2264 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[0];
2265 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[0];
2266 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[0];
2267 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2268 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[0];
2269 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[0];
2270 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[0];
2271 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[0];
2273 /*Initialize CQI to TBS table for Layer Index 0 for Normal CP */
2274 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[0];
2275 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[0];
2276 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[0];
2277 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[0];
2279 /*Intialize Efficency table for Layer Index 1 */
2280 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2281 /*Initialize Efficiency table for each of the CFI indices. The
2282 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2283 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[1];
2284 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[1];
2285 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[1];
2286 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[1];
2287 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2288 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[1];
2289 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[1];
2290 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[1];
2291 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[1];
2292 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2293 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[1];
2294 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[1];
2295 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[1];
2296 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[1];
2298 /*Initialize CQI to TBS table for Layer Index 1 for Normal CP */
2299 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[1];
2300 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[1];
2301 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[1];
2302 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[1];
2304 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2306 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2308 /* EfficiencyTbl calculation incase of 2 layers for normal CP */
2309 rgSCHCmnCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx,\
2310 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i]);
2311 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx, \
2312 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i]);
2316 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2318 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2320 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i];
2321 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][i];
2322 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2323 (j >= 0) && (k > 0); --j)
2325 /* ADD CQI to MCS mapping correction
2326 * single dimensional array is replaced by 2 dimensions for different CFI*/
2327 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2329 (*tbsTbl)[k--] = (uint8_t)j;
2336 /* effTbl,tbsTbl calculation incase of 2 layers for normal CP */
2337 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i];
2338 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][i];
2339 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2340 (j >= 0) && (k > 0); --j)
2342 /* ADD CQI to MCS mapping correction
2343 * single dimensional array is replaced by 2 dimensions for different CFI*/
2344 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2346 (*tbsTbl)[k--] = (uint8_t)j;
2356 /* Efficiency Table for Extended CP */
2357 /*Initialize Efficiency table for Layer Index 0 */
2358 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2359 /*Initialize Efficiency table for each of the CFI indices. The
2360 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2361 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[0];
2362 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[0];
2363 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[0];
2364 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[0];
2365 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2366 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[0];
2367 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[0];
2368 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[0];
2369 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[0];
2370 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2371 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[0];
2372 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[0];
2373 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[0];
2374 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[0];
2376 /*Initialize CQI to TBS table for Layer Index 0 for Extended CP */
2377 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[0];
2378 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[0];
2379 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[0];
2380 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[0];
2382 /*Initialize Efficiency table for Layer Index 1 */
2383 /*Initialize Efficiency table for each of the CFI indices. The
2384 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2385 /*Initialize Efficency table for Tx Antenna Port Index 0 */
2386 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[1];
2387 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[1];
2388 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[1];
2389 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[1];
2390 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2391 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[1];
2392 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[1];
2393 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[1];
2394 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[1];
2395 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2396 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[1];
2397 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[1];
2398 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[1];
2399 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[1];
2401 /*Initialize CQI to TBS table for Layer Index 1 for Extended CP */
2402 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[1];
2403 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[1];
2404 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[1];
2405 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[1];
2406 /* Activate this code when extended cp is supported */
2407 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2409 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2411 /* EfficiencyTbl calculation incase of 2 layers for extendedl CP */
2412 rgSCHCmnCompEff( (uint8_t)(i + 1 ), (uint8_t)RG_SCH_CMN_EXT_CP, idx,\
2413 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i]);
2414 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), (uint8_t) RG_SCH_CMN_EXT_CP,idx, \
2415 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i]);
2419 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2421 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2423 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i];
2424 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][i];
2425 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2426 (j >= 0) && (k > 0); --j)
2428 /* ADD CQI to MCS mapping correction
2429 * single dimensional array is replaced by 2 dimensions for different CFI*/
2430 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2432 (*tbsTbl)[k--] = (uint8_t)j;
2439 /* effTbl,tbsTbl calculation incase of 2 layers for extended CP */
2440 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i];
2441 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][i];
2442 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2443 (j >= 0) && (k > 0); --j)
2445 /* ADD CQI to MCS mapping correction
2446 * single dimensional array is replaced by 2 dimensions for different CFI*/
2447 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2449 (*tbsTbl)[k--] = (uint8_t)j;
2462 * @brief This function initializes all the data for the scheduler.
2466 * Function: rgSCHCmnUlInit
2467 * Purpose: This function initializes the following information:
2468 * 1. Efficiency table
2469 * 2. CQI to table index - It is one row for upto 3 RBs
2470 * and another row for greater than 3 RBs
2471 * currently extended prefix is compiled out.
2472 Invoked by: MAC initialization code (may be ActvInit)
2477 static Void rgSCHCmnUlInit()
/* Builds the UL efficiency tables and the UL CQI -> iTBS mapping for both
 * normal and extended cyclic prefix. Invoked once at scheduler bring-up. */
2479 uint8_t *mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_NOR_CP][0];
2480 RgSchCmnTbSzEff *effTbl = &rgSchCmnNorUlEff[0];
2481 const RgSchCmnUlCqiInfo *cqiTbl = &rgSchCmnUlCqiTbl[0];
2485 /* Initializing new variable added for UL eff */
2486 rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP] = &rgSchCmnNorUlEff[0];
2487 /* Reason behind using 3 as the number of symbols to rule out for
2488 * efficiency table computation would be that we are using 2 symbols for
2489 * DMRS(1 in each slot) and 1 symbol for SRS*/
2490 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_NOR_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP]);
/* Walk iTBS from highest to lowest; for each UL CQI record the largest iTBS
 * whose efficiency does not exceed that CQI's efficiency (normal CP). */
2492 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2493 i >= 0 && j > 0; --i)
2495 if ((*effTbl)[i] <= cqiTbl[j].eff)
2497 mapTbl[j--] = (uint8_t)i;
/* Repeat the same computation against the extended-CP tables. */
2504 effTbl = &rgSchCmnExtUlEff[0];
2505 mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_EXT_CP][0];
2507 /* Initializing new variable added for UL eff */
2508 rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP] = &rgSchCmnExtUlEff[0];
2509 /* Reason behind using 3 as the number of symbols to rule out for
2510 * efficiency table computation would be that we are using 2 symbols for
2511 * DMRS(1 in each slot) and 1 symbol for SRS*/
/* NOTE(review): literal 3 below vs RGSCH_UL_SYM_DMRS_SRS used for the
 * normal-CP call above -- presumably the same value; confirm and prefer
 * the macro for consistency. */
2512 rgSCHCmnCompUlEff(3,RG_SCH_CMN_EXT_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP]);
2514 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2515 i >= 0 && j > 0; --i)
2517 if ((*effTbl)[i] <= cqiTbl[j].eff)
2519 mapTbl[j--] = (uint8_t)i;
2531 * @brief This function initializes all the data for the scheduler.
2535 * Function: rgSCHCmnInit
2536 * Purpose: This function initializes the following information:
2537 * 1. Efficiency table
2538 * 2. CQI to table index - It is one row for upto 3 RBs
2539 * and another row for greater than 3 RBs
2540 * currently extended prefix is compiled out.
2541 Invoked by: MAC initialization code (may be ActvInit)
/* Scheduler entry-point initialization: eMTC init hooks, the rgSchCmnApis
 * function-pointer table wiring the common scheduler into the MAC, and the
 * per-scheduler init loops. */
2553 rgSCHEmtcCmnDlInit();
2554 rgSCHEmtcCmnUlInit();
2560 /* Init the function pointers */
2561 rgSchCmnApis.rgSCHRgrUeCfg = rgSCHCmnRgrUeCfg;
2562 rgSchCmnApis.rgSCHRgrUeRecfg = rgSCHCmnRgrUeRecfg;
2563 rgSchCmnApis.rgSCHFreeUe = rgSCHCmnUeDel;
2564 rgSchCmnApis.rgSCHRgrCellCfg = rgSCHCmnRgrCellCfg;
2565 rgSchCmnApis.rgSCHRgrCellRecfg = rgSCHCmnRgrCellRecfg;
2566 rgSchCmnApis.rgSCHFreeCell = rgSCHCmnCellDel;
2567 rgSchCmnApis.rgSCHRgrLchCfg = rgSCHCmnRgrLchCfg;
2568 rgSchCmnApis.rgSCHRgrLcgCfg = rgSCHCmnRgrLcgCfg;
2569 rgSchCmnApis.rgSCHRgrLchRecfg = rgSCHCmnRgrLchRecfg;
2570 rgSchCmnApis.rgSCHRgrLcgRecfg = rgSCHCmnRgrLcgRecfg;
2571 rgSchCmnApis.rgSCHFreeDlLc = rgSCHCmnFreeDlLc;
2572 rgSchCmnApis.rgSCHFreeLcg = rgSCHCmnLcgDel;
2573 rgSchCmnApis.rgSCHRgrLchDel = rgSCHCmnRgrLchDel;
2574 rgSchCmnApis.rgSCHActvtUlUe = rgSCHCmnActvtUlUe;
2575 rgSchCmnApis.rgSCHActvtDlUe = rgSCHCmnActvtDlUe;
2576 rgSchCmnApis.rgSCHHdlUlTransInd = rgSCHCmnHdlUlTransInd;
2577 rgSchCmnApis.rgSCHDlDedBoUpd = rgSCHCmnDlDedBoUpd;
2578 rgSchCmnApis.rgSCHUlRecMsg3Alloc = rgSCHCmnUlRecMsg3Alloc;
2579 rgSchCmnApis.rgSCHUlCqiInd = rgSCHCmnUlCqiInd;
2580 rgSchCmnApis.rgSCHPucchDeltaPwrInd = rgSCHPwrPucchDeltaInd;
2581 rgSchCmnApis.rgSCHUlHqProcForUe = rgSCHCmnUlHqProcForUe;
2583 rgSchCmnApis.rgSCHUpdUlHqProc = rgSCHCmnUpdUlHqProc;
/* BSR / PHR reporting hooks */
2585 rgSchCmnApis.rgSCHUpdBsrShort = rgSCHCmnUpdBsrShort;
2586 rgSchCmnApis.rgSCHUpdBsrTrunc = rgSCHCmnUpdBsrTrunc;
2587 rgSchCmnApis.rgSCHUpdBsrLong = rgSCHCmnUpdBsrLong;
2588 rgSchCmnApis.rgSCHUpdPhr = rgSCHCmnUpdPhr;
2589 rgSchCmnApis.rgSCHUpdExtPhr = rgSCHCmnUpdExtPhr;
2590 rgSchCmnApis.rgSCHContResUlGrant = rgSCHCmnContResUlGrant;
2591 rgSchCmnApis.rgSCHSrRcvd = rgSCHCmnSrRcvd;
2592 rgSchCmnApis.rgSCHFirstRcptnReq = rgSCHCmnFirstRcptnReq;
2593 rgSchCmnApis.rgSCHNextRcptnReq = rgSCHCmnNextRcptnReq;
2594 rgSchCmnApis.rgSCHFirstHqFdbkAlloc = rgSCHCmnFirstHqFdbkAlloc;
2595 rgSchCmnApis.rgSCHNextHqFdbkAlloc = rgSCHCmnNextHqFdbkAlloc;
2596 rgSchCmnApis.rgSCHDlProcAddToRetx = rgSCHCmnDlProcAddToRetx;
2597 rgSchCmnApis.rgSCHDlCqiInd = rgSCHCmnDlCqiInd;
2599 rgSchCmnApis.rgSCHUlProcAddToRetx = rgSCHCmnEmtcUlProcAddToRetx;
2602 rgSchCmnApis.rgSCHSrsInd = rgSCHCmnSrsInd;
2604 rgSchCmnApis.rgSCHDlTARpt = rgSCHCmnDlTARpt;
2605 rgSchCmnApis.rgSCHDlRlsSubFrm = rgSCHCmnDlRlsSubFrm;
2606 rgSchCmnApis.rgSCHUeReset = rgSCHCmnUeReset;
2608 rgSchCmnApis.rgSCHHdlCrntiCE = rgSCHCmnHdlCrntiCE;
2609 rgSchCmnApis.rgSCHDlProcAck = rgSCHCmnDlProcAck;
2610 rgSchCmnApis.rgSCHDlRelPdcchFbk = rgSCHCmnDlRelPdcchFbk;
2611 rgSchCmnApis.rgSCHUlSpsRelInd = rgSCHCmnUlSpsRelInd;
2612 rgSchCmnApis.rgSCHUlSpsActInd = rgSCHCmnUlSpsActInd;
2613 rgSchCmnApis.rgSCHUlCrcFailInd = rgSCHCmnUlCrcFailInd;
2614 rgSchCmnApis.rgSCHUlCrcInd = rgSCHCmnUlCrcInd;
2616 rgSchCmnApis.rgSCHDrxStrtInActvTmrInUl = rgSCHCmnDrxStrtInActvTmrInUl;
2617 rgSchCmnApis.rgSCHUpdUeDataIndLcg = rgSCHCmnUpdUeDataIndLcg;
/* Run each registered UL/DL scheduler's init to populate its API table. */
2619 for (idx = 0; idx < RGSCH_NUM_SCHEDULERS; ++idx)
2621 rgSchUlSchdInits[idx](&rgSchUlSchdTbl[idx]);
2622 rgSchDlSchdInits[idx](&rgSchDlSchdTbl[idx]);
2625 for (idx = 0; idx < RGSCH_NUM_EMTC_SCHEDULERS; ++idx)
2627 rgSchEmtcUlSchdInits[idx](&rgSchEmtcUlSchdTbl[idx]);
2628 rgSchEmtcDlSchdInits[idx](&rgSchEmtcDlSchdTbl[idx]);
/* DL frequency-selective schedulers only exist in phase-2/TFU builds. */
2631 #if (defined (RG_PHASE2_SCHED) && defined(TFU_UPGRADE))
2632 for (idx = 0; idx < RGSCH_NUM_DLFS_SCHEDULERS; ++idx)
2634 rgSchDlfsSchdInits[idx](&rgSchDlfsSchdTbl[idx]);
2638 rgSchCmnApis.rgSCHRgrSCellUeCfg = rgSCHCmnRgrSCellUeCfg;
2639 rgSchCmnApis.rgSCHRgrSCellUeDel = rgSCHCmnRgrSCellUeDel;
2646 * @brief This function is a wrapper to call scheduler specific API.
2650 * Function: rgSCHCmnDlRlsSubFrm
2651 * Purpose: Releases scheduler Information from DL SubFrm.
2655 * @param[in] RgSchCellCb *cell
2656 * @param[out] CmLteTimingInfo frm
2660 Void rgSCHCmnDlRlsSubFrm(RgSchCellCb *cell,CmLteTimingInfo frm)
/* Releases common-scheduler state held by the DL subframe identified by
 * 'frm', returning it to the pool via rgSCHUtlSubFrmPut. */
2662 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2666 /* Get the pointer to the subframe */
2667 sf = rgSCHUtlSubFrmGet(cell, frm);
2669 rgSCHUtlSubFrmPut(cell, sf);
2672 /* Re-initialize DLFS specific information for the sub-frame */
2673 cellSch->apisDlfs->rgSCHDlfsReinitSf(cell, sf);
2681 * @brief This function is the starting function for DL allocation.
2685 * Function: rgSCHCmnDlCmnChAlloc
2686 * Purpose: Scheduling for downlink. It performs allocation in the order
2687 of priority, which is BCCH/PCH first, then CCCH, Random Access and TA.
2689 * Invoked by: Scheduler
2691 * @param[in] RgSchCellCb* cell
2692 * @param[out] RgSchCmnDlRbAllocInfo* allocInfo
2696 static Void rgSCHCmnDlCcchRarAlloc(RgSchCellCb *cell)
/* Orders DL common-channel scheduling: Msg4/CCCH retransmissions first,
 * then new CCCH transmissions, CCCH-SDU (post guard-timer) traffic, and
 * finally RA responses -- each gated by the LTE-Adv ABS muting pattern. */
2698 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2701 rgSCHCmnDlCcchRetx(cell, &cellSch->allocInfo);
2702 /* LTE_ADV_FLAG_REMOVED_START */
2703 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2705 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2707 /*eNodeB need to blank the subframe */
/* Muted ABS subframe: skip new CCCH transmission. */
2711 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2716 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2718 /* LTE_ADV_FLAG_REMOVED_END */
2722 /*Added these function calls for processing CCCH SDU arriving
2723 * after guard timer expiry.Functions differ from above two functions
2724 * in using ueCb instead of raCb.*/
2725 rgSCHCmnDlCcchSduRetx(cell, &cellSch->allocInfo);
2726 /* LTE_ADV_FLAG_REMOVED_START */
2727 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2729 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2731 /*eNodeB need to blank the subframe */
2735 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2740 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2742 /* LTE_ADV_FLAG_REMOVED_END */
/* RA response scheduling: only when a Msg3 slot is pending ... */
2746 if(cellSch->ul.msg3SchdIdx != RGSCH_INVALID_INFO)
2748 /* Do not schedule msg3 if there is a CFI change ongoing */
2749 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2751 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2755 /* LTE_ADV_FLAG_REMOVED_START */
2756 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2758 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2760 /*eNodeB need to blank the subframe */
2764 /* Do not schedule msg3 if there is a CFI change ongoing */
2765 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2767 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2773 /* Do not schedule msg3 if there is a CFI change ongoing */
2774 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2776 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2779 /* LTE_ADV_FLAG_REMOVED_END */
2787 * @brief Scheduling for CCCH SDU.
2791 * Function: rgSCHCmnCcchSduAlloc
2792 * Purpose: Scheduling for CCCH SDU
2794 * Invoked by: Scheduler
2796 * @param[in] RgSchCellCb* cell
2797 * @param[in] RgSchUeCb* ueCb
2798 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2802 static S16 rgSCHCmnCcchSduAlloc(RgSchCellCb *cell,RgSchUeCb *ueCb,RgSchCmnDlRbAllocInfo *allocInfo)
/* Allocates DL resources for one UE's CCCH SDU: checks subframe BW headroom,
 * obtains a HARQ process, performs the dedicated allocation, and on success
 * queues the process on the CCCH-SDU TX list. Returns ROK/failure (S16). */
2804 RgSchDlRbAlloc *rbAllocInfo;
2805 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2806 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2809 /* Return if subframe BW exhausted */
2810 if (allocInfo->ccchSduAlloc.ccchSduDlSf->bw <=
2811 allocInfo->ccchSduAlloc.ccchSduDlSf->bwAssigned)
2813 DU_LOG("\nERROR --> SCH : bw<=bwAssigned for UEID:%d",ueCb->ueId);
2817 if (rgSCHDhmGetCcchSduHqProc(ueCb, cellSch->dl.time, &(ueDl->proc)) != ROK)
2819 DU_LOG("\nERROR --> SCH : rgSCHDhmGetCcchSduHqProc failed UEID:%d",ueCb->ueId);
2823 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
2824 rbAllocInfo->dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2826 if (rgSCHCmnCcchSduDedAlloc(cell, ueCb) != ROK)
2828 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
2829 rgSCHDhmRlsHqpTb(ueDl->proc, 0, FALSE);
2830 DU_LOG("\nERROR --> SCH : rgSCHCmnCcchSduDedAlloc failed UEID:%d",ueCb->ueId);
/* Success: enqueue the HARQ proc for CCCH-SDU transmission and count the
 * CCCH UE scheduled in this DL subframe. */
2833 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduTxLst, &ueDl->proc->reqLnk);
2834 ueDl->proc->reqLnk.node = (PTR)ueDl->proc;
2835 allocInfo->ccchSduAlloc.ccchSduDlSf->schdCcchUe++;
2840 * @brief This function scheduler for downlink CCCH messages.
2844 * Function: rgSCHCmnDlCcchSduTx
2845 * Purpose: Scheduling for downlink CCCH
2847 * Invoked by: Scheduler
2849 * @param[in] RgSchCellCb *cell
2850 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2854 static Void rgSCHCmnDlCcchSduTx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Walks the cell's CCCH-SDU UE list and attempts a fresh allocation for
 * each eligible UE, bounded by the per-subframe CCCH UE cap. */
2858 RgSchCmnDlUe *ueCmnDl;
2859 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2860 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2863 node = cell->ccchSduUeLst.first;
/* Stop once the configured CCCH-per-subframe limit is reached. */
2866 if(cellSch->dl.maxCcchPerDlSf &&
2867 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2873 ueCb = (RgSchUeCb *)(node->node);
2874 ueCmnDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2876 /* Fix : syed postpone scheduling for this
2877 * until msg4 is done */
2878 /* Fix : syed RLC can erroneously send CCCH SDU BO
2879 * twice. Hence an extra guard to avoid if already
2880 * scheduled for RETX */
2881 if ((!(ueCb->dl.dlInactvMask & RG_HQENT_INACTIVE)) &&
2884 if ((rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)) != ROK)
2891 DU_LOG("\nERROR --> SCH : THIS SHOULD "
2892 "NEVER HAPPEN for UEID:%d", ueCb->ueId);
2902 * @brief This function scheduler for downlink CCCH messages.
2906 * Function: rgSCHCmnDlCcchTx
2907 * Purpose: Scheduling for downlink CCCH
2909 * Invoked by: Scheduler
2911 * @param[in] RgSchCellCb *cell
2912 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2916 static Void rgSCHCmnDlCcchTx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Walks the RA to-be-scheduled list and allocates Msg4 for each RA control
 * block, bounded by the per-subframe CCCH UE cap. */
2920 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2921 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
2923 node = cell->raInfo.toBeSchdLst.first;
/* Stop once the configured CCCH-per-subframe limit is reached. */
2926 if(cellSch->dl.maxCcchPerDlSf &&
2927 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2934 raCb = (RgSchRaCb *)(node->node);
2936 /* Address allocation for this UE for MSG 4 */
2937 /* Allocation for Msg4 */
2938 if ((rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)) != ROK)
2949 * @brief This function scheduler for downlink CCCH messages.
2953 * Function: rgSCHCmnDlCcchSduRetx
2954 * Purpose: Scheduling for downlink CCCH
2956 * Invoked by: Scheduler
2958 * @param[in] RgSchCellCb *cell
2959 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2963 static Void rgSCHCmnDlCcchSduRetx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Schedules CCCH-SDU HARQ retransmissions from the cell retx list into the
 * CCCH-SDU subframe, reusing the grant size (numRb) and MCS of the failed
 * transmission; accumulates the retx BW and commits it to the subframe. */
2965 RgSchDlRbAlloc *rbAllocInfo;
2967 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2969 RgSchDlHqProcCb *hqP;
2972 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2975 node = cellSch->dl.ccchSduRetxLst.first;
/* Respect the per-subframe CCCH UE cap. */
2978 if(cellSch->dl.maxCcchPerDlSf &&
2979 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2986 hqP = (RgSchDlHqProcCb *)(node->node);
2989 /* DwPts Scheduling Changes Start */
2991 if (rgSCHCmnRetxAvoidTdd(allocInfo->ccchSduAlloc.ccchSduDlSf,
2997 /* DwPts Scheduling Changes End */
/* Skip if the original grant no longer fits in remaining subframe BW. */
2999 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3003 ueCb = (RgSchUeCb*)(hqP->hqE->ue);
3004 ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
3006 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
3007 /* Fill RB Alloc Info */
3008 rbAllocInfo->dlSf = dlSf;
3009 rbAllocInfo->tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3010 rbAllocInfo->rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3011 /* Fix : syed iMcs setting did not correspond to RETX */
3012 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3013 rbAllocInfo->tbInfo[0].imcs);
3014 rbAllocInfo->rnti = ueCb->ueId;
3015 rbAllocInfo->tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3016 /* Fix : syed Copying info in entirety without depending on stale TX information */
3017 rbAllocInfo->tbInfo[0].tbCb = &hqP->tbInfo[0];
3018 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
3019 /* Fix : syed Assigning proc to scratchpad */
3022 retxBw += rbAllocInfo->rbsReq;
3024 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduRetxLst, \
3026 hqP->reqLnk.node = (PTR)hqP;
/* Commit the accumulated retransmission bandwidth to the subframe. */
3030 dlSf->bwAssigned += retxBw;
3036 * @brief This function scheduler for downlink CCCH messages.
3040 * Function: rgSCHCmnDlCcchRetx
3041 * Purpose: Scheduling for downlink CCCH
3043 * Invoked by: Scheduler
3045 * @param[in] RgSchCellCb *cell
3046 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3050 static Void rgSCHCmnDlCcchRetx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Schedules Msg4 HARQ retransmissions from the cell's msg4RetxLst into the
 * Msg4 subframe. Mirrors rgSCHCmnDlCcchSduRetx but operates on raCb (RA
 * control block) state rather than ueCb, using the temporary C-RNTI. */
3053 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3055 RgSchDlHqProcCb *hqP;
3057 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
3060 node = cellSch->dl.msg4RetxLst.first;
/* Respect the per-subframe CCCH UE cap. */
3063 if(cellSch->dl.maxCcchPerDlSf &&
3064 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3070 hqP = (RgSchDlHqProcCb *)(node->node);
3074 /* DwPts Scheduling Changes Start */
3076 if (rgSCHCmnRetxAvoidTdd(allocInfo->msg4Alloc.msg4DlSf,
3082 /* DwPts Scheduling Changes End */
/* Skip if the original grant no longer fits in remaining subframe BW. */
3084 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3088 raCb = (RgSchRaCb*)(hqP->hqE->raCb);
3089 /* Fill RB Alloc Info */
3090 raCb->rbAllocInfo.dlSf = dlSf;
3091 raCb->rbAllocInfo.tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3092 raCb->rbAllocInfo.rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3093 /* Fix : syed iMcs setting did not correspond to RETX */
3094 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3095 raCb->rbAllocInfo.tbInfo[0].imcs);
3096 raCb->rbAllocInfo.rnti = raCb->tmpCrnti;
3097 raCb->rbAllocInfo.tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3098 /* Fix; syed Copying info in entirety without depending on stale TX information */
3099 raCb->rbAllocInfo.tbInfo[0].tbCb = &hqP->tbInfo[0];
3100 raCb->rbAllocInfo.tbInfo[0].schdlngForTb = TRUE;
3102 retxBw += raCb->rbAllocInfo.rbsReq;
3104 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4RetxLst, \
3106 hqP->reqLnk.node = (PTR)hqP;
/* Commit the accumulated retransmission bandwidth to the subframe. */
3110 dlSf->bwAssigned += retxBw;
3116 * @brief This function implements scheduler DL allocation for
3117 * for broadcast (on PDSCH) and paging.
3121 * Function: rgSCHCmnDlBcchPcch
3122 * Purpose: This function implements scheduler for DL allocation
3123 * for broadcast (on PDSCH) and paging.
3125 * Invoked by: Scheduler
3127 * @param[in] RgSchCellCb* cell
3132 static Void rgSCHCmnDlBcchPcch(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo,RgInfSfAlloc *subfrmAlloc)
/* DL allocation for broadcast and paging: BCCH-on-BCH, the two BCCH-on-DLSCH
 * logical channels (SIB1 / other SIs), and PCCH, for the target subframe
 * computed from the current time plus the scheduling look-ahead. */
3134 CmLteTimingInfo frm;
3136 RgSchClcDlLcCb *pcch;
3140 RgSchClcDlLcCb *bcch, *bch;
3141 #endif/*RGR_SI_SCH*/
3144 frm = cell->crntTime;
3146 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
3147 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
3148 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
3150 // RGSCH_SUBFRAME_INDEX(frm);
3151 //RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
3154 /* Compute the subframe for which allocation is being made */
3155 /* essentially, we need pointer to the dl frame for this subframe */
3156 sf = rgSCHUtlSubFrmGet(cell, frm);
/* ---- BCCH carried on BCH (MIB) ---- */
3160 bch = rgSCHDbmGetBcchOnBch(cell);
3161 #if (ERRCLASS & ERRCLS_DEBUG)
3164 DU_LOG("\nERROR --> SCH : BCCH on BCH is not configured");
3168 if (bch->boLst.first != NULLP)
3170 bo = (RgSchClcBoRpt *)(bch->boLst.first->node);
3171 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3173 sf->bch.tbSize = bo->bo;
3174 cmLListDelFrm(&bch->boLst, bch->boLst.first);
3175 /* ccpu00117052 - MOD - Passing double pointer
3176 for proper NULLP assignment*/
3177 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(*bo));
3178 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, bch->lcId,TRUE);
/* ---- BCCH carried on DLSCH ---- */
3183 if ((frm.sfn % 4 == 0) && (frm.subframe == 0))
3188 allocInfo->bcchAlloc.schdFirst = FALSE;
3189 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
3190 #if (ERRCLASS & ERRCLS_DEBUG)
3193 DU_LOG("\nERROR --> SCH : BCCH on DLSCH is not configured");
3197 if (bcch->boLst.first != NULLP)
3199 bo = (RgSchClcBoRpt *)(bcch->boLst.first->node);
3201 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3203 allocInfo->bcchAlloc.schdFirst = TRUE;
3204 /* Time to perform allocation for this BCCH transmission */
3205 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
/* Second BCCH-on-DLSCH channel: scheduled anywhere within its
 * [timeToTx, maxTimeToTx] window; stale reports past the window
 * are purged from the BO list. */
3209 if(!allocInfo->bcchAlloc.schdFirst)
3212 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
3213 #if (ERRCLASS & ERRCLS_DEBUG)
3216 DU_LOG("\nERROR --> SCH : BCCH on DLSCH is not configured");
3220 lnk = bcch->boLst.first;
3221 while (lnk != NULLP)
3223 bo = (RgSchClcBoRpt *)(lnk->node);
3225 valid = rgSCHCmnChkInWin(frm, bo->timeToTx, bo->maxTimeToTx);
3229 bo->i = RGSCH_CALC_SF_DIFF(frm, bo->timeToTx);
3230 /* Time to perform allocation for this BCCH transmission */
3231 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3236 valid = rgSCHCmnChkPastWin(frm, bo->maxTimeToTx);
3239 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
3240 /* ccpu00117052 - MOD - Passing double pointer
3241 for proper NULLP assignment*/
3242 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo,
3243 sizeof(RgSchClcBoRpt));
/* With RGR_SI_SCH, SI scheduling is delegated wholesale. */
3249 rgSCHDlSiSched(cell, allocInfo, subfrmAlloc);
3250 #endif/*RGR_SI_SCH*/
/* ---- PCCH (paging) ---- */
3252 pcch = rgSCHDbmGetPcch(cell);
3256 DU_LOG("\nERROR --> SCH : PCCH on DLSCH is not configured");
3260 if (pcch->boLst.first != NULLP)
3262 bo = (RgSchClcBoRpt *)(pcch->boLst.first->node);
3264 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3266 /* Time to perform allocation for this PCCH transmission */
3267 rgSCHCmnClcAlloc(cell, sf, pcch, RGSCH_P_RNTI, allocInfo);
3275 * Fun: rgSCHCmnChkInWin
3277 * Desc: This function checks if frm occurs in window
3279 * Ret: TRUE - if in window
3284 * File: rg_sch_cmn.c
3287 Bool rgSCHCmnChkInWin(CmLteTimingInfo frm,CmLteTimingInfo start,CmLteTimingInfo end)
3292 if (end.sfn > start.sfn)
3294 if (frm.sfn > start.sfn
3295 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3297 if (frm.sfn < end.sfn
3299 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3301 || (frm.sfn == end.sfn && frm.slot <= start.slot))
3308 /* Testing for wrap around, sfn wraparound check should be enough */
3309 else if (end.sfn < start.sfn)
3311 if (frm.sfn > start.sfn
3312 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3318 if (frm.sfn < end.sfn
3319 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3325 else /* start.sfn == end.sfn */
3327 if (frm.sfn == start.sfn
3328 && (frm.slot >= start.slot
3329 && frm.slot <= end.slot))
3336 } /* end of rgSCHCmnChkInWin*/
3340 * Fun: rgSCHCmnChkPastWin
3342 * Desc: This function checks if frm has gone past window edge
3344 * Ret: TRUE - if past window edge
3349 * File: rg_sch_cmn.c
3352 Bool rgSCHCmnChkPastWin(CmLteTimingInfo frm,CmLteTimingInfo end)
3354 CmLteTimingInfo refFrm = end;
3358 RGSCH_INCR_FRAME(refFrm.sfn);
3359 RGSCH_INCR_SUB_FRAME(end, 1);
3360 pastWin = rgSCHCmnChkInWin(frm, end, refFrm);
3363 } /* end of rgSCHCmnChkPastWin*/
3366 * @brief This function implements allocation of the resources for common
3367 * channels BCCH, PCCH.
3371 * Function: rgSCHCmnClcAlloc
3372 * Purpose: This function implements selection of number of RBs based
3373 * the allowed grant for the service. It is also responsible
3374 * for selection of MCS for the transmission.
3376 * Invoked by: Scheduler
3378 * @param[in] RgSchCellCb *cell,
3379 * @param[in] RgSchDlSf *sf,
3380 * @param[in] RgSchClcDlLcCb *lch,
3381 * @param[in] uint16_t rnti,
3382 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3386 static Void rgSCHCmnClcAlloc(RgSchCellCb *cell,RgSchDlSf *sf,RgSchClcDlLcCb *lch,uint16_t rnti,RgSchCmnDlRbAllocInfo *allocInfo)
/* Resource allocation for common logical channels (BCCH/PCCH): derives the
 * RB count for the pending BO report, compensates for DwPTS special
 * subframes, updates the subframe's assigned BW, and fills the BCCH or PCCH
 * branch of allocInfo depending on whether rnti is SI-RNTI or P-RNTI. */
3388 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3395 uint8_t cfi = cellDl->currCfi;
3399 bo = (RgSchClcBoRpt *)(lch->boLst.first->node);
3403 /* rgSCHCmnClcRbAllocForFxdTb(cell, bo->bo, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: search the TBS table directly; otherwise divide. */
3404 if(cellDl->bitsPerRb==0)
3406 while ((rgTbSzTbl[0][0][rb]) < (tbs*8))
3414 rb = RGSCH_CEIL((tbs*8), cellDl->bitsPerRb);
3416 /* DwPTS Scheduling Changes Start */
3418 if(sf->sfType == RG_SCH_SPL_SF_DATA)
3420 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
3422 /* Calculate the less RE's because of DwPTS */
3423 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
3425 /* Increase number of RBs in Spl SF to compensate for lost REs */
3426 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
3429 /* DwPTS Scheduling Changes End */
3430 /*ccpu00115595- end*/
3431 /* additional check to see if required RBs
3432 * exceeds the available */
3433 if (rb > sf->bw - sf->bwAssigned)
3435 DU_LOG("\nERROR --> SCH : BW allocation "
3436 "failed for CRNTI:%d",rnti);
3440 /* Update the subframe Allocated BW field */
3441 sf->bwAssigned = sf->bwAssigned + rb;
3442 /* Fill in the BCCH/PCCH transmission info to the RBAllocInfo struct */
3443 if (rnti == RGSCH_SI_RNTI)
3445 allocInfo->bcchAlloc.rnti = rnti;
3446 allocInfo->bcchAlloc.dlSf = sf;
3447 allocInfo->bcchAlloc.tbInfo[0].bytesReq = tbs;
3448 allocInfo->bcchAlloc.rbsReq = rb;
3449 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
3450 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
3451 /* Nprb indication at PHY for common Ch */
3452 allocInfo->bcchAlloc.nPrb = bo->nPrb;
/* else: P-RNTI -> paging allocation */
3456 allocInfo->pcchAlloc.rnti = rnti;
3457 allocInfo->pcchAlloc.dlSf = sf;
3458 allocInfo->pcchAlloc.tbInfo[0].bytesReq = tbs;
3459 allocInfo->pcchAlloc.rbsReq = rb;
3460 allocInfo->pcchAlloc.tbInfo[0].imcs = mcs;
3461 allocInfo->pcchAlloc.tbInfo[0].noLyr = 1;
3462 allocInfo->pcchAlloc.nPrb = bo->nPrb;
3469 * @brief This function implements PDCCH allocation for common channels.
3473 * Function: rgSCHCmnCmnPdcchAlloc
3474 * Purpose: This function implements allocation of PDCCH for a UE.
3475 * 1. This uses index 0 of PDCCH table for efficiency.
3476 * 2. Uses he candidate PDCCH count for the aggr level.
3477 * 3. Look for availability for each candidate and choose
3478 * the first one available.
3480 * Invoked by: Scheduler
3482 * @param[in] RgSchCellCb *cell
3483 * @param[in] RgSchDlSf *sf
3484 * @return RgSchPdcch *
3485 * -# NULLP when unsuccessful
3488 RgSchPdcch *rgSCHCmnCmnPdcchAlloc(RgSchCellCb *cell,RgSchDlSf *subFrm)
/* Allocates a PDCCH from the common search space at the cell's configured
 * common-channel aggregation level. Returns the PDCCH, or NULLP and marks
 * the subframe's cceFailure flag when no candidate is free. */
3490 CmLteAggrLvl aggrLvl;
3491 RgSchPdcchInfo *pdcchInfo;
3493 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3494 uint8_t numCce; /*store num CCEs based on
3495 aggregation level */
3497 aggrLvl = cellSch->dl.cmnChAggrLvl;
3499 pdcchInfo = &(subFrm->pdcchInfo);
3501 /* Updating the no. of nCce in pdcchInfo, in case if CFI
3504 if(subFrm->nCce != pdcchInfo->nCce)
3506 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3509 if(cell->nCce != pdcchInfo->nCce)
3511 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Map aggregation level to its CCE count. */
3517 case CM_LTE_AGGR_LVL4:
3520 case CM_LTE_AGGR_LVL8:
3523 case CM_LTE_AGGR_LVL16:
3530 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3533 pdcch->isSpsRnti = FALSE;
3535 /* Increment the CCE used counter in the current subframe */
3536 subFrm->cceCnt += numCce;
3537 pdcch->pdcchSearchSpace = RG_SCH_CMN_SEARCH_SPACE;
3542 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3543 subFrm->isCceFailure = TRUE;
3544 DU_LOG("\nDEBUG --> SCH : PDCCH ERR: NO PDDCH AVAIL IN COMMON SEARCH SPACE aggr:%u",
3551 * @brief This function implements bandwidth allocation for common channels.
3555 * Function: rgSCHCmnClcRbAlloc
3556 * Purpose: This function implements bandwith allocation logic
3557 * for common control channels.
3559 * Invoked by: Scheduler
3561 * @param[in] RgSchCellCb* cell
3562 * @param[in] uint32_t bo
3563 * @param[in] uint8_t cqi
3564 * @param[in] uint8_t *rb
3565 * @param[in] uint32_t *tbs
3566 * @param[in] uint8_t *mcs
3567 * @param[in] RgSchDlSf *sf
/* Bandwidth allocation for common control channels: maps CQI -> iTBS/MCS,
 * converts the BO (bytes) to REs via the efficiency table, and derives the
 * RB count and final TBS, clamped to maxDlBwPerUe. Outputs via *rb, *tbs,
 * *mcs. (Two declarations below: non-SPS vs LTEMAC_SPS builds.) */
3572 Void rgSCHCmnClcRbAlloc
3585 static Void rgSCHCmnClcRbAlloc
3595 #endif /* LTEMAC_SPS */
3598 RgSchCmnTbSzEff *effTbl;
3601 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3602 uint8_t cfi = cellSch->dl.currCfi;
3605 /* first get the CQI to MCS table and determine the number of RBs */
3606 effTbl = (RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]);
3607 iTbsVal = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[cqi];
3608 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3610 /* Efficiency is number of bits per 1024 REs */
3611 eff = (*effTbl)[iTbsVal];
3613 /* Get the number of REs needed for this bo */
3614 noRes = ((bo * 8 * 1024) / eff );
3616 /* Get the number of RBs needed for this transmission */
3617 /* Number of RBs = No of REs / No of REs per RB */
3618 tmpRb = RGSCH_CEIL(noRes, cellSch->dl.noResPerRb[cfi]);
3619 /* KWORK_FIX: added check to see if rb has crossed maxRb*/
3620 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3621 if (tmpRb > cellSch->dl.maxDlBwPerUe)
3623 tmpRb = cellSch->dl.maxDlBwPerUe;
/* Grow RB count until the TBS covers the BO, up to the per-UE cap. */
3625 while ((rgTbSzTbl[0][iTbsVal][tmpRb-1]/8) < bo &&
3626 (tmpRb < cellSch->dl.maxDlBwPerUe))
3629 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3631 *tbs = rgTbSzTbl[0][iTbsVal][tmpRb-1]/8;
3632 *rb = (uint8_t)tmpRb;
3633 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3641 * @brief Scheduling for MSG4.
3645 * Function: rgSCHCmnMsg4Alloc
3646 * Purpose: Scheduling for MSG4
3648 * Invoked by: Scheduler
3650 * @param[in] RgSchCellCb* cell
3651 * @param[in] RgSchRaCb* raCb
3652 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3656 static S16 rgSCHCmnMsg4Alloc(RgSchCellCb *cell,RgSchRaCb *raCb,RgSchCmnDlRbAllocInfo *allocInfo)
/* Allocates DL resources for Msg4 of one RA procedure: checks subframe BW
 * headroom, obtains the Msg4 HARQ process, performs dedicated allocation,
 * and queues the process on the Msg4 TX list. Returns ROK/failure (S16). */
3658 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3661 /* SR_RACH_STATS : MSG4 TO BE TXED */
3663 /* Return if subframe BW exhausted */
3664 if (allocInfo->msg4Alloc.msg4DlSf->bw <=
3665 allocInfo->msg4Alloc.msg4DlSf->bwAssigned)
3667 DU_LOG("\nERROR --> SCH : bw<=bwAssigned")
3671 if (rgSCHDhmGetMsg4HqProc(raCb, cellSch->dl.time) != ROK)
3673 DU_LOG("\nERROR --> SCH : rgSCHDhmGetMsg4HqProc failed");
3677 raCb->rbAllocInfo.dlSf = allocInfo->msg4Alloc.msg4DlSf;
3679 if (rgSCHCmnMsg4DedAlloc(cell, raCb) != ROK)
3681 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
3682 rgSCHDhmRlsHqpTb(raCb->dlHqE->msg4Proc, 0, FALSE);
3683 DU_LOG("\nERROR --> SCH : rgSCHCmnMsg4DedAlloc failed.");
/* Success: enqueue the Msg4 HARQ proc and count the scheduled CCCH UE. */
3686 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4TxLst, &raCb->dlHqE->msg4Proc->reqLnk);
3687 raCb->dlHqE->msg4Proc->reqLnk.node = (PTR)raCb->dlHqE->msg4Proc;
3688 allocInfo->msg4Alloc.msg4DlSf->schdCcchUe++;
3695 * @brief This function implements PDCCH allocation for an UE.
3699 * Function: PdcchAlloc
3700 * Purpose: This function implements allocation of PDCCH for an UE.
3701 * 1. Get the aggregation level for the CQI of the UE.
3702 * 2. Get the candidate PDCCH count for the aggr level.
3703 * 3. Look for availability for each candidate and choose
3704 * the first one available.
3706 * Invoked by: Scheduler
3711 * @param[in] dciFrmt
3712 * @return RgSchPdcch *
3713 * -# NULLP when unsuccessful
/* Allocate a PDCCH in the UE-specific search space for the given UE/DCI
 * format. Chooses an aggregation level (currently hard-coded start of L2,
 * then bumped one step for decoding robustness), re-initialises the
 * subframe's CCE pool if the CCE count changed (CFI change), and picks the
 * first available candidate. Returns the allocated RgSchPdcch* or (per the
 * header comment) NULLP on failure; on failure subFrm->isCceFailure is set.
 * NOTE(review): extract is missing lines (braces, switch header, returns,
 * #ifdef guards) — verify against the full source file. */
3716 RgSchPdcch *rgSCHCmnPdcchAlloc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlSf *subFrm,uint8_t cqi,TfuDciFormat dciFrmt,Bool isDtx)
3718 CmLteAggrLvl aggrLvl;
3719 RgSchPdcchInfo *pdcchInfo;
3723 /* 3.1 consider the selected DCI format size in determining the
3724 * aggregation level */
3725 //TODO_SID Need to update. Currently using 4 aggregation level
3726 aggrLvl = CM_LTE_AGGR_LVL2;//cellSch->dciAggrLvl[cqi][dciFrmt];
/* Format 1A with forced common-space allocation: take a PDCCH from the
 * common search space instead of the UE-specific one. */
3729 if((dciFrmt == TFU_DCI_FORMAT_1A) &&
3730 ((ue) && (ue->allocCmnUlPdcch)) )
3732 pdcch = rgSCHCmnCmnPdcchAlloc(cell, subFrm);
3733 /* Since CRNTI Scrambled */
3736 pdcch->dciNumOfBits = ue->dciSize.cmnSize[dciFrmt];
3737 // prc_trace_format_string(PRC_TRACE_GROUP_PS, PRC_TRACE_INFO_LOW,"Forcing alloc in CMN search spc size %d fmt %d \n",
3738 // pdcch->dciNumOfBits, dciFrmt);
/* Bump aggrLvl one step (unless already at AGGR_LVL16, the max here) in
 * order to increase the redundancy bits for better decoding at the UE. */
3748 if (aggrLvl != CM_LTE_AGGR_LVL16)
3752 case CM_LTE_AGGR_LVL2:
3753 aggrLvl = CM_LTE_AGGR_LVL4;
3755 case CM_LTE_AGGR_LVL4:
3756 aggrLvl = CM_LTE_AGGR_LVL8;
3758 case CM_LTE_AGGR_LVL8:
3759 aggrLvl = CM_LTE_AGGR_LVL16;
3768 pdcchInfo = &subFrm->pdcchInfo;
3770 /* Updating the no. of nCce in pdcchInfo, in case if CFI
/* Re-init the subframe CCE pool whenever the subframe's (or cell's) CCE
 * count disagrees with the cached pdcchInfo->nCce (CFI change). */
3773 if(subFrm->nCce != pdcchInfo->nCce)
3775 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3778 if(cell->nCce != pdcchInfo->nCce)
3780 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Not enough CCEs in this subframe for the chosen aggregation level:
 * (1 << (aggrLvl - 1)) is the CCE count needed — presumably aggrLvl enum
 * values map so this yields 1/2/4/8/16; TODO confirm enum encoding. */
3784 if (pdcchInfo->nCce < (1 << (aggrLvl - 1)))
3786 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3787 subFrm->isCceFailure = TRUE;
3788 DU_LOG("\nDEBUG --> SCH : PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
/* Probe candidates; on success initialise the PDCCH bookkeeping fields
 * and account the consumed CCEs. */
3794 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3796 /* SR_RACH_STATS : Reset isTBMsg4 */
3797 pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4= FALSE;
3798 pdcch->dci.u.format0Info.isSrGrant = FALSE;
3800 pdcch->isSpsRnti = FALSE;
3802 /* Increment the CCE used counter in the current subframe */
3803 subFrm->cceCnt += aggrLvl;
3804 pdcch->pdcchSearchSpace = RG_SCH_UE_SPECIFIC_SEARCH_SPACE;
/* DCI size selection: secondary cell and dedicated cases both currently
 * hard-code MAX_5GTF_DCIA1B1_SIZE (see TODO_SID); otherwise the cell-level
 * per-format size table is used. */
3808 if (ue->cell != cell)
3810 /* Secondary Cell */
3811 //pdcch->dciNumOfBits = ue->dciSize.noUlCcSize[dciFrmt];
3812 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
3817 //pdcch->dciNumOfBits = ue->dciSize.dedSize[dciFrmt];
3818 //TODO_SID Need to update dci size.
3819 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
3825 pdcch->dciNumOfBits = cell->dciSize.size[dciFrmt];
3830 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3831 subFrm->isCceFailure = TRUE;
3833 DU_LOG("\nDEBUG --> SCH : PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
3840 * @brief This function implements BW allocation for CCCH SDU
3844 * Function: rgSCHCmnCcchSduDedAlloc
3845 * Purpose: Downlink bandwidth Allocation for CCCH SDU.
3847 * Invoked by: Scheduler
3849 * @param[in] RgSchCellCb* cell
3850 * @param[out] RgSchUeCb *ueCb
/* Dedicated DL bandwidth allocation for a CCCH SDU: compute required RBs /
 * TB size / MCS for the pending CCCH BO (plus header size), clamp to the
 * remaining subframe bandwidth (with a best-effort fallback when nothing has
 * been assigned yet), then record the allocation in the UE's RbAlloc CB and
 * the subframe's assigned-BW counter.
 * NOTE(review): extract is missing lines (the #ifdef LTEMAC_SPS opening line,
 * braces, returns) — verify against the full source file. */
3854 static S16 rgSCHCmnCcchSduDedAlloc(RgSchCellCb *cell,RgSchUeCb *ueCb)
3856 RgSchDlHqEnt *hqE = NULLP;
3858 RgSchDlRbAlloc *rbAllocinfo = NULLP;
3859 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3863 uint8_t cfi = cellDl->currCfi;
3867 rbAllocinfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
/* Effective BO includes the CCCH SDU MAC header overhead. */
3869 effBo = ueCb->dlCcchInfo.bo + RGSCH_CCCH_SDU_HDRSIZE;
3872 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
3873 &rbAllocinfo->tbInfo[0].bytesReq,
3874 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
3875 #else /* LTEMAC_SPS */
3876 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
3877 &rbAllocinfo->tbInfo[0].bytesReq,\
3878 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
3880 #endif /* LTEMAC_SPS */
3883 /* Cannot exceed the total number of RBs in the cell */
3884 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
3885 rbAllocinfo->dlSf->bwAssigned)))
3887 /* Check if atleast one allocation was possible.
3888 This may be the case where the Bw is very less and
3889 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
3890 if (rbAllocinfo->dlSf->bwAssigned == 0)
3892 numRb = rbAllocinfo->dlSf->bw;
/* Take the whole subframe BW and walk iTbs upward until the TB size
 * (rgTbSzTbl is in bits, hence /8) covers the effective BO. */
3893 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
3894 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
3898 rbAllocinfo->rbsReq = numRb;
3899 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
3900 /* DwPTS Scheduling Changes Start */
3902 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
3904 rbAllocinfo->tbInfo[0].bytesReq =
3905 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1,cfi);
3908 /* DwPTS Scheduling Changes End */
3909 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
3917 /* Update the subframe Allocated BW field */
3918 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
3919 rbAllocinfo->rbsReq;
/* Tie the allocation to TB0 of the UE's CCCH SDU HARQ process. */
3920 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
3921 rbAllocinfo->tbInfo[0].tbCb = &hqE->ccchSduProc->tbInfo[0];
3922 rbAllocinfo->rnti = ueCb->ueId;
3923 rbAllocinfo->tbInfo[0].noLyr = 1;
3930 * @brief This function implements BW allocation for MSG4
3934 * Function: rgSCHCmnMsg4DedAlloc
3935 * Purpose: Downlink bandwidth Allocation for MSG4.
3937 * Invoked by: Scheduler
3939 * @param[in] RgSchCellCb* cell
3940 * @param[out] RgSchRaCb *raCb
/* Dedicated DL bandwidth allocation for MSG4: same structure as
 * rgSCHCmnCcchSduDedAlloc but driven by the RA CB — the effective BO adds
 * the MSG4 header and contention-resolution ID sizes, the allocation is
 * addressed by the temporary C-RNTI, and TB0 of the MSG4 HARQ process is
 * marked for scheduling.
 * NOTE(review): extract is missing lines (the #ifdef LTEMAC_SPS opening line,
 * braces, returns) — verify against the full source file. */
3944 static S16 rgSCHCmnMsg4DedAlloc(RgSchCellCb *cell,RgSchRaCb *raCb)
3947 RgSchDlRbAlloc *rbAllocinfo = &raCb->rbAllocInfo;
3951 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3952 uint8_t cfi = cellDl->currCfi;
/* Effective BO = CCCH BO + MSG4 MAC header + contention resolution ID. */
3956 effBo = raCb->dlCcchInfo.bo + RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE;
3959 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
3960 &rbAllocinfo->tbInfo[0].bytesReq,\
3961 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
3962 #else /* LTEMAC_SPS */
3963 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
3964 &rbAllocinfo->tbInfo[0].bytesReq,\
3965 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
3967 #endif /* LTEMAC_SPS */
3970 /* Cannot exceed the total number of RBs in the cell */
3971 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
3972 rbAllocinfo->dlSf->bwAssigned)))
3974 /* Check if atleast one allocation was possible.
3975 This may be the case where the Bw is very less and
3976 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
3977 if (rbAllocinfo->dlSf->bwAssigned == 0)
3979 numRb = rbAllocinfo->dlSf->bw;
/* Fallback: take the whole subframe BW and raise iTbs until the TB size
 * (bits, hence /8) covers the effective BO. */
3980 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
3981 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
3985 rbAllocinfo->rbsReq = numRb;
3986 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
3987 /* DwPTS Scheduling Changes Start */
3989 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
3991 rbAllocinfo->tbInfo[0].bytesReq =
3992 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1, cfi);
3995 /* DwPTS Scheduling Changes End */
3996 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4004 /* Update the subframe Allocated BW field */
4005 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4006 rbAllocinfo->rbsReq;
/* MSG4 is addressed to the temporary C-RNTI assigned during RA. */
4007 rbAllocinfo->rnti = raCb->tmpCrnti;
4008 rbAllocinfo->tbInfo[0].tbCb = &raCb->dlHqE->msg4Proc->tbInfo[0];
4009 rbAllocinfo->tbInfo[0].schdlngForTb = TRUE;
4010 rbAllocinfo->tbInfo[0].noLyr = 1;
4017 * @brief This function implements scheduling for RA Response.
4021 * Function: rgSCHCmnDlRaRsp
4022 * Purpose: Downlink scheduling for RA responses.
4024 * Invoked by: Scheduler
4026 * @param[in] RgSchCellCb* cell
/* Downlink scheduling of RA responses (TDD flavour, indexed via the TDD
 * UL/DL configuration tables): determine the target DL subframe at
 * crntTime + RG_SCH_CMN_DL_DELTA, look up the RACH-response list for that
 * subframe, and for each scheduled SFN/subframe/RA-RNTI with pending RA
 * requests invoke rgSCHCmnRaRspAlloc until resources are exhausted.
 * NOTE(review): extract is missing lines (loop headers, braces, the break on
 * alloc failure) — verify control flow against the full source file. This and
 * the following same-named function are presumably separated by an elided
 * #ifdef LTE_TDD / #else pair; confirm in the full file. */
4030 static Void rgSCHCmnDlRaRsp(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
4032 CmLteTimingInfo frm;
4033 CmLteTimingInfo schFrm;
4039 RgSchTddRachRspLst *rachRsp;
4040 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
4045 frm = cell->crntTime;
4046 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4048 /* Compute the subframe for which allocation is being made */
4049 /* essentially, we need pointer to the dl frame for this subframe */
4050 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4052 /* Get the RACH Response scheduling related information
4053 * for the subframe with RA index */
4054 raIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][frm.subframe]-1;
4056 rachRsp = &cell->rachRspLst[raIdx];
4058 for(sfnIdx = 0; sfnIdx < rachRsp->numRadiofrms; sfnIdx++)
4060 /* For all scheduled RACH Responses in SFNs */
4062 RG_SCH_CMN_DECR_FRAME(schFrm.sfn, rachRsp->rachRsp[sfnIdx].sfnOffset);
4063 /* For all scheduled RACH Responses in subframes */
4065 subfrmIdx < rachRsp->rachRsp[sfnIdx].numSubfrms; subfrmIdx++)
4067 schFrm.subframe = rachRsp->rachRsp[sfnIdx].subframe[subfrmIdx];
4068 /* compute the last RA RNTI used in the previous subframe */
4069 raIdx = (((schFrm.sfn % cell->raInfo.maxRaSize) * \
4070 RGSCH_NUM_SUB_FRAMES * RGSCH_MAX_RA_RNTI_PER_SUBFRM) \
4073 /* For all RA RNTIs within a subframe */
4075 for(i=0; (i < RGSCH_MAX_RA_RNTI_PER_SUBFRM) && \
4076 (noRaRnti < RGSCH_MAX_TDD_RA_RSP_ALLOC); i++)
4078 rarnti = (schFrm.subframe + RGSCH_NUM_SUB_FRAMES*i + 1);
4079 rntiIdx = (raIdx + RGSCH_NUM_SUB_FRAMES*i);
/* Only allocate for RA-RNTIs that actually have pending RA requests. */
4081 if (cell->raInfo.raReqLst[rntiIdx].first != NULLP)
4083 /* compute the next RA RNTI */
4084 if (rgSCHCmnRaRspAlloc(cell, subFrm, rntiIdx,
4085 rarnti, noRaRnti, allocInfo) != ROK)
4087 /* The resources are exhausted */
4101 * @brief This function implements scheduling for RA Response.
4105 * Function: rgSCHCmnDlRaRsp
4106 * Purpose: Downlink scheduling for RA responses.
4108 * Invoked by: Scheduler
4110 * @param[in] RgSchCellCb* cell
4111 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* Downlink scheduling of RA responses (non-TDD / 5GTF flavour): compute the
 * RAR-window start occasion from the current time minus the window gap
 * (preamble duration + RAR window size + RAR wait period), then scan up to
 * raWinSize occasions (bounded by RG_SCH_CMN_MAX_CMN_PDCCH RA-RNTIs) and
 * allocate responses for each occasion with pending RA requests.
 * NOTE(review): extract is missing lines (braces, break statements) and this
 * duplicates the previous function's name — presumably an elided
 * #ifdef LTE_TDD / #else separates the two; confirm in the full file. */
4115 static Void rgSCHCmnDlRaRsp(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
4117 CmLteTimingInfo frm;
4118 CmLteTimingInfo winStartFrm;
4120 uint8_t winStartIdx;
4124 RgSchCmnCell *sched;
4125 uint8_t i,noRaRnti=0;
4127 frm = cell->crntTime;
4128 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4130 /* Compute the subframe for which allocation is being made */
4131 /* essentially, we need pointer to the dl frame for this subframe */
4132 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4133 sched = RG_SCH_CMN_GET_CELL(cell);
4135 /* ccpu00132523 - Window Start calculated by considering RAR window size,
4136 * RAR Wait period, Subframes occupied for respective preamble format */
4137 winGap = (sched->dl.numRaSubFrms-1) + (cell->rachCfg.raWinSize-1)
4138 +RGSCH_RARSP_WAIT_PERIOD;
4140 /* Window starting occasion is retrieved using the gap and tried to
4141 * fit to the size of raReqLst array */
4142 RGSCHDECRFRMCRNTTIME(frm, winStartFrm, winGap);
4144 //5G_TODO TIMING update. Need to check
/* Index into the raReqLst ring: even/odd SFN selects the half, slot selects
 * the entry within it. */
4145 winStartIdx = (winStartFrm.sfn & 1) * RGSCH_MAX_RA_RNTI+ winStartFrm.slot;
4147 for(i = 0; ((i < cell->rachCfg.raWinSize) && (noRaRnti < RG_SCH_CMN_MAX_CMN_PDCCH)); i++)
4149 raIdx = (winStartIdx + i) % RGSCH_RAREQ_ARRAY_SIZE;
4151 if (cell->raInfo.raReqLst[raIdx].first != NULLP)
/* Backoff Indicator subheader is budgeted only for the first occasion
 * of the window (!i). */
4153 allocInfo->raRspAlloc[noRaRnti].biEstmt = \
4154 (!i * RGSCH_ONE_BIHDR_SIZE);
4155 rarnti = raIdx % RGSCH_MAX_RA_RNTI+ 1;
4156 if (rgSCHCmnRaRspAlloc(cell, subFrm, raIdx,
4157 rarnti, noRaRnti, allocInfo) != ROK)
4159 /* The resources are exhausted */
4162 /* ccpu00132523- If all the RAP ids are not scheduled then need not
4163 * proceed for next RA RNTIs*/
4164 if(allocInfo->raRspAlloc[noRaRnti].numRapids < cell->raInfo.raReqLst[raIdx].count)
4168 noRaRnti++; /* Max of RG_SCH_CMN_MAX_CMN_PDCCH RARNTIs
4169 for response allocation */
4178 * @brief This function allocates the resources for an RARNTI.
4182 * Function: rgSCHCmnRaRspAlloc
4183 * Purpose: Allocate resources to a RARNTI.
4184 * 0. Allocate PDCCH for sending the response.
4185 * 1. Locate the number of RA requests pending for the RARNTI.
4186 * 2. Compute the size of data to be built.
4187 * 3. Using common channel CQI, compute the number of RBs.
4189 * Invoked by: Scheduler
4191 * @param[in] RgSchCellCb *cell,
4192 * @param[in] RgSchDlSf *subFrm,
4193 * @param[in] uint16_t rarnti,
4194 * @param[in] uint8_t noRaRnti
4195 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* Allocate DL resources for one RA-RNTI's RAR: count pending RAPIDs
 * (limited per UL subframe by maxMsg3PerUlSf), size the RAR PDU (plus the
 * BI subheader estimate), map it to an allowed common-channel TB size /
 * nPrb / MCS, convert to RBs (with DwPTS compensation on special
 * subframes), check against remaining subframe BW, and fill the
 * raRspAlloc[noRaRnti] entry.
 * NOTE(review): extract is missing lines (braces, returns, the reset of the
 * per-subframe static counter mentioned at ccpu00132523) — verify against
 * the full source file. schdNumRapid is function-static, i.e. it accumulates
 * across calls within a scheduling subframe. */
4199 static S16 rgSCHCmnRaRspAlloc(RgSchCellCb *cell,RgSchDlSf *subFrm,uint16_t raIndex,uint16_t rarnti,uint8_t noRaRnti,RgSchCmnDlRbAllocInfo *allocInfo)
4201 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4202 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4206 /*ccpu00116700,ccpu00116708- Corrected the wrong type for mcs*/
4209 /* RACH handling related changes */
4210 Bool isAlloc = FALSE;
4211 static uint8_t schdNumRapid = 0;
4212 uint8_t remNumRapid = 0;
4217 uint8_t cfi = cellDl->currCfi;
4224 /* ccpu00132523: Resetting the schdRap Id count in every scheduling subframe*/
/* No BW left in this DL subframe: nothing to allocate. */
4231 if (subFrm->bw == subFrm->bwAssigned)
4233 DU_LOG("\nERROR --> SCH : bw == bwAssigned RARNTI:%d",rarnti);
4237 reqLst = &cell->raInfo.raReqLst[raIndex];
4238 if (reqLst->count == 0)
4240 DU_LOG("\nERROR --> SCH : reqLst Count=0 RARNTI:%d",rarnti);
4243 remNumRapid = reqLst->count;
4246 /* Limit number of rach rsps to maxMsg3PerUlsf */
4247 if ( schdNumRapid+remNumRapid > cellUl->maxMsg3PerUlSf )
4249 remNumRapid = cellUl->maxMsg3PerUlSf-schdNumRapid;
4255 /* Try allocating for as many RAPIDs as possible */
4256 /* BI sub-header size to the tbSize requirement */
4257 noBytes = RGSCH_GET_RAR_BYTES(remNumRapid) +\
4258 allocInfo->raRspAlloc[noRaRnti].biEstmt;
/* Map the byte requirement to an allowed common-channel TB size; -1 means
 * no valid TB size exists for this payload. */
4259 if ((allwdTbSz = rgSCHUtlGetAllwdCchTbSz(noBytes*8, &nPrb, &mcs)) == -1)
4265 /* rgSCHCmnClcRbAllocForFxdTb(cell, allwdTbSz/8, cellDl->ccchCqi, &rb);*/
/* Convert the TB size (bits) to an RB count: scan the iTbs-0 TBS column if
 * bitsPerRb is not cached, otherwise a straight ceiling division. */
4266 if(cellDl->bitsPerRb==0)
4268 while ((rgTbSzTbl[0][0][rb]) <(uint32_t) allwdTbSz)
4276 rb = RGSCH_CEIL(allwdTbSz, cellDl->bitsPerRb);
4278 /* DwPTS Scheduling Changes Start */
4280 if (subFrm->sfType == RG_SCH_SPL_SF_DATA)
4282 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
4284 /* Calculate the less RE's because of DwPTS */
4285 lostRe = rb * (cellDl->noResPerRb[cfi] -
4286 cellDl->numReDwPts[cfi]);
4288 /* Increase number of RBs in Spl SF to compensate for lost REs */
4289 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
4292 /* DwPTS Scheduling Changes End */
4294 /*ccpu00115595- end*/
4295 if (rb > subFrm->bw - subFrm->bwAssigned)
4300 /* Allocation succeeded for 'remNumRapid' */
4303 DU_LOG("\nINFO --> SCH : RAR alloc noBytes:%u,allwdTbSz:%u,tbs:%u,rb:%u\n",
4304 noBytes,allwdTbSz,tbs,rb);
4309 DU_LOG("\nERROR --> SCH : BW alloc Failed");
4313 subFrm->bwAssigned = subFrm->bwAssigned + rb;
4315 /* Fill AllocInfo structure */
4316 allocInfo->raRspAlloc[noRaRnti].rnti = rarnti;
4317 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].bytesReq = tbs;
4318 allocInfo->raRspAlloc[noRaRnti].rbsReq = rb;
4319 allocInfo->raRspAlloc[noRaRnti].dlSf = subFrm;
4320 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].imcs = mcs;
4321 allocInfo->raRspAlloc[noRaRnti].raIndex = raIndex;
4322 /* RACH changes for multiple RAPID handling */
4323 allocInfo->raRspAlloc[noRaRnti].numRapids = remNumRapid;
4324 allocInfo->raRspAlloc[noRaRnti].nPrb = nPrb;
4325 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].noLyr = 1;
4326 allocInfo->raRspAlloc[noRaRnti].vrbgReq = RGSCH_CEIL(nPrb,MAX_5GTF_VRBG_SIZE);
4327 schdNumRapid += remNumRapid;
4331 /***********************************************************
4333 * Func : rgSCHCmnUlAllocFillRbInfo
4335 * Desc : Fills the start RB and the number of RBs for
4336 * uplink allocation.
4344 **********************************************************/
/* Translate an UL allocation's subband coordinates into absolute RBs:
 * rbStart = subband start * subband size, offset by the CFI-dependent
 * first usable RB; numRb = allocated subbands * subband size.
 * NOTE(review): extract is missing lines (braces and, per the header
 * comment, an adjustment for fewer RBs in the last subband) — verify
 * against the full source file. */
4345 Void rgSCHCmnUlAllocFillRbInfo(RgSchCellCb *cell,RgSchUlSf *sf,RgSchUlAlloc *alloc)
4347 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4348 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4349 uint8_t cfi = cellDl->currCfi;
4352 alloc->grnt.rbStart = (alloc->sbStart * cellUl->sbSize) +
4353 cell->dynCfiCb.bwInfo[cfi].startRb;
4355 /* Num RBs = numSbAllocated * sbSize - less RBs in the last SB */
4356 alloc->grnt.numRb = (alloc->numSb * cellUl->sbSize);
4362 * @brief Grant request for Msg3.
4366 * Function : rgSCHCmnMsg3GrntReq
4368 * This is invoked by downlink scheduler to request allocation
4371 * - Attempt to allocate msg3 in the current msg3 subframe
4372 * Allocation attempt based on whether preamble is from group A
4373 * and the value of MESSAGE_SIZE_GROUP_A
4374 * - Link allocation with passed RNTI and msg3 HARQ process
4375 * - Set the HARQ process ID (*hqProcIdRef)
4377 * @param[in] RgSchCellCb *cell
4378 * @param[in] CmLteRnti rnti
4379 * @param[in] Bool preamGrpA
4380 * @param[in] RgSchUlHqProcCb *hqProc
4381 * @param[out] RgSchUlAlloc **ulAllocRef
4382 * @param[out] uint8_t *hqProcIdRef
/* Grant UL resources for Msg3 in the current msg3 scheduling subframe:
 * pick subband count and iMCS from the group-A or group-B preamble
 * configuration, carve the subbands out of the first hole, fill the grant
 * (TB size, modulation order, nDmrs/hop/delay = 0), bind the allocation to
 * the Msg3 HARQ process, and start a new HARQ transmission.
 * *ulAllocRef stays NULLP on failure (allocation cap reached, no hole, or
 * hole too small).
 * NOTE(review): extract is missing lines (braces, returns) — verify control
 * flow against the full source file. */
4385 static Void rgSCHCmnMsg3GrntReq
4390 RgSchUlHqProcCb *hqProc,
4391 RgSchUlAlloc **ulAllocRef,
4392 uint8_t *hqProcIdRef
4395 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4396 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
4398 RgSchUlAlloc *alloc;
4403 *ulAllocRef = NULLP;
4405 /* Fix: ccpu00120610 Use remAllocs from subframe during msg3 allocation */
/* Per-UL-subframe allocation cap already reached: bail out. */
4406 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Preamble group selects the Msg3 subband count and MCS index. */
4410 if (preamGrpA == FALSE)
4412 numSb = cellUl->ra.prmblBNumSb;
4413 iMcs = cellUl->ra.prmblBIMcs;
4417 numSb = cellUl->ra.prmblANumSb;
4418 iMcs = cellUl->ra.prmblAIMcs;
4421 if ((hole = rgSCHUtlUlHoleFirst(sf)) != NULLP)
4423 if(*sf->allocCountRef == 0)
4425 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4426 /* Reinitialize the hole */
4427 if (sf->holeDb->count == 1 && (hole->start == 0)) /* Sanity check of holeDb */
/* Re-initialize available subbands for the current CFI.
 * NOTE(review): this assignment is duplicated on the next statement —
 * harmless but redundant; candidate for cleanup in the full file. */
4429 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4430 /* Re-Initialize available subbands because of CFI change*/
4431 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4435 DU_LOG("\nERROR --> SCH : holeDb sanity check failed RNTI:%d",rnti);
4438 if (numSb <= hole->num)
4441 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
4442 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
4443 alloc->grnt.iMcs = iMcs;
4444 alloc->grnt.iMcsCrnt = iMcs;
4445 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
4446 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
4447 /* To include the length and ModOrder in DataRecp Req.*/
/* TB size table is in bits, hence /8 for the byte count. */
4448 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
4449 RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
4450 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
4451 alloc->grnt.nDmrs = 0;
4452 alloc->grnt.hop = 0;
4453 alloc->grnt.delayBit = 0;
4454 alloc->grnt.isRtx = FALSE;
4455 *ulAllocRef = alloc;
/* Report the Msg3 HARQ process id / UL subframe index back to the caller
 * and bind them into the HARQ process. */
4456 *hqProcIdRef = (cellUl->msg3SchdHqProcIdx);
4457 hqProc->procId = *hqProcIdRef;
4458 hqProc->ulSfIdx = (cellUl->msg3SchdIdx);
4461 alloc->pdcch = FALSE;
4462 alloc->forMsg3 = TRUE;
4463 alloc->hqProc = hqProc;
4464 rgSCHUhmNewTx(hqProc, (uint8_t)(cell->rachCfg.maxMsg3Tx - 1), alloc);
4465 DU_LOG("\nDEBUG --> SCH : RNTI:%d MSG3 ALLOC proc(%lu)procId(%d)schdIdx(%d)\n",
4467 ((PTR)alloc->hqProc),
4468 alloc->hqProc->procId,
4469 alloc->hqProc->ulSfIdx);
4470 DU_LOG("\nDEBUG --> SCH : alloc(%p)maxMsg3Tx(%d)",
4472 cell->rachCfg.maxMsg3Tx);
4481 * @brief This function determines the allocation limits and
4482 * parameters that aid in DL scheduling.
4486 * Function: rgSCHCmnDlSetUeAllocLmt
4487 * Purpose: This function determines the Maximum RBs
4488 * a UE is eligible to get based on softbuffer
4489 * limitation and cell->maxDlBwPerUe. The Codeword
4490 * specific parameters like iTbs, eff and noLyrs
4491 * are also set in this function. This function
4492 * is called while UE configuration and UeDlCqiInd.
4494 * Invoked by: Scheduler
4496 * @param[in] RgSchCellCb *cellCb
4497 * @param[in] RgSchCmnDlUe *ueDl
/* Set a UE's DL allocation limits and per-codeword parameters: look up
 * iTbs/efficiency for both codewords and both layer counts from the
 * CQI-indexed tables (eMTC tables when isEmtcUe), derive per-codeword layer
 * counts from the reported RI, cap maxRb by HARQ soft-buffer size and
 * cell-level maxDlBwPerUe (rounded to RBG size for frequency-selective
 * scheduling), and choose the more efficient codeword index.
 * Called at UE configuration and on DL CQI indication.
 * NOTE(review): extract is missing lines (braces, the #ifdef around the eMTC
 * branch) — verify against the full source file. */
4501 static Void rgSCHCmnDlSetUeAllocLmt(RgSchCellCb *cell,RgSchCmnDlUe *ueDl,Bool isEmtcUe)
4505 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4506 uint8_t cfi = cellSch->dl.currCfi;
4510 if(TRUE == isEmtcUe)
4512 /* ITbs for CW0 for 1 Layer Tx */
4513 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4514 [ueDl->mimoInfo.cwInfo[0].cqi];
4515 /* ITbs for CW0 for 2 Layer Tx */
4516 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4517 [ueDl->mimoInfo.cwInfo[0].cqi];
4518 /* Eff for CW0 for 1 Layer Tx */
4519 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4520 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4521 /* Eff for CW0 for 2 Layer Tx */
4522 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4523 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4525 /* ITbs for CW1 for 1 Layer Tx */
4526 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4527 [ueDl->mimoInfo.cwInfo[1].cqi];
4528 /* ITbs for CW1 for 2 Layer Tx */
4529 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4530 [ueDl->mimoInfo.cwInfo[1].cqi];
4531 /* Eff for CW1 for 1 Layer Tx */
4532 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4533 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4534 /* Eff for CW1 for 2 Layer Tx */
4535 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4536 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
/* Non-eMTC UE: identical lookups using the standard CQI->iTbs tables. */
4541 /* ITbs for CW0 for 1 Layer Tx */
4542 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4543 [ueDl->mimoInfo.cwInfo[0].cqi];
4544 /* ITbs for CW0 for 2 Layer Tx */
4545 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4546 [ueDl->mimoInfo.cwInfo[0].cqi];
4547 /* Eff for CW0 for 1 Layer Tx */
4548 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4549 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4550 /* Eff for CW0 for 2 Layer Tx */
4551 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4552 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4554 /* ITbs for CW1 for 1 Layer Tx */
4555 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4556 [ueDl->mimoInfo.cwInfo[1].cqi];
4557 /* ITbs for CW1 for 2 Layer Tx */
4558 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4559 [ueDl->mimoInfo.cwInfo[1].cqi];
4560 /* Eff for CW1 for 1 Layer Tx */
4561 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4562 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4563 /* Eff for CW1 for 2 Layer Tx */
4564 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4565 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4569 // ueDl->laCb.cqiBasediTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0] * 100;
4571 /* Assigning noLyrs to each CW assuming optimal Spatial multiplexing
/* CW0 gets floor(ri/2) layers (min 1); CW1 gets the remainder. */
4573 (ueDl->mimoInfo.ri/2 == 0)? (ueDl->mimoInfo.cwInfo[0].noLyr = 1) : \
4574 (ueDl->mimoInfo.cwInfo[0].noLyr = ueDl->mimoInfo.ri/2);
4575 ueDl->mimoInfo.cwInfo[1].noLyr = ueDl->mimoInfo.ri - ueDl->mimoInfo.cwInfo[0].noLyr;
4576 /* rg002.101:ccpu00102106: correcting DL harq softbuffer limitation logic.
4577 * The maxTbSz is the maximum number of PHY bits a harq process can
4578 * hold. Hence we limit our allocation per harq process based on this.
4579 * Earlier implementation we misinterpreted the maxTbSz to be per UE
4580 * per TTI, but in fact it is per Harq per TTI. */
4581 /* rg002.101:ccpu00102106: cannot exceed the harq Tb Size
4582 * and harq Soft Bits limit.*/
4584 /* Considering iTbs corresponding to 2 layer transmission for
4585 * codeword0(approximation) and the maxLayers supported by
4586 * this UE at this point of time. */
4587 RG_SCH_CMN_TBS_TO_MODODR(ueDl->mimoInfo.cwInfo[0].iTbs[1], modOrder);
4589 /* Bits/modOrder gives #REs, #REs/noResPerRb gives #RBs */
4590 /* rg001.301 -MOD- [ccpu00119213] : avoiding wraparound */
4591 maxRb = ((ueDl->maxSbSz)/(cellSch->dl.noResPerRb[cfi] * modOrder *\
4592 ueDl->mimoInfo.ri));
4593 if (cellSch->dl.isDlFreqSel)
4595 /* Rounding off to left nearest multiple of RBG size */
4596 maxRb -= maxRb % cell->rbgSize;
4598 ueDl->maxRb = RGSCH_MIN(maxRb, cellSch->dl.maxDlBwPerUe);
4599 if (cellSch->dl.isDlFreqSel)
4601 /* Rounding off to right nearest multiple of RBG size */
4602 if (ueDl->maxRb % cell->rbgSize)
4604 ueDl->maxRb += (cell->rbgSize -
4605 (ueDl->maxRb % cell->rbgSize));
4609 /* Set the index of the cwInfo, which is better in terms of
4610 * efficiency. If RI<2, only 1 CW, hence btrCwIdx shall be 0 */
4611 if (ueDl->mimoInfo.ri < 2)
4613 ueDl->mimoInfo.btrCwIdx = 0;
4617 if (ueDl->mimoInfo.cwInfo[0].eff[ueDl->mimoInfo.cwInfo[0].noLyr-1] <\
4618 ueDl->mimoInfo.cwInfo[1].eff[ueDl->mimoInfo.cwInfo[1].noLyr-1])
4620 ueDl->mimoInfo.btrCwIdx = 1;
4624 ueDl->mimoInfo.btrCwIdx = 0;
4634 * @brief This function updates TX Scheme.
4638 * Function: rgSCHCheckAndSetTxScheme
4639 * Purpose: Compares the CQI-derived iTbs with the iTbs
4640 * currently in use for CW0 and forces a transmit
4641 * diversity (TD) scheme change when the gap exceeds
4642 * the configured threshold; the forced TD is cleared
4643 * once the actual iTbs reaches the table maximum.
4644 * Invoked during DL CQI / link adaptation handling.
4646 * Invoked by: Scheduler
4648 * @param[in] RgSchCellCb *cell
4649 * @param[in] RgSchUeCb *ue
/* Check whether a transmit-scheme change should be forced for the UE:
 * compare the CQI-derived iTbs (scaled /100 from laCb) against the iTbs in
 * use for CW0; force TD (transmit diversity) mode when the gap exceeds
 * RG_SCH_TXSCHEME_CHNG_THRSHD below RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR, and
 * clear the force once the actual iTbs reaches the table maximum.
 * NOTE(review): extract is missing lines (braces) — verify against the full
 * source file. */
4653 static Void rgSCHCheckAndSetTxScheme(RgSchCellCb *cell,RgSchUeCb *ue)
4655 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4656 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue ,cell);
4657 uint8_t cfi = cellSch->dl.currCfi;
4659 uint8_t cqiBasediTbs;
/* Highest iTbs reachable at the best CQI for 1-layer Tx at current CFI. */
4663 maxiTbs = (*(RgSchCmnCqiToTbs*)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4664 [RG_SCH_CMN_MAX_CQI - 1];
4665 cqiBasediTbs = (ueDl->laCb[0].cqiBasediTbs)/100;
4666 actualiTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0];
4668 if((actualiTbs < RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR) && (cqiBasediTbs >
4669 actualiTbs) && ((cqiBasediTbs - actualiTbs) > RG_SCH_TXSCHEME_CHNG_THRSHD))
4671 RG_SCH_CMN_SET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
4674 if(actualiTbs >= maxiTbs)
4676 RG_SCH_CMN_UNSET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
4683 * @brief This function determines the allocation limits and
4684 * parameters that aid in DL scheduling.
4688 * Function: rgSCHCmnDlSetUeAllocLmtLa
4689 * Purpose: This function determines the Maximum RBs
4690 * a UE is eligible to get based on softbuffer
4691 * limitation and cell->maxDlBwPerUe. The Codeword
4692 * specific parameters like iTbs, eff and noLyrs
4693 * are also set in this function. This function
4694 * is called while UE configuration and UeDlCqiInd.
4696 * Invoked by: Scheduler
4698 * @param[in] RgSchCellCb *cell
4699 * @param[in] RgSchUeCb *ue
/* Link-adaptation update of a UE's DL iTbs: for each codeword, blend the
 * newly reported iTbs (from ue5gtfCb.mcs) into an exponentially weighted
 * (20/80) CQI-based iTbs, ignoring reports that deviate by more than 5 from
 * the iTbs in use (and resetting the LA state after 10 consecutive ignored
 * reports), then apply deltaiTbs and clamp to [0, maxDlItbs]. The result is
 * written back to both cwInfo[cwIdx].iTbs[0] and ue5gtfCb.mcs.
 * NOTE(review): extract is missing lines (braces, the TM3/TM4 tail logic) —
 * verify control flow against the full source file. */
4703 Void rgSCHCmnDlSetUeAllocLmtLa(RgSchCellCb *cell,RgSchUeCb *ue)
4707 uint8_t reportediTbs;
4708 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4709 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
4710 uint8_t cfi = cellSch->dl.currCfi;
4715 maxiTbs = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[RG_SCH_CMN_MAX_CQI - 1];
4716 if(ueDl->cqiFlag == TRUE)
4718 for(cwIdx=0; cwIdx < RG_SCH_CMN_MAX_CW_PER_UE; cwIdx++)
4722 /* Calcluating the reported iTbs for code word 0 */
4723 reportediTbs = ue->ue5gtfCb.mcs;
4725 iTbsNew = (S32) reportediTbs;
4727 if(!ueDl->laCb[cwIdx].notFirstCqi)
4729 /* This is the first CQI report from UE */
4730 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
4731 ueDl->laCb[cwIdx].notFirstCqi = TRUE;
4733 else if ((RG_ITBS_DIFF(reportediTbs, ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0])) > 5)
4735 /* Ignore this iTBS report and mark that last iTBS report was */
4736 /* ignored so that subsequently we reset the LA algorithm */
4737 ueDl->laCb[cwIdx].lastiTbsIgnored = TRUE;
4738 ueDl->laCb[cwIdx].numLastiTbsIgnored++;
4739 if( ueDl->laCb[cwIdx].numLastiTbsIgnored > 10)
4741 /* CQI reported by UE is not catching up. Reset the LA algorithm */
4742 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
4743 ueDl->laCb[cwIdx].deltaiTbs = 0;
4744 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
4745 ueDl->laCb[cwIdx].numLastiTbsIgnored = 0;
4750 if (ueDl->laCb[cwIdx].lastiTbsIgnored != TRUE)
/* Normal path: 20% new report, 80% running average (x100 fixed point). */
4752 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
4753 (80 * ueDl->laCb[cwIdx].cqiBasediTbs))/100;
4757 /* Reset the LA as iTbs in use caught up with the value */
4758 /* reported by UE. */
4759 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
4760 (80 * ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] * 100))/100;
4761 ueDl->laCb[cwIdx].deltaiTbs = 0;
4762 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
4766 iTbsNew = (ueDl->laCb[cwIdx].cqiBasediTbs + ueDl->laCb[cwIdx].deltaiTbs)/100;
4768 RG_SCH_CHK_ITBS_RANGE(iTbsNew, maxiTbs);
4770 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] = RGSCH_MIN(iTbsNew, cell->thresholds.maxDlItbs);
4771 //ueDl->mimoInfo.cwInfo[cwIdx].iTbs[1] = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
4773 ue->ue5gtfCb.mcs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
4775 DU_LOG("\nINFO --> SCH : reportediTbs[%d] cqiBasediTbs[%d] deltaiTbs[%d] iTbsNew[%d] mcs[%d] cwIdx[%d]\n",
4776 reportediTbs, ueDl->laCb[cwIdx].cqiBasediTbs, ueDl->laCb[cwIdx].deltaiTbs,
4777 iTbsNew, ue->ue5gtfCb.mcs, cwIdx);
/* Only TM3/TM4 UEs use dual-codeword handling; others fall through here. */
4781 if((ue->mimoInfo.txMode != RGR_UE_TM_3) && (ue->mimoInfo.txMode != RGR_UE_TM_4))
4786 ueDl->cqiFlag = FALSE;
4793 /***********************************************************
4795 * Func : rgSCHCmnDlHqPResetTemp
4797 * Desc : Reset whatever variables where temporarily used
4798 * during UE scheduling.
4806 **********************************************************/
/* Clear the per-TTI list linkage of a DL HARQ process (request list and
 * scheduled list) so it can be freshly linked in the next scheduling round. */
4807 Void rgSCHCmnDlHqPResetTemp(RgSchDlHqProcCb *hqP)
4810 /* Fix: syed having a hqP added to Lists for RB assignment rather than
4811 * a UE, as adding UE was limiting handling some scenarios */
4812 hqP->reqLnk.node = (PTR)NULLP;
4813 hqP->schdLstLnk.node = (PTR)NULLP;
4816 } /* rgSCHCmnDlHqPResetTemp */
4818 /***********************************************************
4820 * Func : rgSCHCmnDlUeResetTemp
4822 * Desc : Reset whatever variables where temporarily used
4823 * during UE scheduling.
4831 **********************************************************/
/* Reset a UE's temporary DL scheduling state for the given HARQ process:
 * clear the UE's in-progress proc pointer, zero its RB allocation CB
 * (preserving rnti and the LAA CB), clear the outstanding-allocation flag,
 * and reset the HARQ process's own temporary list linkage.
 * NOTE(review): extract is missing lines (braces, #ifdef around laaCb) —
 * verify against the full source file. */
4832 Void rgSCHCmnDlUeResetTemp(RgSchUeCb *ue,RgSchDlHqProcCb *hqP)
4834 RgSchDlRbAlloc *allocInfo;
4835 RgSchCmnDlUe *cmnUe = RG_SCH_CMN_GET_DL_UE(ue,hqP->hqE->cell);
4841 /* Fix : syed check for UE's existence was useless.
4842 * Instead we need to check that reset is done only for the
4843 * information of a scheduled harq proc, which is cmnUe->proc.
4844 * Reset should not be done for non-scheduled hqP */
4845 if((cmnUe->proc == hqP) || (cmnUe->proc == NULLP))
4847 cmnUe->proc = NULLP;
4848 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, hqP->hqE->cell);
/* Save and restore laaCb across the memset — it must survive the reset. */
4850 tmpCb = allocInfo->laaCb;
4852 memset(allocInfo, 0, sizeof(RgSchDlRbAlloc));
4853 allocInfo->rnti = ue->ueId;
4855 allocInfo->laaCb = tmpCb;
4857 /* Fix: syed moving this to a common function for both scheduled
4858 * and non-scheduled UEs */
4859 cmnUe->outStndAlloc = 0;
4861 rgSCHCmnDlHqPResetTemp(hqP);
4864 } /* rgSCHCmnDlUeResetTemp */
4866 /***********************************************************
4868 * Func : rgSCHCmnUlUeResetTemp
4870 * Desc : Reset whatever variables where temporarily used
4871 * during UE scheduling.
4879 **********************************************************/
/* Reset a UE's temporary UL scheduling state by zeroing its UL allocation
 * record for the next scheduling round. */
4880 Void rgSCHCmnUlUeResetTemp(RgSchCellCb *cell,RgSchUeCb *ue)
4882 RgSchCmnUlUe *cmnUlUe = RG_SCH_CMN_GET_UL_UE(ue,cell);
4884 memset(&cmnUlUe->alloc, 0, sizeof(cmnUlUe->alloc));
4887 } /* rgSCHCmnUlUeResetTemp */
4892 * @brief This function fills the PDCCH information from dlProc.
4896 * Function: rgSCHCmnFillPdcch
4897 * Purpose: This function fills in the PDCCH information
4898 * obtained from the RgSchDlRbAlloc
4899 * during common channel scheduling(P, SI, RA - RNTI's).
4900 * Supported formats here: 1A (type-2 local RA) and 1
4901 * Invoked by: Downlink Scheduler
4903 * @param[out] RgSchPdcch* pdcch
4904 * @param[in] RgSchDlRbAlloc* rbAllocInfo
4908 Void rgSCHCmnFillPdcch(RgSchCellCb *cell,RgSchPdcch *pdcch,RgSchDlRbAlloc *rbAllocInfo)
4911 /* common channel pdcch filling,
4912 * only 1A and Local is supported */
4913 pdcch->rnti = rbAllocInfo->rnti;
4914 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
4915 switch(rbAllocInfo->dciFormat)
4917 #ifdef RG_5GTF /* ANOOP: ToDo: DCI format B1/B2 filling */
/* 5GTF format B1: most fields hardcoded; common grant carries range/RB
 * assignment; HARQ proc id and NDI fixed to 0 for common channels. */
4918 case TFU_DCI_FORMAT_B1:
4921 pdcch->dci.u.formatB1Info.formatType = 0;
4922 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].cmnGrnt.xPDSCHRange;
4923 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].cmnGrnt.rbAssign;
4924 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = 0;
4925 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
4926 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = 0;
4927 //pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].ndi;
4928 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].cmnGrnt.rv;
4929 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
4930 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
4931 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
4932 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
4933 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
4934 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
4935 //TODO_SID: Need to update
4936 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
4937 pdcch->dci.u.formatB1Info.beamSwitch = 0;
4938 pdcch->dci.u.formatB1Info.SRS_Config = 0;
4939 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
4940 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
4941 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
4942 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].cmnGrnt.SCID;
4943 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
4944 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
4945 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
4947 break; /* case TFU_DCI_FORMAT_B1: */
4950 case TFU_DCI_FORMAT_B2:
4952 //DU_LOG("\nINFO --> SCH : RG_5GTF:: Pdcch filling with DCI format B2\n");
4954 break; /* case TFU_DCI_FORMAT_B2: */
/* Format 1A: local (non-distributed) type-2 allocation; NDI/RV fixed to
 * 0 and RIV computed from raType2 start/length. */
4957 case TFU_DCI_FORMAT_1A:
4958 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
4960 /*Nprb indication at PHY for common Ch
4961 *setting least significant bit of tpc field to 1 if
4962 nPrb=3 and 0 otherwise. */
4963 if (rbAllocInfo->nPrb == 3)
4965 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 1;
4969 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 0;
4971 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
4972 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
4973 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
4974 rbAllocInfo->tbInfo[0].imcs;
4975 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = 0;
4976 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = 0;
4978 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
4980 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
4981 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
4982 rbAllocInfo->allocInfo.raType2.rbStart,
4983 rbAllocInfo->allocInfo.raType2.numRb);
4986 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = \
4989 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
4990 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
4993 break; /* case TFU_DCI_FORMAT_1A: */
/* Format 1: type-0 RA bitmap, split into four bytes MSB-first. */
4994 case TFU_DCI_FORMAT_1:
4995 pdcch->dci.u.format1Info.tpcCmd = 0;
4996 /* Avoiding this check,as we dont support Type1 RA */
4998 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
5001 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5002 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5003 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5005 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5006 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5008 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5009 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5011 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5012 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
5016 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5017 pdcch->dci.u.format1Info.allocInfo.ndi = 0;
5018 pdcch->dci.u.format1Info.allocInfo.mcs = rbAllocInfo->tbInfo[0].imcs;
5019 pdcch->dci.u.format1Info.allocInfo.rv = 0;
5021 pdcch->dci.u.format1Info.dai = 1;
5025 DU_LOG("\nERROR --> SCH : Allocator's icorrect "
5026 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
5034 * @brief This function finds whether the subframe is special subframe or not.
5038 * Function: rgSCHCmnIsSplSubfrm
5039 * Purpose: This function finds the subframe index of the special subframe
5040 * and finds whether the current DL index matches it or not.
5041 * Returns TRUE when the computed special-subframe index
5042 * Invoked by: Scheduler
5044 * @param[in] uint8_t splfrmCnt
5045 * @param[in] uint8_t curSubfrmIdx
5046 * @param[in] uint8_t periodicity
5047 * @param[in] RgSchTddSubfrmInfo *subfrmInfo
5051 static Bool rgSCHCmnIsSplSubfrm(uint8_t splfrmCnt,uint8_t curSubfrmIdx,uint8_t periodicity,RgSchTddSubfrmInfo *subfrmInfo)
5053 uint8_t dlSfCnt = 0;
5054 uint8_t splfrmIdx = 0;
5058 if(periodicity == RG_SCH_CMN_5_MS_PRD)
/* 5 ms periodicity: two special subframes per radio frame, so count DL
 * subframes from full half-frame pairs plus the first half-frame for an
 * odd count (branches below handle odd/even splfrmCnt). */
5062 dlSfCnt = ((splfrmCnt-1)/2) *\
5063 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5064 dlSfCnt = dlSfCnt + subfrmInfo->numFrmHf1;
5068 dlSfCnt = (splfrmCnt/2) * \
5069 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
/* 10 ms periodicity: one special subframe per frame. */
5074 dlSfCnt = splfrmCnt * subfrmInfo->numFrmHf1;
5076 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1 +\
5077 (periodicity*splfrmCnt - dlSfCnt);
5081 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1;
5084 if(splfrmIdx == curSubfrmIdx)
5093 * @brief This function updates DAI or UL index.
5097 * Function: rgSCHCmnUpdHqAndDai
5098 * Purpose: Updates the DAI based on UL-DL Configuration
5099 * index and UE. It also updates the HARQ feedback
5100 * time and 'm' index.
5104 * @param[in] RgDlHqProcCb *hqP
5105 * @param[in] RgSchDlSf *subFrm
5106 * @param[in] RgSchDlHqTbCb *tbCb
5107 * @param[in] uint8_t tbAllocIdx
5111 static Void rgSCHCmnUpdHqAndDai(RgSchDlHqProcCb *hqP,RgSchDlSf *subFrm,RgSchDlHqTbCb *tbCb,uint8_t tbAllocIdx)
5113 RgSchUeCb *ue = hqP->hqE->ue;
5118 /* set the time at which UE shall send the feedback
5119 * for this process */
5120 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5121 subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5122 tbCb->fdbkTime.subframe = subFrm->dlFdbkInfo.subframe;
5123 tbCb->m = subFrm->dlFdbkInfo.m;
5127 /* set the time at which UE shall send the feedback
5128 * for this process (derived from the proc's own subframe) */
5129 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5130 hqP->subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5131 tbCb->fdbkTime.subframe = hqP->subFrm->dlFdbkInfo.subframe;
5132 tbCb->m = hqP->subFrm->dlFdbkInfo.m;
5135 /* ccpu00132340-MOD- DAI need to be updated for first TB only*/
5136 if(ue && !tbAllocIdx)
5138 Bool havePdcch = (tbCb->hqP->pdcch ? TRUE : FALSE);
5141 dlDai = rgSCHCmnUpdDai(ue, &tbCb->fdbkTime, tbCb->m, havePdcch,tbCb->hqP,
5144 {/* Non SPS occasions */
5145 tbCb->hqP->pdcch->dlDai = dlDai;
5146 /* hqP->ulDai is used for N1 resource filling
5147 * when SPS occasions present in a bundle */
5148 tbCb->hqP->ulDai = tbCb->dai;
5149 tbCb->hqP->dlDai = dlDai;
5153 /* Updating pucchFdbkIdx for both PUCCH or PUSCH
5155 tbCb->pucchFdbkIdx = tbCb->hqP->ulDai;
5162 * @brief This function updates DAI or UL index.
5166 * Function: rgSCHCmnUpdDai
5167 * Purpose: Updates the DAI in the ack-nack info, a valid
5168 * ue should be passed. Creates a fresh A/N feedback
5169 * entry when none exists for the feedback time, else
5170 * increments the existing ul/dl DAI counters.
5172 * @param[in] RgDlHqProcCb *hqP
5173 * @param[in] RgSchDlSf *subFrm
5174 * @param[in] RgSchDlHqTbCb *tbCb
5175 * @return uint8_t dlDai
5178 uint8_t rgSCHCmnUpdDai
5181 CmLteTimingInfo *fdbkTime,
5184 RgSchDlHqProcCb *hqP,
5188 RgSchTddANInfo *anInfo;
5189 uint8_t servCellIdx;
5190 uint8_t ackNackFdbkArrSize;
/* Resolve serving-cell index and the A/N feedback array size; SPS on
 * the primary cell forces PCell index (branch below). */
5195 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5196 hqP->hqE->cell->cellId,
5199 servCellIdx = RGSCH_PCELL_INDEX;
5201 ackNackFdbkArrSize = hqP->hqE->cell->ackNackFdbkArrSize;
5203 {/* SPS on primary cell */
5204 servCellIdx = RGSCH_PCELL_INDEX;
5205 ackNackFdbkArrSize = ue->cell->ackNackFdbkArrSize;
5209 anInfo = rgSCHUtlGetUeANFdbkInfo(ue, fdbkTime,servCellIdx);
5211 /* If no ACK/NACK feedback already present, create a new one */
5214 anInfo = &ue->cellInfo[servCellIdx]->anInfo[ue->cellInfo[servCellIdx]->nextFreeANIdx];
5215 anInfo->sfn = fdbkTime->sfn;
5216 anInfo->subframe = fdbkTime->subframe;
5217 anInfo->latestMIdx = m;
5218 /* Fixing DAI value - ccpu00109162 */
5219 /* Handle TDD case as in MIMO definition of the function */
5225 anInfo->isSpsOccasion = FALSE;
5226 /* set the free Index to store Ack/Nack Information*/
5227 ue->cellInfo[servCellIdx]->nextFreeANIdx = (ue->cellInfo[servCellIdx]->nextFreeANIdx + 1) %
/* Existing entry for this feedback occasion: bump m-index and DAIs. */
5233 anInfo->latestMIdx = m;
5234 /* Fixing DAI value - ccpu00109162 */
5235 /* Handle TDD case as in MIMO definition of the function */
5236 anInfo->ulDai = anInfo->ulDai + 1;
5239 anInfo->dlDai = anInfo->dlDai + 1;
5243 /* ignoring the Scell check,
5244 * for primary cell this field is unused*/
5247 anInfo->n1ResTpcIdx = hqP->tpc;
5251 {/* As this not required for release pdcch */
5252 *ulDai = anInfo->ulDai;
5255 return (anInfo->dlDai);
5258 #endif /* ifdef LTE_TDD */
/* rgHqRvRetxCnt[rv][tbIdx]: debug counters of DTX-triggered HARQ
 * retransmissions, indexed by redundancy version and TB index
 * (incremented in rgSCHCmnFillHqPTb). */
5260 uint32_t rgHqRvRetxCnt[4][2];
/* UL grant rate diagnostic counter; units/updater not visible here --
 * TODO(review): confirm against its writers. */
5261 uint32_t rgUlrate_grant;
5264 * @brief This function fills the HqP TB with rbAllocInfo.
5268 * Function: rgSCHCmnFillHqPTb
5269 * Purpose: This function fills in the HqP TB with rbAllocInfo.
5270 * Handles three cases: disabled TB, retransmission, and
5271 * Invoked by: rgSCHCmnFillHqPTb
5273 * @param[in] RgSchCellCb* cell
5274 * @param[in] RgSchDlRbAlloc *rbAllocInfo,
5275 * @param[in] uint8_t tbAllocIdx
5276 * @param[in] RgSchPdcch *pdcch
/* Two signature variants: the non-static one is compiled for LTEMAC_SPS
 * (callable from the SPS module), the static one otherwise. */
5281 Void rgSCHCmnFillHqPTb
5284 RgSchDlRbAlloc *rbAllocInfo,
5289 static Void rgSCHCmnFillHqPTb
5292 RgSchDlRbAlloc *rbAllocInfo,
5296 #endif /* LTEMAC_SPS */
5298 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
5299 RgSchDlTbAllocInfo *tbAllocInfo = &rbAllocInfo->tbInfo[tbAllocIdx];
5300 RgSchDlHqTbCb *tbInfo = tbAllocInfo->tbCb;
5301 RgSchDlHqProcCb *hqP = tbInfo->hqP;
5304 /*ccpu00120365-ADD-if tb is disabled, set mcs=0,rv=1.
5305 * Relevant for DCI format 2 & 2A as per 36.213-7.1.7.2
5307 if ( tbAllocInfo->isDisabled)
5310 tbInfo->dlGrnt.iMcs = 0;
5311 tbInfo->dlGrnt.rv = 1;
5313 /* Fill for TB retransmission */
5314 else if (tbInfo->txCntr > 0)
5317 tbInfo->timingInfo = cmnCellDl->time;
/* On DTX keep the allocated MCS and count the retx per RV for debug;
 * otherwise advance the RV through the cyclic RV table. */
5319 if ((tbInfo->isAckNackDtx == TFU_HQFDB_DTX))
5321 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5322 rgHqRvRetxCnt[tbInfo->dlGrnt.rv][tbInfo->tbIdx]++;
5326 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[++(tbInfo->ccchSchdInfo.rvIdx) & 0x03];
5329 /* fill the scheduler information of hqProc */
5330 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5331 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx,hqP->tbInfo,tbInfo->tbIdx );
5332 rgSCHDhmHqTbRetx(hqP->hqE, tbInfo->timingInfo, hqP, tbInfo->tbIdx);
5334 /* Fill for TB transmission */
5337 /* Fill the HqProc */
5338 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5339 tbInfo->tbSz = tbAllocInfo->bytesAlloc;
5340 tbInfo->timingInfo = cmnCellDl->time;
5342 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[0];
5343 /* fill the scheduler information of hqProc */
5344 tbInfo->ccchSchdInfo.rvIdx = 0;
5345 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5346 /* DwPts Scheduling Changes Start */
5347 /* DwPts Scheduling Changes End */
5348 cell->measurements.dlBytesCnt += tbAllocInfo->bytesAlloc;
5351 /*ccpu00120365:-ADD-only add to subFrm list if tb is not disabled */
5352 if ( tbAllocInfo->isDisabled == FALSE )
5354 /* Set the number of transmitting SM layers for this TB */
5355 tbInfo->numLyrs = tbAllocInfo->noLyr;
5356 /* Set the TB state as WAITING to indicate TB has been
5357 * considered for transmission */
5358 tbInfo->state = HQ_TB_WAITING;
5359 hqP->subFrm = rbAllocInfo->dlSf;
5360 tbInfo->hqP->pdcch = pdcch;
5361 //tbInfo->dlGrnt.numRb = rbAllocInfo->rbsAlloc;
5362 rgSCHUtlDlHqPTbAddToTx(hqP->subFrm, hqP, tbInfo->tbIdx);
5368 * @brief This function fills the PDCCH DCI format B1/B2 information from dlProc.
5372 * Function: rgSCHCmnFillHqPPdcchDciFrmtB1B2
5373 * Purpose: This function fills in the PDCCH information
5374 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5375 * for dedicated service scheduling. It also
5376 * obtains TPC to be filled in from the power module.
5377 * Assign the PDCCH to HQProc.
5379 * Invoked by: Downlink Scheduler
5381 * @param[in] RgSchCellCb* cell
5382 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5383 * @param[in] RgDlHqProc* hqP
5384 * @param[out] RgSchPdcch *pdcch
5385 * @param[in] uint8_t tpc
5389 static Void rgSCHCmnFillHqPPdcchDciFrmtB1B2
5392 RgSchDlRbAlloc *rbAllocInfo,
5393 RgSchDlHqProcCb *hqP,
5400 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5401 //Currently hardcoding values here.
5402 //DU_LOG("\nINFO --> SCH : Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
5403 switch(rbAllocInfo->dciFormat)
/* Unlike the common-channel filler, these cases carry real HARQ proc id,
 * NDI and RV from the TB control block. */
5405 case TFU_DCI_FORMAT_B1:
5407 pdcch->dci.u.formatB1Info.formatType = 0;
5408 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5409 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5410 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5411 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5412 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5413 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5414 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5415 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5416 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5417 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5418 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5419 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5420 //TODO_SID: Need to update
5421 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5422 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5423 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5424 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5425 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5426 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5427 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5428 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5429 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5430 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5433 case TFU_DCI_FORMAT_B2:
5435 pdcch->dci.u.formatB2Info.formatType = 1;
5436 pdcch->dci.u.formatB2Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5437 pdcch->dci.u.formatB2Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5438 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5439 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5440 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5441 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5442 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5443 pdcch->dci.u.formatB2Info.CSI_BSI_BRI_Req = 0;
5444 pdcch->dci.u.formatB2Info.CSIRS_BRRS_TxTiming = 0;
5445 pdcch->dci.u.formatB2Info.CSIRS_BRRS_SymbIdx = 0;
5446 pdcch->dci.u.formatB2Info.CSIRS_BRRS_ProcInd = 0;
5447 pdcch->dci.u.formatB2Info.xPUCCH_TxTiming = 0;
5448 //TODO_SID: Need to update
5449 pdcch->dci.u.formatB2Info.freqResIdx_xPUCCH = 0;
5450 pdcch->dci.u.formatB2Info.beamSwitch = 0;
5451 pdcch->dci.u.formatB2Info.SRS_Config = 0;
5452 pdcch->dci.u.formatB2Info.SRS_Symbol = 0;
5453 //TODO_SID: Need to check.Currently setting 4(2 layer, ports(8,9) w/o OCC).
5454 pdcch->dci.u.formatB2Info.AntPorts_numLayers = 4;
5455 pdcch->dci.u.formatB2Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5456 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5457 pdcch->dci.u.formatB2Info.tpcCmd = 1; //tpc;
5458 pdcch->dci.u.formatB2Info.DL_PCRS = 0;
5462 DU_LOG("\nERROR --> SCH : 5GTF_ERROR Allocator's incorrect "
5463 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
/* Debug throughput accumulators in bits (scheduled TB bytes << 3):
 * total across PCell+SCell, and per-SCell contributions split per TB
 * (updated in rgSCHCmnFillHqPPdcch). */
5470 uint32_t totPcellSCell;
5471 uint32_t addedForScell;
5472 uint32_t addedForScell1;
5473 uint32_t addedForScell2;
5475 * @brief This function fills the PDCCH information from dlProc.
5479 * Function: rgSCHCmnFillHqPPdcch
5480 * Purpose: This function fills in the PDCCH information
5481 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5482 * for dedicated service scheduling. It also
5483 * obtains TPC to be filled in from the power module.
5484 * Assign the PDCCH to HQProc.
5485 * Additionally updates PRB/throughput statistics counters.
5486 * Invoked by: Downlink Scheduler
5488 * @param[in] RgSchCellCb* cell
5489 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5490 * @param[in] RgDlHqProc* hqP
5494 Void rgSCHCmnFillHqPPdcch(RgSchCellCb *cell,RgSchDlRbAlloc *rbAllocInfo,RgSchDlHqProcCb *hqP)
5496 RgSchCmnDlCell *cmnCell = RG_SCH_CMN_GET_DL_CELL(cell);
5497 RgSchPdcch *pdcch = rbAllocInfo->pdcch;
5504 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
/* TPC for the PUCCH comes from the power-control module. */
5511 tpc = rgSCHPwrPucchTpcForUe(cell, hqP->hqE->ue);
5513 /* Fix: syed moving this to a common function for both scheduled
5514 * and non-scheduled UEs */
5516 pdcch->ue = hqP->hqE->ue;
5517 if (hqP->hqE->ue->csgMmbrSta == FALSE)
5519 cmnCell->ncsgPrbCnt += rbAllocInfo->rbsAlloc;
5521 cmnCell->totPrbCnt += rbAllocInfo->rbsAlloc;
/* Per-UE and per-cell stats accounting for CW0 (and CW1 when its TB is
 * scheduled); throughput counters are in bits (bytes << 3). */
5524 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlPrbUsg +=
5525 rbAllocInfo->rbsAlloc;
5526 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw0iTbs +=
5527 rbAllocInfo->tbInfo[0].iTbs;
5528 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw0iTbs ++;
5529 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
5530 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5533 totPcellSCell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5534 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5536 addedForScell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5537 addedForScell1 += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5539 DU_LOG("\nINFO --> SCH : Hqp %d cell %d addedForScell %lu addedForScell1 %lu sfn:sf %d:%d \n",
5541 hqP->hqE->cell->cellId,
5545 cell->crntTime.slot);
5549 hqP->hqE->cell->tenbStats->sch.dlPrbUsage[0] +=
5550 rbAllocInfo->rbsAlloc;
5551 hqP->hqE->cell->tenbStats->sch.dlSumCw0iTbs +=
5552 rbAllocInfo->tbInfo[0].iTbs;
5553 hqP->hqE->cell->tenbStats->sch.dlNumCw0iTbs ++;
5554 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
5555 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5556 if (rbAllocInfo->tbInfo[1].schdlngForTb)
5558 hqP->hqE->cell->tenbStats->sch.dlSumCw1iTbs +=
5559 rbAllocInfo->tbInfo[1].iTbs;
5560 hqP->hqE->cell->tenbStats->sch.dlNumCw1iTbs ++;
5561 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw1iTbs +=
5562 rbAllocInfo->tbInfo[1].iTbs;
5563 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw1iTbs ++;
5564 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
5565 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5569 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5571 addedForScell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5572 addedForScell2 += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5574 DU_LOG("\nINFO --> SCH : Hqp %d cell %d addedForScell %lu addedForScell2 %lu \n",
5576 hqP->hqE->cell->cellId,
5581 totPcellSCell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5585 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
5586 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5589 DU_LOG("\nINFO --> SCH : add DL TPT is %lu sfn:sf %d:%d \n", hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt ,
5591 cell->crntTime.slot);
5597 pdcch->rnti = rbAllocInfo->rnti;
5598 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
5599 /* Update subframe and pdcch info in HqTb control block */
5600 switch(rbAllocInfo->dciFormat)
5603 case TFU_DCI_FORMAT_B1:
5604 case TFU_DCI_FORMAT_B2:
5606 // DU_LOG("\nINFO --> SCH : RG_5GTF:: Pdcch filling with DCI format B1/B2\n");
5607 rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, \
5613 DU_LOG("\nERROR --> SCH : Allocator's incorrect dciForamt Fill for RNTI:%d",rbAllocInfo->rnti);
5620 * @brief This function fills the PDCCH DCI format 1 information from dlProc.
5624 * Function: rgSCHCmnFillHqPPdcchDciFrmt1
5625 * Purpose: This function fills in the PDCCH information
5626 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5627 * for dedicated service scheduling. It also
5628 * obtains TPC to be filled in from the power module.
5629 * Assign the PDCCH to HQProc.
5631 * Invoked by: Downlink Scheduler
5633 * @param[in] RgSchCellCb* cell
5634 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5635 * @param[in] RgDlHqProc* hqP
5636 * @param[out] RgSchPdcch *pdcch
5637 * @param[in] uint8_t tpc
5642 static Void rgSCHCmnFillHqPPdcchDciFrmt1
5645 RgSchDlRbAlloc *rbAllocInfo,
5646 RgSchDlHqProcCb *hqP,
5653 RgSchTddANInfo *anInfo;
5657 /* For activation or reactivation,
5658 * Harq ProcId should be 0 */
5659 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
5663 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5664 pdcch->dci.u.format1Info.tpcCmd = tpc;
5665 /* Avoiding this check,as we dont support Type1 RA */
5667 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Type-0 RA bitmap split into four bytes, MSB first. */
5670 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5671 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5672 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5674 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5675 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5677 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5678 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5680 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5681 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* First Tx of an SPS activation/reactivation uses HARQ proc id 0,
 * otherwise the proc's own id. */
5686 if ((!(hqP->tbInfo[0].txCntr)) &&
5687 (cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5688 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5689 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV)))
5692 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5696 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
5699 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
5702 pdcch->dci.u.format1Info.allocInfo.ndi =
5703 rbAllocInfo->tbInfo[0].tbCb->ndi;
5704 pdcch->dci.u.format1Info.allocInfo.mcs =
5705 rbAllocInfo->tbInfo[0].imcs;
5706 pdcch->dci.u.format1Info.allocInfo.rv =
5707 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
/* TDD: DAI comes from the UE's A/N feedback entry for this fdbkTime. */
5709 if(hqP->hqE->ue != NULLP)
5712 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5713 hqP->hqE->cell->cellId,
5716 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5717 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5719 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5720 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5725 pdcch->dci.u.format1Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5729 /* Fixing DAI value - ccpu00109162 */
5730 pdcch->dci.u.format1Info.dai = RG_SCH_MAX_DAI_IDX;
5736 /* always 0 for RACH */
5737 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5739 /* Fixing DAI value - ccpu00109162 */
5740 pdcch->dci.u.format1Info.dai = 1;
5749 * @brief This function fills the PDCCH DCI format 1A information from dlProc.
5753 * Function: rgSCHCmnFillHqPPdcchDciFrmt1A
5754 * Purpose: This function fills in the PDCCH information
5755 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5756 * for dedicated service scheduling. It also
5757 * obtains TPC to be filled in from the power module.
5758 * Assign the PDCCH to HQProc.
5760 * Invoked by: Downlink Scheduler
5762 * @param[in] RgSchCellCb* cell
5763 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5764 * @param[in] RgDlHqProc* hqP
5765 * @param[out] RgSchPdcch *pdcch
5766 * @param[in] uint8_t tpc
5770 static Void rgSCHCmnFillHqPPdcchDciFrmt1A
5773 RgSchDlRbAlloc *rbAllocInfo,
5774 RgSchDlHqProcCb *hqP,
5781 RgSchTddANInfo *anInfo;
5785 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
5789 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5790 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
5791 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = tpc;
5792 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
5793 rbAllocInfo->tbInfo[0].imcs;
5794 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = TRUE;
/* First Tx of an SPS activation/reactivation uses HARQ proc id 0,
 * otherwise the proc's own id. */
5796 if ((!(hqP->tbInfo[0].txCntr)) &&
5797 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5798 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5799 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
5802 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val = 0;
5806 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val
5810 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val =
5813 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = \
5814 rbAllocInfo->tbInfo[0].tbCb->ndi;
5815 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = \
5816 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5817 /* As of now, we do not support Distributed allocations */
5818 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
5819 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
5820 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
5822 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5823 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5824 rbAllocInfo->allocInfo.raType2.rbStart,
5825 rbAllocInfo->allocInfo.raType2.numRb);
/* TDD: DAI comes from the UE's A/N feedback entry for this fdbkTime. */
5827 if(hqP->hqE->ue != NULLP)
5830 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5831 hqP->hqE->cell->cellId,
5833 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5834 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5836 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5837 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5840 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5843 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val =
5844 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5848 /* Fixing DAI value - ccpu00109162 */
5849 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = RG_SCH_MAX_DAI_IDX;
5850 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
5857 /* always 0 for RACH */
5858 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres
5861 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5862 /* Fixing DAI value - ccpu00109162 */
5863 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5871 * @brief This function fills the PDCCH DCI format 1B information from dlProc.
5875 * Function: rgSCHCmnFillHqPPdcchDciFrmt1B
5876 * Purpose: This function fills in the PDCCH information
5877 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5878 * for dedicated service scheduling. It also
5879 * obtains TPC to be filled in from the power module.
5880 * Assign the PDCCH to HQProc.
5882 * Invoked by: Downlink Scheduler
5884 * @param[in] RgSchCellCb* cell
5885 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5886 * @param[in] RgDlHqProc* hqP
5887 * @param[out] RgSchPdcch *pdcch
5888 * @param[in] uint8_t tpc
5892 static Void rgSCHCmnFillHqPPdcchDciFrmt1B
5895 RgSchDlRbAlloc *rbAllocInfo,
5896 RgSchDlHqProcCb *hqP,
5903 RgSchTddANInfo *anInfo;
5907 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
5911 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5912 pdcch->dci.u.format1bInfo.tpcCmd = tpc;
5913 pdcch->dci.u.format1bInfo.allocInfo.mcs = \
5914 rbAllocInfo->tbInfo[0].imcs;
/* First Tx of an SPS activation/reactivation uses HARQ proc id 0,
 * otherwise the proc's own id. */
5916 if ((!(hqP->tbInfo[0].txCntr)) &&
5917 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5918 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5919 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
5922 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = 0;
5926 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
5929 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
5931 pdcch->dci.u.format1bInfo.allocInfo.ndi = \
5932 rbAllocInfo->tbInfo[0].tbCb->ndi;
5933 pdcch->dci.u.format1bInfo.allocInfo.rv = \
5934 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5935 /* As of now, we do not support Distributed allocations */
5936 pdcch->dci.u.format1bInfo.allocInfo.isLocal = TRUE;
5937 pdcch->dci.u.format1bInfo.allocInfo.nGap2.pres = NOTPRSNT;
5938 pdcch->dci.u.format1bInfo.allocInfo.alloc.type =
5940 pdcch->dci.u.format1bInfo.allocInfo.alloc.u.riv =
5941 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5942 rbAllocInfo->allocInfo.raType2.rbStart,
5943 rbAllocInfo->allocInfo.raType2.numRb);
5944 /* Fill precoding Info: PMI confirmation in high nibble, TPMI in low */
5945 pdcch->dci.u.format1bInfo.allocInfo.pmiCfm = \
5946 rbAllocInfo->mimoAllocInfo.precIdxInfo >> 4;
5947 pdcch->dci.u.format1bInfo.allocInfo.tPmi = \
5948 rbAllocInfo->mimoAllocInfo.precIdxInfo & 0x0F;
/* TDD: DAI comes from the UE's A/N feedback entry for this fdbkTime. */
5950 if(hqP->hqE->ue != NULLP)
5953 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5954 hqP->hqE->cell->cellId,
5956 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5957 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5959 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5960 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5965 pdcch->dci.u.format1bInfo.dai =
5966 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5970 pdcch->dci.u.format1bInfo.dai = RG_SCH_MAX_DAI_IDX;
5971 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
5982 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
5986 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
5987 * Purpose: This function fills in the PDCCH information
5988 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5989 * for dedicated service scheduling. It also
5990 * obtains TPC to be filled in from the power module.
5991 * Assign the PDCCH to HQProc.
5993 * Invoked by: Downlink Scheduler
5995 * @param[in] RgSchCellCb* cell
5996 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5997 * @param[in] RgDlHqProc* hqP
5998 * @param[out] RgSchPdcch *pdcch
5999 * @param[in] uint8_t tpc
6003 static Void rgSCHCmnFillHqPPdcchDciFrmt2
6006 RgSchDlRbAlloc *rbAllocInfo,
6007 RgSchDlHqProcCb *hqP,
6014 RgSchTddANInfo *anInfo;
6018 /* ccpu00119023-ADD-For activation or reactivation,
6019 * Harq ProcId should be 0 */
6020 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6024 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6025 /*ccpu00120365:-ADD-call also if tb is disabled */
6026 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6027 rbAllocInfo->tbInfo[1].isDisabled)
6029 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6031 pdcch->dci.u.format2Info.tpcCmd = tpc;
6032 /* Avoiding this check,as we dont support Type1 RA */
6034 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Type-0 RA bitmap split into four bytes, MSB first. */
6037 pdcch->dci.u.format2Info.allocInfo.isAllocType0 = TRUE;
6038 pdcch->dci.u.format2Info.allocInfo.resAllocMap[0] =
6039 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6041 pdcch->dci.u.format2Info.allocInfo.resAllocMap[1] =
6042 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6044 pdcch->dci.u.format2Info.allocInfo.resAllocMap[2] =
6045 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6047 pdcch->dci.u.format2Info.allocInfo.resAllocMap[3] =
6048 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* First Tx of an SPS activation/reactivation uses HARQ proc id 0,
 * otherwise the proc's own id. */
6053 if ((!(hqP->tbInfo[0].txCntr)) &&
6054 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6055 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6056 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6059 pdcch->dci.u.format2Info.allocInfo.harqProcId = 0;
6063 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6066 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6068 /* Initialize the TB info for both the TBs */
/* Defaults (mcs=0, rv=1) mark a disabled TB per 36.213 7.1.7.2; real
 * values are overwritten below for scheduled TBs. */
6069 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].mcs = 0;
6070 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].rv = 1;
6071 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].mcs = 0;
6072 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].rv = 1;
6073 /* Fill tbInfo for scheduled TBs */
6074 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6075 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6076 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6077 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6078 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6079 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6080 /* If we reach this function. It is safely assumed that
6081 * rbAllocInfo->tbInfo[0] always has non default valid values.
6082 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6083 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6085 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6086 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6087 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6088 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6089 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6090 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6092 pdcch->dci.u.format2Info.allocInfo.transSwap =
6093 rbAllocInfo->mimoAllocInfo.swpFlg;
6094 pdcch->dci.u.format2Info.allocInfo.precoding =
6095 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD: DAI comes from the UE's A/N feedback entry for this fdbkTime. */
6097 if(hqP->hqE->ue != NULLP)
6101 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6102 hqP->hqE->cell->cellId,
6104 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6105 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6107 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6108 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6113 pdcch->dci.u.format2Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6117 pdcch->dci.u.format2Info.dai = RG_SCH_MAX_DAI_IDX;
6118 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
6128 * @brief This function fills the PDCCH DCI format 2A information from dlProc.
6132 * Function: rgSCHCmnFillHqPPdcchDciFrmt2A
6133 * Purpose: This function fills in the PDCCH information
6134 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6135 * for dedicated service scheduling. It also
6136 * obtains TPC to be filled in from the power module.
6137 * Assign the PDCCH to HQProc.
6139 * Invoked by: Downlink Scheduler
6141 * @param[in] RgSchCellCb* cell
6142 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6143 * @param[in] RgDlHqProc* hqP
6144 * @param[out] RgSchPdcch *pdcch
6145 * @param[in] uint8_t tpc
/* NOTE(review): this listing is lossy (original line numbers embedded, some
 * lines elided); comments below describe only the statements visible here. */
6149 static Void rgSCHCmnFillHqPPdcchDciFrmt2A
6152 RgSchDlRbAlloc *rbAllocInfo,
6153 RgSchDlHqProcCb *hqP,
6159 RgSchTddANInfo *anInfo;
6163 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Link TB0 (always scheduled) to the PDCCH; TB1 is filled in as well when it
 * is either scheduled or explicitly disabled, so the DCI carries its state. */
6167 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6168 /*ccpu00120365:-ADD-call also if tb is disabled */
6169 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6170 rbAllocInfo->tbInfo[1].isDisabled)
6173 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6176 pdcch->dci.u.format2AInfo.tpcCmd = tpc;
6177 /* Avoiding this check,as we dont support Type1 RA */
6179 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Resource-allocation type 0: pack the 32-bit RBG bitmask into the four
 * resAllocMap octets, most significant byte first. */
6182 pdcch->dci.u.format2AInfo.allocInfo.isAllocType0 = TRUE;
6183 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[0] =
6184 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6186 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[1] =
6187 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6189 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[2] =
6190 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6192 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[3] =
6193 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS DL activation/reactivation on a fresh transmission (txCntr == 0) must
 * signal HARQ process id 0 in the DCI; otherwise use the real process id. */
6198 if ((!(hqP->tbInfo[0].txCntr)) &&
6199 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6200 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6201 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6204 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = 0;
6208 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
/* Both assignments below sit in distinct (elided) preprocessor/else branches
 * of the original file; only one executes per build/path. */
6211 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6213 /* Initialize the TB info for both the TBs */
6214 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].mcs = 0;
6215 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].rv = 1;
6216 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].mcs = 0;
6217 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].rv = 1;
6218 /* Fill tbInfo for scheduled TBs */
6219 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6220 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6221 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6222 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6223 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6224 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6225 /* If we reach this function. It is safely assumed that
6226 * rbAllocInfo->tbInfo[0] always has non default valid values.
6227 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6229 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6231 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6232 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6233 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6234 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6235 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6236 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
/* MIMO: codeword-to-layer swap flag and precoding index from allocation. */
6239 pdcch->dci.u.format2AInfo.allocInfo.transSwap =
6240 rbAllocInfo->mimoAllocInfo.swpFlg;
6241 pdcch->dci.u.format2AInfo.allocInfo.precoding =
6242 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD DAI derivation: look up the UE's ACK/NACK feedback info for the TB0
 * feedback time (serving-cell-aware path vs. legacy path are alternate,
 * elided, #ifdef branches). On lookup failure fall back to max DAI and log. */
6244 if(hqP->hqE->ue != NULLP)
6247 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6248 hqP->hqE->cell->cellId,
6250 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6251 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6253 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6254 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6259 pdcch->dci.u.format2AInfo.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6263 pdcch->dci.u.format2AInfo.dai = RG_SCH_MAX_DAI_IDX;
6264 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
6276 * @brief init of Sch vars.
6280 * Function: rgSCHCmnInitVars
6281 Purpose: Initialization of various UL subframe indices
6283 * @param[in] RgSchCellCb *cell
/* One-time reset of the common-scheduler UL cell indices: every subframe /
 * HARQ bookkeeping index is marked invalid until rgSCHCmnUpdVars computes
 * real values on the first TTI. */
6287 static Void rgSCHCmnInitVars(RgSchCellCb *cell)
6289 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6292 cellUl->idx = RGSCH_INVALID_INFO;
6293 cellUl->schdIdx = RGSCH_INVALID_INFO;
6294 cellUl->schdHqProcIdx = RGSCH_INVALID_INFO;
6295 cellUl->msg3SchdIdx = RGSCH_INVALID_INFO;
/* eMTC msg3 index lives in an (elided) #ifdef block in the original. */
6297 cellUl->emtcMsg3SchdIdx = RGSCH_INVALID_INFO;
6299 cellUl->msg3SchdHqProcIdx = RGSCH_INVALID_INFO;
6300 cellUl->rcpReqIdx = RGSCH_INVALID_INFO;
6301 cellUl->hqFdbkIdx[0] = RGSCH_INVALID_INFO;
6302 cellUl->hqFdbkIdx[1] = RGSCH_INVALID_INFO;
6303 cellUl->reTxIdx[0] = RGSCH_INVALID_INFO;
6304 cellUl->reTxIdx[1] = RGSCH_INVALID_INFO;
6305 /* Stack Crash problem for TRACE5 Changes. Added the return below */
6312 * @brief Updation of Sch vars per TTI.
6316 * Function: rgSCHCmnUpdVars
6317 * Purpose: Updation of Sch vars per TTI.
6319 * @param[in] RgSchCellCb *cell
/* Per-TTI refresh of the common-scheduler UL indices. All indices are
 * absolute subframe counts (sfn * subframes-per-frame + slot) folded modulo
 * RG_SCH_CMN_UL_NUM_SF, each offset by its own timing delta. */
6323 Void rgSCHCmnUpdVars(RgSchCellCb *cell)
6325 CmLteTimingInfo timeInfo;
6326 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Current UL index from the cell's current time. */
6330 idx = (cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot);
6331 cellUl->idx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6333 DU_LOG("\nDEBUG --> SCH : idx %d cellUl->idx %d RGSCH_NUM_SUB_FRAMES_5G %d time(%d %d) \n",idx,cellUl->idx ,RGSCH_NUM_SUB_FRAMES_5G,cell->crntTime.sfn,cell->crntTime.slot);
6335 /* Need to scheduler for after SCHED_DELTA */
6336 /* UL allocation has been advanced by 1 subframe
6337 * so that we do not wrap around and send feedback
6338 * before the data is even received by the PHY */
6339 /* Introduced timing delta for UL control */
6340 idx = (cellUl->idx + TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA);
6341 cellUl->schdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
/* Advance crntTime by the same delta to derive the HARQ process id that
 * will own the grant being scheduled now. */
6343 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6344 TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA)
6345 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6347 /* ccpu00127193 filling schdTime for logging and enhancement purpose*/
6348 cellUl->schdTime = timeInfo;
6350 /* msg3 scheduling two subframes after general scheduling */
6351 idx = (cellUl->idx + RG_SCH_CMN_DL_DELTA + RGSCH_RARSP_MSG3_DELTA);
6352 cellUl->msg3SchdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6354 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6355 RG_SCH_CMN_DL_DELTA+ RGSCH_RARSP_MSG3_DELTA)
6356 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* Reception-request index: where TFU reception requests are issued. */
6358 idx = (cellUl->idx + TFU_RECPREQ_DLDELTA);
6360 cellUl->rcpReqIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6362 /* Downlink harq feedback is sometime after data reception / harq failure */
6363 /* Since feedback happens prior to scheduling being called, we add 1 to */
6364 /* take care of getting the correct subframe for feedback */
/* Subtract the CRC-indication delta; adding RG_SCH_CMN_UL_NUM_SF first keeps
 * the intermediate value non-negative before the modulo. */
6365 idx = (cellUl->idx - TFU_CRCIND_ULDELTA + RG_SCH_CMN_UL_NUM_SF);
6367 DU_LOG("\nDEBUG --> SCH : Finally setting cellUl->hqFdbkIdx[0] = %d TFU_CRCIND_ULDELTA %d RG_SCH_CMN_UL_NUM_SF %d\n",idx,TFU_CRCIND_ULDELTA,RG_SCH_CMN_UL_NUM_SF);
6369 cellUl->hqFdbkIdx[0] = (idx % (RG_SCH_CMN_UL_NUM_SF));
/* Retransmissions are considered in the same subframe as new scheduling. */
6371 idx = ((cellUl->schdIdx) % (RG_SCH_CMN_UL_NUM_SF));
6373 cellUl->reTxIdx[0] = (uint8_t) idx;
6375 DU_LOG("\nDEBUG --> SCH : cellUl->hqFdbkIdx[0] %d cellUl->reTxIdx[0] %d \n",cellUl->hqFdbkIdx[0], cellUl->reTxIdx[0] );
6377 /* RACHO: update cmn sched specific RACH variables,
6378 * mainly the prachMaskIndex */
6379 rgSCHCmnUpdRachParam(cell);
6388 * @brief To get uplink subframe index associated with current PHICH
6393 * Function: rgSCHCmnGetPhichUlSfIdx
6394 * Purpose: Gets uplink subframe index associated with current PHICH
6395 * transmission based on SFN and subframe no
6397 * @param[in] CmLteTimingInfo *timeInfo
6398 * @param[in] RgSchCellCb *cell
/* TDD: maps the PHICH transmission time to the UL subframe index whose data
 * the PHICH acknowledges, using the per-DL-subframe PHICH offset table.
 * Returns RGSCH_INVALID_INFO when the DL subframe has no PHICH offset info. */
6402 uint8_t rgSCHCmnGetPhichUlSfIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6404 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6406 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6413 dlsf = rgSCHUtlSubFrmGet(cell, *timeInfo);
6415 if(dlsf->phichOffInfo.sfnOffset == RGSCH_INVALID_INFO)
6417 return (RGSCH_INVALID_INFO);
6419 subframe = dlsf->phichOffInfo.subframe;
/* Rewind the SFN by the stored offset; + RGSCH_MAX_SFN avoids a negative
 * intermediate before the modulo. */
6421 sfn = (RGSCH_MAX_SFN + timeInfo->sfn -
6422 dlsf->phichOffInfo.sfnOffset) % RGSCH_MAX_SFN;
6424 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
6425 * wrap case such that idx will be proper*/
/* Cumulative count of UL subframes up to (sfn, subframe), -1 because UL
 * subframes are counted from 0; then fold into the UL subframe ring. */
6426 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6427 numUlSf = ((numUlSf * sfn) + rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subframe]) - 1;
6428 idx = numUlSf % (cellUl->numUlSubfrms);
6434 * @brief To get uplink subframe index.
6439 * Function: rgSCHCmnGetUlSfIdx
6440 * Purpose: Gets uplink subframe index based on SFN and subframe number.
6442 * @param[in] CmLteTimingInfo *timeInfo
6443 * @param[in] uint8_t ulDlCfgIdx
/* TDD: converts (sfn, subframe) into an index into the cell's UL subframe
 * ring, by counting UL subframes cumulatively from frame 0. Same counting
 * scheme as rgSCHCmnGetPhichUlSfIdx but without the PHICH offset rewind. */
6447 uint8_t rgSCHCmnGetUlSfIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6449 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6450 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6455 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
6456 * wrap case such that idx will be proper*/
6457 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6458 numUlSf = ((numUlSf * timeInfo->sfn) + \
6459 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->subframe]) - 1;
6460 idx = numUlSf % (cellUl->numUlSubfrms);
6468 * @brief To get uplink hq index.
6473 * Function: rgSCHCmnGetUlHqProcIdx
6474 * Purpose: Gets uplink subframe index based on SFN and subframe number.
6476 * @param[in] CmLteTimingInfo *timeInfo
6477 * @param[in] uint8_t ulDlCfgIdx
/* Returns the UL HARQ process id owning the given time. Two alternate
 * (elided) #ifdef variants are visible below: a simple FDD/5G path that
 * folds the absolute subframe count modulo RGSCH_NUM_UL_HQ_PROC, and a TDD
 * path that accounts for the UL/DL configuration and SFN wrap-around. */
6481 uint8_t rgSCHCmnGetUlHqProcIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6487 numUlSf = (timeInfo->sfn * RGSCH_NUM_SUB_FRAMES_5G + timeInfo->slot);
6488 procId = numUlSf % RGSCH_NUM_UL_HQ_PROC;
/* ---- TDD variant starts here (alternate build path) ---- */
6490 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6491 /*ccpu00130639 - MOD - To get correct UL HARQ Proc IDs for all UL/DL Configs*/
6492 uint8_t numUlSfInSfn;
6493 S8 sfnCycle = cell->tddHqSfnCycle;
6494 uint8_t numUlHarq = rgSchTddUlNumHarqProcTbl[ulDlCfgIdx]
6496 /* TRACE 5 Changes */
6498 /* Calculate the number of UL SF in one SFN */
6499 numUlSfInSfn = RGSCH_NUM_SUB_FRAMES -
6500 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6502 /* Check for the SFN wrap around case */
6503 if(cell->crntTime.sfn == 1023 && timeInfo->sfn == 0)
6507 else if(cell->crntTime.sfn == 0 && timeInfo->sfn == 1023)
6509 /* sfnCycle decremented by 1 */
/* Modular decrement: adding (numUlHarq - 1) mod numUlHarq keeps the cycle
 * value non-negative. */
6510 sfnCycle = (sfnCycle + numUlHarq-1) % numUlHarq;
6512 /* Calculate the total number of UL sf */
6513 /* -1 is done since uplink sf are counted from 0 */
6514 numUlSf = numUlSfInSfn * (timeInfo->sfn + (sfnCycle*1024)) +
6515 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->slot] - 1;
6517 procId = numUlSf % numUlHarq;
6523 /* UL_ALLOC_CHANGES */
6524 /***********************************************************
6526 * Func : rgSCHCmnUlFreeAlloc
6528 * Desc : Free an allocation - invokes UHM and releases
6529 * alloc for the scheduler
6530 * Doesn't need the subframe as an argument
6538 **********************************************************/
/* Releases an UL allocation: frees the UHM HARQ process and returns the
 * allocation to the scheduler. For a msg3 allocation whose HARQ process has
 * exhausted retransmissions without a CRC pass, the whole RA control block
 * (and hence its RNTI) is torn down instead. */
6539 Void rgSCHCmnUlFreeAlloc(RgSchCellCb *cell,RgSchUlAlloc *alloc)
6541 RgSchUlHqProcCb *hqProc;
6545 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
6546 if ((alloc->hqProc->remTx == 0) &&
6547 (alloc->hqProc->rcvdCrcInd == FALSE) &&
6550 RgSchRaCb *raCb = alloc->raCb;
6551 rgSCHUhmFreeProc(alloc->hqProc, cell);
6552 rgSCHUtlUlAllocRelease(alloc);
6553 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: cache hqProc first because the release below invalidates
 * the alloc structure. */
6558 hqProc = alloc->hqProc;
6559 rgSCHUtlUlAllocRelease(alloc);
6560 rgSCHUhmFreeProc(hqProc, cell);
6565 /***********************************************************
6567 * Func : rgSCHCmnUlFreeAllocation
6569 * Desc : Free an allocation - invokes UHM and releases
6570 * alloc for the scheduler
6578 **********************************************************/
/* Subframe-aware twin of rgSCHCmnUlFreeAlloc: identical msg3-failure
 * handling, but releases the allocation back into the given UL subframe via
 * rgSCHUtlUlAllocRls(sf, ...). */
6579 Void rgSCHCmnUlFreeAllocation(RgSchCellCb *cell,RgSchUlSf *sf,RgSchUlAlloc *alloc)
6581 RgSchUlHqProcCb *hqProc;
6586 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
6587 if ((alloc->hqProc->remTx == 0) &&
6588 (alloc->hqProc->rcvdCrcInd == FALSE) &&
6591 RgSchRaCb *raCb = alloc->raCb;
6592 rgSCHUhmFreeProc(alloc->hqProc, cell);
6593 rgSCHUtlUlAllocRls(sf, alloc);
6594 rgSCHRamDelRaCb(cell, raCb, TRUE);
6599 hqProc = alloc->hqProc;
6600 rgSCHUhmFreeProc(hqProc, cell);
6602 /* re-setting the PRB count while freeing the allocations */
6605 rgSCHUtlUlAllocRls(sf, alloc);
6611 * @brief This function implements PDCCH allocation for an UE
6612 * in the currently running subframe.
6616 * Function: rgSCHCmnPdcchAllocCrntSf
6617 * Purpose: This function determines current DL subframe
6618 * and UE DL CQI to call the actual pdcch allocator
6620 * Note that this function is called only
6621 * when PDCCH request needs to be made during
6622 * uplink scheduling.
6624 * Invoked by: Scheduler
6626 * @param[in] RgSchCellCb *cell
6627 * @param[in] RgSchUeCb *ue
6628 * @return RgSchPdcch *
6629 * -# NULLP when unsuccessful
/* Allocates a PDCCH for UL scheduling in the subframe currently being
 * scheduled (crntTime advanced by TFU_ULCNTRL_DLDELTA). Returns NULLP when
 * no PDCCH could be allocated. */
6631 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf(RgSchCellCb *cell,RgSchUeCb *ue)
6633 CmLteTimingInfo frm = cell->crntTime;
6634 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
6636 RgSchPdcch *pdcch = NULLP;
6638 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
6639 sf = rgSCHUtlSubFrmGet(cell, frm);
/* UEs flagged for common search space (e.g. before SCell READY) get a
 * common-space PDCCH sized for DCI format 0; others get a UE-specific
 * PDCCH chosen from the UE's CW0 CQI. */
6642 if (ue->allocCmnUlPdcch)
6644 pdcch = rgSCHCmnCmnPdcchAlloc(cell, sf);
6645 /* Since CRNTI Scrambled */
6648 pdcch->dciNumOfBits = ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
6654 //pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, y, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_0, FALSE);
6655 pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_A1, FALSE);
6660 /***********************************************************
6662 * Func : rgSCHCmnUlAllocFillNdmrs
6664 * Desc : Determines and fills N_dmrs for a UE uplink
6669 * Notes: N_dmrs determination is straightforward, so
6670 * it is configured per subband
6674 **********************************************************/
/* Fills the grant's DMRS cyclic shift from the per-subband table, indexed by
 * the allocation's starting subband (N_dmrs is configured per subband). */
6675 Void rgSCHCmnUlAllocFillNdmrs(RgSchCmnUlCell *cellUl,RgSchUlAlloc *alloc)
6677 alloc->grnt.nDmrs = cellUl->dmrsArr[alloc->sbStart];
6681 /***********************************************************
6683 * Func : rgSCHCmnUlAllocLnkHqProc
6685 * Desc : Links a new allocation for an UE with the
6686 * appropriate HARQ process of the UE.
6694 **********************************************************/
/* Links a fresh UL allocation to the UE's HARQ process: retransmissions go
 * through adaptive-retx handling, new transmissions register with UHM using
 * the HARQ entity's configured max retx count. (Branch structure partially
 * elided in this listing.) */
6695 Void rgSCHCmnUlAllocLnkHqProc(RgSchUeCb *ue,RgSchUlAlloc *alloc,RgSchUlHqProcCb *proc,Bool isRetx)
6700 rgSCHCmnUlAdapRetx(alloc, proc);
6704 #ifdef LTE_L2_MEAS /* L2_COUNTERS */
6707 rgSCHUhmNewTx(proc, (((RgUeUlHqCb*)proc->hqEnt)->maxHqRetx), alloc);
6713 * @brief This function releases a PDCCH in the subframe that is
6714 * currently being allocated for.
6718 * Function: rgSCHCmnPdcchRlsCrntSf
6719 * Purpose: This function determines current DL subframe
6720 * which is considered for PDCCH allocation,
6721 * and then calls the actual function that
6722 * releases a PDCCH in a specific subframe.
6723 * Note that this function is called only
6724 * when PDCCH release needs to be made during
6725 * uplink scheduling.
6727 * Invoked by: Scheduler
6729 * @param[in] RgSchCellCb *cell
6730 * @param[in] RgSchPdcch *pdcch
/* Returns a PDCCH to the pool of the subframe currently being scheduled for
 * UL (crntTime + TFU_ULCNTRL_DLDELTA) — mirror of rgSCHCmnPdcchAllocCrntSf. */
6733 Void rgSCHCmnPdcchRlsCrntSf(RgSchCellCb *cell,RgSchPdcch *pdcch)
6735 CmLteTimingInfo frm = cell->crntTime;
6738 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
6739 sf = rgSCHUtlSubFrmGet(cell, frm);
6740 rgSCHUtlPdcchPut(cell, &sf->pdcchInfo, pdcch);
6743 /***********************************************************
6745 * Func : rgSCHCmnUlFillPdcchWithAlloc
6747 * Desc : Fills a PDCCH with format 0 information.
6755 **********************************************************/
/* Populates an UL-grant PDCCH from the allocation. The DCI format comes from
 * the grant (5GTF formats A1/A2); fields not derived from the grant are
 * hard-coded to 0, as noted in the original comments below. */
6756 Void rgSCHCmnUlFillPdcchWithAlloc(RgSchPdcch *pdcch,RgSchUlAlloc *alloc,RgSchUeCb *ue)
6760 pdcch->rnti = alloc->rnti;
6761 //pdcch->dci.dciFormat = TFU_DCI_FORMAT_A2;
6762 pdcch->dci.dciFormat = alloc->grnt.dciFrmt;
6764 //Currently hardcoding values here.
6765 //DU_LOG("\nINFO --> SCH : Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
6766 switch(pdcch->dci.dciFormat)
6768 case TFU_DCI_FORMAT_A1:
/* Format A1: grant-derived fields (xPUSCH range, RB assignment, HARQ id,
 * MCS, NDI, UCI-on-xPUSCH, SCID, PMI, TPC); the rest fixed at 0. */
6770 pdcch->dci.u.formatA1Info.formatType = 0;
6771 pdcch->dci.u.formatA1Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
6772 pdcch->dci.u.formatA1Info.xPUSCH_TxTiming = 0;
6773 pdcch->dci.u.formatA1Info.RBAssign = alloc->grnt.rbAssign;
6774 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
6775 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
6776 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
6777 pdcch->dci.u.formatA1Info.CSI_BSI_BRI_Req = 0;
6778 pdcch->dci.u.formatA1Info.CSIRS_BRRS_TxTiming = 0;
6779 pdcch->dci.u.formatA1Info.CSIRS_BRRS_SymbIdx = 0;
6780 pdcch->dci.u.formatA1Info.CSIRS_BRRS_ProcInd = 0;
6781 pdcch->dci.u.formatA1Info.numBSI_Reports = 0;
6782 pdcch->dci.u.formatA1Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
6783 pdcch->dci.u.formatA1Info.beamSwitch = 0;
6784 pdcch->dci.u.formatA1Info.SRS_Config = 0;
6785 pdcch->dci.u.formatA1Info.SRS_Symbol = 0;
6786 pdcch->dci.u.formatA1Info.REMapIdx_DMRS_PCRS_numLayers = 0;
6787 pdcch->dci.u.formatA1Info.SCID = alloc->grnt.SCID;
6788 pdcch->dci.u.formatA1Info.PMI = alloc->grnt.PMI;
6789 pdcch->dci.u.formatA1Info.UL_PCRS = 0;
6790 pdcch->dci.u.formatA1Info.tpcCmd = alloc->grnt.tpc;
6793 case TFU_DCI_FORMAT_A2:
/* Format A2: field-for-field the same filling as A1, into formatA2Info
 * (note it reuses the rbAssignA1Val324 union member). */
6795 pdcch->dci.u.formatA2Info.formatType = 1;
6796 pdcch->dci.u.formatA2Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
6797 pdcch->dci.u.formatA2Info.xPUSCH_TxTiming = 0;
6798 pdcch->dci.u.formatA2Info.RBAssign = alloc->grnt.rbAssign;
6799 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
6800 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
6801 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
6802 pdcch->dci.u.formatA2Info.CSI_BSI_BRI_Req = 0;
6803 pdcch->dci.u.formatA2Info.CSIRS_BRRS_TxTiming = 0;
6804 pdcch->dci.u.formatA2Info.CSIRS_BRRS_SymbIdx = 0;
6805 pdcch->dci.u.formatA2Info.CSIRS_BRRS_ProcInd = 0;
6806 pdcch->dci.u.formatA2Info.numBSI_Reports = 0;
6807 pdcch->dci.u.formatA2Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
6808 pdcch->dci.u.formatA2Info.beamSwitch = 0;
6809 pdcch->dci.u.formatA2Info.SRS_Config = 0;
6810 pdcch->dci.u.formatA2Info.SRS_Symbol = 0;
6811 pdcch->dci.u.formatA2Info.REMapIdx_DMRS_PCRS_numLayers = 0;
6812 pdcch->dci.u.formatA2Info.SCID = alloc->grnt.SCID;
6813 pdcch->dci.u.formatA2Info.PMI = alloc->grnt.PMI;
6814 pdcch->dci.u.formatA2Info.UL_PCRS = 0;
6815 pdcch->dci.u.formatA2Info.tpcCmd = alloc->grnt.tpc;
/* Default case: unexpected DCI format — log and leave the PDCCH unfilled.
 * (Log text typos "icorrect"/"dciForamt" are in a runtime string; left as-is
 * here — flagged for a separate code fix.) */
6819 DU_LOG("\nERROR --> SCH : 5GTF_ERROR UL Allocator's icorrect "
6820 "dciForamt Fill RNTI:%d",alloc->rnti);
6828 /***********************************************************
6830 * Func : rgSCHCmnUlAllocFillTpc
6832 * Desc : Determines and fills TPC for an UE allocation.
6840 **********************************************************/
/* Fetches the PUSCH TPC command for this UE from the power-control module
 * and stores it in the grant. */
6841 Void rgSCHCmnUlAllocFillTpc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchUlAlloc *alloc)
6843 alloc->grnt.tpc = rgSCHPwrPuschTpcForUe(cell, ue);
6848 /***********************************************************
6850 * Func : rgSCHCmnAddUeToRefreshQ
6852 * Desc : Adds a UE to refresh queue, so that the UE is
6853 * periodically triggered to refresh it's GBR and
6862 **********************************************************/
/* Registers the UE's refresh timer on the cell's common timer queue so the
 * UE is periodically triggered (RG_SCH_CMN_EVNT_UE_REFRESH) to refresh its
 * GBR/AMBR bookkeeping. 'wait' is the delay in timer-queue ticks (the line
 * assigning it to arg is elided from this listing). */
6863 static Void rgSCHCmnAddUeToRefreshQ(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t wait)
6865 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
6867 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
6871 memset(&arg, 0, sizeof(arg));
6872 arg.tqCp = &sched->tmrTqCp;
6873 arg.tq = sched->tmrTq;
6874 arg.timers = &ueSchd->tmr;
6878 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
6885 * @brief Perform UE reset procedure.
6889 * Function : rgSCHCmnUlUeReset
6891 * This functions performs BSR resetting and
6892 * triggers UL specific scheduler
6893 * to Perform UE reset procedure.
6895 * @param[in] RgSchCellCb *cell
6896 * @param[in] RgSchUeCb *ue
/* UL part of UE reset: clears BSR/AMBR accounting, resets each LCG's
 * reported BS and effective GBR/MBR back to configured values, drops all
 * outstanding UL allocation records, then delegates to the UL scheduler's
 * own reset hook. */
6899 static Void rgSCHCmnUlUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
6901 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
6902 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
6904 RgSchCmnLcg *lcgCmn;
6906 RgSchCmnAllocRecord *allRcd;
6908 ue->ul.minReqBytes = 0;
6909 ue->ul.totalBsr = 0;
6911 ue->ul.nonGbrLcgBs = 0;
6912 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk the UE's UL allocation list (loop body partially elided here). */
6914 node = ueUl->ulAllocLst.first;
6917 allRcd = (RgSchCmnAllocRecord *)node->node;
6921 for(lcgCnt = 0; lcgCnt < RGSCH_MAX_LCG_PER_UE; lcgCnt++)
6923 lcgCmn = RG_SCH_CMN_GET_UL_LCG(&ue->ul.lcgArr[lcgCnt]);
6925 lcgCmn->reportedBs = 0;
6926 lcgCmn->effGbr = lcgCmn->cfgdGbr;
6927 lcgCmn->effDeltaMbr = lcgCmn->deltaMbr;
6929 rgSCHCmnUlUeDelAllocs(cell, ue);
6931 ue->isSrGrant = FALSE;
6933 cellSchd->apisUl->rgSCHUlUeReset(cell, ue);
6935 /* Stack Crash problem for TRACE5 changes. Added the return below */
6941 * @brief RESET UL CQI and DL CQI&RI to conservative values
6942 * for a reestablishing UE.
6946 * Function : rgSCHCmnResetRiCqi
6948 * RESET UL CQI and DL CQI&RI to conservative values
6949 * for a reestablishing UE
6951 * @param[in] RgSchCellCb *cell
6952 * @param[in] RgSchUeCb *ue
/* For a re-establishing UE: resets UL CQI and DL CQI/RI to conservative
 * defaults (CCCH CQI, rank 1), forces transmit diversity where the TM
 * requires PMI/RI knowledge the eNB no longer has, recomputes allocation
 * limits, and requests an early aperiodic CQI report. */
6955 static Void rgSCHCmnResetRiCqi(RgSchCellCb *cell,RgSchUeCb *ue)
6957 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
6958 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
6959 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
6960 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
6963 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
6964 cell->isCpUlExtend);
/* Conservative DL defaults: both codewords at CCCH CQI, rank 1. */
6966 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
6967 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
6968 ueDl->mimoInfo.ri = 1;
/* TM4/TM6 depend on PMI feedback, TM3 on RI feedback — force TD until
 * fresh reports arrive. */
6969 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
6970 (ue->mimoInfo.txMode == RGR_UE_TM_6))
6972 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
6974 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
6976 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* The two calls below are alternate (elided) #ifdef EMTC branches. */
6979 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
6981 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
6985 /* Request for an early Aper CQI in case of reest */
6986 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
6987 if(acqiCb && acqiCb->aCqiCfg.pres)
6989 acqiCb->aCqiTrigWt = 0;
6997 * @brief Perform UE reset procedure.
7001 * Function : rgSCHCmnDlUeReset
7003 * This functions performs BO resetting and
7004 * triggers DL specific scheduler
7005 * to Perform UE reset procedure.
7007 * @param[in] RgSchCellCb *cell
7008 * @param[in] RgSchUeCb *ue
/* DL part of UE reset: removes the UE from the PDCCH-order queue and the
 * timing-advance list (or stops a running TA timer), then delegates to the
 * DL scheduler's reset hook and the SCell DL reset. */
7011 static Void rgSCHCmnDlUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
7013 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7014 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
7015 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7018 if (ueDl->rachInfo.poLnk.node != NULLP)
7020 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
7023 /* Fix: syed Remove from TA List if this UE is there.
7024 * If TA Timer is running. Stop it */
7025 if (ue->dlTaLnk.node)
7027 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
7028 ue->dlTaLnk.node = (PTR)NULLP;
7030 else if (ue->taTmr.tmrEvnt != TMR_NONE)
7032 rgSCHTmrStopTmr(cell, ue->taTmr.tmrEvnt, ue);
7035 cellSchd->apisDl->rgSCHDlUeReset(cell, ue);
7039 rgSCHSCellDlUeReset(cell,ue);
7045 * @brief Perform UE reset procedure.
7049 * Function : rgSCHCmnUeReset
7051 * This functions triggers specific scheduler
7052 * to Perform UE reset procedure.
7054 * @param[in] RgSchCellCb *cell
7055 * @param[in] RgSchUeCb *ue
/* Full UE reset entry point: clears RACH association and power control,
 * resets UL and DL scheduler state, forces common-search-space PDCCH use,
 * resets RI/CQI on every configured serving cell, triggers an aperiodic CQI
 * on the PCell, and tells MAC to reset the UE's HARQ entity. */
7060 Void rgSCHCmnUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
7064 RgInfResetHqEnt hqEntRstInfo;
7066 /* RACHO: remove UE from pdcch, handover and rapId assoc Qs */
7067 rgSCHCmnDelRachInfo(cell, ue);
7069 rgSCHPwrUeReset(cell, ue);
7071 rgSCHCmnUlUeReset(cell, ue);
7072 rgSCHCmnDlUeReset(cell, ue);
7075 /* Making allocCmnUlPdcch TRUE to allocate DCI0/1A from Common search space.
7076 As because multiple cells are added hence 2 bits CqiReq is there
7077 This flag will be set to FALSE once we will get Scell READY */
7078 ue->allocCmnUlPdcch = TRUE;
7081 /* Fix : syed RESET UL CQI and DL CQI&RI to conservative values
7082 * for a reestablishing UE */
7083 /*Reset Cqi Config for all the configured cells*/
7084 for (idx = 0;idx < CM_LTE_MAX_CELLS; idx++)
7086 if (ue->cellInfo[idx] != NULLP)
7088 rgSCHCmnResetRiCqi(ue->cellInfo[idx]->cell, ue);
7091 /*After Reset Trigger APCQI for Pcell*/
7092 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7093 if(pCellInfo->acqiCb.aCqiCfg.pres)
7095 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
7098 /* sending HqEnt reset to MAC */
7099 hqEntRstInfo.cellId = cell->cellId;
7100 hqEntRstInfo.crnti = ue->ueId;
7102 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
7103 RgSchMacRstHqEnt(&pst,&hqEntRstInfo);
7109 * @brief UE out of MeasGap or AckNackReptn.
7113 * Function : rgSCHCmnActvtUlUe
7115 * This functions triggers specific scheduler
7116 * to start considering it for scheduling.
7118 * @param[in] RgSchCellCb *cell
7119 * @param[in] RgSchUeCb *ue
/* Re-activates a UE for UL scheduling (e.g. after a measurement gap or
 * ACK/NACK repetition window) by delegating to the UL scheduler hook. */
7124 Void rgSCHCmnActvtUlUe(RgSchCellCb *cell,RgSchUeCb *ue)
7126 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7128 /* : take care of this in UL retransmission */
7129 cellSchd->apisUl->rgSCHUlActvtUe(cell, ue);
7134 * @brief UE out of MeasGap or AckNackReptn.
7138 * Function : rgSCHCmnActvtDlUe
7140 * This functions triggers specific scheduler
7141 * to start considering it for scheduling.
7143 * @param[in] RgSchCellCb *cell
7144 * @param[in] RgSchUeCb *ue
/* DL counterpart of rgSCHCmnActvtUlUe: hands the UE back to the DL
 * scheduler for consideration. */
7149 Void rgSCHCmnActvtDlUe(RgSchCellCb *cell,RgSchUeCb *ue)
7151 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7153 cellSchd->apisDl->rgSCHDlActvtUe(cell, ue);
7158 * @brief This API is invoked to indicate scheduler of a CRC indication.
7162 * Function : rgSCHCmnHdlUlTransInd
7163 * This API is invoked to indicate scheduler of a CRC indication.
7165 * @param[in] RgSchCellCb *cell
7166 * @param[in] RgSchUeCb *ue
7167 * @param[in] CmLteTimingInfo timingInfo
/* Records the time of the UE's latest UL transmission and, if the UE had
 * been marked PDCCH-order-inactive, clears that inactivity bit in both the
 * DL and UL inactivity masks so the UE is scheduled again. */
7171 Void rgSCHCmnHdlUlTransInd(RgSchCellCb *cell,RgSchUeCb *ue,CmLteTimingInfo timingInfo)
7174 /* Update the latest UL dat/sig transmission time */
7175 RGSCHCPYTIMEINFO(timingInfo, ue->ul.ulTransTime);
7176 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
7178 /* Some UL Transmission from this UE.
7179 * Activate this UE if it was inactive */
7180 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7181 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7189 * @brief Compute the minimum Rank based on Codebook subset
7190 * restriction configuration for 4 Tx Ports and Tx Mode 4.
7194 * Function : rgSCHCmnComp4TxMode4
7196 * Depending on BitMap set at CBSR during Configuration
7197 * - return the least possible Rank
7200 * @param[in] uint32_t *pmiBitMap
7201 * @return RgSchCmnRank
7203 static RgSchCmnRank rgSCHCmnComp4TxMode4(uint32_t *pmiBitMap)
7205 uint32_t bitMap0, bitMap1;
7206 bitMap0 = pmiBitMap[0];
7207 bitMap1 = pmiBitMap[1];
7208 if((bitMap1) & 0xFFFF)
7210 return (RG_SCH_CMN_RANK_1);
7212 else if((bitMap1>>16) & 0xFFFF)
7214 return (RG_SCH_CMN_RANK_2);
7216 else if((bitMap0) & 0xFFFF)
7218 return (RG_SCH_CMN_RANK_3);
7220 else if((bitMap0>>16) & 0xFFFF)
7222 return (RG_SCH_CMN_RANK_4);
7226 return (RG_SCH_CMN_RANK_1);
7232 * @brief Compute the minimum Rank based on Codebook subset
7233 * restriction configuration for 2 Tx Ports and Tx Mode 4.
7237 * Function : rgSCHCmnComp2TxMode4
7239 * Depending on BitMap set at CBSR during Configuration
7240 * - return the least possible Rank
7243 * @param[in] uint32_t *pmiBitMap
7244 * @return RgSchCmnRank
7246 static RgSchCmnRank rgSCHCmnComp2TxMode4(uint32_t *pmiBitMap)
7249 bitMap0 = pmiBitMap[0];
7250 if((bitMap0>>26)& 0x0F)
7252 return (RG_SCH_CMN_RANK_1);
7254 else if((bitMap0>>30) & 3)
7256 return (RG_SCH_CMN_RANK_2);
7260 return (RG_SCH_CMN_RANK_1);
7265 * @brief Compute the minimum Rank based on Codebook subset
7266 * restriction configuration for 4 Tx Ports and Tx Mode 3.
7270 * Function : rgSCHCmnComp4TxMode3
7272 * Depending on BitMap set at CBSR during Configuration
7273 * - return the least possible Rank
7276 * @param[in] uint32_t *pmiBitMap
7277 * @return RgSchCmnRank
7279 static RgSchCmnRank rgSCHCmnComp4TxMode3(uint32_t *pmiBitMap)
7282 bitMap0 = pmiBitMap[0];
7283 if((bitMap0>>28)& 1)
7285 return (RG_SCH_CMN_RANK_1);
7287 else if((bitMap0>>29) &1)
7289 return (RG_SCH_CMN_RANK_2);
7291 else if((bitMap0>>30) &1)
7293 return (RG_SCH_CMN_RANK_3);
7295 else if((bitMap0>>31) &1)
7297 return (RG_SCH_CMN_RANK_4);
7301 return (RG_SCH_CMN_RANK_1);
7306 * @brief Compute the minimum Rank based on Codebook subset
7307 * restriction configuration for 2 Tx Ports and Tx Mode 3.
7311 * Function : rgSCHCmnComp2TxMode3
7313 * Depending on BitMap set at CBSR during Configuration
7314 * - return the least possible Rank
7317 * @param[in] uint32_t *pmiBitMap
7318 * @return RgSchCmnRank
7320 static RgSchCmnRank rgSCHCmnComp2TxMode3(uint32_t *pmiBitMap)
7323 bitMap0 = pmiBitMap[0];
7324 if((bitMap0>>30)& 1)
7326 return (RG_SCH_CMN_RANK_1);
7328 else if((bitMap0>>31) &1)
7330 return (RG_SCH_CMN_RANK_2);
7334 return (RG_SCH_CMN_RANK_1);
7339 * @brief Compute the minimum Rank based on Codebook subset
7340 * restriction configuration.
7344 * Function : rgSCHCmnComputeRank
7346 * Depending on Num Tx Ports and Transmission mode
7347 * - return the least possible Rank
7350 * @param[in] RgrTxMode txMode
7351 * @param[in] uint32_t *pmiBitMap
7352 * @param[in] uint8_t numTxPorts
7353 * @return RgSchCmnRank
7355 static RgSchCmnRank rgSCHCmnComputeRank(RgrTxMode txMode,uint32_t *pmiBitMap,uint8_t numTxPorts)
7358 if (numTxPorts ==2 && txMode == RGR_UE_TM_3)
7360 return (rgSCHCmnComp2TxMode3(pmiBitMap));
7362 else if (numTxPorts ==4 && txMode == RGR_UE_TM_3)
7364 return (rgSCHCmnComp4TxMode3(pmiBitMap));
7366 else if (numTxPorts ==2 && txMode == RGR_UE_TM_4)
7368 return (rgSCHCmnComp2TxMode4(pmiBitMap));
7370 else if (numTxPorts ==4 && txMode == RGR_UE_TM_4)
7372 return (rgSCHCmnComp4TxMode4(pmiBitMap));
7376 return (RG_SCH_CMN_RANK_1);
7383 * @brief Harq Entity Deinitialization for CMN SCH.
7387 * Function : rgSCHCmnDlDeInitHqEnt
7389 * Harq Entity Deinitialization for CMN SCH
7391 * @param[in] RgSchCellCb *cell
7392 * @param[in] RgSchDlHqEnt *hqE
7395 /*KWORK_FIX:Changed function return type to void */
7396 Void rgSCHCmnDlDeInitHqEnt(RgSchCellCb *cell,RgSchDlHqEnt *hqE)
7398 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7399 RgSchDlHqProcCb *hqP;
/* Let the specific DL scheduler (RR/MaxCi/...) release its per-HARQ-entity
 * context before the common-scheduler blocks are freed below. */
7403 ret = cellSchd->apisDl->rgSCHDlUeHqEntDeInit(cell, hqE);
7404 /* Free only If the Harq proc are created*/
/* Walk every DL HARQ process and free the common-scheduler control block
 * (hqP->sch) only when one was actually allocated. */
7409 for(cnt = 0; cnt < hqE->numHqPrcs; cnt++)
7411 hqP = &hqE->procs[cnt];
7412 if ((RG_SCH_CMN_GET_DL_HQP(hqP)))
7414 rgSCHUtlFreeSBuf(cell->instIdx,
7415 (Data**)(&(hqP->sch)), (sizeof(RgSchCmnDlHqProc)));
/* NOTE(review): LAA HARQ de-init — presumably guarded by an LTE_ADV/LAA
 * compile flag in the full file; confirm against the complete source. */
7419 rgSCHLaaDeInitDlHqProcCb (cell, hqE);
7426 * @brief Harq Entity initialization for CMN SCH.
7430 * Function : rgSCHCmnDlInitHqEnt
7432 * Harq Entity initialization for CMN SCH
7434 * @param[in] RgSchCellCb *cell
7435 * @param[in] RgSchUeCb *ue
7440 S16 rgSCHCmnDlInitHqEnt(RgSchCellCb *cell,RgSchDlHqEnt *hqEnt)
7442 RgSchDlHqProcCb *hqP;
7444 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Allocate a common-scheduler control block (hqP->sch) for every DL HARQ
 * process of this entity; allocation failure aborts the init. */
7446 for(cnt = 0; cnt < hqEnt->numHqPrcs; cnt++)
7448 hqP = &hqEnt->procs[cnt];
7449 if (rgSCHUtlAllocSBuf(cell->instIdx,
7450 (Data**)&(hqP->sch), (sizeof(RgSchCmnDlHqProc))) != ROK)
/* EMTC UEs go through the eMTC-specific DL scheduler init; others use the
 * regular specific DL scheduler. NOTE(review): the EMTC branch is presumably
 * under an EMTC_ENABLE compile flag in the full file — confirm. */
7456 if((cell->emtcEnable) &&(hqEnt->ue->isEmtcUe))
7458 if(ROK != cellSchd->apisEmtcDl->rgSCHDlUeHqEntInit(cell, hqEnt))
7467 if(ROK != cellSchd->apisDl->rgSCHDlUeHqEntInit(cell, hqEnt))
7474 } /* rgSCHCmnDlInitHqEnt */
7477 * @brief This function computes distribution of refresh period
7481 * Function: rgSCHCmnGetRefreshDist
7482 * Purpose: This function computes distribution of refresh period
7483 * This is required to align set of UEs refresh
7484 * around the different consecutive subframe.
7486 * Invoked by: rgSCHCmnGetRefreshPerDist
7488 * @param[in] RgSchCellCb *cell
7489 * @param[in] RgSchUeCb *ue
7493 static uint8_t rgSCHCmnGetRefreshDist(RgSchCellCb *cell,RgSchUeCb *ue)
/* Find the first refresh-offset group with spare capacity, claim a slot in
 * it and record the offset on the UE. NOTE(review): the success path
 * presumably returns refOffst from inside this loop — confirm in full file. */
7497 for(refOffst = 0; refOffst < RGSCH_MAX_REFRESH_OFFSET; refOffst++)
7499 if(cell->refreshUeCnt[refOffst] < RGSCH_MAX_REFRESH_GRPSZ)
7501 cell->refreshUeCnt[refOffst]++;
7502 ue->refreshOffset = refOffst;
7503 /* DU_LOG("\nINFO --> SCH : UE[%d] refresh offset[%d]. Cell refresh ue count[%d].\n", ue->ueId, refOffst, cell->refreshUeCnt[refOffst]); */
7508 DU_LOG("\nERROR --> SCH : Allocation of refresh distribution failed\n");
7509 /* We should not enter here normally, but incase of failure, allocating from last offset*/
/* All groups full: over-subscribe the last offset rather than fail. */
7510 cell->refreshUeCnt[refOffst-1]++;
7511 ue->refreshOffset = refOffst-1;
7513 return (refOffst-1);
7516 * @brief This function computes initial Refresh Wait Period.
7520 * Function: rgSCHCmnGetRefreshPer
7521 * Purpose: This function computes initial Refresh Wait Period.
7522 * This is required to align multiple UEs refresh
7523 * around the same time.
7525 * Invoked by: rgSCHCmnGetRefreshPer
7527 * @param[in] RgSchCellCb *cell
7528 * @param[in] RgSchUeCb *ue
7529 * @param[in] uint32_t *waitPer
7533 static Void rgSCHCmnGetRefreshPer(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t *waitPer)
7535 uint32_t refreshPer;
7536 uint32_t crntSubFrm;
7539 refreshPer = RG_SCH_CMN_REFRESH_TIME * RG_SCH_CMN_REFRESH_TIMERES;
7540 crntSubFrm = cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot;
7541 /* Fix: syed align multiple UEs to refresh at same time */
7542 *waitPer = refreshPer - (crntSubFrm % refreshPer);
7543 *waitPer = RGSCH_CEIL(*waitPer, RG_SCH_CMN_REFRESH_TIMERES);
7544 *waitPer = *waitPer + rgSCHCmnGetRefreshDist(cell, ue);
7552 * @brief UE initialisation for scheduler.
7556 * Function : rgSCHCmnRgrSCellUeCfg
7558 * This function initialises UE specific scheduler
7559 * information for SCELL
7560 * 0. Perform basic validations
7561 * 1. Allocate common sched UE cntrl blk
7562 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
7564 * 4. Perform DLFS cfg
7566 * @param[in] RgSchCellCb *cell
7567 * @param[in] RgSchUeCb *ue
7568 * @param[out] RgSchErrInfo *err
7573 S16 rgSCHCmnRgrSCellUeCfg(RgSchCellCb *sCell,RgSchUeCb *ue,RgrUeSecCellCfg *sCellInfoCfg,RgSchErrInfo *err)
7578 RgSchCmnAllocRecord *allRcd;
7579 RgSchDlRbAlloc *allocInfo;
7580 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
7582 RgSchCmnUlUe *ueUlPcell;
7583 RgSchCmnUe *pCellUeSchCmn;
7584 RgSchCmnUe *ueSchCmn;
7586 RgSchCmnDlUe *pCellUeDl;
7588 Inst inst = ue->cell->instIdx;
/* Map the SCell id into the UE's per-cell index table (masked to table size) */
7590 uint32_t idx = (uint8_t)((sCell->cellId - rgSchCb[sCell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
7592 pCellUeSchCmn = RG_SCH_CMN_GET_UE(ue,ue->cell);
7593 pCellUeDl = &pCellUeSchCmn->dl;
7595 /* 1. Allocate Common sched control block */
7596 if((rgSCHUtlAllocSBuf(sCell->instIdx,
7597 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
7599 DU_LOG("\nERROR --> SCH : Memory allocation FAILED\n");
7600 err->errCause = RGSCHERR_SCH_CFG;
7603 ueSchCmn = RG_SCH_CMN_GET_UE(ue,sCell);
7605 /*2. Perform UEs downlink configuration */
7606 ueDl = &ueSchCmn->dl;
/* Inherit MIMO context from the PCell's common-scheduler UE block */
7609 ueDl->mimoInfo = pCellUeDl->mimoInfo;
7611 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
7612 (ue->mimoInfo.txMode == RGR_UE_TM_6))
7614 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_NO_PMI);
7616 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7618 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_RI_1);
7620 RGSCH_ARRAY_BOUND_CHECK(sCell->instIdx, rgUeCatTbl, pCellUeSchCmn->cmn.ueCat);
7621 ueDl->maxTbBits = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlTbBits;
/* Cat-6/7 UEs get the larger max-TB-size entry when 4 Tx layers apply */
7624 ri = RGSCH_MIN(ri, sCell->numTxAntPorts);
7625 if(((CM_LTE_UE_CAT_6 == pCellUeSchCmn->cmn.ueCat )
7626 ||(CM_LTE_UE_CAT_7 == pCellUeSchCmn->cmn.ueCat))
7629 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[1];
7633 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[0];
7636 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* NOTE(review): the two maxSbSz computations below are presumably the
 * TDD (#ifdef) and FDD variants — confirm guards in the full file. */
7638 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
7639 rgSchTddDlNumHarqProcTbl[sCell->ulDlCfgIdx]);
7641 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
7642 RGSCH_NUM_DL_HQ_PROC);
7645 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, ue->isEmtcUe);
7647 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, FALSE);
7651 /* ambrCfgd config moved to ueCb.dl, as it's not needed for per cell wise*/
7653 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, sCell);
7654 allocInfo->rnti = ue->ueId;
7656 /* Initializing the lastCfi value to current cfi value */
7657 ueDl->lastCfi = cellSchd->dl.currCfi;
7659 if ((cellSchd->apisDl->rgSCHRgrSCellDlUeCfg(sCell, ue, err)) != ROK)
7661 DU_LOG("\nERROR --> SCH : Spec Sched DL UE CFG FAILED\n");
7665 /* TODO: enhance for DLFS RB Allocation for SCELLs in future dev */
7667 /* DLFS UE Config */
7668 if (cellSchd->dl.isDlFreqSel)
7670 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeCfg(sCell, ue, sCellInfoCfg, err)) != ROK)
7672 DU_LOG("\nERROR --> SCH : DLFS UE config FAILED\n");
7677 /* TODO: Do UL SCELL CFG during UL CA dev */
7679 ueUl = RG_SCH_CMN_GET_UL_UE(ue, sCell);
7681 /* TODO_ULCA: SRS for SCELL needs to be handled in the below function call */
7682 rgSCHCmnUpdUeUlCqiInfo(sCell, ue, ueUl, ueSchCmn, cellSchd,
7683 sCell->isCpUlExtend);
7685 ret = rgSCHUhmHqEntInit(sCell, ue);
7688 DU_LOG("\nERROR --> SCH : SCELL UHM HARQ Ent Init "
7689 "Failed for CRNTI:%d", ue->ueId);
/* UL HARQ entity mirrors the PCell's max-retx setting */
7693 ueUlPcell = RG_SCH_CMN_GET_UL_UE(ue, ue->cell);
7694 /* Initialize uplink HARQ related information for UE */
7695 ueUl->hqEnt.maxHqRetx = ueUlPcell->hqEnt.maxHqRetx;
7696 cmLListInit(&ueUl->hqEnt.free);
7697 cmLListInit(&ueUl->hqEnt.inUse);
7698 for(i=0; i < ueUl->hqEnt.numHqPrcs; i++)
7700 ueUl->hqEnt.hqProcCb[i].hqEnt = (void*)(&ueUl->hqEnt);
7701 ueUl->hqEnt.hqProcCb[i].procId = i;
7702 ueUl->hqEnt.hqProcCb[i].ulSfIdx = RGSCH_INVALID_INFO;
7703 ueUl->hqEnt.hqProcCb[i].alloc = NULLP;
7705 /* ccpu00139513- Initializing SPS flags*/
7706 ueUl->hqEnt.hqProcCb[i].isSpsActvnHqP = FALSE;
7707 ueUl->hqEnt.hqProcCb[i].isSpsOccnHqP = FALSE;
7709 cmLListAdd2Tail(&ueUl->hqEnt.free, &ueUl->hqEnt.hqProcCb[i].lnk);
7710 ueUl->hqEnt.hqProcCb[i].lnk.node = (PTR)&ueUl->hqEnt.hqProcCb[i];
7713 /* Allocate UL BSR allocation tracking List */
7714 cmLListInit(&ueUl->ulAllocLst);
/* Pre-allocate the fixed pool of UL allocation-tracking records */
7716 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
7718 if((rgSCHUtlAllocSBuf(sCell->instIdx,
7719 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord) != ROK))
7721 DU_LOG("\nERROR --> SCH : SCELL Memory allocation FAILED"
7722 "for CRNTI:%d",ue->ueId);
7723 err->errCause = RGSCHERR_SCH_CFG;
7726 allRcd->allocTime = sCell->crntTime;
7727 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
7728 allRcd->lnk.node = (PTR)allRcd;
7731 /* After initialising UL part, do power related init */
7732 ret = rgSCHPwrUeSCellCfg(sCell, ue, sCellInfoCfg);
7735 DU_LOG("\nERROR --> SCH : Could not do "
7736 "power config for UE CRNTI:%d",ue->ueId);
7741 if(TRUE == ue->isEmtcUe)
7743 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
7745 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
7746 "for CRNTI:%d",ue->ueId);
7753 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
7755 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
7756 "for CRNTI:%d",ue->ueId);
7761 ue->ul.isUlCaEnabled = TRUE;
7765 } /* rgSCHCmnRgrSCellUeCfg */
7769 * @brief UE initialisation for scheduler.
7773 * Function : rgSCHCmnRgrSCellUeDel
7775 * This functions Delete UE specific scheduler
7776 * information for SCELL
7778 * @param[in] RgSchCellCb *cell
7779 * @param[in] RgSchUeCb *ue
7784 S16 rgSCHCmnRgrSCellUeDel(RgSchUeCellInfo *sCellInfo,RgSchUeCb *ue)
7786 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
7787 Inst inst = ue->cell->instIdx;
/* Specific DL scheduler releases its SCell-side UE context first */
7790 cellSchd->apisDl->rgSCHRgrSCellDlUeDel(sCellInfo, ue);
/* Drop all UL allocations tracked on this SCell */
7793 rgSCHCmnUlUeDelAllocs(sCellInfo->cell, ue);
7796 if(TRUE == ue->isEmtcUe)
7798 cellSchd->apisEmtcUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
7803 cellSchd->apisUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
7806 /* DLFS UE Config */
7807 if (cellSchd->dl.isDlFreqSel)
7809 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeDel(sCellInfo->cell, ue)) != ROK)
7811 DU_LOG("\nERROR --> SCH : DLFS Scell del FAILED\n");
/* Finally free the common-scheduler UE control block for this SCell */
7816 rgSCHUtlFreeSBuf(sCellInfo->cell->instIdx,
7817 (Data**)(&(sCellInfo->sch)), (sizeof(RgSchCmnUe)));
7821 } /* rgSCHCmnRgrSCellUeDel */
7827 * @brief Handles 5gtf configuration for a UE
7831 * Function : rgSCHCmn5gtfUeCfg
7837 * @param[in] RgSchCellCb *cell
7838 * @param[in] RgSchUeCb *ue
7839 * @param[in] RgrUeCfg *cfg
7844 S16 rgSCHCmn5gtfUeCfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeCfg *cfg)
7847 RgSchUeGrp *ue5gtfGrp;
/* Copy the 5GTF parameters from the RGR config into the UE control block */
7848 ue->ue5gtfCb.grpId = cfg->ue5gtfCfg.grpId;
7849 ue->ue5gtfCb.BeamId = cfg->ue5gtfCfg.BeamId;
7850 ue->ue5gtfCb.numCC = cfg->ue5gtfCfg.numCC;
7851 ue->ue5gtfCb.mcs = cfg->ue5gtfCfg.mcs;
7852 ue->ue5gtfCb.maxPrb = cfg->ue5gtfCfg.maxPrb;
/* Hard-coded CQI/RI reporting defaults: period 100, first occasion (10,0), rank 1 */
7854 ue->ue5gtfCb.cqiRiPer = 100;
7855 /* 5gtf TODO: CQIs to start from (10,0)*/
7856 ue->ue5gtfCb.nxtCqiRiOccn.sfn = 10;
7857 ue->ue5gtfCb.nxtCqiRiOccn.slot = 0;
7858 ue->ue5gtfCb.rank = 1;
7860 DU_LOG("\nINFO --> SCH : schd cfg at mac,%u,%u,%u,%u,%u\n",ue->ue5gtfCb.grpId,ue->ue5gtfCb.BeamId,ue->ue5gtfCb.numCC,
7861 ue->ue5gtfCb.mcs,ue->ue5gtfCb.maxPrb);
7863 ue5gtfGrp = &(cell->cell5gtfCb.ueGrp5gConf[ue->ue5gtfCb.BeamId]);
7865 /* TODO_5GTF: Currently handling 1 group only. Need to update when multi group
7866 scheduling comes into picture */
/* Reject duplicate registration on the same beam; otherwise claim the bit */
7867 if(ue5gtfGrp->beamBitMask & (1 << ue->ue5gtfCb.BeamId))
7869 DU_LOG("\nERROR --> SCH : 5GTF_ERROR Invalid beam id CRNTI:%d",cfg->crnti);
7872 ue5gtfGrp->beamBitMask |= (1 << ue->ue5gtfCb.BeamId);
7879 * @brief UE initialisation for scheduler.
7883 * Function : rgSCHCmnRgrUeCfg
7885 * This function initialises UE specific scheduler
7887 * 0. Perform basic validations
7888 * 1. Allocate common sched UE cntrl blk
7889 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
7891 * 4. Perform DLFS cfg
7893 * @param[in] RgSchCellCb *cell
7894 * @param[in] RgSchUeCb *ue
7895 * @param[int] RgrUeCfg *ueCfg
7896 * @param[out] RgSchErrInfo *err
7901 S16 rgSCHCmnRgrUeCfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeCfg *ueCfg,RgSchErrInfo *err)
7903 RgSchDlRbAlloc *allocInfo;
7905 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7906 RgSchCmnUe *ueSchCmn;
7910 RgSchCmnAllocRecord *allRcd;
/* Map the cell id into the UE's per-cell index table (masked to table size) */
7912 uint32_t idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
7913 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7916 /* 1. Allocate Common sched control block */
7917 if((rgSCHUtlAllocSBuf(cell->instIdx,
7918 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
7920 DU_LOG("\nERROR --> SCH : Memory allocation FAILED for CRNTI:%d",ueCfg->crnti);
7921 err->errCause = RGSCHERR_SCH_CFG;
7924 ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
7925 ue->dl.ueDlCqiCfg = ueCfg->ueDlCqiCfg;
7926 pCellInfo->acqiCb.aCqiCfg = ueCfg->ueDlCqiCfg.aprdCqiCfg;
/* RGR UE category is 1-based; internal table index is 0-based */
7927 if(ueCfg->ueCatEnum > 0 )
7929 /*KWORK_FIX removed NULL chk for ueSchCmn*/
7930 ueSchCmn->cmn.ueCat = ueCfg->ueCatEnum - 1;
7934 ueSchCmn->cmn.ueCat = 0; /* Assuming enum values correctly set */
7936 cmInitTimers(&ueSchCmn->cmn.tmr, 1);
7938 /*2. Perform UEs downlink configuration */
7939 ueDl = &ueSchCmn->dl;
7940 /* RACHO : store the rapId assigned for HandOver UE.
7941 * Append UE to handover list of cmnCell */
7942 if (ueCfg->dedPreambleId.pres == PRSNT_NODEF)
7944 rgSCHCmnDelDedPreamble(cell, ueCfg->dedPreambleId.val);
7945 ueDl->rachInfo.hoRapId = ueCfg->dedPreambleId.val;
7946 cmLListAdd2Tail(&cellSchd->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
7947 ueDl->rachInfo.hoLnk.node = (PTR)ue;
7950 rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd);
/* Transmission-mode dependent transmit-diversity forcing */
7952 if (ueCfg->txMode.pres == TRUE)
7954 if ((ueCfg->txMode.txModeEnum == RGR_UE_TM_4) ||
7955 (ueCfg->txMode.txModeEnum == RGR_UE_TM_6))
7957 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
7959 if (ueCfg->txMode.txModeEnum == RGR_UE_TM_3)
7961 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
7964 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
7965 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
/* Cat-6/7 UEs get the larger max-TB-size entry when 4 Tx layers apply */
7968 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
7969 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
7970 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
7973 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
7977 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
7980 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* NOTE(review): the two maxSbSz computations below are presumably the
 * TDD (#ifdef) and FDD variants — confirm guards in the full file. */
7982 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
7983 rgSchTddDlNumHarqProcTbl[cell->ulDlCfgIdx]);
7985 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
7986 RGSCH_NUM_DL_HQ_PROC);
7989 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
7991 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
7993 /* if none of the DL and UL AMBR are configured then fail the configuration
7995 if((ueCfg->ueQosCfg.dlAmbr == 0) && (ueCfg->ueQosCfg.ueBr == 0))
7997 DU_LOG("\nERROR --> SCH : UL Ambr and DL Ambr are"
7998 "configured as 0 for CRNTI:%d",ueCfg->crnti);
7999 err->errCause = RGSCHERR_SCH_CFG;
/* AMBR is apportioned over the refresh period (value per refresh tick) */
8003 ue->dl.ambrCfgd = (ueCfg->ueQosCfg.dlAmbr * RG_SCH_CMN_REFRESH_TIME)/100;
8005 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
8006 allocInfo->rnti = ue->ueId;
8008 /* Initializing the lastCfi value to current cfi value */
8009 ueDl->lastCfi = cellSchd->dl.currCfi;
8011 if(cell->emtcEnable && ue->isEmtcUe)
8013 if ((cellSchd->apisEmtcDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8015 DU_LOG("\nERROR --> SCH : Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8023 if ((cellSchd->apisDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8025 DU_LOG("\nERROR --> SCH : Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8032 /* 3. Initialize ul part */
8033 ueUl = &ueSchCmn->ul;
8035 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
8036 cell->isCpUlExtend);
8038 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8039 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8041 ue->ul.cfgdAmbr = (ueCfg->ueQosCfg.ueBr * RG_SCH_CMN_REFRESH_TIME)/100;
8042 ue->ul.effAmbr = ue->ul.cfgdAmbr;
8043 RGSCHCPYTIMEINFO(cell->crntTime, ue->ul.ulTransTime);
8045 /* Allocate UL BSR allocation tracking List */
8046 cmLListInit(&ueUl->ulAllocLst);
/* Pre-allocate the fixed pool of UL allocation-tracking records */
8048 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8050 if((rgSCHUtlAllocSBuf(cell->instIdx,
8051 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8053 DU_LOG("\nERROR --> SCH : Memory allocation FAILED"
8054 "for CRNTI:%d",ueCfg->crnti);
8055 err->errCause = RGSCHERR_SCH_CFG;
8058 allRcd->allocTime = cell->crntTime;
8059 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8060 allRcd->lnk.node = (PTR)allRcd;
8062 /* Allocate common sch cntrl blocks for LCGs */
8063 for (cnt=0; cnt<RGSCH_MAX_LCG_PER_UE; cnt++)
8065 ret = rgSCHUtlAllocSBuf(cell->instIdx,
8066 (Data**)&(ue->ul.lcgArr[cnt].sch), (sizeof(RgSchCmnLcg)));
8069 DU_LOG("\nERROR --> SCH : SCH struct alloc failed for CRNTI:%d",ueCfg->crnti);
8070 err->errCause = RGSCHERR_SCH_CFG;
8074 /* After initialising UL part, do power related init */
8075 ret = rgSCHPwrUeCfg(cell, ue, ueCfg);
8078 DU_LOG("\nERROR --> SCH : Could not do "
8079 "power config for UE CRNTI:%d",ueCfg->crnti);
8083 ret = rgSCHCmnSpsUeCfg(cell, ue, ueCfg, err);
8086 DU_LOG("\nERROR --> SCH : Could not do "
8087 "SPS config for CRNTI:%d",ueCfg->crnti);
8090 #endif /* LTEMAC_SPS */
8093 if(TRUE == ue->isEmtcUe)
8095 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
8097 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
8098 "for CRNTI:%d",ueCfg->crnti);
8105 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
8107 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
8108 "for CRNTI:%d",ueCfg->crnti);
8113 /* DLFS UE Config */
8114 if (cellSchd->dl.isDlFreqSel)
8116 if ((cellSchd->apisDlfs->rgSCHDlfsUeCfg(cell, ue, ueCfg, err)) != ROK)
8118 DU_LOG("\nERROR --> SCH : DLFS UE config FAILED"
8119 "for CRNTI:%d",ueCfg->crnti);
8124 /* Fix: syed align multiple UEs to refresh at same time */
8125 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
8126 /* Start UE Qos Refresh Timer */
8127 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
8129 rgSCHCmn5gtfUeCfg(cell, ue, ueCfg);
8133 } /* rgSCHCmnRgrUeCfg */
8136 * @brief UE TX mode reconfiguration handler.
8140 * Function : rgSCHCmnDlHdlTxModeRecfg
8142 * This functions updates UE specific scheduler
8143 * information upon UE reconfiguration.
8145 * @param[in] RgSchUeCb *ue
8146 * @param[in] RgrUeRecfg *ueRecfg
/* NOTE(review): the two signatures below are presumably the TFU_UPGRADE
 * (#ifdef) and non-TFU_UPGRADE variants — confirm guards in the full file. */
8150 static Void rgSCHCmnDlHdlTxModeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg,uint8_t numTxPorts)
8152 static Void rgSCHCmnDlHdlTxModeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg)
8155 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
8157 if (ueRecfg->txMode.pres != PRSNT_NODEF)
8161 /* ccpu00140894- Starting Timer for TxMode Transition Completion*/
8162 ue->txModeTransCmplt =FALSE;
8163 rgSCHTmrStartTmr (ue->cell, ue, RG_SCH_TMR_TXMODE_TRNSTN, RG_SCH_TXMODE_TRANS_TIMER);
/* On RECFG_CMPLT: lift the transition mask and re-derive the forced-TD
 * flags from the newly applied transmission mode */
8164 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_CMPLT)
8166 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell,
8167 RG_SCH_CMN_TD_TXMODE_RECFG);
8168 /* MS_WORKAROUND for ccpu00123186 MIMO Fix Start: need to set FORCE TD bitmap based on TX mode */
8169 ueDl->mimoInfo.ri = 1;
8170 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8171 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
8173 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8175 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
8177 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8179 /* MIMO Fix End: need to set FORCE TD bitmap based on TX mode */
/* On RECFG_START: force transmit diversity for the duration of the
 * transition and reset MIMO state */
8182 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_START)
8184 /* start afresh forceTD masking */
8185 RG_SCH_CMN_INIT_FORCE_TD(ue, cell, 0);
8186 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_TXMODE_RECFG);
8187 /* Intialize MIMO related parameters of UE */
8190 if(ueRecfg->txMode.pres)
8192 if((ueRecfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
8193 (ueRecfg->txMode.txModeEnum ==RGR_UE_TM_4))
8195 if(ueRecfg->ueCodeBookRstRecfg.pres)
8198 rgSCHCmnComputeRank(ueRecfg->txMode.txModeEnum,
8199 ueRecfg->ueCodeBookRstRecfg.pmiBitMap, numTxPorts);
8203 ueDl->mimoInfo.ri = 1;
8208 ueDl->mimoInfo.ri = 1;
8213 ueDl->mimoInfo.ri = 1;
8216 ueDl->mimoInfo.ri = 1;
8217 #endif /* TFU_UPGRADE */
8218 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8219 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
8221 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8223 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
8225 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8230 /***********************************************************
8232 * Func : rgSCHCmnUpdUeMimoInfo
8234 * Desc : Updates UL and DL Ue Information
8242 **********************************************************/
static Void rgSCHCmnUpdUeMimoInfo(RgrUeCfg *ueCfg,RgSchCmnDlUe *ueDl,RgSchCellCb *cell,RgSchCmnCell *cellSchd)
/* Derive the UE's rank indicator (ri) from the CBSR PMI bitmap for TM3/TM4;
 * every other path initialises ri to 1. NOTE(review): the repeated
 * "ri = 1" assignments are presumably the else/#ifdef TFU_UPGRADE arms —
 * confirm against the full file. */
8246 if(ueCfg->txMode.pres)
8248 if((ueCfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
8249 (ueCfg->txMode.txModeEnum ==RGR_UE_TM_4))
8251 if(ueCfg->ueCodeBookRstCfg.pres)
8254 rgSCHCmnComputeRank(ueCfg->txMode.txModeEnum,
8255 ueCfg->ueCodeBookRstCfg.pmiBitMap, cell->numTxAntPorts);
8259 ueDl->mimoInfo.ri = 1;
8264 ueDl->mimoInfo.ri = 1;
8269 ueDl->mimoInfo.ri = 1;
8273 ueDl->mimoInfo.ri = 1;
8274 #endif /*TFU_UPGRADE */
/* Both codewords start at the cell's default CCCH CQI */
8275 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
8276 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
8280 /***********************************************************
8282 * Func : rgSCHCmnUpdUeUlCqiInfo
8284 * Desc : Updates UL and DL Ue Information
8292 **********************************************************/
static Void rgSCHCmnUpdUeUlCqiInfo(RgSchCellCb *cell,RgSchUeCb *ue,RgSchCmnUlUe *ueUl,RgSchCmnUe *ueSchCmn,RgSchCmnCell *cellSchd,Bool isEcp)
/* Seed the UE's UL CQI from the cell default, honouring SRS antenna
 * selection when configured; then initialise the UL link-adaptation
 * iTBS baseline from the CQI->iTBS table (isEcp selects the extended-CP
 * row). NOTE(review): parts of this body are presumably under
 * TFU_UPGRADE — confirm against the full file. */
8297 if(ue->srsCb.srsCfg.type == RGR_SCH_SRS_SETUP)
8299 if(ue->ul.ulTxAntSel.pres)
8301 ueUl->crntUlCqi[ue->srsCb.selectedAnt] = cellSchd->ul.dfltUlCqi;
8302 ueUl->validUlCqi = ueUl->crntUlCqi[ue->srsCb.selectedAnt];
8306 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
8307 ueUl->validUlCqi = ueUl->crntUlCqi[0];
8309 ue->validTxAnt = ue->srsCb.selectedAnt;
8313 ueUl->validUlCqi = cellSchd->ul.dfltUlCqi;
8317 ueUl->ulLaCb.cqiBasediTbs = rgSchCmnUlCqiToTbsTbl[isEcp]
8318 [ueUl->validUlCqi] * 100;
8319 ueUl->ulLaCb.deltaiTbs = 0;
8323 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
8324 #endif /*TFU_UPGRADE */
/* Cap the usable UL CQI when the UE category does not support UL 64QAM */
8325 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
8326 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
8328 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
8332 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
8337 /***********************************************************
8339 * Func : rgSCHCmnUpdUeCatCfg
8341 * Desc : Updates UL and DL Ue Information
8349 **********************************************************/
static Void rgSCHCmnUpdUeCatCfg(RgSchUeCb *ue,RgSchCellCb *cell)
8352 RgSchDlHqEnt *hqE = NULLP;
8353 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
8354 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
8355 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
8356 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Recompute all category-derived DL limits after a UE-category change */
8359 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
8361 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
/* Cat-6/7 UEs get the larger max-TB-size entry when 4 Tx layers apply */
8364 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
8365 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
8366 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8367 && (RG_SCH_MAX_TX_LYRS_4 == ri))
8369 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8373 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
/* NOTE(review): divisor line of this expression is not visible in this
 * chunk (presumably the HARQ-process count) — confirm in the full file. */
8376 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
/* Cap the usable UL CQI when the UE category does not support UL 64QAM */
8378 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
8380 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
8384 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
8386 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8387 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8392 * @brief UE reconfiguration for scheduler.
8396 * Function : rgSChCmnRgrUeRecfg
8398 * This functions updates UE specific scheduler
8399 * information upon UE reconfiguration.
8401 * @param[in] RgSchCellCb *cell
8402 * @param[in] RgSchUeCb *ue
8403 * @param[int] RgrUeRecfg *ueRecfg
8404 * @param[out] RgSchErrInfo *err
8409 S16 rgSCHCmnRgrUeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg,RgSchErrInfo *err)
8411 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
8414 /* Basic validations */
/* Each recfg-type bit in ueRecfgTypes drives one independent block below */
8415 if (ueRecfg->ueRecfgTypes & RGR_UE_TXMODE_RECFG)
/* NOTE(review): the two calls below are presumably the TFU_UPGRADE and
 * non-TFU_UPGRADE variants of the same dispatch — confirm guards. */
8418 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, cell->numTxAntPorts);
8420 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg);
8421 #endif /* TFU_UPGRADE */
8423 if(ueRecfg->ueRecfgTypes & RGR_UE_CSG_PARAM_RECFG)
8425 ue->csgMmbrSta = ueRecfg->csgMmbrSta;
8427 /* Changes for UE Category reconfiguration feature */
8428 if(ueRecfg->ueRecfgTypes & RGR_UE_UECAT_RECFG)
8430 rgSCHCmnUpdUeCatCfg(ue, cell);
8432 if (ueRecfg->ueRecfgTypes & RGR_UE_APRD_DLCQI_RECFG)
8434 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
8435 pCellInfo->acqiCb.aCqiCfg = ueRecfg->aprdDlCqiRecfg;
/* Only periodic CQI modes MOD10/MOD20 are supported; reject anything else */
8438 if (ueRecfg->ueRecfgTypes & RGR_UE_PRD_DLCQI_RECFG)
8440 if ((ueRecfg->prdDlCqiRecfg.pres == TRUE)
8441 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD10)
8442 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD20))
8444 DU_LOG("\nERROR --> SCH : Unsupported periodic CQI "
8445 "reporting mode %d for old CRNIT:%d",
8446 (int)ueRecfg->prdDlCqiRecfg.prdModeEnum,ueRecfg->oldCrnti);
8447 err->errCause = RGSCHERR_SCH_CFG;
8450 ue->dl.ueDlCqiCfg.prdCqiCfg = ueRecfg->prdDlCqiRecfg;
8454 if (ueRecfg->ueRecfgTypes & RGR_UE_ULPWR_RECFG)
8456 if (rgSCHPwrUeRecfg(cell, ue, ueRecfg) != ROK)
8458 DU_LOG("\nERROR --> SCH : Power Reconfiguration Failed for OLD CRNTI:%d",ueRecfg->oldCrnti);
8463 if (ueRecfg->ueRecfgTypes & RGR_UE_QOS_RECFG)
8465 /* Uplink Sched related Initialization */
8466 if ((ueRecfg->ueQosRecfg.dlAmbr == 0) && (ueRecfg->ueQosRecfg.ueBr == 0))
8468 DU_LOG("\nERROR --> SCH : Ul Ambr and DL Ambr "
8469 "configured as 0 for OLD CRNTI:%d",ueRecfg->oldCrnti);
8470 err->errCause = RGSCHERR_SCH_CFG;
8473 ue->ul.cfgdAmbr = (ueRecfg->ueQosRecfg.ueBr * \
8474 RG_SCH_CMN_REFRESH_TIME)/100;
8475 /* Downlink Sched related Initialization */
8476 ue->dl.ambrCfgd = (ueRecfg->ueQosRecfg.dlAmbr * \
8477 RG_SCH_CMN_REFRESH_TIME)/100;
8478 /* Fix: syed Update the effAmbr and effUeBR fields w.r.t the
8479 * new QOS configuration */
/* Re-anchor this UE's refresh cycle under the new QoS settings */
8480 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
8481 /* Fix: syed align multiple UEs to refresh at same time */
8482 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
8483 rgSCHCmnApplyUeRefresh(cell, ue);
8484 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
8487 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
8489 if ((cellSchCmn->apisEmtcUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8491 DU_LOG("\nERROR --> SCH : Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8494 if ((cellSchCmn->apisEmtcDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8496 DU_LOG("\nERROR --> SCH : Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8503 if ((cellSchCmn->apisUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8505 DU_LOG("\nERROR --> SCH : Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8508 if ((cellSchCmn->apisDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8510 DU_LOG("\nERROR --> SCH : Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8514 /* DLFS UE Config */
8515 if (cellSchCmn->dl.isDlFreqSel)
8517 if ((cellSchCmn->apisDlfs->rgSCHDlfsUeRecfg(cell, ue, \
8518 ueRecfg, err)) != ROK)
8520 DU_LOG("\nERROR --> SCH : DLFS UE re-config FAILED for CRNTI:%d",ue->ueId);
8526 /* Invoke re-configuration on SPS module */
8527 if (rgSCHCmnSpsUeRecfg(cell, ue, ueRecfg, err) != ROK)
8529 DU_LOG("\nERROR --> SCH : DL SPS ReCFG FAILED for UE CRNTI:%d", ue->ueId);
8535 } /* rgSCHCmnRgrUeRecfg*/
8537 /***********************************************************
8539 * Func : rgSCHCmnUlUeDelAllocs
8541 * Desc : Deletion of all UE allocations.
8549 **********************************************************/
static Void rgSCHCmnUlUeDelAllocs(RgSchCellCb *cell,RgSchUeCb *ue)
8552 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
8553 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
8556 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Release every outstanding UL HARQ process allocation for this UE */
8559 for (i = 0; i < ueUl->hqEnt.numHqPrcs; ++i)
8561 RgSchUlHqProcCb *proc = rgSCHUhmGetUlHqProc(cell, ue, i);
8564 /* proc can't be NULL here */
8572 /* Added Insure Fixes Of reading Dangling memory.NULLed crntAlloc */
/* NOTE(review): SPS-specific cleanup — presumably under LTEMAC_SPS; confirm */
8574 if(proc->alloc == ulSpsUe->ulSpsSchdInfo.crntAlloc)
8576 ulSpsUe->ulSpsSchdInfo.crntAlloc = NULLP;
8577 ulSpsUe->ulSpsSchdInfo.crntAllocSf = NULLP;
/* NOTE(review): the two rgSCHCmnUlFreeAllocation calls below are
 * presumably EMTC/non-EMTC compile-time variants — confirm guards. */
8581 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
8582 proc->alloc,ue->isEmtcUe);
8584 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
8587 /* PHY probably needn't be intimated since
8588 * whatever intimation it needs happens at the last minute
8591 /* Fix: syed Adaptive Msg3 Retx crash. Remove the harqProc
8592 * from adaptive retx List. */
8593 if (proc->reTxLnk.node)
8596 //TODO_SID: Need to take care
8597 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
8598 proc->reTxLnk.node = (PTR)NULLP;
8606 /***********************************************************
8608 * Func : rgSCHCmnDelUeFrmRefreshQ
8610 * Desc : Adds a UE to refresh queue, so that the UE is
8611 * periodically triggered to refresh it's GBR and
8620 **********************************************************/
static Void rgSCHCmnDelUeFrmRefreshQ(RgSchCellCb *cell,RgSchUeCb *ue)
8623 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
8625 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
8628 #ifdef RGL_SPECIFIC_CHANGES
/* Give back this UE's slot in its refresh-offset group accounting */
8629 if(ue->refreshOffset < RGSCH_MAX_REFRESH_GRPSZ)
8631 if(cell->refreshUeCnt[ue->refreshOffset])
8633 cell->refreshUeCnt[ue->refreshOffset]--;
/* Build the timer-deletion argument block and stop the UE refresh event.
 * NOTE(review): the deregistration call itself is not visible in this
 * chunk — presumably cmPlcCbTq/rgSCHCmnTmrProc machinery; confirm. */
8639 memset(&arg, 0, sizeof(arg));
8640 arg.tqCp = &sched->tmrTqCp;
8641 arg.tq = sched->tmrTq;
8642 arg.timers = &ueSchd->tmr;
8646 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
8652 /***********************************************************
8654 * Func : rgSCHCmnUeCcchSduDel
8656 * Desc : Clear CCCH SDU scheduling context.
8664 **********************************************************/
static Void rgSCHCmnUeCcchSduDel(RgSchCellCb *cell,RgSchUeCb *ueCb)
8667 RgSchDlHqEnt *hqE = NULLP;
8668 RgSchDlHqProcCb *ccchSduHqP = NULLP;
8669 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
8672 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
8677 ccchSduHqP = hqE->ccchSduProc;
/* Case 1: CCCH SDU still queued for first transmission — just dequeue */
8678 if(ueCb->ccchSduLnk.node != NULLP)
8680 /* Remove the ccchSduProc if it is in the Tx list */
8681 cmLListDelFrm(&(cell->ccchSduUeLst), &(ueCb->ccchSduLnk));
8682 ueCb->ccchSduLnk.node = NULLP;
/* Case 2: a CCCH HARQ process exists — release its PDCCH and TB context */
8684 else if(ccchSduHqP != NULLP)
8686 /* Fix for crash due to stale pdcch. Release ccch pdcch*/
8687 if(ccchSduHqP->pdcch)
8689 cmLListDelFrm(&ccchSduHqP->subFrm->pdcchInfo.pdcchs,
8690 &ccchSduHqP->pdcch->lnk);
8691 cmLListAdd2Tail(&cell->pdcchLst, &ccchSduHqP->pdcch->lnk);
8692 ccchSduHqP->pdcch = NULLP;
8694 if(ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node != NULLP)
8696 /* Remove the ccchSduProc if it is in the retx list */
8697 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
8698 &ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk);
8699 /* ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node = NULLP; */
8700 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
8702 else if ((ccchSduHqP->subFrm != NULLP) &&
8703 (ccchSduHqP->hqPSfLnk.node != NULLP))
/* Scheduled in a subframe but not in retx list: pull TB 0 out of the
 * subframe's Tx bookkeeping before releasing it */
8705 rgSCHUtlDlHqPTbRmvFrmTx(ccchSduHqP->subFrm,
8706 ccchSduHqP, 0, FALSE);
8707 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
8717 * @brief UE deletion for scheduler.
8721 * Function : rgSCHCmnUeDel
8723 * This functions deletes all scheduler information
8724 * pertaining to an UE.
8726 * @param[in] RgSchCellCb *cell
8727 * @param[in] RgSchUeCb *ue
8730 Void rgSCHCmnUeDel(RgSchCellCb *cell,RgSchUeCb *ue)
/* Deletes all common-scheduler state for a UE: CCCH SDU context, refresh
 * queue entry, UL allocations, RACH info, UL/DL scheduler-specific UE
 * context (EMTC-aware), secondary cells, power control, SPS, DL subframe
 * HARQ bookkeeping, DLFS context, per-LCG buffers and finally the
 * per-cell scheduler UE control block.
 * NOTE(review): guard/brace lines are elided in this listing; comments
 * below annotate only the statements shown. */
8732 RgSchDlHqEnt *hqE = NULLP;
8733 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
8735 RgSchCmnAllocRecord *allRcd;
8737 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
8740 if (RG_SCH_CMN_GET_UE(ue,cell) == NULLP)
8742 /* Common scheduler config has not happened yet */
8745 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
8748 /* UE Free can be triggered before MSG4 done when dlHqE is not updated */
8752 rgSCHEmtcCmnUeCcchSduDel(cell, ue);
8757 rgSCHCmnUeCcchSduDel(cell, ue);
8760 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
8762 rgSCHCmnUlUeDelAllocs(cell, ue);
8764 rgSCHCmnDelRachInfo(cell, ue);
/* UL scheduler UE context is freed via the EMTC or legacy API set,
 * depending on the UE type. */
8767 if(TRUE == ue->isEmtcUe)
8769 cellSchCmn->apisEmtcUl->rgSCHFreeUlUe(cell, ue);
8774 cellSchCmn->apisUl->rgSCHFreeUlUe(cell, ue);
/* Release every configured secondary cell (index 0 is the primary). */
8779 for(idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
8781 if(ue->cellInfo[idx] != NULLP)
8783 rgSCHSCellDelUeSCell(cell,ue,idx);
8790 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
8792 cellSchCmn->apisEmtcDl->rgSCHFreeDlUe(cell, ue);
8797 cellSchCmn->apisDl->rgSCHFreeDlUe(cell, ue);
8799 rgSCHPwrUeDel(cell, ue);
8802 rgSCHCmnSpsUeDel(cell, ue);
8803 #endif /* LTEMAC_SPS*/
8806 rgSchCmnDlSfHqDel(ue, cell);
8808 /* DLFS UE delete */
8809 if (cellSchCmn->dl.isDlFreqSel)
8811 cellSchCmn->apisDlfs->rgSCHDlfsUeDel(cell, ue);
/* Drain the UE's UL allocation record list, freeing each record. */
8813 node = ueUl->ulAllocLst.first;
8815 /* ccpu00117052 - MOD - Passing double pointer in all the places of
8816 rgSCHUtlFreeSBuf function call for proper NULLP assignment*/
8819 allRcd = (RgSchCmnAllocRecord *)node->node;
8821 cmLListDelFrm(&ueUl->ulAllocLst, &allRcd->lnk);
8822 rgSCHUtlFreeSBuf(cell->instIdx,
8823 (Data**)(&allRcd), (sizeof(RgSchCmnAllocRecord)));
/* Free the per-LCG scheduler context allocated for this UE. */
8826 for(cnt = 0; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
8828 if (ue->ul.lcgArr[cnt].sch != NULLP)
8830 rgSCHUtlFreeSBuf(cell->instIdx,
8831 (Data**)(&(ue->ul.lcgArr[cnt].sch)), (sizeof(RgSchCmnLcg)));
8835 /* Fix : syed Moved hqEnt deinit to rgSCHCmnDlDeInitHqEnt */
/* Map cellId to the UE's cell index (masked to CM_LTE_MAX_CELLS) and
 * free the scheduler UE control block for that serving cell. */
8836 idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId) & (CM_LTE_MAX_CELLS - 1));
8837 rgSCHUtlFreeSBuf(cell->instIdx,
8838 (Data**)(&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch))), (sizeof(RgSchCmnUe)));
8840 } /* rgSCHCmnUeDel */
8844 * @brief This function handles the common code rate configurations
8845 * done as part of RgrCellCfg/RgrCellRecfg.
8849 * Function: rgSCHCmnDlCnsdrCmnRt
8850 * Purpose: This function handles the common code rate configurations
8851 * done as part of RgrCellCfg/RgrCellRecfg.
8853 * Invoked by: Scheduler
8855 * @param[in] RgSchCellCb *cell
8856 * @param[in] RgrDlCmnCodeRateCfg *dlCmnCodeRate
8860 static S16 rgSCHCmnDlCnsdrCmnRt(RgSchCellCb *cell,RgrDlCmnCodeRateCfg *dlCmnCodeRate)
/* Applies the common-channel code-rate configuration (RgrCellCfg/Recfg):
 * derives bits-per-RB for BCCH/PCCH/RA-RNTI scheduling, the iTbs to use
 * for 2-RB and 3-RB common-channel grants, the PDCCH aggregation level
 * for common DCIs, and the CCCH CQI.
 * NOTE(review): some lines (else/brace/increment lines) are elided in
 * this listing. */
8862 RgSchCmnCell *cellDl = RG_SCH_CMN_GET_CELL(cell);
8864 uint32_t bitsPer2Rb;
8865 uint32_t bitsPer3Rb;
8870 /* code rate is bits per 1024 phy bits, since modl'n scheme is 2. it is
8871 * bits per 1024/2 REs */
8872 if (dlCmnCodeRate->bcchPchRaCodeRate != 0)
8874 bitsPerRb = ((dlCmnCodeRate->bcchPchRaCodeRate * 2) *
8875 cellDl->dl.noResPerRb[3])/1024;
8879 bitsPerRb = ((RG_SCH_CMN_DEF_BCCHPCCH_CODERATE * 2) *
8880 cellDl->dl.noResPerRb[3])/1024;
8882 /* Store bitsPerRb in cellDl->dl to use later to determine
8883 * Number of RBs for UEs with SI-RNTI, P-RNTI and RA-RNTI */
8884 cellDl->dl.bitsPerRb = bitsPerRb;
8885 /* ccpu00115595 end*/
8886 /* calculate the ITbs for 2 RBs. Initialize ITbs to MAX value */
8889 bitsPer2Rb = bitsPerRb * rbNum;
/* Walk iTbs upward while the TB size table still fits the capacity;
 * the last fitting index (i-1, floored at 0) becomes the 2-RB iTbs. */
8890 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer2Rb))
8893 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs2Rbs = 0) :
8894 (cellDl->dl.cmnChITbs.iTbs2Rbs = i-1);
8896 /* calculate the ITbs for 3 RBs. Initialize ITbs to MAX value */
8899 bitsPer3Rb = bitsPerRb * rbNum;
8900 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer3Rb))
8903 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs3Rbs = 0) :
8904 (cellDl->dl.cmnChITbs.iTbs3Rbs = i-1);
/* Raw DCI payload size (format 1A-style fields) used to size the PDCCH. */
8907 pdcchBits = 1 + /* Flag for format0/format1a differentiation */
8908 1 + /* Localized/distributed VRB assignment flag */
8911 3 + /* Harq process Id */
8913 4 + /* Harq process Id */
8914 2 + /* UL Index or DAI */
8916 1 + /* New Data Indicator */
8919 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
8920 (cell->bwCfg.dlTotalBw + 1))/2);
8921 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
8922 Since VRB is local */
8923 /* For TDD consider DAI */
8925 /* Convert the pdcchBits to actual pdcchBits required for transmission */
8926 if (dlCmnCodeRate->pdcchCodeRate != 0)
8928 pdcchBits = (pdcchBits * 1024)/dlCmnCodeRate->pdcchCodeRate;
8929 if (pdcchBits <= 288) /* 288 : Num of pdcch bits for aggrLvl=4 */
8931 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
8933 else /* 576 : Num of pdcch bits for aggrLvl=8 */
8935 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL8;
/* No PDCCH code rate configured: fall back to aggregation level 4. */
8940 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
8942 if (dlCmnCodeRate->ccchCqi == 0)
8948 cellDl->dl.ccchCqi = dlCmnCodeRate->ccchCqi;
8955 * @brief This function handles the configuration of cell for the first
8956 * time by the scheduler.
8960 * Function: rgSCHCmnDlRgrCellCfg
8961 * Purpose: Configuration received is stored into the data structures
8962 * Also, update the scheduler with the number of frames of
8963 * RACH preamble transmission.
8965 * Invoked by: BO and Scheduler
8967 * @param[in] RgSchCellCb* cell
8968 * @param[in] RgrCellCfg* cfg
8972 static S16 rgSCHCmnDlRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cfg,RgSchErrInfo *err)
/* DL scheduler cell configuration (TDD variant): stores the received
 * RgrCellCfg into the common DL cell context — RA subframes, Msg4 HARQ
 * limits/delay, per-TTI UE caps, special-subframe (DwPTS/UpPTS) setup,
 * per-subframe CCE counts, CQI->iTbs/efficiency tables per CFI, common
 * code-rate derivation, retx lists, DLFS and power configuration.
 * NOTE(review): many guard/else/brace lines are elided in this listing;
 * the annotations below cover only the statements shown. */
8974 RgSchCmnCell *cellSch;
8977 uint8_t numPdcchSym;
8978 uint8_t noSymPerSlot;
8979 uint8_t maxDlSubfrms = cell->numDlSubfrms;
8980 uint8_t splSubfrmIdx = cfg->spclSfCfgIdx;
8981 uint8_t swPtCnt = 0;
8983 RgSchTddSubfrmInfo subfrmInfo = rgSchTddMaxUlSubfrmTbl[cell->ulDlCfgIdx];
8996 cellSch = RG_SCH_CMN_GET_CELL(cell);
8997 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->\
8998 rachCfg.preambleFormat];
8999 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
9000 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
9002 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
9003 3 TTI (MAX L1+L2 processing delay at the UE) */
9004 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
9005 rgSchCmnHarqRtt[cell->ulDlCfgIdx] + 3;
9006 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9007 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9008 if (cfg->maxUePerDlSf == 0)
9010 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9012 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
9018 if (cell->bwCfg.dlTotalBw <= 10)
9028 /* DwPTS Scheduling Changes Start */
9029 cellSch->dl.splSfCfg = splSubfrmIdx;
/* Special-subframe configs with too-short DwPTS carry no DL data; the
 * disallowed index sets differ between extended and normal CP. */
9031 if (cfg->isCpDlExtend == TRUE)
9033 if((0 == splSubfrmIdx) || (4 == splSubfrmIdx) ||
9034 (7 == splSubfrmIdx) || (8 == splSubfrmIdx)
9037 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
9041 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
9046 /* Refer to 36.213 Section 7.1.7 */
9047 if((0 == splSubfrmIdx) || (5 == splSubfrmIdx))
9049 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
9053 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
9056 /* DwPTS Scheduling Changes End */
9058 splSfCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
9059 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
/* Classify each DL subframe (normal / SF0 / special with or without
 * data) and precompute its CCE count from the PHICH m value and CFI. */
9061 for (sfCount = 0; sfCount < maxDlSubfrms; sfCount++)
9063 sf = cell->subFrms[sfCount];
9064 /* Sfcount matches the first special subframe occurs at Index 0
9065 * or subsequent special subframes */
9066 if(subfrmInfo.switchPoints == 1)
9068 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
9069 RG_SCH_CMN_10_MS_PRD, &subfrmInfo);
9073 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
9074 RG_SCH_CMN_5_MS_PRD, &subfrmInfo);
9076 if(isSplfrm == TRUE)
9079 /* DwPTS Scheduling Changes Start */
9080 if (cell->splSubfrmCfg.isDlDataAllowed == TRUE)
9082 sf->sfType = RG_SCH_SPL_SF_DATA;
9086 sf->sfType = RG_SCH_SPL_SF_NO_DATA;
9088 /* DwPTS Scheduling Changes End */
9092 /* DwPTS Scheduling Changes Start */
9095 sf->sfType = RG_SCH_DL_SF;
9099 sf->sfType = RG_SCH_DL_SF_0;
9101 /* DwPTS Scheduling Changes End */
9104 /* Calculate the number of CCEs per subframe in the cell */
9105 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][sf->sfNum];
9106 if(cell->dynCfiCb.isDynCfiEnb == TRUE)
9108 /* In case if Dynamic CFI feature is enabled, default CFI
9109 * value 1 is used */
9110 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][1];
9114 if (sf->sfType == RG_SCH_SPL_SF_DATA)
9116 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
9120 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi)];
9125 /* Initialize the RACH response scheduling related information */
9126 if(rgSCHCmnDlRachInfoInit(cell) != ROK)
9131 /* Allocate PRACH preamble list */
9132 rgSCHCmnDlCreateRachPrmLst(cell);
9134 /* Initialize PHICH offset information */
9135 rgSCHCmnDlPhichOffsetInit(cell);
9137 /* Update the size of HARQ ACK/NACK feedback table */
9138 /* The array size is increased by 2 to have enough free indices, where other
9139 * indices are busy waiting for HARQ feedback */
9140 cell->ackNackFdbkArrSize = rgSchTddANFdbkMapTbl[cell->ulDlCfgIdx] + 2;
9142 /* Initialize expected HARQ ACK/NACK feedback time */
9143 rgSCHCmnDlANFdbkInit(cell);
9145 /* Initialize UL association set index */
9146 if(cell->ulDlCfgIdx != 0)
9148 rgSCHCmnDlKdashUlAscInit(cell);
/* Pick DwPTS/UpPTS symbol counts from the special-subframe table for
 * the configured CP combination (ext/nor DL x ext/nor UL). */
9151 if (cfg->isCpDlExtend == TRUE)
9153 cp = RG_SCH_CMN_EXT_CP;
9155 cell->splSubfrmCfg.dwPts =
9156 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlDwPts;
9158 if ( cell->splSubfrmCfg.dwPts == 0 )
9160 cell->isDwPtsCnted = FALSE;
9164 cell->isDwPtsCnted = TRUE;
9167 if(cfg->isCpUlExtend == TRUE)
9169 cell->splSubfrmCfg.upPts =
9170 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlExtUpPts;
9174 cell->splSubfrmCfg.upPts =
9175 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlNorUpPts;
9180 cp = RG_SCH_CMN_NOR_CP;
9182 cell->splSubfrmCfg.dwPts =
9183 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlDwPts;
9184 cell->isDwPtsCnted = TRUE;
9186 if(cfg->isCpUlExtend == TRUE)
9188 cell->splSubfrmCfg.upPts =
9189 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlExtUpPts;
9193 cell->splSubfrmCfg.upPts =
9194 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlNorUpPts;
9198 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
9199 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++,cfiIdx++)
9201 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
9202 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
9203 [cell->numTxAntPorts]][cfiIdx];
9204 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
9205 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
9206 [cell->numTxAntPorts]][cfiIdx];
9209 /* Initializing the values of CFI parameters */
9210 if(cell->dynCfiCb.isDynCfiEnb)
9212 /* If DCFI is enabled, current CFI value will start from 1 */
9213 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
9217 /* If DCFI is disabled, current CFI value is set as default max allowed CFI value */
9218 cellSch->dl.currCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
9219 cellSch->dl.newCfi = cellSch->dl.currCfi;
9222 /* Include CRS REs while calculating Efficiency
9223 * The number of Resource Elements occupied by CRS depends on Number of
9224 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
9225 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
9226 * details of the same. Please note that PDCCH overlap symbols would not
9227 * considered in CRS REs deduction */
9228 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
9230 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
9231 - numPdcchSym) *RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
9234 /* DwPTS Scheduling Changes Start */
9235 antPortIdx = (cell->numTxAntPorts == 1)? 0:
9236 ((cell->numTxAntPorts == 2)? 1: 2);
9238 if (cp == RG_SCH_CMN_NOR_CP)
9240 splSfIdx = (splSubfrmIdx == 4)? 1: 0;
9244 splSfIdx = (splSubfrmIdx == 3)? 1: 0;
9247 numCrs = rgSchCmnDwptsCrs[splSfIdx][antPortIdx];
9249 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI-1; cfi++)
9251 /* If CFI is 2 and Ant Port is 4, don't consider the sym 1 CRS REs */
9252 if (antPortIdx == 2 && cfi == 2)
9256 cellSch->dl.numReDwPts[cfi] = ((cell->splSubfrmCfg.dwPts - cfi)*
9257 RB_SCH_CMN_NUM_SCS_PER_RB) - numCrs;
9259 /* DwPTS Scheduling Changes End */
/* Per-UE DL bandwidth caps: zero means "use compiled default". */
9261 if (cfg->maxDlBwPerUe == 0)
9263 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
9267 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
9269 if (cfg->maxDlRetxBw == 0)
9271 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
9275 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
9277 /* Fix: MUE_PERTTI_DL*/
9278 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9279 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9280 if (cfg->maxUePerDlSf == 0)
9282 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9284 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
9285 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
9286 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
9288 DU_LOG("\nERROR --> SCH : Invalid configuration !: "
9289 "maxCcchPerDlSf %u > maxUePerDlSf %u",
9290 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
9294 else if (!cfg->maxCcchPerDlSf)
9296 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
9297 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
9298 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
9299 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
9300 * FLE crash in PHY as PHY has limit of 16 max*/
9301 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
9305 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
9307 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
9312 /*ccpu00118273 - ADD - start */
9313 cmLListInit(&cellSch->dl.msg4RetxLst);
9315 cmLListInit(&cellSch->dl.ccchSduRetxLst);
9318 #ifdef RG_PHASE2_SCHED
9319 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
9321 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
9323 if (cfg->dlfsCfg.isDlFreqSel)
9325 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
9331 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
9334 /* Power related configuration */
9335 ret = rgSCHPwrCellCfg(cell, cfg);
9341 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
9342 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
9343 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
9344 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
9345 cellSch->dl.msg4pAVal = cfg->msg4pAVal;
9350 * @brief This function handles the configuration of cell for the first
9351 * time by the scheduler.
9355 * Function: rgSCHCmnDlRgrCellCfg
9356 * Purpose: Configuration received is stored into the data structures
9357 * Also, update the scheduler with the number of frames of
9358 * RACH preamble transmission.
9360 * Invoked by: BO and Scheduler
9362 * @param[in] RgSchCellCb* cell
9363 * @param[in] RgrCellCfg* cfg
9364 * @param[in] RgSchErrInfo* err
9368 static S16 rgSCHCmnDlRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cfg,RgSchErrInfo *err)
/* DL scheduler cell configuration (non-TDD build variant of the function
 * above): stores RgrCellCfg into the DL cell context — RA subframes,
 * Msg4 HARQ delay, CQI tables per CFI, CFI defaults, per-RB RE counts,
 * per-UE bandwidth and per-TTI UE caps, common code rate, retx lists,
 * DLFS and power configuration.
 * NOTE(review): guard/else/brace lines are elided in this listing. */
9371 RgSchCmnCell *cellSch;
9373 uint8_t numPdcchSym;
9374 uint8_t noSymPerSlot;
9379 cellSch = RG_SCH_CMN_GET_CELL(cell);
9381 /* Initialize the parameters with the ones received in the */
9382 /* configuration. */
9384 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA
9385 * sub-frames from preamble format */
9386 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->rachCfg.preambleFormat];
9388 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
9389 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
9391 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
9392 3 TTI (MAX L1+L2 processing delay at the UE) */
9393 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
9394 rgSchCmnHarqRtt[7] + 3;
9396 if (cell->bwCfg.dlTotalBw <= 10)
9407 if (cell->isCpDlExtend == TRUE)
9409 cp = RG_SCH_CMN_EXT_CP;
9414 cp = RG_SCH_CMN_NOR_CP;
9418 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
9419 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, cfiIdx++)
9421 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
9423 cellSch->dl.emtcCqiToTbsTbl[0][cfi] = rgSchEmtcCmnCqiToTbs[0][cp][cfiIdx];
9425 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
9426 [cell->numTxAntPorts]][cfiIdx];
9427 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
9429 cellSch->dl.emtcCqiToTbsTbl[1][cfi] = rgSchEmtcCmnCqiToTbs[1][cp][cfiIdx];
9431 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
9432 [cell->numTxAntPorts]][cfiIdx];
9435 /* Initializing the values of CFI parameters */
9436 if(cell->dynCfiCb.isDynCfiEnb)
9438 /* If DCFI is enabled, current CFI value will start from 1 */
9439 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
9443 /* If DCFI is disabled, current CFI value is set as default CFI value */
9444 cellSch->dl.currCfi = cellSch->cfiCfg.cfi;
9445 cellSch->dl.newCfi = cellSch->dl.currCfi;
9448 /* Include CRS REs while calculating Efficiency
9449 * The number of Resource Elements occupied by CRS depends on Number of
9450 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
9451 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
9452 * details of the same. Please note that PDCCH overlap symbols would not
9453 * considered in CRS REs deduction */
9454 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
9456 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
9457 - numPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
/* Per-UE DL bandwidth caps: zero means "use compiled default". */
9460 if (cfg->maxDlBwPerUe == 0)
9462 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
9466 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
9468 if (cfg->maxDlRetxBw == 0)
9470 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
9474 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
9477 /* Fix: MUE_PERTTI_DL*/
9478 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9479 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9480 if (cfg->maxUePerDlSf == 0)
9482 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9484 /* Fix: MUE_PERTTI_DL syed validating Cell Configuration */
9485 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
9487 DU_LOG("\nERROR --> SCH : FAILED MaxUePerDlSf(%u) < MaxDlUeNewTxPerTti(%u)",
9488 cellSch->dl.maxUePerDlSf,
9489 cellSch->dl.maxUeNewTxPerTti);
9492 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
9493 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
9495 DU_LOG("\nERROR --> SCH : Invalid configuration !: "
9496 "maxCcchPerDlSf %u > maxUePerDlSf %u",
9497 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
9501 else if (!cfg->maxCcchPerDlSf)
9503 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
9504 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
9505 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
9506 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
9507 * FLE crash in PHY as PHY has limit of 16 max*/
9508 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
9512 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
9516 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
9520 cmLListInit(&cellSch->dl.msg4RetxLst);
9522 cmLListInit(&cellSch->dl.ccchSduRetxLst);
9525 #ifdef RG_PHASE2_SCHED
9526 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
9528 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
9530 if (cfg->dlfsCfg.isDlFreqSel)
9532 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
9538 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
9541 /* Power related configuration */
9542 ret = rgSCHPwrCellCfg(cell, cfg);
9548 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
9549 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
9550 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
9551 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
9552 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
9555 #endif /* LTE_TDD */
9557 /***********************************************************
9559 * Func : rgSCHCmnUlCalcReqRbCeil
9561 * Desc : Calculate RB required to satisfy 'bytes' for
9563 * Returns number of RBs such that requirement
9564 * is necessarily satisfied (does a 'ceiling'
9567 * Ret : Required RBs (uint8_t)
9573 **********************************************************/
9574 uint8_t rgSCHCmnUlCalcReqRbCeil(uint32_t bytes,uint8_t cqi,RgSchCmnUlCell *cellUl)
9576 uint32_t numRe = RGSCH_CEIL((bytes * 8) * 1024, rgSchCmnUlCqiTbl[cqi].eff);
9577 return ((uint8_t)RGSCH_CEIL(numRe, RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl)));
9580 /***********************************************************
9582 * Func : rgSCHCmnPrecompMsg3Vars
9584 * Desc : Precomputes the following for msg3 allocation:
9585 * 1. numSb and Imcs for msg size A
9586 * 2. numSb and Imcs otherwise
9590 * Notes: The corresponding vars in cellUl struct is filled
9595 **********************************************************/
9596 static S16 rgSCHCmnPrecompMsg3Vars(RgSchCmnUlCell *cellUl,uint8_t ccchCqi,uint16_t msgSzA,uint8_t sbSize,Bool isEcp)
/* Precomputes Msg3 grant parameters stored in cellUl->ra:
 * subband count and MCS for RA preamble group B (sized for msgSzA) and
 * group A (sized for the minimum Msg3 grant). CQI is first clamped to
 * the 16QAM limit and the MCS to the 4-bit RAR field maximum.
 * NOTE(review): loop-body/brace lines are elided in this listing. */
9603 uint16_t msg3GrntSz = 0;
9606 if (ccchCqi > cellUl->max16qamCqi)
9608 ccchCqi = cellUl->max16qamCqi;
9610 /* #ifndef RG_SCH_CMN_EXP_CP_SUP For ECP Pick the index 1 */
9612 ccchTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
9613 ccchMcs = rgSCHCmnUlGetIMcsFrmITbs(ccchTbs, CM_LTE_UE_CAT_1);
9615 /* MCS should fit in 4 bits in RAR */
9621 /* Limit the ccchMcs to 15 as it
9622 * can be inferred from 36.213, section 6.2 that msg3 imcs
9624 * Since, UE doesn't exist right now, we use CAT_1 for ue
9626 while((ccchMcs = (rgSCHCmnUlGetIMcsFrmITbs(
9627 rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi],CM_LTE_UE_CAT_1))
9629 RG_SCH_CMN_MAX_MSG3_IMCS)
9634 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
9636 if (msgSzA < RGSCH_MIN_MSG3_GRNT_SZ)
/* Group B sizing: subbands needed for msgSzA at this CQI, then grow the
 * allocation until the TB size table covers 8*msgSzA bits, and round the
 * subband count up to a 2/3/5-multiple supported by the DFT sizes. */
9640 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(msgSzA, ccchCqi, cellUl), sbSize);
9642 numRb = numSb * sbSize;
9643 msg3GrntSz = 8 * msgSzA;
9645 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
9648 numRb = numSb * sbSize;
9650 while (rgSchCmnMult235Tbl[numSb].match != numSb)
9654 /* Reversed(Corrected) the assignment for preamble-GrpA
9655 * Refer- TG36.321- section- 5.1.2*/
9656 cellUl->ra.prmblBNumSb = numSb;
9657 cellUl->ra.prmblBIMcs = ccchMcs;
/* Group A sizing: same procedure, but against the minimum Msg3 grant. */
9658 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(RGSCH_MIN_MSG3_GRNT_SZ, \
9662 numRb = numSb * sbSize;
9663 msg3GrntSz = 8 * RGSCH_MIN_MSG3_GRNT_SZ;
9664 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
9667 numRb = numSb * sbSize;
9669 while (rgSchCmnMult235Tbl[numSb].match != numSb)
9673 /* Reversed(Corrected) the assignment for preamble-GrpA
9674 * Refer- TG36.321- section- 5.1.2*/
9675 cellUl->ra.prmblANumSb = numSb;
9676 cellUl->ra.prmblAIMcs = ccchMcs;
/* Debug global; NOTE(review): name suggests it gates the CA_DBG PUCCH
 * detail prints in the UL bandwidth calculation below — confirm usage. */
9680 uint32_t gPrntPucchDet=0;
9683 /***********************************************************
9685 * Func : rgSCHCmnUlCalcAvailBw
9687 * Desc : Calculates bandwidth available for PUSCH scheduling.
9689 * Ret : S16 (ROK/RFAILED)
9695 **********************************************************/
9696 static S16 rgSCHCmnUlCalcAvailBw(RgSchCellCb *cell,RgrCellCfg *cellCfg,uint8_t cfi,uint8_t *rbStartRef,uint8_t *bwAvailRef)
/* TDD variant: computes the PUSCH region for a given CFI by excluding the
 * PUCCH RBs (format 2 region n2Rb, mixed RB, and the format 1 region
 * derived from the worst-case n1 resource across M DL subframes).
 * Outputs the first usable RB (*rbStartRef) and the usable RB count
 * (*bwAvailRef); fails when PUCCH would consume the whole UL bandwidth.
 * May lower cell->dynCfiCb.maxCfi if the PUCCH RB cap is exceeded.
 * NOTE(review): several declaration/brace lines are elided here. */
9699 uint8_t ulBw = cell->bwCfg.ulTotalBw;
9700 uint8_t n2Rb = cell->pucchCfg.resourceSize;
9701 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
9702 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
9703 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
9709 uint8_t exclRb; /* RBs to exclude */
9711 uint8_t puschRbStart;
9712 /* To avoid PUCCH and PUSCH collision issue */
9716 /* Maximum value of M as per Table 10.1-1 */
9717 uint8_t M[RGSCH_MAX_TDD_UL_DL_CFG] = {1, 2, 4, 3, 4, 9, 1};
9720 if (cell->isCpUlExtend)
9725 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
9727 /* Considering the max no. of CCEs for PUSCH BW calculation
9728 * based on min mi value */
9729 if (cell->ulDlCfgIdx == 0 || cell->ulDlCfgIdx == 6)
9738 totalCce = cell->dynCfiCb.cfi2NCceTbl[mi][cfi];
/* Worst-case n1 PUCCH resource over the M associated DL subframes. */
9740 P = rgSCHCmnGetPValFrmCCE(cell, totalCce-1);
9741 n1PlusOne = cell->rgSchTddNpValTbl[P + 1];
9742 n1Max = (M[cell->ulDlCfgIdx] - 1)*n1PlusOne + (totalCce-1) + n1Pucch;
9744 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
9746 n1RbPart = (c*n1Cs)/pucchDeltaShft;
9747 n1Rb = (n1Max - n1RbPart)/ n1PerRb;
9748 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
9750 /* get the total Number of RB's to be excluded for PUSCH */
9752 if(n1Pucch < n1RbPart)
9758 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
9760 puschRbStart = exclRb/2 + 1;
9762 /* Num of PUCCH RBs = puschRbStart*2 */
9763 if (puschRbStart * 2 >= ulBw)
9765 DU_LOG("\nERROR --> SCH : No bw available for PUSCH");
9769 *rbStartRef = puschRbStart;
9770 *bwAvailRef = ulBw - puschRbStart * 2;
/* If the configured PUCCH RB cap is exceeded at this CFI, restrict the
 * maximum CFI so later CFIs never reach this configuration. */
9772 if(cell->pucchCfg.maxPucchRb !=0 &&
9773 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
9775 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
9782 /***********************************************************
9784 * Func : rgSCHCmnUlCalcAvailBw
9786 * Desc : Calculates bandwidth available for PUSCH scheduling.
9788 * Ret : S16 (ROK/RFAILED)
9794 **********************************************************/
9795 static S16 rgSCHCmnUlCalcAvailBw(RgSchCellCb *cell,RgrCellCfg *cellCfg,uint8_t cfi,uint8_t *rbStartRef,uint8_t *bwAvailRef)
/* FDD/alternate-build variant of the function above: excludes PUCCH RBs
 * (n2Rb, mixed RB, n1 region from CCE count at this CFI, plus PUCCH
 * format 3 RBs when supported) and reports the PUSCH start RB and
 * available bandwidth. May lower cell->dynCfiCb.maxCfi when the PUCCH
 * RB cap is exceeded.
 * NOTE(review): several declaration/brace lines are elided here. */
9798 uint8_t ulBw = cell->bwCfg.ulTotalBw;
9799 uint8_t n2Rb = cell->pucchCfg.resourceSize;
9800 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
9801 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
9802 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
9808 uint8_t exclRb; /* RBs to exclude */
9810 uint8_t puschRbStart;
9812 uint16_t numOfN3PucchRb;
9813 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
9817 if (cell->isCpUlExtend)
9822 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
9824 totalCce = cell->dynCfiCb.cfi2NCceTbl[0][cfi];
9826 n1Max = n1Pucch + totalCce-1;
9828 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
9830 n1RbPart = (c*n1Cs)/pucchDeltaShft;
9831 n1Rb = (uint8_t)((n1Max - n1RbPart) / n1PerRb);
9832 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
9834 /* get the total Number of RB's to be excluded for PUSCH */
9836 if(n1Pucch < n1RbPart)
9842 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
9844 /*Support for PUCCH Format 3*/
9846 if (cell->isPucchFormat3Sptd)
/* One format-3 RB per 5 UEs of the DL per-SF cap, rounded up. */
9848 numOfN3PucchRb = RGSCH_CEIL(cellSch->dl.maxUePerDlSf,5);
9849 exclRb = exclRb + numOfN3PucchRb;
9852 puschRbStart = exclRb/2 + 1;
9857 DU_LOG("\nDEBUG --> SCH : CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%ld:%d:%d:%d:%d:%d]\n",
9858 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
9860 DU_LOG("\nDEBUG --> SCH : CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%d:%d:%d:%d:%d:%d]\n",
9861 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
9865 if (puschRbStart*2 >= ulBw)
9867 DU_LOG("\nERROR --> SCH : No bw available for PUSCH");
9871 *rbStartRef = puschRbStart;
9872 *bwAvailRef = ulBw - puschRbStart * 2;
/* If the configured PUCCH RB cap is exceeded at this CFI, restrict the
 * maximum CFI accordingly. */
9874 if(cell->pucchCfg.maxPucchRb !=0 &&
9875 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
9877 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
9886 /***********************************************************
9888 * Func : rgSCHCmnUlCellInit
9890 * Desc : Uplink scheduler initialisation for cell.
9898 **********************************************************/
9899 static S16 rgSCHCmnUlCellInit(RgSchCellCb *cell,RgrCellCfg *cellCfg)
9902 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
9903 uint8_t maxUePerUlSf = cellCfg->maxUePerUlSf;
9905 /* Added configuration for maximum number of MSG3s */
9906 uint8_t maxMsg3PerUlSf = cellCfg->maxMsg3PerUlSf;
9908 uint8_t maxUlBwPerUe = cellCfg->maxUlBwPerUe;
9909 uint8_t sbSize = cellCfg->puschSubBand.size;
9917 uint16_t ulDlCfgIdx = cell->ulDlCfgIdx;
9918 /* [ccpu00127294]-MOD-Change the max Ul subfrms size in TDD */
9919 uint8_t maxSubfrms = 2 * rgSchTddNumUlSf[ulDlCfgIdx];
9920 uint8_t ulToDlMap[12] = {0}; /* maximum 6 Subframes in UL * 2 */
9921 uint8_t maxUlsubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
9922 [RGSCH_NUM_SUB_FRAMES-1];
9926 uint8_t maxSubfrms = RG_SCH_CMN_UL_NUM_SF;
9932 #if (defined(LTE_L2_MEAS) )
9933 Inst inst = cell->instIdx;
9934 #endif /* #if (defined(LTE_L2_MEAS) || defined(DEBUGP) */
9935 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
9938 cellUl->maxUeNewTxPerTti = cellCfg->maxUlUeNewTxPerTti;
9939 if (maxUePerUlSf == 0)
9941 maxUePerUlSf = RG_SCH_CMN_MAX_UE_PER_UL_SF;
9944 if (maxMsg3PerUlSf == 0)
9946 maxMsg3PerUlSf = RG_SCH_CMN_MAX_MSG3_PER_UL_SF;
9948 /* fixed the problem while sending raRsp
9949 * if maxMsg3PerUlSf is greater than
9950 * RGSCH_MAX_RNTI_PER_RARNTI
9952 if(maxMsg3PerUlSf > RGSCH_MAX_RNTI_PER_RARNTI)
9954 maxMsg3PerUlSf = RGSCH_MAX_RNTI_PER_RARNTI;
9957 if(maxMsg3PerUlSf > maxUePerUlSf)
9959 maxMsg3PerUlSf = maxUePerUlSf;
9962 /*cellUl->maxAllocPerUlSf = maxUePerUlSf + maxMsg3PerUlSf;*/
9963 /*Max MSG3 should be a subset of Max UEs*/
9964 cellUl->maxAllocPerUlSf = maxUePerUlSf;
9965 cellUl->maxMsg3PerUlSf = maxMsg3PerUlSf;
9967 cellUl->maxAllocPerUlSf = maxUePerUlSf;
9969 /* Fix: MUE_PERTTI_UL syed validating Cell Configuration */
9970 if (cellUl->maxAllocPerUlSf < cellUl->maxUeNewTxPerTti)
9972 DU_LOG("\nERROR --> SCH : FAILED: MaxUePerUlSf(%u) < MaxUlUeNewTxPerTti(%u)",
9973 cellUl->maxAllocPerUlSf,
9974 cellUl->maxUeNewTxPerTti);
9980 for(idx = 0; idx < RGSCH_SF_ALLOC_SIZE; idx++)
9982 for(idx = 0; idx < RGSCH_NUM_SUB_FRAMES; idx++)
9986 ret = rgSCHUtlAllocSBuf(inst, (Data **)&(cell->sfAllocArr[idx].
9987 ulUeInfo.ulAllocInfo), (cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc)));
9990 DU_LOG("\nERROR --> SCH : Memory allocation failed ");
9995 if (maxUlBwPerUe == 0)
9997 /* ccpu00139362- Setting to configured UL BW instead of MAX BW(100)*/
9998 maxUlBwPerUe = cell->bwCfg.ulTotalBw;
10000 cellUl->maxUlBwPerUe = maxUlBwPerUe;
10002 /* FOR RG_SCH_CMN_EXT_CP_SUP */
10003 if (!cellCfg->isCpUlExtend)
10005 cellUl->ulNumRePerRb = 12 * (14 - RGSCH_UL_SYM_DMRS_SRS);
10009 cellUl->ulNumRePerRb = 12 * (12 - RGSCH_UL_SYM_DMRS_SRS);
10012 if (sbSize != rgSchCmnMult235Tbl[sbSize].match)
10014 DU_LOG("\nERROR --> SCH : Invalid subband size %d", sbSize);
10017 //Setting the subband size to 4 which is size of VRBG in 5GTF
10019 sbSize = MAX_5GTF_VRBG_SIZE;
10022 maxSbPerUe = maxUlBwPerUe / sbSize;
10023 if (maxSbPerUe == 0)
10025 DU_LOG("\nERROR --> SCH : rgSCHCmnUlCellInit(): "
10026 "maxUlBwPerUe/sbSize is zero");
10029 cellUl->maxSbPerUe = rgSchCmnMult235Tbl[maxSbPerUe].prvMatch;
10031 /* CQI related updations */
10032 if ((!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->ulCmnCodeRate.ccchCqi))
10033 || (!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->trgUlCqi.trgCqi)))
10035 DU_LOG("\nERROR --> SCH : rgSCHCmnUlCellInit(): "
10039 cellUl->dfltUlCqi = cellCfg->ulCmnCodeRate.ccchCqi;
10041 /* Changed the logic to determine maxUlCqi.
10042 * For a 16qam UE, maxUlCqi is the CQI Index at which
10043 * efficiency is as close as possible to RG_SCH_MAX_CODE_RATE_16QAM
10044 * Refer to 36.213-8.6.1 */
10045 for (i = RG_SCH_CMN_UL_NUM_CQI - 1;i > 0; --i)
10047 DU_LOG("\nINFO --> SCH : CQI %u:iTbs %u",i,
10048 rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i]);
10049 #ifdef MAC_SCH_STATS
10050 /* ccpu00128489 ADD Update mcs in hqFailStats here instead of at CRC
10051 * since CQI to MCS mapping does not change. The only exception is for
10052 * ITBS = 19 where the MCS can be 20 or 21 based on the UE cat. We
10053 * choose 20, instead of 21, ie UE_CAT_3 */
10054 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
10055 RG_SCH_CMN_UL_TBS_TO_MCS(iTbs, hqFailStats.ulCqiStat[i - 1].mcs);
10058 for (i = RG_SCH_CMN_UL_NUM_CQI - 1; i != 0; --i)
10060 /* Fix for ccpu00123912*/
10061 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
10062 if (iTbs <= RGSCH_UL_16QAM_MAX_ITBS) /* corresponds to 16QAM */
10064 DU_LOG("\nINFO --> SCH : 16 QAM CQI %u", i);
10065 cellUl->max16qamCqi = i;
10071 /* Precompute useful values for RA msg3 */
10072 ret = rgSCHCmnPrecompEmtcMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
10073 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
10080 /* Precompute useful values for RA msg3 */
10081 ret = rgSCHCmnPrecompMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
10082 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
10088 cellUl->sbSize = sbSize;
10091 cellUl->numUlSubfrms = maxSubfrms;
10093 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->ulSfArr,
10094 cellUl->numUlSubfrms * sizeof(RgSchUlSf));
10098 cellUl->numUlSubfrms = 0;
10102 /* store the DL subframe corresponding to the PUSCH offset
10103 * in their respective UL subframe */
10104 for(i=0; i < RGSCH_NUM_SUB_FRAMES; i++)
10106 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][i] != 0)
10108 subfrm = (i + rgSchTddPuschTxKTbl[ulDlCfgIdx][i]) % \
10109 RGSCH_NUM_SUB_FRAMES;
10110 subfrm = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subfrm]-1;
10111 dlIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][i]-1;
10112 RGSCH_ARRAY_BOUND_CHECK( cell->instIdx, ulToDlMap, subfrm);
10113 ulToDlMap[subfrm] = dlIdx;
10116 /* Copy the information in the remaining UL subframes based
10117 * on number of HARQ processes */
10118 for(i=maxUlsubfrms; i < maxSubfrms; i++)
10120 subfrm = i-maxUlsubfrms;
10121 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, i);
10122 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, subfrm)
10123 ulToDlMap[i] = ulToDlMap[subfrm];
10127 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++)
10130 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
10132 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
10141 cell->ulAvailBw = bwAvail;
10144 numSb = bwAvail/sbSize;
10146 cell->dynCfiCb.bwInfo[cfi].startRb = rbStart;
10147 cell->dynCfiCb.bwInfo[cfi].numSb = numSb;
10150 if(0 == cell->dynCfiCb.maxCfi)
10152 DU_LOG("\nERROR --> SCH : Incorrect Default CFI(%u), maxCfi(%u), maxPucchRb(%d)",
10153 cellSch->cfiCfg.cfi, cell->dynCfiCb.maxCfi,
10154 cell->pucchCfg.maxPucchRb);
10160 cellUl->dmrsArrSize = cell->dynCfiCb.bwInfo[1].numSb;
10161 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->dmrsArr,
10162 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10167 for (i = 0; i < cellUl->dmrsArrSize; ++i)
10169 cellUl->dmrsArr[i] = cellCfg->puschSubBand.dmrs[i];
10172 /* Init subframes */
10173 for (i = 0; i < maxSubfrms; ++i)
10175 ret = rgSCHUtlUlSfInit(cell, &cellUl->ulSfArr[i], i,
10176 cellUl->maxAllocPerUlSf);
10179 for (; i != 0; --i)
10181 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[i-1]);
10183 /* ccpu00117052 - MOD - Passing double pointer
10184 for proper NULLP assignment*/
10185 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)(&(cellUl->dmrsArr)),
10186 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10188 /* ccpu00117052 - MOD - Passing double pointer
10189 for proper NULLP assignment*/
10190 rgSCHUtlFreeSBuf(cell->instIdx,
10191 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
10196 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cellUl);
10201 * @brief Scheduler processing on cell configuration.
10205 * Function : rgSCHCmnRgrCellCfg
10207 * This function does requisite initialisation
10208 * and setup for scheduler1 when a cell is
10211 * @param[in] RgSchCellCb *cell
10212 * @param[in] RgrCellCfg *cellCfg
10213 * @param[out] RgSchErrInfo *err
10218 S16 rgSCHCmnRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cellCfg,RgSchErrInfo *err)
10221 RgSchCmnCell *cellSch;
10223 /* As part of RGR cell configuration, validate the CRGCellCfg
10224 * There is no trigger for crgCellCfg from SC1 */
10225 /* Removed failure check for Extended CP */
/* Allocate the common-scheduler cell control block, anchored at cell->sc.sch */
10227 if (((ret = rgSCHUtlAllocSBuf(cell->instIdx,
10228 (Data**)&(cell->sc.sch), (sizeof(RgSchCmnCell)))) != ROK))
10230 DU_LOG("\nERROR --> SCH : Memory allocation FAILED");
10231 err->errCause = RGSCHERR_SCH_CFG;
10234 cellSch = (RgSchCmnCell *)(cell->sc.sch);
10235 cellSch->cfiCfg = cellCfg->cfiCfg;
10236 cellSch->trgUlCqi.trgCqi = cellCfg->trgUlCqi.trgCqi;
10237 /* Initialize the scheduler refresh timer queues */
10238 cellSch->tmrTqCp.nxtEnt = 0;
10239 cellSch->tmrTqCp.tmrLen = RG_SCH_CMN_NUM_REFRESH_Q;
10241 /* RACHO: Initialize the RACH dedicated preamble information */
10242 rgSCHCmnCfgRachDedPrm(cell);
10244 /* Initialize 'Np' value for each 'p' used for
10245 * HARQ ACK/NACK reception */
10246 rgSCHCmnDlNpValInit(cell);
/* NOTE(review): the Np initialization appears twice below; the second call
 * is most likely under a compile-time conditional (e.g. TDD/FDD) whose
 * preprocessor lines are stripped from this view -- confirm in full file */
10249 /* Initialize 'Np' value for each 'p' used for
10250 * HARQ ACK/NACK reception */
10252 rgSCHCmnDlNpValInit(cell);
10255 /* Now perform uplink related initializations */
10256 ret = rgSCHCmnUlCellInit(cell, cellCfg);
10259 /* There is no downlink deinit to be performed */
10260 err->errCause = RGSCHERR_SCH_CFG;
10263 ret = rgSCHCmnDlRgrCellCfg(cell, cellCfg, err);
10266 err->errCause = RGSCHERR_SCH_CFG;
10269 /* DL scheduler has no initializations to make */
10270 /* As of now DL scheduler always returns ROK */
/* Precompute DCI format sizes and CQI-to-aggregation-level mappings */
10272 rgSCHCmnGetDciFrmtSizes(cell);
10273 rgSCHCmnGetCqiDciFrmt2AggrLvl(cell);
10275 rgSCHCmnGetEmtcDciFrmtSizes(cell);
10276 rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl(cell);
10277 #endif /* EMTC_ENABLE */
/* Bind the UL scheduler API table: EMTC-specific table when eMTC is enabled,
 * otherwise the table selected by the configured UL scheduler type */
10280 if(TRUE == cellCfg->emtcEnable)
10282 cellSch->apisEmtcUl = &rgSchEmtcUlSchdTbl[0];
10283 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
10290 cellSch->apisUl = &rgSchUlSchdTbl[RG_SCH_CMN_GET_UL_SCHED_TYPE(cell)];
10291 ret = cellSch->apisUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
/* Bind the DL scheduler API table in the same fashion */
10297 if(TRUE == cellCfg->emtcEnable)
10299 cellSch->apisEmtcDl = &rgSchEmtcDlSchdTbl[0];
10300 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
10307 cellSch->apisDl = &rgSchDlSchdTbl[RG_SCH_CMN_GET_DL_SCHED_TYPE(cell)];
10309 /* Perform SPS specific initialization for the cell */
10310 ret = rgSCHCmnSpsCellCfg(cell, cellCfg, err);
10316 ret = cellSch->apisDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
10321 rgSCHCmnInitVars(cell);
10324 } /* rgSCHCmnRgrCellCfg*/
10328 * @brief This function handles the reconfiguration of cell.
10332 * Function: rgSCHCmnRgrCellRecfg
10333 * Purpose: Update the reconfiguration parameters.
10335 * Invoked by: Scheduler
10337 * @param[in] RgSchCellCb* cell
10341 S16 rgSCHCmnRgrCellRecfg(RgSchCellCb *cell,RgrCellRecfg *recfg,RgSchErrInfo *err)
10344 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10345 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* UL common code-rate (CCCH CQI) reconfiguration: validate the new CQI,
 * then re-derive the precomputed msg3 allocation variables */
10348 if (recfg->recfgTypes & RGR_CELL_UL_CMNRATE_RECFG)
10350 uint8_t oldCqi = cellUl->dfltUlCqi;
10351 if (!RG_SCH_CMN_UL_IS_CQI_VALID(recfg->ulCmnCodeRate.ccchCqi))
10353 err->errCause = RGSCHERR_SCH_CFG;
10354 DU_LOG("\nERROR --> SCH : rgSCHCmnRgrCellRecfg(): "
10358 cellUl->dfltUlCqi = recfg->ulCmnCodeRate.ccchCqi;
10359 ret = rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
10360 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
/* Rollback path: restore the previous default CQI on precompute failure.
 * NOTE(review): the recompute below still passes the NEW ccchCqi rather
 * than oldCqi, which looks inconsistent with the rollback intent --
 * verify against the unabridged file before changing */
10363 cellUl->dfltUlCqi = oldCqi;
10364 rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
10365 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
/* DL common code-rate reconfiguration */
10370 if (recfg->recfgTypes & RGR_CELL_DL_CMNRATE_RECFG)
10372 if (rgSCHCmnDlCnsdrCmnRt(cell, &recfg->dlCmnCodeRate) != ROK)
10374 err->errCause = RGSCHERR_SCH_CFG;
/* Dispatch the reconfiguration to the eMTC or legacy UL/DL schedulers */
10380 if(TRUE == cell->emtcEnable)
10382 /* Invoke UL sched for cell Recfg */
10383 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
10389 /* Invoke DL sched for cell Recfg */
10390 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
10399 /* Invoke UL sched for cell Recfg */
10400 ret = cellSch->apisUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
10406 /* Invoke DL sched for cell Recfg */
10407 ret = cellSch->apisDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
/* DLFS (frequency-selective) and power reconfiguration, when requested */
10414 if (recfg->recfgTypes & RGR_CELL_DLFS_RECFG)
10416 ret = cellSch->apisDlfs->rgSCHDlfsCellRecfg(cell, recfg, err);
10421 cellSch->dl.isDlFreqSel = recfg->dlfsRecfg.isDlFreqSel;
10424 if (recfg->recfgTypes & RGR_CELL_PWR_RECFG)
10426 ret = rgSCHPwrCellRecfg(cell, recfg);
10436 /***********************************************************
10438 * Func : rgSCHCmnUlCellDeinit
10440 * Desc : Uplink scheduler de-initialisation for cell.
10448 **********************************************************/
10449 static Void rgSCHCmnUlCellDeinit(RgSchCellCb *cell)
10451 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
10454 uint8_t maxSubfrms = cellUl->numUlSubfrms;
10457 CmLList *lnk = NULLP;
10458 RgSchL2MeasCb *measCb;
/* Free per-subframe UL allocation info.
 * NOTE(review): two loop headers appear below; they are alternative
 * compile-time variants (the guarding preprocessor lines are stripped
 * from this view) -- only one is active in a given build */
10462 for(ulSfIdx = 0; ulSfIdx < RGSCH_SF_ALLOC_SIZE; ulSfIdx++)
10464 for(ulSfIdx = 0; ulSfIdx < RGSCH_NUM_SUB_FRAMES; ulSfIdx++)
10467 if(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo != NULLP)
10469 /* ccpu00117052 - MOD - Passing double pointer
10470 for proper NULLP assignment*/
10471 rgSCHUtlFreeSBuf(cell->instIdx,
10472 (Data **)(&(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo)),
10473 cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc));
10475 /* ccpu00117052 - DEL - removed explicit NULLP assignment
10476 as it is done in above utility function */
10479 /* Free the memory allocated to measCb */
10480 lnk = cell->l2mList.first;
10481 while(lnk != NULLP)
10483 measCb = (RgSchL2MeasCb *)lnk->node;
10484 cmLListDelFrm(&cell->l2mList, lnk);
10486 /* ccpu00117052 - MOD - Passing double pointer
10487 for proper NULLP assignment*/
10488 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&measCb,\
10489 sizeof(RgSchL2MeasCb));
/* Release the DMRS configuration array, if allocated */
10492 if (cellUl->dmrsArr != NULLP)
10494 /* ccpu00117052 - MOD - Passing double pointer
10495 for proper NULLP assignment*/
10496 rgSCHUtlFreeSBuf(cell->instIdx,(Data **)(&(cellUl->dmrsArr)),
10497 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10499 /* De-init subframes */
/* NOTE(review): as above, the two loop bounds are compile-time variants */
10501 for (ulSfIdx = 0; ulSfIdx < maxSubfrms; ++ulSfIdx)
10503 for (ulSfIdx = 0; ulSfIdx < RG_SCH_CMN_UL_NUM_SF; ++ulSfIdx)
10506 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[ulSfIdx]);
/* Finally free the UL subframe array itself */
10510 if (cellUl->ulSfArr != NULLP)
10512 /* ccpu00117052 - MOD - Passing double pointer
10513 for proper NULLP assignment*/
10514 rgSCHUtlFreeSBuf(cell->instIdx,
10515 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
10523 * @brief Scheduler processing for cell delete.
10527 * Function : rgSCHCmnCellDel
10529 * This functions de-initialises and frees memory
10530 * taken up by scheduler1 for the entire cell.
10532 * @param[in] RgSchCellCb *cell
10535 Void rgSCHCmnCellDel(RgSchCellCb *cell)
10537 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Nothing to free if the scheduler control block was never allocated */
10542 if (cellSch == NULLP)
10546 /* Perform the deinit for the UL scheduler */
10547 rgSCHCmnUlCellDeinit(cell);
10549 if(TRUE == cell->emtcEnable)
10551 if (cellSch->apisEmtcUl)
10553 cellSch->apisEmtcUl->rgSCHFreeUlCell(cell);
/* NULL-checks on the api tables guard a partially configured cell */
10557 if (cellSch->apisUl)
10559 /* api pointer checks added (here and below in
10560 * this function). pl check. - antriksh */
10561 cellSch->apisUl->rgSCHFreeUlCell(cell);
10564 /* Perform the deinit for the DL scheduler */
10565 cmLListInit(&cellSch->dl.taLst);
10566 if (cellSch->apisDl)
10568 cellSch->apisDl->rgSCHFreeDlCell(cell);
10571 if (cellSch->apisEmtcDl)
10573 rgSCHEmtcInitTaLst(&cellSch->dl);
10575 cellSch->apisEmtcDl->rgSCHFreeDlCell(cell);
10579 /* DLFS de-initialization */
10580 if (cellSch->dl.isDlFreqSel && cellSch->apisDlfs)
10582 cellSch->apisDlfs->rgSCHDlfsCellDel(cell);
/* Power-control and SPS module teardown */
10585 rgSCHPwrCellDel(cell);
10587 rgSCHCmnSpsCellDel(cell);
10590 /* ccpu00117052 - MOD - Passing double pointer
10591 for proper NULLP assignment*/
10592 rgSCHUtlFreeSBuf(cell->instIdx,
10593 (Data**)(&(cell->sc.sch)), (sizeof(RgSchCmnCell)));
10595 } /* rgSCHCmnCellDel */
10599 * @brief This function validates QOS parameters for DL.
10603 * Function: rgSCHCmnValidateDlQos
10604 * Purpose: This function validates QOS parameters for DL.
10606 * Invoked by: Scheduler
10608 * @param[in] CrgLchQosCfg *dlQos
10612 static S16 rgSCHCmnValidateDlQos(RgrLchQosCfg *dlQos)
10614 uint8_t qci = dlQos->qci;
/* QCI must lie within the supported range */
10615 if ( qci < RG_SCH_CMN_MIN_QCI || qci > RG_SCH_CMN_MAX_QCI )
/* For GBR QCIs, MBR must be non-zero and not smaller than GBR */
10620 if ((qci >= RG_SCH_CMN_GBR_QCI_START) &&
10621 (qci <= RG_SCH_CMN_GBR_QCI_END))
10623 if ((dlQos->mbr == 0) || (dlQos->mbr < dlQos->gbr))
10632 * @brief Scheduler invocation on logical channel addition.
10636 * Function : rgSCHCmnRgrLchCfg
10638 * This functions does required processing when a new
10639 * (dedicated) logical channel is added. Assumes lcg
10640 * pointer in ulLc is set.
10642 * @param[in] RgSchCellCb *cell
10643 * @param[in] RgSchUeCb *ue
10644 * @param[in] RgSchDlLcCb *dlLc
10645 * @param[int] RgrLchCfg *lcCfg
10646 * @param[out] RgSchErrInfo *err
10651 S16 rgSCHCmnRgrLchCfg
10662 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Allocate the per-LC DL service context hung off dlLc->sch */
10665 ret = rgSCHUtlAllocSBuf(cell->instIdx,
10666 (Data**)&((dlLc)->sch), (sizeof(RgSchCmnDlSvc)));
10669 DU_LOG("\nERROR --> SCH : rgSCHCmnRgrLchCfg(): "
10670 "SCH struct alloc failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10671 err->errCause = RGSCHERR_SCH_CFG;
/* Non-DCCH channels carry full QoS: validate and populate qci/prio/gbr/mbr.
 * gbr/mbr are scaled to bytes per refresh period (RG_SCH_CMN_REFRESH_TIME) */
10674 if(lcCfg->lcType != CM_LTE_LCH_DCCH)
10676 ret = rgSCHCmnValidateDlQos(&lcCfg->dlInfo.dlQos);
10679 DU_LOG("\nERROR --> SCH : rgSchCmnCrgLcCfg(): "
10680 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10681 err->errCause = RGSCHERR_SCH_CFG;
10684 /* Perform DL service activation in the scheduler */
10685 ((RgSchCmnDlSvc *)(dlLc->sch))->qci = lcCfg->dlInfo.dlQos.qci;
10686 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = rgSchCmnDlQciPrio[lcCfg->dlInfo.dlQos.qci - 1];
10687 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcCfg->dlInfo.dlQos.gbr * \
10688 RG_SCH_CMN_REFRESH_TIME)/100;
10689 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcCfg->dlInfo.dlQos.mbr * \
10690 RG_SCH_CMN_REFRESH_TIME)/100;
10694 /*assigning highest priority to DCCH */
10695 ((RgSchCmnDlSvc *)(dlLc->sch))->prio=RG_SCH_CMN_DCCH_PRIO;
10698 dlLc->lcType=lcCfg->lcType;
/* Register the new LC with the DL scheduler (eMTC table for eMTC UEs) */
10701 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
10703 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcCfg(cell, ue,dlLc ,lcCfg, err);
10712 ret = cellSch->apisDl->rgSCHRgrDlLcCfg(cell, ue, dlLc, lcCfg, err);
/* Register the LC with the UL scheduler as well */
10720 if(TRUE == ue->isEmtcUe)
10722 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
10731 ret = cellSch->apisUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
10741 rgSCHSCellDlLcCfg(cell, ue, dlLc);
/* SPS hook: only invoked when the service is SPS-enabled */
10747 if(lcCfg->dlInfo.dlSpsCfg.isSpsEnabled)
10749 /* Invoke SPS module if SPS is enabled for the service */
10750 ret = rgSCHCmnSpsDlLcCfg(cell, ue, dlLc, lcCfg, err);
10753 DU_LOG("\nERROR --> SCH : rgSchCmnRgrLchCfg(): "
10754 "SPS configuration failed for DL LC for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10755 err->errCause = RGSCHERR_SCH_CFG;
10765 * @brief Scheduler invocation on logical channel addition.
10769 * Function : rgSCHCmnRgrLchRecfg
10771 * This functions does required processing when an existing
10772 * (dedicated) logical channel is reconfigured. Assumes lcg
10773 * pointer in ulLc is set to the old value.
10774 * Independent of whether new LCG is meant to be configured,
10775 * the new LCG scheduler information is accessed and possibly modified.
10777 * @param[in] RgSchCellCb *cell
10778 * @param[in] RgSchUeCb *ue
10779 * @param[in] RgSchDlLcCb *dlLc
10780 * @param[int] RgrLchRecfg *lcRecfg
10781 * @param[out] RgSchErrInfo *err
10786 S16 rgSCHCmnRgrLchRecfg
10791 RgrLchRecfg *lcRecfg,
10796 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Non-DCCH: validate new QoS; QCI (and hence priority) changes are
 * rejected, only gbr/mbr are refreshed (scaled per refresh period) */
10799 if(dlLc->lcType != CM_LTE_LCH_DCCH)
10801 ret = rgSCHCmnValidateDlQos(&lcRecfg->dlRecfg.dlQos);
10805 DU_LOG("\nERROR --> SCH : DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10806 err->errCause = RGSCHERR_SCH_CFG;
10809 if (((RgSchCmnDlSvc *)(dlLc->sch))->qci != lcRecfg->dlRecfg.dlQos.qci)
10811 DU_LOG("\nERROR --> SCH : Qci, hence lc Priority change "
10812 "not supported for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10813 err->errCause = RGSCHERR_SCH_CFG;
10816 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcRecfg->dlRecfg.dlQos.gbr * \
10817 RG_SCH_CMN_REFRESH_TIME)/100;
10818 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcRecfg->dlRecfg.dlQos.mbr * \
10819 RG_SCH_CMN_REFRESH_TIME)/100;
10823 /*assigning highest priority to DCCH */
10824 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = RG_SCH_CMN_DCCH_PRIO;
/* Propagate the reconfiguration to the DL and UL schedulers
 * (eMTC api tables for eMTC UEs) */
10828 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
10830 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10835 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
10844 ret = cellSch->apisDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10849 ret = cellSch->apisUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
/* SPS reconfiguration on an existing DL LC is not supported; log and ignore */
10857 if (lcRecfg->recfgTypes & RGR_DL_LC_SPS_RECFG)
10859 /* Invoke SPS module if SPS is enabled for the service */
10860 if(lcRecfg->dlRecfg.dlSpsRecfg.isSpsEnabled)
10862 ret = rgSCHCmnSpsDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10865 DU_LOG("\nERROR --> SCH : SPS re-configuration not "
10866 "supported for dlLC Ignore this CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10877 * @brief Scheduler invocation on logical channel addition.
10881 * Function : rgSCHCmnRgrLcgCfg
10883 * This functions does required processing when a new
10884 * (dedicated) logical channel is added. Assumes lcg
10885 * pointer in ulLc is set.
10887 * @param[in] RgSchCellCb *cell,
10888 * @param[in] RgSchUeCb *ue,
10889 * @param[in] RgSchLcgCb *lcg,
10890 * @param[in] RgrLcgCfg *lcgCfg,
10891 * @param[out] RgSchErrInfo *err
10896 S16 rgSCHCmnRgrLcgCfg
10906 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10907 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCfg->ulInfo.lcgId].sch));
/* Scale configured GBR/MBR (given per 100 subframes) to bytes per
 * scheduler refresh period; effGbr/effDeltaMbr start at full budget */
10910 ulLcg->cfgdGbr = (lcgCfg->ulInfo.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
10911 ulLcg->effGbr = ulLcg->cfgdGbr;
10912 ulLcg->deltaMbr = ((lcgCfg->ulInfo.mbr - lcgCfg->ulInfo.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
10913 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
/* Register the LCG with the UL scheduler (eMTC table for eMTC UEs) */
10916 if(TRUE == ue->isEmtcUe)
10918 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
10927 ret = cellSch->apisUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
10933 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
10935 /* Indicate MAC that this LCG is GBR LCG */
10936 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcgCfg->ulInfo.lcgId, TRUE);
10942 * @brief Scheduler invocation on logical channel addition.
10946 * Function : rgSCHCmnRgrLcgRecfg
10948 * This functions does required processing when a new
10949 * (dedicated) logical channel is added. Assumes lcg
10950 * pointer in ulLc is set.
10952 * @param[in] RgSchCellCb *cell,
10953 * @param[in] RgSchUeCb *ue,
10954 * @param[in] RgSchLcgCb *lcg,
10955 * @param[in] RgrLcgRecfg *reCfg,
10956 * @param[out] RgSchErrInfo *err
10961 S16 rgSCHCmnRgrLcgRecfg
10966 RgrLcgRecfg *reCfg,
10971 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10972 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[reCfg->ulRecfg.lcgId].sch));
/* Re-scale GBR/MBR to bytes per refresh period, same as initial config */
10975 ulLcg->cfgdGbr = (reCfg->ulRecfg.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
10976 ulLcg->effGbr = ulLcg->cfgdGbr;
10977 ulLcg->deltaMbr = ((reCfg->ulRecfg.mbr - reCfg->ulRecfg.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
10978 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
/* Propagate to the UL scheduler (eMTC table for eMTC UEs) */
10981 if(TRUE == ue->isEmtcUe)
10983 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
10992 ret = cellSch->apisUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
/* Inform MAC whether the LCG is (still) GBR after the RAB modification */
10998 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
11000 /* Indicate MAC that this LCG is GBR LCG */
11001 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, TRUE);
11005 /* In case of RAB modification */
11006 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, FALSE);
11011 /***********************************************************
11013 * Func : rgSCHCmnRgrLchDel
11015 * Desc : Scheduler handling for a (dedicated)
11016 * uplink logical channel being deleted.
11023 **********************************************************/
11024 S16 rgSCHCmnRgrLchDel(RgSchCellCb *cell,RgSchUeCb *ue,CmLteLcId lcId,uint8_t lcgId)
11026 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Forward the LC deletion to the UL scheduler (eMTC table for eMTC UEs) */
11028 if(TRUE == ue->isEmtcUe)
11030 cellSch->apisEmtcUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
11035 cellSch->apisUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
11040 /***********************************************************
11042 * Func : rgSCHCmnLcgDel
11044 * Desc : Scheduler handling for a (dedicated)
11045 * uplink logical channel group (LCG) being deleted.
11053 **********************************************************/
11054 Void rgSCHCmnLcgDel(RgSchCellCb *cell,RgSchUeCb *ue,RgSchLcgCb *lcg)
11056 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11057 RgSchCmnLcg *lcgCmn = RG_SCH_CMN_GET_UL_LCG(lcg);
/* Nothing to do if the LCG scheduler context was never allocated */
11059 if (lcgCmn == NULLP)
/* De-register the GBR status of this LCG with MAC */
11064 if (RGSCH_IS_GBR_BEARER(lcgCmn->cfgdGbr))
11066 /* Indicate MAC that this LCG is GBR LCG */
11067 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcg->lcgId, FALSE);
11071 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
11073 rgSCHCmnSpsUlLcgDel(cell, ue, lcg);
11075 #endif /* LTEMAC_SPS */
/* Zero out rate/BS state; the control block itself is freed at UE delete */
11077 lcgCmn->effGbr = 0;
11078 lcgCmn->reportedBs = 0;
11079 lcgCmn->cfgdGbr = 0;
11080 /* set lcg bs to 0. Deletion of control block happens
11081 * at the time of UE deletion. */
11084 if(TRUE == ue->isEmtcUe)
11086 cellSch->apisEmtcUl->rgSCHFreeUlLcg(cell, ue, lcg);
11091 cellSch->apisUl->rgSCHFreeUlLcg(cell, ue, lcg);
11098 * @brief This function deletes a service from scheduler.
11102 * Function: rgSCHCmnFreeDlLc
11103 * Purpose: This function is made available through a FP for
11104 * making scheduler aware of a service being deleted from UE.
11106 * Invoked by: BO and Scheduler
11108 * @param[in] RgSchCellCb* cell
11109 * @param[in] RgSchUeCb* ue
11110 * @param[in] RgSchDlLcCb* svc
11114 Void rgSCHCmnFreeDlLc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlLcCb *svc)
11116 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Service context never allocated -- nothing to release */
11117 if (svc->sch == NULLP)
/* Detach the service from the DL scheduler (eMTC table for eMTC UEs) */
11122 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
11124 cellSch->apisEmtcDl->rgSCHFreeDlLc(cell, ue, svc);
11129 cellSch->apisDl->rgSCHFreeDlLc(cell, ue, svc);
11135 rgSCHSCellDlLcDel(cell, ue, svc);
11140 /* If SPS service, invoke SPS module */
11141 if (svc->dlLcSpsCfg.isSpsEnabled)
11143 rgSCHCmnSpsDlLcDel(cell, ue, svc);
11147 /* ccpu00117052 - MOD - Passing double pointer
11148 for proper NULLP assignment*/
11149 rgSCHUtlFreeSBuf(cell->instIdx,
11150 (Data**)(&(svc->sch)), (sizeof(RgSchCmnDlSvc)));
11153 rgSCHLaaDeInitDlLchCb(cell, svc);
11162 * @brief This function Processes the Final Allocations
11163 * made by the RB Allocator against the requested
11164 * CCCH SDURetx Allocations.
11168 * Function: rgSCHCmnDlCcchSduRetxFnlz
11169 * Purpose: This function Processes the Final Allocations
11170 * made by the RB Allocator against the requested
11171 * CCCH Retx Allocations.
11172 * Scans through the scheduled list of ccchSdu retrans
11173 * fills the corresponding pdcch, adds the hqProc to
11174 * the corresponding SubFrm and removes the hqP from
11177 * Invoked by: Common Scheduler
11179 * @param[in] RgSchCellCb *cell
11180 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
11184 static Void rgSCHCmnDlCcchSduRetxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11187 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11188 RgSchDlRbAlloc *rbAllocInfo;
11189 RgSchDlHqProcCb *hqP;
11192 /* Traverse through the Scheduled Retx List */
11193 node = allocInfo->ccchSduAlloc.schdCcchSduRetxLst.first;
11196 hqP = (RgSchDlHqProcCb *)(node->node);
11198 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
/* Commit the allocation: fill PDCCH and attach HARQ proc to the subframe */
11200 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11202 /* Remove the HqP from cell's ccchSduRetxLst */
11203 cmLListDelFrm(&cmnCellDl->ccchSduRetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
11204 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
11206 /* Fix: syed dlAllocCb reset should be performed.
11207 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11208 rgSCHCmnDlUeResetTemp(ue, hqP);
/* Non-scheduled retx list: allocations that were not granted this TTI;
 * only the temporary UE allocation state is cleared */
11210 /* Fix: syed dlAllocCb reset should be performed.
11211 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11212 node = allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst.first;
11215 hqP = (RgSchDlHqProcCb *)(node->node);
11218 /* reset the UE allocation Information */
11219 rgSCHCmnDlUeResetTemp(ue, hqP);
11225 * @brief This function Processes the Final Allocations
11226 * made by the RB Allocator against the requested
11227 * CCCH Retx Allocations.
11231 * Function: rgSCHCmnDlCcchRetxFnlz
11232 * Purpose: This function Processes the Final Allocations
11233 * made by the RB Allocator against the requested
11234 * CCCH Retx Allocations.
11235 * Scans through the scheduled list of msg4 retrans
11236 * fills the corresponding pdcch, adds the hqProc to
11237 * the corresponding SubFrm and removes the hqP from
11240 * Invoked by: Common Scheduler
11242 * @param[in] RgSchCellCb *cell
11243 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
11247 static Void rgSCHCmnDlCcchRetxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11250 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11251 RgSchDlRbAlloc *rbAllocInfo;
11252 RgSchDlHqProcCb *hqP;
11255 /* Traverse through the Scheduled Retx List */
11256 node = allocInfo->msg4Alloc.schdMsg4RetxLst.first;
11259 hqP = (RgSchDlHqProcCb *)(node->node);
11260 raCb = hqP->hqE->raCb;
11261 rbAllocInfo = &raCb->rbAllocInfo;
/* Commit the allocation: fill PDCCH and attach HARQ proc to the subframe */
11263 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11265 /* Remove the HqP from cell's msg4RetxLst */
11266 cmLListDelFrm(&cmnCellDl->msg4RetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
11267 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
11268 /* Fix: syed dlAllocCb reset should be performed.
11269 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11270 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
11271 rgSCHCmnDlHqPResetTemp(hqP);
/* Non-scheduled retx list: clear temporary allocation state only */
11273 /* Fix: syed dlAllocCb reset should be performed.
11274 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11275 node = allocInfo->msg4Alloc.nonSchdMsg4RetxLst.first;
11278 hqP = (RgSchDlHqProcCb *)(node->node);
11279 raCb = hqP->hqE->raCb;
11281 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
11282 rgSCHCmnDlHqPResetTemp(hqP);
11289 * @brief This function Processes the Final Allocations
11290 * made by the RB Allocator against the requested
11291 * CCCH SDU tx Allocations.
11295 * Function: rgSCHCmnDlCcchSduTxFnlz
11296 * Purpose: This function Processes the Final Allocations
11297 * made by the RB Allocator against the requested
11298 * CCCH tx Allocations.
11299 * Scans through the scheduled list of CCCH SDU trans
11300 * fills the corresponding pdcch, adds the hqProc to
11301 * the corresponding SubFrm and removes the hqP from
11304 * Invoked by: Common Scheduler
11306 * @param[in] RgSchCellCb *cell
11307 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
11311 static Void rgSCHCmnDlCcchSduTxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11315 RgSchDlRbAlloc *rbAllocInfo;
11316 RgSchDlHqProcCb *hqP;
11317 RgSchLchAllocInfo lchSchdData;
11319 /* Traverse through the Scheduled Retx List */
11320 node = allocInfo->ccchSduAlloc.schdCcchSduTxLst.first;
11323 hqP = (RgSchDlHqProcCb *)(node->node);
11324 ueCb = hqP->hqE->ue;
11326 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
11328 /* fill the pdcch and HqProc */
11329 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11331 /* Remove the raCb from cell's toBeSchdLst */
11332 cmLListDelFrm(&cell->ccchSduUeLst, &ueCb->ccchSduLnk);
11333 ueCb->ccchSduLnk.node = (PTR)NULLP;
11335 /* Fix : Resetting this required to avoid complication
11336 * in reestablishment case */
11337 ueCb->dlCcchInfo.bo = 0;
11339 /* Indicate DHM of the CCCH LC scheduling */
/* CCCH SDU tx carries no contention-resolution CE; scheduled bytes
 * exclude the MSG4 MAC header */
11340 hqP->tbInfo[0].contResCe = NOTPRSNT;
11341 lchSchdData.lcId = 0;
11342 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
11343 (RGSCH_MSG4_HDRSIZE);
11344 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
11346 /* Fix: syed dlAllocCb reset should be performed.
11347 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11348 rgSCHCmnDlUeResetTemp(ueCb, hqP);
/* Non-scheduled tx list: release the HARQ proc and clear temp state */
11350 /* Fix: syed dlAllocCb reset should be performed.
11351 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11352 node = allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst.first;
11355 hqP = (RgSchDlHqProcCb *)(node->node);
11356 ueCb = hqP->hqE->ue;
11358 /* Release HqProc */
11359 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
11360 /*Fix: Removing releasing of TB1 as it will not exist for CCCH SDU and hence caused a crash*/
11361 /*rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
11362 /* reset the UE allocation Information */
11363 rgSCHCmnDlUeResetTemp(ueCb, hqP);
11370 * @brief This function Processes the Final Allocations
11371 * made by the RB Allocator against the requested
11372 * CCCH tx Allocations.
11376 * Function: rgSCHCmnDlCcchTxFnlz
11377 * Purpose: This function Processes the Final Allocations
11378 * made by the RB Allocator against the requested
11379 * CCCH tx Allocations.
11380 * Scans through the scheduled list of msg4 trans
11381 * fills the corresponding pdcch, adds the hqProc to
11382 * the corresponding SubFrm and removes the hqP from
11385 * Invoked by: Common Scheduler
11387 * @param[in] RgSchCellCb *cell
11388 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
11392 static Void rgSCHCmnDlCcchTxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11396 RgSchDlRbAlloc *rbAllocInfo;
11397 RgSchDlHqProcCb *hqP;
11398 RgSchLchAllocInfo lchSchdData;
11400 /* Traverse through the Scheduled Retx List */
11401 node = allocInfo->msg4Alloc.schdMsg4TxLst.first;
11404 hqP = (RgSchDlHqProcCb *)(node->node);
11405 raCb = hqP->hqE->raCb;
11407 rbAllocInfo = &raCb->rbAllocInfo;
11409 /* fill the pdcch and HqProc */
11410 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11411 /* MSG4 Fix Start */
11413 rgSCHRamRmvFrmRaInfoSchdLst(cell, raCb);
11416 /* Indicate DHM of the CCCH LC scheduling */
/* Scheduled bytes exclude the MSG4 MAC header and the contention
 * resolution CE size */
11417 lchSchdData.lcId = 0;
11418 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
11419 (RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE);
11420 /* Transmitting presence of cont Res CE across MAC-SCH interface to
11421 * identify CCCH SDU transmissions which need to be done
11423 * contention resolution CE*/
11424 hqP->tbInfo[0].contResCe = PRSNT_NODEF;
11425 /*Dont add lc if only cont res CE is being transmitted*/
11426 if(raCb->dlCcchInfo.bo)
11428 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
11433 /* Fix: syed dlAllocCb reset should be performed.
11434 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11435 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
11436 rgSCHCmnDlHqPResetTemp(hqP);
/* Non-scheduled tx list: release the HARQ proc and clear temp state */
11438 node = allocInfo->msg4Alloc.nonSchdMsg4TxLst.first;
11441 hqP = (RgSchDlHqProcCb *)(node->node);
11442 raCb = hqP->hqE->raCb;
11444 rbAllocInfo = &raCb->rbAllocInfo;
11445 /* Release HqProc */
11446 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
11447 /*Fix: Removing releasing of TB1 as it will not exist for MSG4 and hence caused a crash*/
11448 /* rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
11449 /* reset the UE allocation Information */
11450 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
11451 rgSCHCmnDlHqPResetTemp(hqP);
11458 * @brief This function calculates the BI Index to be sent in the Bi header
11462 * Function: rgSCHCmnGetBiIndex
11463 * Purpose: This function utilizes the previously calculated BI time value
11464 * and the difference between the last BI sent time and the current time to
11465 * calculate the latest BI Index. It also considers the how many UE's
11466 * Unserved in this subframe.
11468 * Invoked by: Common Scheduler
11470 * @param[in] RgSchCellCb *cell
11471 * @param[in] uint32_t ueCount
11475 uint8_t rgSCHCmnGetBiIndex(RgSchCellCb *cell,uint32_t ueCount)
11477 S16 prevVal = 0; /* To Store Intermediate Value */
11478 uint16_t newBiVal = 0; /* To store Bi Value in millisecond */
11480 uint16_t timeDiff = 0;
/* If a BI was sent before, discount the time elapsed since it was sent
 * from the previously computed BI value */
11483 if (cell->biInfo.prevBiTime != 0)
11486 if(cell->emtcEnable == TRUE)
11488 timeDiff =(RGSCH_CALC_SF_DIFF_EMTC(cell->crntTime, cell->biInfo.biTime));
11493 timeDiff =(RGSCH_CALC_SF_DIFF(cell->crntTime, cell->biInfo.biTime));
11496 prevVal = cell->biInfo.prevBiTime - timeDiff;
/* Derive the new BI value (ms) from the residual value and the number of
 * unserved UEs in this subframe */
11502 newBiVal = RG_SCH_CMN_GET_BI_VAL(prevVal,ueCount);
11503 /* To be used next time when BI is calculated */
11505 if(cell->emtcEnable == TRUE)
11507 RGSCHCPYTIMEINFO_EMTC(cell->crntTime, cell->biInfo.biTime)
11512 RGSCHCPYTIMEINFO(cell->crntTime, cell->biInfo.biTime)
11515 /* Search the actual BI Index from table Backoff Parameters Value and
11516 * return that Index */
11519 if (rgSchCmnBiTbl[idx] > newBiVal)
11524 }while(idx < RG_SCH_CMN_NUM_BI_VAL-1);
/* Remember the table value actually chosen for the next computation */
11525 cell->biInfo.prevBiTime = rgSchCmnBiTbl[idx];
11526 /* For 16 Entries in Table 7.2.1 36.321.880 - 3 reserved so total 13 Entries */
11527 return (idx); /* Returning reserved value from table UE treats it as 960 ms */
11532 * @brief This function Processes the Final Allocations
11533 * made by the RB Allocator against the requested
11534 * RAR allocations. Assumption: The requested
11535 * allocations are always satisfied completely.
11536 * Hence no roll back.
11540 * Function: rgSCHCmnDlRaRspFnlz
11541 * Purpose: This function Processes the Final Allocations
11542 * made by the RB Allocator against the requested.
11543 * Takes care of PDCCH filling.
11545 * Invoked by: Common Scheduler
11547 * @param[in] RgSchCellCb *cell
11548 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
11552 static Void rgSCHCmnDlRaRspFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11554 uint32_t rarCnt = 0;
11555 RgSchDlRbAlloc *raRspAlloc;
11556 RgSchDlSf *subFrm = NULLP;
11560 RgSchRaReqInfo *raReq;
11562 RgSchUlAlloc *ulAllocRef=NULLP;
11563 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11564 uint8_t allocRapidCnt = 0;
11566 uint32_t msg3SchdIdx = 0;
11567 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
11568 uint8_t msg3Subfrm;
11572 for (rarCnt=0; rarCnt<RG_SCH_CMN_MAX_CMN_PDCCH; rarCnt++)
11574 raRspAlloc = &allocInfo->raRspAlloc[rarCnt];
11575 /* Having likely condition first for optimization */
11576 if (!raRspAlloc->pdcch)
11582 subFrm = raRspAlloc->dlSf;
11583 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
11584 /* Corrected RACH handling for multiple RAPIDs per RARNTI */
11585 allocRapidCnt = raRspAlloc->numRapids;
11586 while (allocRapidCnt)
11588 raReq = (RgSchRaReqInfo *)(reqLst->first->node);
11589 /* RACHO: If dedicated preamble, then allocate UL Grant
11590 * (consequence of handover/pdcchOrder) and continue */
11591 if (RGSCH_IS_DEDPRM(cell, raReq->raReq.rapId))
11593 rgSCHCmnHdlHoPo(cell, &subFrm->raRsp[rarCnt].contFreeUeLst,
11595 cmLListDelFrm(reqLst, reqLst->first);
11597 /* ccpu00117052 - MOD - Passing double pointer
11598 for proper NULLP assignment*/
11599 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11600 sizeof(RgSchRaReqInfo));
11604 if(cell->overLoadBackOffEnab)
11605 {/* rach Overlaod conrol is triggerd, Skipping this rach */
11606 cmLListDelFrm(reqLst, reqLst->first);
11608 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11609 sizeof(RgSchRaReqInfo));
11612 /* Attempt to include each RA request into the RSP */
11613 /* Any failure in the procedure is considered to */
11614 /* affect futher allocations in the same TTI. When */
11615 /* a failure happens, we break out and complete */
11616 /* the processing for random access */
11617 if (rgSCHRamCreateRaCb(cell, &raCb, &err) != ROK)
11621 /* Msg3 allocation request to USM */
11622 if (raReq->raReq.rapId < cell->rachCfg.sizeRaPreambleGrpA)
11626 /*ccpu00128820 - MOD - Msg3 alloc double delete issue*/
11627 rgSCHCmnMsg3GrntReq(cell, raCb->tmpCrnti, preamGrpA, \
11628 &(raCb->msg3HqProc), &ulAllocRef, &raCb->msg3HqProcId);
11629 if (ulAllocRef == NULLP)
11631 rgSCHRamDelRaCb(cell, raCb, TRUE);
11634 if (raReq->raReq.cqiPres)
11636 raCb->ccchCqi = raReq->raReq.cqiIdx;
11640 raCb->ccchCqi = cellDl->ccchCqi;
11642 raCb->rapId = raReq->raReq.rapId;
11643 raCb->ta.pres = TRUE;
11644 raCb->ta.val = raReq->raReq.ta;
11645 raCb->msg3Grnt = ulAllocRef->grnt;
11646 /* Populating the tpc value received */
11647 raCb->msg3Grnt.tpc = raReq->raReq.tpc;
11648 /* PHR handling for MSG3 */
11649 ulAllocRef->raCb = raCb;
11651 /* To the crntTime, add the MIN time at which UE will
11652 * actually send MSG3 i.e DL_DELTA+6 */
11653 raCb->msg3AllocTime = cell->crntTime;
11654 RGSCH_INCR_SUB_FRAME(raCb->msg3AllocTime, RG_SCH_CMN_MIN_MSG3_RECP_INTRVL);
11656 msg3SchdIdx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) %
11657 RGSCH_NUM_SUB_FRAMES;
11658 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
11659 special subframe */
11660 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][msg3SchdIdx] !=
11661 RG_SCH_TDD_UL_SUBFRAME)
11663 RGSCHCMNADDTOCRNTTIME(cell->crntTime,raCb->msg3AllocTime,
11664 RG_SCH_CMN_DL_DELTA)
11665 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][
11666 raCb->msg3AllocTime.slot];
11667 RGSCHCMNADDTOCRNTTIME(raCb->msg3AllocTime, raCb->msg3AllocTime,
11671 cmLListAdd2Tail(&subFrm->raRsp[rarCnt].raRspLst, &raCb->rspLnk);
11672 raCb->rspLnk.node = (PTR)raCb;
11673 cmLListDelFrm(reqLst, reqLst->first);
11675 /* ccpu00117052 - MOD - Passing double pointer
11676 for proper NULLP assignment*/
11677 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11678 sizeof(RgSchRaReqInfo));
11680 /* SR_RACH_STATS : RAR scheduled */
11685 /* Fill subframe data members */
11686 subFrm->raRsp[rarCnt].raRnti = raRspAlloc->rnti;
11687 subFrm->raRsp[rarCnt].pdcch = raRspAlloc->pdcch;
11688 subFrm->raRsp[rarCnt].tbSz = raRspAlloc->tbInfo[0].bytesAlloc;
11689 /* Fill PDCCH data members */
11690 rgSCHCmnFillPdcch(cell, subFrm->raRsp[rarCnt].pdcch, raRspAlloc);
11693 if(cell->overLoadBackOffEnab)
11694 {/* rach Overlaod conrol is triggerd, Skipping this rach */
11695 subFrm->raRsp[rarCnt].backOffInd.pres = PRSNT_NODEF;
11696 subFrm->raRsp[rarCnt].backOffInd.val = cell->overLoadBackOffval;
11701 subFrm->raRsp[rarCnt].backOffInd.pres = NOTPRSNT;
11704 /*[ccpu00125212] Avoiding sending of empty RAR in case of RAR window
11705 is short and UE is sending unauthorised preamble.*/
11706 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
11707 if ((raRspAlloc->biEstmt) && (reqLst->count))
11709 subFrm->raRsp[0].backOffInd.pres = PRSNT_NODEF;
11710 /* Added as part of Upgrade */
11711 subFrm->raRsp[0].backOffInd.val =
11712 rgSCHCmnGetBiIndex(cell, reqLst->count);
11714 /* SR_RACH_STATS : Back Off Inds */
11718 else if ((subFrm->raRsp[rarCnt].raRspLst.first == NULLP) &&
11719 (subFrm->raRsp[rarCnt].contFreeUeLst.first == NULLP))
11721 /* Return the grabbed PDCCH */
11722 rgSCHUtlPdcchPut(cell, &subFrm->pdcchInfo, raRspAlloc->pdcch);
11723 subFrm->raRsp[rarCnt].pdcch = NULLP;
11724 DU_LOG("\nERROR --> SCH : rgSCHCmnRaRspAlloc(): "
11725 "Not even one RaReq.");
11729 DU_LOG("\nDEBUG --> SCH : RNTI:%d Scheduled RAR @ (%u,%u) ",
11731 cell->crntTime.sfn,
11732 cell->crntTime.slot);
11738 * @brief This function computes rv.
11742 * Function: rgSCHCmnDlCalcRvForBcch
11743 * Purpose: This function computes rv.
11745 * Invoked by: Common Scheduler
11747 * @param[in] RgSchCellCb *cell
11748 * @param[in] Bool si
11749 * @param[in] uint16_t i
11753 static uint8_t rgSCHCmnDlCalcRvForBcch(RgSchCellCb *cell,Bool si,uint16_t i)
11756 CmLteTimingInfo frm;
11758 frm = cell->crntTime;
11759 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
11767 k = (frm.sfn/2) % 4;
11769 rv = RGSCH_CEIL(3*k, 2) % 4;
11774 * @brief This function Processes the Final Allocations
11775 * made by the RB Allocator against the requested
11776 * BCCH/PCCH allocations. Assumption: The requested
11777 * allocations are always satisfied completely.
11778 * Hence no roll back.
11782 * Function: rgSCHCmnDlBcchPcchFnlz
11783 * Purpose: This function Processes the Final Allocations
11784 * made by the RB Allocator against the requested.
11785 * Takes care of PDCCH filling.
11787 * Invoked by: Common Scheduler
11789 * @param[in] RgSchCellCb *cell
11790 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
11794 static Void rgSCHCmnDlBcchPcchFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11796 RgSchDlRbAlloc *rbAllocInfo;
11800 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
11802 #ifdef LTEMAC_HDFDD
11803 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11805 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11809 /* Moving variables to available scope for optimization */
11810 RgSchClcDlLcCb *pcch;
11813 RgSchClcDlLcCb *bcch;
11816 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11820 rbAllocInfo = &allocInfo->pcchAlloc;
11821 if (rbAllocInfo->pdcch)
11823 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
11825 /* Added sfIdx calculation for TDD as well */
11827 #ifdef LTEMAC_HDFDD
11828 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11830 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11833 subFrm = rbAllocInfo->dlSf;
11834 pcch = rgSCHDbmGetPcch(cell);
11837 DU_LOG("\nERROR --> SCH : rgSCHCmnDlBcchPcchFnlz( ): "
11838 "No Pcch Present");
11842 /* Added Dl TB count for paging message transmission*/
11844 cell->dlUlTbCnt.tbTransDlTotalCnt++;
11846 bo = (RgSchClcBoRpt *)pcch->boLst.first->node;
11847 cmLListDelFrm(&pcch->boLst, &bo->boLstEnt);
11848 /* ccpu00117052 - MOD - Passing double pointer
11849 for proper NULLP assignment*/
11850 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
11851 /* Fill subframe data members */
11852 subFrm->pcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
11853 subFrm->pcch.pdcch = rbAllocInfo->pdcch;
11854 /* Fill PDCCH data members */
11855 rgSCHCmnFillPdcch(cell, subFrm->pcch.pdcch, rbAllocInfo);
11856 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, pcch->lcId, TRUE);
11857 /* ccpu00132314-ADD-Update the tx power allocation info
11858 TODO-Need to add a check for max tx power per symbol */
11859 subfrmAlloc->cmnLcInfo.pcchInfo.txPwrOffset = cellDl->pcchTxPwrOffset;
11863 rbAllocInfo = &allocInfo->bcchAlloc;
11864 if (rbAllocInfo->pdcch)
11866 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
11868 #ifdef LTEMAC_HDFDD
11869 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11871 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11874 subFrm = rbAllocInfo->dlSf;
11876 /* Fill subframe data members */
11877 subFrm->bcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
11878 subFrm->bcch.pdcch = rbAllocInfo->pdcch;
11879 /* Fill PDCCH data members */
11880 rgSCHCmnFillPdcch(cell, subFrm->bcch.pdcch, rbAllocInfo);
11882 if(rbAllocInfo->schdFirst)
11885 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
11886 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
11888 /*Copy the SIB1 msg buff into interface buffer */
11889 SCpyMsgMsg(cell->siCb.crntSiInfo.sib1Info.sib1,
11890 rgSchCb[cell->instIdx].rgSchInit.region,
11891 rgSchCb[cell->instIdx].rgSchInit.pool,
11892 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11893 #endif/*RGR_SI_SCH*/
11894 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
11895 rgSCHCmnDlCalcRvForBcch(cell, FALSE, 0);
11903 i = cell->siCb.siCtx.i;
11904 /*Decrement the retransmission count */
11905 cell->siCb.siCtx.retxCntRem--;
11907 /*Copy the SI msg buff into interface buffer */
11908 if(cell->siCb.siCtx.warningSiFlag == FALSE)
11910 SCpyMsgMsg(cell->siCb.siArray[cell->siCb.siCtx.siId-1].si,
11911 rgSchCb[cell->instIdx].rgSchInit.region,
11912 rgSchCb[cell->instIdx].rgSchInit.pool,
11913 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11917 pdu = rgSCHUtlGetWarningSiPdu(cell);
11918 RGSCH_NULL_CHECK(cell->instIdx, pdu);
11920 rgSchCb[cell->instIdx].rgSchInit.region,
11921 rgSchCb[cell->instIdx].rgSchInit.pool,
11922 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11923 if(cell->siCb.siCtx.retxCntRem == 0)
11925 rgSCHUtlFreeWarningSiPdu(cell);
11926 cell->siCb.siCtx.warningSiFlag = FALSE;
11931 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
11932 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
11934 if(bo->retxCnt != cell->siCfg.retxCnt-1)
11939 #endif/*RGR_SI_SCH*/
11940 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
11941 rgSCHCmnDlCalcRvForBcch(cell, TRUE, i);
11944 /* Added Dl TB count for SIB1 and SI messages transmission.
11945 * This counter will be incremented only for the first transmission
11946 * (with RV 0) of these messages*/
11948 if(subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv == 0)
11950 cell->dlUlTbCnt.tbTransDlTotalCnt++;
11954 if(bo->retxCnt == 0)
11956 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
11957 /* ccpu00117052 - MOD - Passing double pointer
11958 for proper NULLP assignment*/
11959 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
11961 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, bcch->lcId, sendInd);
11963 /*Fill the interface info */
11964 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, NULLD, NULLD);
11966 /* ccpu00132314-ADD-Update the tx power allocation info
11967 TODO-Need to add a check for max tx power per symbol */
11968 subfrmAlloc->cmnLcInfo.bcchInfo.txPwrOffset = cellDl->bcchTxPwrOffset;
11970 /*mBuf has been already copied above */
11971 #endif/*RGR_SI_SCH*/
11984 * Function: rgSCHCmnUlSetAllUnSched
11987 * Invoked by: Common Scheduler
11989 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
11993 static Void rgSCHCmnUlSetAllUnSched(RgSchCmnUlRbAllocInfo *allocInfo)
11998 node = allocInfo->contResLst.first;
12001 rgSCHCmnUlMov2NonSchdCntResLst(allocInfo, (RgSchUeCb *)node->node);
12002 node = allocInfo->contResLst.first;
12005 node = allocInfo->retxUeLst.first;
12008 rgSCHCmnUlMov2NonSchdRetxUeLst(allocInfo, (RgSchUeCb *)node->node);
12009 node = allocInfo->retxUeLst.first;
12012 node = allocInfo->ueLst.first;
12015 rgSCHCmnUlMov2NonSchdUeLst(allocInfo, (RgSchUeCb *)node->node);
12016 node = allocInfo->ueLst.first;
12028 * Function: rgSCHCmnUlAdd2CntResLst
12031 * Invoked by: Common Scheduler
12033 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12034 * @param[in] RgSchUeCb *ue
12038 Void rgSCHCmnUlAdd2CntResLst(RgSchCmnUlRbAllocInfo *allocInfo,RgSchUeCb *ue)
12040 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,ue->cell))->alloc);
12041 cmLListAdd2Tail(&allocInfo->contResLst, &ulAllocInfo->reqLnk);
12042 ulAllocInfo->reqLnk.node = (PTR)ue;
12051 * Function: rgSCHCmnUlAdd2UeLst
12054 * Invoked by: Common Scheduler
12056 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12057 * @param[in] RgSchUeCb *ue
12061 Void rgSCHCmnUlAdd2UeLst(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUeCb *ue)
12063 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,cell))->alloc);
12064 if (ulAllocInfo->reqLnk.node == NULLP)
12066 cmLListAdd2Tail(&allocInfo->ueLst, &ulAllocInfo->reqLnk);
12067 ulAllocInfo->reqLnk.node = (PTR)ue;
12077 * Function: rgSCHCmnAllocUlRb
12078 * Purpose: To do RB allocations for uplink
12080 * Invoked by: Common Scheduler
12082 * @param[in] RgSchCellCb *cell
12083 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12086 Void rgSCHCmnAllocUlRb(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo)
12088 RgSchUlSf *sf = allocInfo->sf;
12090 /* Schedule for new transmissions */
12091 rgSCHCmnUlRbAllocForLst(cell, sf, allocInfo->ueLst.count,
12092 &allocInfo->ueLst, &allocInfo->schdUeLst,
12093 &allocInfo->nonSchdUeLst, (Bool)TRUE);
12097 /***********************************************************
12099 * Func : rgSCHCmnUlRbAllocForLst
12101 * Desc : Allocate for a list in cmn rb alloc information passed
12110 **********************************************************/
12111 static Void rgSCHCmnUlRbAllocForLst
12117 CmLListCp *schdLst,
12118 CmLListCp *nonSchdLst,
12127 CmLteTimingInfo timeInfo;
12131 if(schdLst->count == 0)
12133 cmLListInit(schdLst);
12136 cmLListInit(nonSchdLst);
12138 if(isNewTx == TRUE)
12140 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.numUes = (uint8_t) count;
12142 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime, timeInfo, TFU_ULCNTRL_DLDELTA);
12143 k = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][timeInfo.subframe];
12144 RG_SCH_ADD_TO_CRNT_TIME(timeInfo,
12145 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo, k);
12147 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime,cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo,
12148 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
12153 for (lnk = reqLst->first; count; lnk = lnk->next, --count)
12155 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
12156 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
12161 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
12166 ueUl->subbandShare = ueUl->subbandRequired;
12167 if(isNewTx == TRUE)
12169 maxRb = RGSCH_MIN((ueUl->subbandRequired * MAX_5GTF_VRBG_SIZE), ue->ue5gtfCb.maxPrb);
12171 ret = rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole);
12174 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, schdLst);
12175 rgSCHCmnUlUeFillAllocInfo(cell, ue);
12179 gUl5gtfRbAllocFail++;
12180 #if defined (TENB_STATS) && defined (RG_5GTF)
12181 cell->tenbStats->sch.ul5gtfRbAllocFail++;
12183 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
12184 ue->isMsg4PdcchWithCrnti = FALSE;
12185 ue->isSrGrant = FALSE;
12188 if(isNewTx == TRUE)
12190 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
12191 ulAllocInfo[count - 1].rnti = ue->ueId;
12192 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
12193 ulAllocInfo[count - 1].numPrb = ue->ul.nPrb;
12196 ueUl->subbandShare = 0; /* This reset will take care of
12197 * all scheduler types */
12199 for (; count; lnk = lnk->next, --count)
12201 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
12202 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
12203 ue->isMsg4PdcchWithCrnti = FALSE;
12210 /***********************************************************
12212 * Func : rgSCHCmnUlMdfyGrntForCqi
12214 * Desc : Modify UL Grant to consider presence of
12215 * CQI along with PUSCH Data.
12220 * - Scale down iTbs based on betaOffset and
12221 * size of Acqi Size.
12222 * - Optionally attempt to increase numSb by 1
12223 * if input payload size does not fit in due
12224 * to reduced tbSz as a result of iTbsNew.
12228 **********************************************************/
12229 static S16 rgSCHCmnUlMdfyGrntForCqi
12237 uint32_t stepDownItbs,
12241 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(ue->cell);
12246 uint32_t remREsForPusch;
12247 uint32_t bitsPerRe;
12249 uint32_t betaOffVal = ue->ul.betaOffstVal;
12250 uint32_t cqiRiRptSz = ue->ul.cqiRiSz;
12251 uint32_t betaOffHqVal = rgSchCmnBetaHqOffstTbl[ue->ul.betaHqOffst];
12252 uint32_t resNumSb = *numSb;
12253 uint32_t puschEff = 1000;
12256 Bool mdfyiTbsFlg = FALSE;
12257 uint8_t resiTbs = *iTbs;
12263 iMcs = rgSCHCmnUlGetIMcsFrmITbs(resiTbs, RG_SCH_CMN_GET_UE_CTGY(ue));
12264 RG_SCH_UL_MCS_TO_MODODR(iMcs, modOdr);
12265 if (RG_SCH_CMN_GET_UE_CTGY(ue) != CM_LTE_UE_CAT_5)
12267 modOdr = RGSCH_MIN(RGSCH_QM_QPSK, modOdr);
12271 modOdr = RGSCH_MIN(RGSCH_QM_64QAM, modOdr);
12273 nPrb = resNumSb * cellUl->sbSize;
12274 /* Restricting the minumum iTbs requried to modify to 10 */
12275 if ((nPrb >= maxRb) && (resiTbs <= 10))
12277 /* Could not accomodate ACQI */
12280 totREs = nPrb * RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
12281 tbSz = rgTbSzTbl[0][resiTbs][nPrb-1];
12282 /* totalREs/tbSz = num of bits perRE. */
12283 cqiRiREs = (totREs * betaOffVal * cqiRiRptSz)/(1000 * tbSz); /* betaOffVal is represented
12284 as parts per 1000 */
12285 hqREs = (totREs * betaOffHqVal * hqSz)/(1000 * tbSz);
12286 if ((cqiRiREs + hqREs) < totREs)
12288 remREsForPusch = totREs - cqiRiREs - hqREs;
12289 bitsPerRe = (tbSz * 1000)/remREsForPusch; /* Multiplying by 1000 for Interger Oper */
12290 puschEff = bitsPerRe/modOdr;
12292 if (puschEff < effTgt)
12294 /* ensure resultant efficiency for PUSCH Data is within 0.93*/
12299 /* Alternate between increasing SB or decreasing iTbs until eff is met */
12300 if (mdfyiTbsFlg == FALSE)
12304 resNumSb = resNumSb + 1;
12306 mdfyiTbsFlg = TRUE;
12312 resiTbs-= stepDownItbs;
12314 mdfyiTbsFlg = FALSE;
12317 }while (1); /* Loop breaks if efficency is met
12318 or returns RFAILED if not able to meet the efficiency */
12327 /***********************************************************
12329 * Func : rgSCHCmnUlRbAllocForUe
12331 * Desc : Do uplink RB allocation for an UE.
12335 * Notes: Note that as of now, for retx, maxRb
12336 * is not considered. Alternatives, such
12337 * as dropping retx if it crosses maxRb
12338 * could be considered.
12342 **********************************************************/
12343 static S16 rgSCHCmnUlRbAllocForUe
12352 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
12353 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
12354 RgSchUlAlloc *alloc = NULLP;
12360 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->schdHqProcIdx];
12362 RgSchUlHqProcCb *proc = NULLP;
12366 uint8_t numVrbgTemp;
12368 TfuDciFormat dciFrmt;
12373 rgSCHUhmGetAvlHqProc(cell, ue, &proc);
12376 //DU_LOG("\nINFO --> SCH : UE [%d] HQ Proc unavailable\n", ue->ueId);
12381 if (ue->ue5gtfCb.rank == 2)
12383 dciFrmt = TFU_DCI_FORMAT_A2;
12388 dciFrmt = TFU_DCI_FORMAT_A1;
12391 /* 5gtf TODO : To pass dci frmt to this function */
12392 pdcch = rgSCHCmnPdcchAllocCrntSf(cell, ue);
12395 DU_LOG("\nDEBUG --> SCH : rgSCHCmnUlRbAllocForUe(): Could not get PDCCH for CRNTI:%d",ue->ueId);
12398 gUl5gtfPdcchSchd++;
12399 #if defined (TENB_STATS) && defined (RG_5GTF)
12400 cell->tenbStats->sch.ul5gtfPdcchSchd++;
12403 //TODO_SID using configured prb as of now
12404 nPrb = ue->ue5gtfCb.maxPrb;
12405 reqVrbg = nPrb/MAX_5GTF_VRBG_SIZE;
12406 iMcs = ue->ue5gtfCb.mcs; //gSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
12410 if((sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart > MAX_5GTF_VRBG)
12411 || (sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated > MAX_5GTF_VRBG))
12413 DU_LOG("\nINFO --> SCH : 5GTF_ERROR vrbg > 25 valstart = %d valalloc %d\n", sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart
12414 , sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated);
12419 /*TODO_SID: Workaround for alloc. Currently alloc is ulsf based. To handle multiple beams, we need a different
12420 design. Now alloc are formed based on MAX_5GTF_UE_SCH macro. */
12421 numVrbgTemp = MAX_5GTF_VRBG/MAX_5GTF_UE_SCH;
12424 alloc = rgSCHCmnUlSbAlloc(sf, numVrbgTemp,\
12427 if (alloc == NULLP)
12429 DU_LOG("\nERROR --> SCH : rgSCHCmnUlRbAllocForUe(): Could not get UlAlloc %d CRNTI:%d",numVrbg,ue->ueId);
12430 rgSCHCmnPdcchRlsCrntSf(cell, pdcch);
12433 gUl5gtfAllocAllocated++;
12434 #if defined (TENB_STATS) && defined (RG_5GTF)
12435 cell->tenbStats->sch.ul5gtfAllocAllocated++;
12437 alloc->grnt.vrbgStart = sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart;
12438 alloc->grnt.numVrbg = numVrbg;
12439 alloc->grnt.numLyr = numLyr;
12440 alloc->grnt.dciFrmt = dciFrmt;
12442 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart += numVrbg;
12443 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated += numVrbg;
12445 //rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
12447 sf->totPrb += alloc->grnt.numRb;
12448 ue->ul.nPrb = alloc->grnt.numRb;
12450 if (ue->csgMmbrSta != TRUE)
12452 cellUl->ncsgPrbCnt += alloc->grnt.numRb;
12454 cellUl->totPrbCnt += (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
12455 alloc->pdcch = pdcch;
12456 alloc->grnt.iMcs = iMcs;
12457 alloc->grnt.iMcsCrnt = iMcsCrnt;
12458 alloc->grnt.hop = 0;
12459 /* Initial Num RBs support for UCI on PUSCH */
12461 ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
12463 alloc->forMsg3 = FALSE;
12464 //RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTb5gtfSzTbl[0], (iTbs));
12466 //ueUl->alloc.allocdBytes = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
12467 /* TODO_SID Allocating based on configured MCS as of now.
12468 Currently for format A2. When doing multi grp per tti, need to update this. */
12469 ueUl->alloc.allocdBytes = (rgSch5gtfTbSzTbl[iMcs]/8) * ue->ue5gtfCb.rank;
12471 alloc->grnt.datSz = ueUl->alloc.allocdBytes;
12472 //TODO_SID Need to check mod order.
12473 RG_SCH_CMN_TBS_TO_MODODR(iMcs, alloc->grnt.modOdr);
12474 //alloc->grnt.modOdr = 6;
12475 alloc->grnt.isRtx = FALSE;
12477 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
12478 alloc->grnt.SCID = 0;
12479 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
12480 alloc->grnt.PMI = 0;
12481 alloc->grnt.uciOnxPUSCH = 0;
12482 alloc->grnt.hqProcId = proc->procId;
12484 alloc->hqProc = proc;
12485 alloc->hqProc->ulSfIdx = cellUl->schdIdx;
12487 /*commenting to retain the rnti used for transmission SPS/c-rnti */
12488 alloc->rnti = ue->ueId;
12489 ueUl->alloc.alloc = alloc;
12490 /*rntiwari-Adding the debug for generating the graph.*/
12491 /* No grant attr recorded now */
12495 /***********************************************************
12497 * Func : rgSCHCmnUlRbAllocAddUeToLst
12499 * Desc : Add UE to list (scheduled/non-scheduled list)
12500 * for UL RB allocation information.
12508 **********************************************************/
12509 Void rgSCHCmnUlRbAllocAddUeToLst(RgSchCellCb *cell,RgSchUeCb *ue,CmLListCp *lst)
12511 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
12514 gUl5gtfUeRbAllocDone++;
12515 #if defined (TENB_STATS) && defined (RG_5GTF)
12516 cell->tenbStats->sch.ul5gtfUeRbAllocDone++;
12518 cmLListAdd2Tail(lst, &ueUl->alloc.schdLstLnk);
12519 ueUl->alloc.schdLstLnk.node = (PTR)ue;
12524 * @brief This function Processes the Final Allocations
12525 * made by the RB Allocator against the requested.
12529 * Function: rgSCHCmnUlAllocFnlz
12530 * Purpose: This function Processes the Final Allocations
12531 * made by the RB Allocator against the requested.
12533 * Invoked by: Common Scheduler
12535 * @param[in] RgSchCellCb *cell
12536 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12540 static Void rgSCHCmnUlAllocFnlz(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo)
12542 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12544 /* call scheduler specific Finalization */
12545 cellSch->apisUl->rgSCHUlAllocFnlz(cell, allocInfo);
12551 * @brief This function Processes the Final Allocations
12552 * made by the RB Allocator against the requested.
12556 * Function: rgSCHCmnDlAllocFnlz
12557 * Purpose: This function Processes the Final Allocations
12558 * made by the RB Allocator against the requested.
12560 * Invoked by: Common Scheduler
12562 * @param[in] RgSchCellCb *cell
12566 Void rgSCHCmnDlAllocFnlz(RgSchCellCb *cell)
12568 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12569 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
12572 rgSCHCmnDlCcchRetxFnlz(cell, allocInfo);
12573 rgSCHCmnDlCcchTxFnlz(cell, allocInfo);
12575 /* Added below functions for handling CCCH SDU transmission received
12577 * * guard timer expiry*/
12578 rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo);
12579 rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo);
12581 rgSCHCmnDlRaRspFnlz(cell, allocInfo);
12582 /* call scheduler specific Finalization */
12583 cellSch->apisDl->rgSCHDlAllocFnlz(cell, allocInfo);
12585 /* Stack Crash problem for TRACE5 Changes. Added the return below */
12592 * @brief Update an uplink subframe.
12596 * Function : rgSCHCmnUlUpdSf
12598 * For each allocation
12599 * - if no more tx needed
12600 * - Release allocation
12602 * - Perform retransmission
12604 * @param[in] RgSchUlSf *sf
12607 static Void rgSCHCmnUlUpdSf(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUlSf *sf)
12611 while ((lnk = sf->allocs.first))
12613 RgSchUlAlloc *alloc = (RgSchUlAlloc *)lnk->node;
12616 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
12621 /* If need to handle all retx together, run another loop separately */
12622 rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc);
12624 rgSCHCmnUlRlsUlAlloc(cell, sf, alloc);
12627 /* By this time, all allocs would have been cleared and
12628 * SF is reset to be made ready for new allocations. */
12629 rgSCHCmnUlSfReset(cell, sf);
12630 /* In case there are timing problems due to msg3
12631 * allocations being done in advance, (which will
12632 * probably happen with the current FDD code that
12633 * handles 8 subframes) one solution
12634 * could be to hold the (recent) msg3 allocs in a separate
12635 * list, and then possibly add that to the actual
12636 * list later. So at this time while allocations are
12637 * traversed, the recent msg3 ones are not seen. Anytime after
12638 * this (a good time is when the usual allocations
12639 * are made), msg3 allocations could be transferred to the
12640 * normal list. Not doing this now as it is assumed
12641 * that incorporation of TDD shall take care of this.
12649 * @brief Handle uplink allocation for retransmission.
12653 * Function : rgSCHCmnUlHndlAllocRetx
12655 * Processing Steps:
12656 * - Add to queue for retx.
12657 * - Do not release here, release happens as part
12658 * of the loop that calls this function.
12660 * @param[in] RgSchCellCb *cell
12661 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12662 * @param[in] RgSchUlSf *sf
12663 * @param[in] RgSchUlAlloc *alloc
12666 static Void rgSCHCmnUlHndlAllocRetx(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUlSf *sf,RgSchUlAlloc *alloc)
12669 RgSchCmnUlUe *ueUl;
12671 rgTbSzTbl[0][rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs)]\
12672 [alloc->grnt.numRb-1]/8;
12673 if (!alloc->forMsg3)
12675 ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue);
12676 ueUl->alloc.reqBytes = bytes;
12677 rgSCHUhmRetx(alloc->hqProc);
12678 rgSCHCmnUlAdd2RetxUeLst(allocInfo, alloc->ue);
12682 /* RACHO msg3 retx handling. Part of RACH procedure changes. */
12683 retxAlloc = rgSCHCmnUlGetUlAlloc(cell, sf, alloc->numSb);
12684 if (retxAlloc == NULLP)
12686 DU_LOG("\nERROR --> SCH : rgSCHCmnUlRbAllocForUe():Could not get UlAlloc for msg3Retx RNTI:%d",
12690 retxAlloc->grnt.iMcs = alloc->grnt.iMcs;
12691 retxAlloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl\
12692 [alloc->hqProc->rvIdx];
12693 retxAlloc->grnt.nDmrs = 0;
12694 retxAlloc->grnt.hop = 0;
12695 retxAlloc->grnt.delayBit = 0;
12696 retxAlloc->rnti = alloc->rnti;
12697 retxAlloc->ue = NULLP;
12698 retxAlloc->pdcch = FALSE;
12699 retxAlloc->forMsg3 = TRUE;
12700 retxAlloc->raCb = alloc->raCb;
12701 retxAlloc->hqProc = alloc->hqProc;
12702 rgSCHUhmRetx(retxAlloc->hqProc);
12709 * @brief Uplink Scheduling Handler.
12713 * Function: rgSCHCmnUlAlloc
12714 * Purpose: This function Handles Uplink Scheduling.
12716 * Invoked by: Common Scheduler
12718 * @param[in] RgSchCellCb *cell
12721 /* ccpu00132653- The definition of this function made common for TDD and FDD*/
12722 static Void rgSCHCmnUlAlloc(RgSchCellCb *cell)
12724 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12725 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
12726 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12727 RgSchCmnUlRbAllocInfo allocInfo;
12728 RgSchCmnUlRbAllocInfo *allocInfoRef = &allocInfo;
12735 /* Initializing RgSchCmnUlRbAllocInfo structure */
12736 rgSCHCmnInitUlRbAllocInfo(allocInfoRef);
12738 /* Get Uplink Subframe */
12739 allocInfoRef->sf = &cellUl->ulSfArr[cellUl->schdIdx];
12741 /* initializing the UL PRB count */
12742 allocInfoRef->sf->totPrb = 0;
12746 rgSCHCmnSpsUlTti(cell, allocInfoRef);
12749 if(*allocInfoRef->sf->allocCountRef == 0)
12753 if ((hole = rgSCHUtlUlHoleFirst(allocInfoRef->sf)) != NULLP)
12755 /* Sanity check of holeDb */
12756 if (allocInfoRef->sf->holeDb->count == 1 && hole->start == 0)
12758 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
12759 /* Re-Initialize available subbands because of CFI change*/
12760 allocInfoRef->sf->availSubbands = cell->dynCfiCb.\
12761 bwInfo[cellDl->currCfi].numSb;
12762 /*Currently initializing 5gtf ulsf specific initialization here.
12763 need to do at proper place */
12765 allocInfoRef->sf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
12766 allocInfoRef->sf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
12767 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
12769 allocInfoRef->sf->sfBeamInfo[idx].totVrbgAllocated = 0;
12770 allocInfoRef->sf->sfBeamInfo[idx].totVrbgRequired = 0;
12771 allocInfoRef->sf->sfBeamInfo[idx].vrbgStart = 0;
12777 DU_LOG("\nERROR --> SCH : holeDb sanity check failed");
12782 /* Fix: Adaptive re-transmissions prioritised over other transmissions */
12783 /* perform adaptive retransmissions */
12784 rgSCHCmnUlSfReTxAllocs(cell, allocInfoRef->sf);
12788 /* Fix: syed Adaptive Msg3 Retx crash. Release all
12789 Harq processes for which adap Retx failed, to avoid
12790 blocking. This step should be done before New TX
12791 scheduling to make hqProc available. Right now we
12792 dont check if proc is in adap Retx list for considering
12793 it to be available. But now with this release that
12794 functionality would be correct. */
12796 rgSCHCmnUlSfRlsRetxProcs(cell, allocInfoRef->sf);
12799 /* Specific UL scheduler to perform UE scheduling */
12800 cellSch->apisUl->rgSCHUlSched(cell, allocInfoRef);
12802 /* Call UL RB allocator module */
12803 rgSCHCmnAllocUlRb(cell, allocInfoRef);
12805 /* Do group power control for PUSCH */
12806 rgSCHCmnGrpPwrCntrlPusch(cell, allocInfoRef->sf);
12808 cell->sc.apis->rgSCHDrxStrtInActvTmrInUl(cell);
12810 rgSCHCmnUlAllocFnlz(cell, allocInfoRef);
12811 if(5000 == g5gtfTtiCnt)
12813 ul5gtfsidDlAlreadyMarkUl = 0;
12814 ul5gtfsidDlSchdPass = 0;
12815 ul5gtfsidUlMarkUl = 0;
12816 ul5gtfTotSchdCnt = 0;
12824 * @brief send Subframe Allocations.
12828 * Function: rgSCHCmnSndCnsldtInfo
12829 * Purpose: Send the scheduled
12830 * allocations to MAC for StaInd generation to Higher layers and
12831 * for MUXing. PST's RgInfSfAlloc to MAC instance.
12833 * Invoked by: Common Scheduler
12835 * @param[in] RgSchCellCb *cell
12838 Void rgSCHCmnSndCnsldtInfo(RgSchCellCb *cell)
12840 RgInfSfAlloc *subfrmAlloc;
12842 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Pick the consolidated allocation entry for the current subframe index */
12845 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
12847 /* Send the allocations to MAC for MUXing */
12848 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
12849 subfrmAlloc->cellId = cell->cellId;
12850 /* Populate the List of UEs needing PDB-based Flow control */
12851 cellSch->apisDl->rgSCHDlFillFlwCtrlInfo(cell, subfrmAlloc);
/* Post the SfAlloc to MAC only if at least one allocation category is
 * non-empty (RAR, eMTC, UE DL/UL, common LCH or flow-control info).
 * NOTE(review): the two near-identical condition lists below appear to be
 * alternative conditionally-compiled variants (eMTC / non-eMTC); the
 * intervening preprocessor directives are not visible here - confirm
 * against the full source. */
12853 if((subfrmAlloc->rarInfo.numRaRntis) ||
12855 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
12856 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
12857 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
12859 (subfrmAlloc->ueInfo.numUes) ||
12860 (subfrmAlloc->cmnLcInfo.bitMask) ||
12861 (subfrmAlloc->ulUeInfo.numUes) ||
12862 (subfrmAlloc->flowCntrlInfo.numUes))
12864 if((subfrmAlloc->rarInfo.numRaRntis) ||
12866 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
12867 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
12868 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
12870 (subfrmAlloc->ueInfo.numUes) ||
12871 (subfrmAlloc->cmnLcInfo.bitMask) ||
12872 (subfrmAlloc->flowCntrlInfo.numUes))
12875 RgSchMacSfAlloc(&pst, subfrmAlloc);
/* Advance the circular subframe-allocation index.
 * NOTE(review): the two modulo bases below (RGSCH_NUM_SUB_FRAMES vs
 * RGSCH_SF_ALLOC_SIZE) look like TDD/FDD #ifdef variants - verify. */
12878 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_NUM_SUB_FRAMES;
12880 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_SF_ALLOC_SIZE;
12886 * @brief Consolidate Subframe Allocations.
12890 * Function: rgSCHCmnCnsldtSfAlloc
12891 * Purpose: Consolidate Subframe Allocations.
12893 * Invoked by: Common Scheduler
12895 * @param[in] RgSchCellCb *cell
12898 Void rgSCHCmnCnsldtSfAlloc(RgSchCellCb *cell)
12900 RgInfSfAlloc *subfrmAlloc;
12901 CmLteTimingInfo frm;
12903 CmLListCp dlDrxInactvTmrLst;
12904 CmLListCp dlInActvLst;
12905 CmLListCp ulInActvLst;
12906 RgSchCmnCell *cellSch = NULLP;
/* Working lists: UEs whose DRX inactivity timer must (re)start, and UEs
 * that became DRX-inactive in DL / UL during this TTI. */
12909 cmLListInit(&dlDrxInactvTmrLst);
12910 cmLListInit(&dlInActvLst);
12911 cmLListInit(&ulInActvLst);
12913 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
12915 /* Get Downlink Subframe */
12916 frm = cell->crntTime;
12917 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
12918 dlSf = rgSCHUtlSubFrmGet(cell, frm);
12920 /* Fill the allocation Info */
12921 rgSCHUtlFillRgInfRarInfo(dlSf, subfrmAlloc, cell);
/* Fill per-UE info; as a side effect the three DRX lists are populated */
12924 rgSCHUtlFillRgInfUeInfo(dlSf, cell, &dlDrxInactvTmrLst,
12925 &dlInActvLst, &ulInActvLst);
12926 #ifdef RG_PFS_STATS
12927 cell->totalPrb += dlSf->bwAssigned;
12929 /* Mark the following Ues inactive for UL*/
12930 cellSch = RG_SCH_CMN_GET_CELL(cell);
12932 /* Calling Scheduler specific function with DRX inactive UE list*/
12933 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInActvLst);
12934 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInActvLst);
12937 /*re/start DRX inactivity timer for the UEs*/
12938 (Void)rgSCHDrxStrtInActvTmr(cell,&dlDrxInactvTmrLst,RG_SCH_DRX_DL);
12944 * @brief Initialize the DL Allocation Information Structure.
12948 * Function: rgSCHCmnInitDlRbAllocInfo
12949 * Purpose: Initialize the DL Allocation Information Structure.
12951 * Invoked by: Common Scheduler
12953 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
12956 static Void rgSCHCmnInitDlRbAllocInfo(RgSchCmnDlRbAllocInfo *allocInfo)
/* Zero the common-channel allocation slots (PCCH, BCCH, RAR PDCCHs) */
12958 memset(&allocInfo->pcchAlloc, 0, sizeof(RgSchDlRbAlloc));
12959 memset(&allocInfo->bcchAlloc, 0, sizeof(RgSchDlRbAlloc));
12960 memset(allocInfo->raRspAlloc, 0, RG_SCH_CMN_MAX_CMN_PDCCH*sizeof(RgSchDlRbAlloc));
/* Msg4: fresh-TX, retx, and their scheduled/non-scheduled partitions */
12962 allocInfo->msg4Alloc.msg4DlSf = NULLP;
12963 cmLListInit(&allocInfo->msg4Alloc.msg4TxLst);
12964 cmLListInit(&allocInfo->msg4Alloc.msg4RetxLst);
12965 cmLListInit(&allocInfo->msg4Alloc.schdMsg4TxLst);
12966 cmLListInit(&allocInfo->msg4Alloc.schdMsg4RetxLst);
12967 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4TxLst);
12968 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4RetxLst);
/* CCCH SDU allocation lists (same tx/retx/schd/nonSchd partitioning) */
12970 allocInfo->ccchSduAlloc.ccchSduDlSf = NULLP;
12971 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduTxLst);
12972 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduRetxLst);
12973 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduTxLst);
12974 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduRetxLst);
12975 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst);
12976 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst);
/* Dedicated (per-UE HARQ process) allocation lists */
12979 allocInfo->dedAlloc.dedDlSf = NULLP;
12980 cmLListInit(&allocInfo->dedAlloc.txHqPLst);
12981 cmLListInit(&allocInfo->dedAlloc.retxHqPLst);
12982 cmLListInit(&allocInfo->dedAlloc.schdTxHqPLst);
12983 cmLListInit(&allocInfo->dedAlloc.schdRetxHqPLst);
12984 cmLListInit(&allocInfo->dedAlloc.nonSchdTxHqPLst);
12985 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxHqPLst);
/* Combined TX+RETX (two-TB) HARQ process lists */
12987 cmLListInit(&allocInfo->dedAlloc.txRetxHqPLst);
12988 cmLListInit(&allocInfo->dedAlloc.schdTxRetxHqPLst);
12989 cmLListInit(&allocInfo->dedAlloc.nonSchdTxRetxHqPLst);
/* SPS HARQ process lists */
12991 cmLListInit(&allocInfo->dedAlloc.txSpsHqPLst);
12992 cmLListInit(&allocInfo->dedAlloc.retxSpsHqPLst);
12993 cmLListInit(&allocInfo->dedAlloc.schdTxSpsHqPLst);
12994 cmLListInit(&allocInfo->dedAlloc.schdRetxSpsHqPLst);
12995 cmLListInit(&allocInfo->dedAlloc.nonSchdTxSpsHqPLst);
12996 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxSpsHqPLst);
/* LAA-specific initialization (conditionally compiled in full source) */
13000 rgSCHLaaCmnInitDlRbAllocInfo (allocInfo);
/* Error-indication TX HARQ process lists */
13003 cmLListInit(&allocInfo->dedAlloc.errIndTxHqPLst);
13004 cmLListInit(&allocInfo->dedAlloc.schdErrIndTxHqPLst);
13005 cmLListInit(&allocInfo->dedAlloc.nonSchdErrIndTxHqPLst);
13010 * @brief Initialize the UL Allocation Information Structure.
13014 * Function: rgSCHCmnInitUlRbAllocInfo
13015 * Purpose: Initialize the UL Allocation Information Structure.
13017 * Invoked by: Common Scheduler
13019 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13022 Void rgSCHCmnInitUlRbAllocInfo(RgSchCmnUlRbAllocInfo *allocInfo)
/* Reset the target UL subframe and all candidate/result lists:
 * contention-resolution UEs and ordinary UEs, each with scheduled and
 * non-scheduled partitions filled in later by the UL allocator. */
13024 allocInfo->sf = NULLP;
13025 cmLListInit(&allocInfo->contResLst);
13026 cmLListInit(&allocInfo->schdContResLst);
13027 cmLListInit(&allocInfo->nonSchdContResLst);
13028 cmLListInit(&allocInfo->ueLst);
13029 cmLListInit(&allocInfo->schdUeLst);
13030 cmLListInit(&allocInfo->nonSchdUeLst);
13036 * @brief Scheduling for PUCCH group power control.
13040 * Function: rgSCHCmnGrpPwrCntrlPucch
13041 * Purpose: This function does group power control for PUCCH
13042 * corresponding to the subframe for which DL UE allocations
13045 * Invoked by: Common Scheduler
13047 * @param[in] RgSchCellCb *cell
/* Thin wrapper: delegates directly to the power-control module. */
13050 static Void rgSCHCmnGrpPwrCntrlPucch(RgSchCellCb *cell,RgSchDlSf *dlSf)
13052 rgSCHPwrGrpCntrlPucch(cell, dlSf);
13057 * @brief Scheduling for PUSCH group power control.
13061 * Function: rgSCHCmnGrpPwrCntrlPusch
13062 * Purpose: This function does group power control, for
13063 * the subframe for which UL allocation has (just) happened.
13065 * Invoked by: Common Scheduler
13067 * @param[in] RgSchCellCb *cell
13068 * @param[in] RgSchUlSf *ulSf
13071 static Void rgSCHCmnGrpPwrCntrlPusch(RgSchCellCb *cell,RgSchUlSf *ulSf)
13073 /*removed unused variable *cellSch*/
13074 CmLteTimingInfo frm;
13078 /* Got to pass DL SF corresponding to UL SF, so get that first.
13079 * There is no easy way of getting dlSf by having the RgSchUlSf*,
13080 * so use the UL delta from current time to get the DL SF. */
13081 frm = cell->crntTime;
/* eMTC cells use an eMTC-specific subframe increment macro */
13084 if(cell->emtcEnable == TRUE)
13086 RGSCH_INCR_SUB_FRAME_EMTC(frm, TFU_DLCNTRL_DLDELTA);
13091 RGSCH_INCR_SUB_FRAME(frm, TFU_DLCNTRL_DLDELTA);
13093 /* Del filling of dl.time */
13094 dlSf = rgSCHUtlSubFrmGet(cell, frm);
13096 rgSCHPwrGrpCntrlPusch(cell, dlSf, ulSf);
13101 /* Fix: syed align multiple UEs to refresh at same time */
13102 /***********************************************************
13104 * Func : rgSCHCmnApplyUeRefresh
13106 * Desc : Apply UE refresh in CMN and Specific
13107 * schedulers. Data rates and corresponding
13108 * scratchpad variables are updated.
13116 **********************************************************/
13117 static S16 rgSCHCmnApplyUeRefresh(RgSchCellCb *cell,RgSchUeCb *ue)
13119 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13120 uint32_t effGbrBsr = 0;
13121 uint32_t effNonGbrBsr = 0;
13125 /* Reset the refresh cycle variableCAP */
13126 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk LCGs 1..max (LCG 0 handled separately below): refresh effective
 * GBR/MBR budgets and clamp buffer status against them. */
13128 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
13130 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
13132 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
13134 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
13136 cmnLcg->effGbr = cmnLcg->cfgdGbr;
13137 cmnLcg->effDeltaMbr = cmnLcg->deltaMbr;
13138 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
13139 /* Considering GBR LCG will be prioritised by UE */
13140 effGbrBsr += cmnLcg->bs;
13141 }/* Else no remaing BS so nonLcg0 will be updated when BSR will be received */
/* Non-GBR LCG: buffer status bounded by the UE's effective AMBR */
13144 effNonGbrBsr += cmnLcg->reportedBs;
13145 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
13149 effNonGbrBsr = RGSCH_MIN(effNonGbrBsr,ue->ul.effAmbr);
13150 ue->ul.nonGbrLcgBs = effNonGbrBsr;
/* Effective BSR = GBR + non-GBR contributions + LCG0 (SRB) buffer */
13152 ue->ul.nonLcg0Bs = effGbrBsr + effNonGbrBsr;
13153 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
13154 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
13157 /* call scheduler specific event handlers
13158 * for refresh timer expiry */
13159 cellSch->apisUl->rgSCHUlUeRefresh(cell, ue);
13160 cellSch->apisDl->rgSCHDlUeRefresh(cell, ue);
13165 /***********************************************************
13167 * Func : rgSCHCmnTmrExpiry
13169 * Desc : Adds an UE to refresh queue, so that the UE is
13170 * periodically triggered to refresh it's GBR and
13179 **********************************************************/
13180 static S16 rgSCHCmnTmrExpiry
13182 PTR cb, /* Pointer to timer control block */
13183 S16 tmrEvnt /* Timer Event */
13186 RgSchUeCb *ue = (RgSchUeCb *)cb;
13187 RgSchCellCb *cell = ue->cell;
13188 #if (ERRCLASS & ERRCLS_DEBUG)
13192 #if (ERRCLASS & ERRCLS_DEBUG)
/* Debug-build sanity: only the UE-refresh event is expected here */
13193 if (tmrEvnt != RG_SCH_CMN_EVNT_UE_REFRESH)
13195 DU_LOG("\nERROR --> SCH : rgSCHCmnTmrExpiry(): Invalid "
13196 "timer event CRNTI:%d",ue->ueId);
/* Apply the refresh now, then re-arm the periodic refresh timer */
13203 rgSCHCmnApplyUeRefresh(cell, ue);
13205 rgSCHCmnAddUeToRefreshQ(cell, ue, RG_SCH_CMN_REFRESH_TIME);
13210 /***********************************************************
13212 * Func : rgSCHCmnTmrProc
13214 * Desc : Timer entry point per cell. Timer
13215 * processing is triggered at every frame boundary
13224 **********************************************************/
13225 static S16 rgSCHCmnTmrProc(RgSchCellCb *cell)
13227 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
13228 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
13229 /* Moving the assignment of scheduler pointer
13230 to available scope for optimization */
/* Run frame-boundary work only on the first slot of a frame */
13232 if ((cell->crntTime.slot % RGSCH_NUM_SUB_FRAMES_5G) == 0)
13234 /* Reset the counters periodically */
13235 if ((cell->crntTime.sfn % RG_SCH_CMN_CSG_REFRESH_TIME) == 0)
13237 RG_SCH_RESET_HCSG_DL_PRB_CNTR(cmnDlCell);
13238 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cmnUlCell);
13240 if ((cell->crntTime.sfn % RG_SCH_CMN_OVRLDCTRL_REFRESH_TIME) == 0)
/* Exponential moving average of cell throughput: 95% old / 5% new */
13243 cell->measurements.ulTpt = ((cell->measurements.ulTpt * 95) + ( cell->measurements.ulBytesCnt * 5))/100;
13244 cell->measurements.dlTpt = ((cell->measurements.dlTpt * 95) + ( cell->measurements.dlBytesCnt * 5))/100;
13246 rgSCHUtlCpuOvrLdAdjItbsCap(cell);
13247 /* reset cell level tpt measurements for next cycle */
13248 cell->measurements.ulBytesCnt = 0;
13249 cell->measurements.dlBytesCnt = 0;
13251 /* Comparing with Zero instead of % is being done for efficiency.
13252 * If Timer resolution changes then accordingly update the
13253 * macro RG_SCH_CMN_REFRESH_TIMERES */
13254 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
/* Drive the common timer queue (fires rgSCHCmnTmrExpiry callbacks) */
13255 cmPrcTmr(&sched->tmrTqCp, sched->tmrTq, (PFV)rgSCHCmnTmrExpiry);
13262 /***********************************************************
13264 * Func : rgSchCmnUpdCfiVal
13266 * Desc : Update the CFI value if CFI switch was done
13274 **********************************************************/
13275 static Void rgSchCmnUpdCfiVal(RgSchCellCb *cell,uint8_t delta)
13278 CmLteTimingInfo pdsch;
13279 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
13285 uint8_t splSfCfi = 0;
/* Locate the DL subframe 'delta' subframes ahead of current time */
13289 pdsch = cell->crntTime;
13290 RGSCH_INCR_SUB_FRAME(pdsch, delta);
13291 dlSf = rgSCHUtlSubFrmGet(cell, pdsch);
13292 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
13293 *change happens in that SF then UL PDCCH allocation happens with old CFI
13294 *but CFI in control Req goes updated one since it was stored in the CELL
13296 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* A CFI switch is pending only while pdcchSfIdx != 0xFF (sentinel) */
13297 if(cell->dynCfiCb.pdcchSfIdx != 0xFF)
/* NOTE(review): the two dlIdx computations below look like TDD/FDD
 * #ifdef variants; the directives are not visible here - confirm. */
13300 dlIdx = rgSCHUtlGetDlSfIdx(cell, &pdsch);
13302 dlIdx = (((pdsch.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (pdsch.slot % RGSCH_NUM_SUB_FRAMES));
13303 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
13305 /* If current downlink subframe index is same as pdcch SF index,
13306 * perform the switching of CFI in this subframe */
13307 if(cell->dynCfiCb.pdcchSfIdx == dlIdx)
13309 cellCmnDl->currCfi = cellCmnDl->newCfi;
13310 cell->dynCfiCb.pdcchSfIdx = 0xFF;
13312 /* Updating the nCce value based on the new CFI */
13314 splSfCfi = cellCmnDl->newCfi;
13315 for(idx = 0; idx < cell->numDlSubfrms; idx++)
13317 tddSf = cell->subFrms[idx];
13319 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][tddSf->sfNum];
13321 if(tddSf->sfType == RG_SCH_SPL_SF_DATA)
/* TDD special subframe uses a clamped CFI for PDSCH */
13323 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
13325 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
13329 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][cellCmnDl->currCfi];
13332 /* Setting the switch over window length based on config index.
13333 * During switch over period all the UL trnsmissions are Acked
13335 cell->dynCfiCb.switchOvrWinLen =
13336 rgSchCfiSwitchOvrWinLen[cell->ulDlCfgIdx];
13338 cell->nCce = cell->dynCfiCb.cfi2NCceTbl[0][cellCmnDl->currCfi];
13339 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
13340 *change happens in that SF then UL PDCCH allocation happens with old CFI
13341 *but CFI in control Req goes updated one since it was stored in the CELL
13343 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* FDD uses a fixed switch-over window length (table index 7) */
13344 cell->dynCfiCb.switchOvrWinLen = rgSchCfiSwitchOvrWinLen[7];
13352 /***********************************************************
13354 * Func : rgSchCmnUpdtPdcchSfIdx
13356 * Desc : Update the switch over window length
13364 **********************************************************/
/* NOTE(review): both prototypes below are retained from conditionally
 * compiled TDD (dlIdx + sfNum) and FDD (dlIdx only) variants; the
 * #ifdef directives are not visible in this extract - confirm. */
13366 static Void rgSchCmnUpdtPdcchSfIdx(RgSchCellCb *cell,uint8_t dlIdx,uint8_t sfNum)
13368 static Void rgSchCmnUpdtPdcchSfIdx(RgSchCellCb *cell,uint8_t dlIdx)
13374 /* Resetting the parameters on CFI switching */
13375 cell->dynCfiCb.cceUsed = 0;
13376 cell->dynCfiCb.lowCceCnt = 0;
13378 cell->dynCfiCb.cceFailSum = 0;
13379 cell->dynCfiCb.cceFailCnt = 0;
13380 cell->dynCfiCb.prevCceFailIdx = 0;
/* Mark switch-over active until the window elapses (see rgSCHCmnUlSch) */
13382 cell->dynCfiCb.switchOvrInProgress = TRUE;
13384 for(idx = 0; idx < cell->dynCfiCb.numFailSamples; idx++)
13386 cell->dynCfiCb.cceFailSamples[idx] = 0;
13389 cell->dynCfiCb.ttiCnt = 0;
13391 cell->dynCfiCb.cfiSwitches++;
13392 cfiSwitchCnt = cell->dynCfiCb.cfiSwitches;
/* Compute the subframe index at which the new CFI takes effect */
13395 cell->dynCfiCb.pdcchSfIdx = (dlIdx +
13396 rgSchTddPdcchSfIncTbl[cell->ulDlCfgIdx][sfNum]) % cell->numDlSubfrms;
13398 cell->dynCfiCb.pdcchSfIdx = (dlIdx + RG_SCH_CFI_APPLY_DELTA) % \
13399 RGSCH_NUM_DL_slotS;
13403 /***********************************************************
13405 * Func : rgSchCmnUpdCfiDb
13407 * Desc : Update the counters related to dynamic
13408 * CFI feature in cellCb.
13416 **********************************************************/
13417 Void rgSchCmnUpdCfiDb(RgSchCellCb *cell,uint8_t delta)
13419 CmLteTimingInfo frm;
13425 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13426 uint8_t nCceLowerCfi = 0;
13428 uint8_t cceFailIdx;
13434 /* Get Downlink Subframe */
13435 frm = cell->crntTime;
13436 RGSCH_INCR_SUB_FRAME(frm, delta);
/* NOTE(review): the two dlIdx derivations below appear to be TDD/FDD
 * #ifdef variants; directives are missing from this extract - confirm. */
13439 dlIdx = rgSCHUtlGetDlSfIdx(cell, &frm);
13440 dlSf = cell->subFrms[dlIdx];
13441 isHiDci0 = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][dlSf->sfNum];
13443 /* Changing the idexing
13444 so that proper subframe is selected */
13445 dlIdx = (((frm.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (frm.slot % RGSCH_NUM_SUB_FRAMES));
13446 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
13447 dlSf = cell->subFrms[dlIdx];
13450 currCfi = cellSch->dl.currCfi;
/* No CFI evaluation while a previous switch-over window is active */
13452 if(!cell->dynCfiCb.switchOvrInProgress)
13455 if(!cell->dynCfiCb.isDynCfiEnb)
/* Dynamic CFI disabled: converge stepwise to the configured CFI */
13457 if(currCfi != cellSch->cfiCfg.cfi)
13459 if(currCfi < cellSch->cfiCfg.cfi)
13461 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
13462 cfiIncr = cell->dynCfiCb.cfiIncr;
13466 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
13467 cfiDecr = cell->dynCfiCb.cfiDecr;
13474 /* Setting ttiMod to 0 for ttiCnt > 1000 in case if this
13475 * function was not called in UL subframe*/
13476 if(cell->dynCfiCb.ttiCnt > RGSCH_CFI_TTI_MON_INTRVL)
13483 ttiMod = cell->dynCfiCb.ttiCnt % RGSCH_CFI_TTI_MON_INTRVL;
13486 dlSf->dlUlBothCmplt++;
/* Evaluate only once both DL and UL scheduling completed for this SF
 * (or when no HI/DCI0 is expected in it) */
13488 if((dlSf->dlUlBothCmplt == 2) || (!isHiDci0))
13490 if(dlSf->dlUlBothCmplt == 2)
13493 /********************STEP UP CRITERIA********************/
13494 /* Updating the CCE failure count parameter */
13495 cell->dynCfiCb.cceFailCnt += dlSf->isCceFailure;
13496 cell->dynCfiCb.cceFailSum += dlSf->isCceFailure;
13498 /* Check if cfi step up can be performed */
13499 if(currCfi < cell->dynCfiCb.maxCfi)
13501 if(cell->dynCfiCb.cceFailSum >= cell->dynCfiCb.cfiStepUpTtiCnt)
13503 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
13504 cfiIncr = cell->dynCfiCb.cfiIncr;
13509 /********************STEP DOWN CRITERIA********************/
13511 /* Updating the no. of CCE used in this dl subframe */
13512 cell->dynCfiCb.cceUsed += dlSf->cceCnt;
13514 if(currCfi > RGSCH_MIN_CFI_VAL)
13516 /* calculating the number of CCE for next lower CFI */
13518 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][dlSf->sfNum];
13519 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[mPhich][currCfi-1];
13521 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[0][currCfi-1];
13523 if(dlSf->cceCnt < nCceLowerCfi)
13525 /* Updating the count of TTIs in which no. of CCEs
13526 * used were less than the CCEs of next lower CFI */
13527 cell->dynCfiCb.lowCceCnt++;
/* Step down only if CCE usage stayed under RGSCH_CFI_CCE_PERCNTG% of
 * the lower CFI's capacity for the whole step-down interval and no
 * CCE allocation failure occurred. */
13532 totalCce = (nCceLowerCfi * cell->dynCfiCb.cfiStepDownTtiCnt *
13533 RGSCH_CFI_CCE_PERCNTG)/100;
13535 if((!cell->dynCfiCb.cceFailSum) &&
13536 (cell->dynCfiCb.lowCceCnt >=
13537 cell->dynCfiCb.cfiStepDownTtiCnt) &&
13538 (cell->dynCfiCb.cceUsed < totalCce))
13540 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
13541 cfiDecr = cell->dynCfiCb.cfiDecr;
/* Sliding-window bookkeeping of CCE failures per sample period */
13547 cceFailIdx = ttiMod/cell->dynCfiCb.failSamplePrd;
13549 if(cceFailIdx != cell->dynCfiCb.prevCceFailIdx)
13551 /* New sample period has started. Subtract the old count
13552 * from the new sample period */
13553 cell->dynCfiCb.cceFailSum -= cell->dynCfiCb.cceFailSamples[cceFailIdx];
13555 /* Store the previous sample period data */
13556 cell->dynCfiCb.cceFailSamples[cell->dynCfiCb.prevCceFailIdx]
13557 = cell->dynCfiCb.cceFailCnt;
13559 cell->dynCfiCb.prevCceFailIdx = cceFailIdx;
13561 /* Resetting the CCE failure count as zero for next sample period */
13562 cell->dynCfiCb.cceFailCnt = 0;
13567 /* Restting the parametrs after Monitoring Interval expired */
13568 cell->dynCfiCb.cceUsed = 0;
13569 cell->dynCfiCb.lowCceCnt = 0;
13570 cell->dynCfiCb.ttiCnt = 0;
13573 cell->dynCfiCb.ttiCnt++;
/* If CFI changed above, schedule the subframe at which it applies */
13577 if(cellSch->dl.newCfi != cellSch->dl.currCfi)
13580 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, dlSf->sfNum);
13582 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx);
13589 * @brief Dl Scheduler for Broadcast and Common channel scheduling.
13593 * Function: rgSCHCmnDlCommonChSch
13594 * Purpose: This function schedules DL Common channels for LTE.
13595 * Invoked by TTI processing in TOM. Scheduling is done for
13596 * BCCH, PCCH, Msg4, CCCH SDU, RAR in that order
13598 * Invoked by: TOM (TTI processing)
13600 * @param[in] RgSchCellCb *cell
13603 Void rgSCHCmnDlCommonChSch(RgSchCellCb *cell)
13605 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Tick PDB tracking and refresh CFI before any DL allocation */
13608 cellSch->apisDl->rgSCHDlTickForPdbTrkng(cell);
13609 rgSchCmnUpdCfiVal(cell, RG_SCH_CMN_DL_DELTA);
13611 /* handle Inactive UEs for DL */
13612 rgSCHCmnHdlDlInactUes(cell);
13614 /* Send a Tick to Refresh Timer */
13615 rgSCHCmnTmrProc(cell);
/* BCCH/PCCH are scheduled only when DL data and SI scheduling allowed */
13617 if (cell->isDlDataAllwd && (cell->stopSiSch == FALSE))
13619 rgSCHCmnInitRbAlloc(cell);
13620 /* Perform DL scheduling of BCCH, PCCH */
13621 rgSCHCmnDlBcchPcchAlloc(cell);
/* Count down the SI window even when SI scheduling is suppressed */
13625 if(cell->siCb.inWindow != 0)
13627 cell->siCb.inWindow--;
13630 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
13632 rgSCHCmnDlCcchRarAlloc(cell);
13638 * @brief Scheduler invocation per TTI.
13642 * Function: rgSCHCmnUlSch
13643 * Purpose: This function implements UL scheduler alone. This is to
13644 * be able to perform scheduling with more flexibility.
13646 * Invoked by: TOM (TTI processing)
13648 * @param[in] RgSchCellCb *cell
13651 Void rgSCHCmnUlSch(RgSchCellCb *cell)
13653 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* LAA SCell: UL scheduling is not applicable (early exit in full source) */
13658 if(TRUE == rgSCHLaaSCellEnabled(cell))
/* Perform UL allocation only when there is a valid UL SF to schedule */
13664 if(cellSch->ul.schdIdx != RGSCH_INVALID_INFO)
13666 rgSchCmnUpdCfiVal(cell, TFU_ULCNTRL_DLDELTA);
13668 /* Handle Inactive UEs for UL */
13669 rgSCHCmnHdlUlInactUes(cell);
13670 /* Perform UL Scheduling EVERY TTI */
13671 rgSCHCmnUlAlloc(cell);
13673 /* Calling function to update CFI parameters*/
13674 rgSchCmnUpdCfiDb(cell, TFU_ULCNTRL_DLDELTA);
13676 if(cell->dynCfiCb.switchOvrWinLen > 0)
13678 /* Decrementing the switchover window length */
13679 cell->dynCfiCb.switchOvrWinLen--;
13681 if(!cell->dynCfiCb.switchOvrWinLen)
/* Window elapsed: apply any pending dynamic-CFI reconfiguration */
13683 if(cell->dynCfiCb.dynCfiRecfgPend)
13685 /* Toggling the Dynamic CFI enabling */
13686 cell->dynCfiCb.isDynCfiEnb ^= 1;
13687 rgSCHDynCfiReCfg(cell, cell->dynCfiCb.isDynCfiEnb);
13688 cell->dynCfiCb.dynCfiRecfgPend = FALSE;
13690 cell->dynCfiCb.switchOvrInProgress = FALSE;
/* No schedulable UL SF this TTI: still tick the SPS UL module */
13698 rgSCHCmnSpsUlTti(cell, NULLP);
13708 * @brief This function updates the scheduler with service for an UE.
13712 * Function: rgSCHCmnDlDedBoUpd
13713 * Purpose: This function should be called whenever there is a
13714 * change BO for a service.
13716 * Invoked by: BO and Scheduler
13718 * @param[in] RgSchCellCb* cell
13719 * @param[in] RgSchUeCb* ue
13720 * @param[in] RgSchDlLcCb* svc
13724 Void rgSCHCmnDlDedBoUpd(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlLcCb *svc)
13726 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13728 /* RACHO : if UEs idle time exceeded and a BO update
13729 * is received, then add UE to the pdcch Order Q */
13730 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
13732 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue, cell);
13733 /* If PDCCH order is already triggered and we are waiting for
13734 * RACH from UE then do not add to PdcchOdrQ. */
13735 if (ueDl->rachInfo.rapIdLnk.node == NULLP)
13737 rgSCHCmnDlAdd2PdcchOdrQ(cell, ue);
13743 /* If SPS service, invoke SPS module */
13744 if (svc->dlLcSpsCfg.isSpsEnabled)
13746 rgSCHCmnSpsDlDedBoUpd(cell, ue, svc);
13747 /* Note: Retrun from here, no update needed in other schedulers */
/* Dispatch the BO update to the eMTC or regular DL scheduler */
13752 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
13754 cellSch->apisEmtcDl->rgSCHDlDedBoUpd(cell, ue, svc);
13755 //DU_LOG("\nINFO --> SCH : rgSCHEMTCDlDedBoUpd\n");
13760 cellSch->apisDl->rgSCHDlDedBoUpd(cell, ue, svc);
/* Propagate to SCell handling (conditionally compiled in full source) */
13765 rgSCHSCellDlDedBoUpd(cell, ue, svc);
13773 * @brief Removes an UE from Cell's TA List.
13777 * Function: rgSCHCmnRmvFrmTaLst
13778 * Purpose: Removes an UE from Cell's TA List.
13780 * Invoked by: Specific Scheduler
13782 * @param[in] RgSchCellCb* cell
13783 * @param[in] RgSchUeCb* ue
13787 Void rgSCHCmnRmvFrmTaLst(RgSchCellCb *cell,RgSchUeCb *ue)
13789 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* eMTC UEs are kept on a separate TA list */
13792 if(cell->emtcEnable && ue->isEmtcUe)
13794 rgSCHEmtcRmvFrmTaLst(cellCmnDl,ue);
/* Unlink from the cell TA list and clear the link for safe re-adds */
13799 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
13800 ue->dlTaLnk.node = (PTR)NULLP;
13805 /* Fix: syed Remove the msg4Proc from cell
13806 * msg4Retx Queue. I have used CMN scheduler function
13807 * directly. Please define a new API and call this
13808 * function through that. */
13811 * @brief This function removes MSG4 HARQ process from cell RETX Queues.
13815 * Function: rgSCHCmnDlMsg4ProcRmvFrmRetx
13816 * Purpose: This function removes MSG4 HARQ process from cell RETX Queues.
13818 * Invoked by: UE/RACB deletion.
13820 * @param[in] RgSchCellCb* cell
13821 * @param[in] RgSchDlHqProc* hqP
13825 Void rgSCHCmnDlMsg4ProcRmvFrmRetx(RgSchCellCb *cell,RgSchDlHqProcCb *hqP)
13827 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Only act if the process is actually linked on a retx queue */
13829 if (hqP->tbInfo[0].ccchSchdInfo.retxLnk.node)
13831 if (hqP->hqE->msg4Proc == hqP)
13833 cmLListDelFrm(&cellSch->dl.msg4RetxLst, \
13834 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13835 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
13838 else if(hqP->hqE->ccchSduProc == hqP)
/* CCCH SDU proc sits on the ccchSduRetxLst instead */
13840 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
13841 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13842 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
13851 * @brief This function adds a HARQ process for retx.
13855 * Function: rgSCHCmnDlProcAddToRetx
13856 * Purpose: This function adds a HARQ process to retransmission
13857 * queue. This may be performed when a HARQ ack is
13860 * Invoked by: HARQ feedback processing
13862 * @param[in] RgSchCellCb* cell
13863 * @param[in] RgSchDlHqProc* hqP
13867 Void rgSCHCmnDlProcAddToRetx(RgSchCellCb *cell,RgSchDlHqProcCb *hqP)
13869 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13871 if (hqP->hqE->msg4Proc == hqP) /* indicating msg4 transmission */
13873 cmLListAdd2Tail(&cellSch->dl.msg4RetxLst, \
13874 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13875 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
13878 else if(hqP->hqE->ccchSduProc == hqP)
13880 /*If CCCH SDU being transmitted without cont res CE*/
13881 cmLListAdd2Tail(&cellSch->dl.ccchSduRetxLst,
13882 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13883 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
13889 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
13891 /* Invoke SPS module for SPS HARQ proc re-transmission handling */
13892 rgSCHCmnSpsDlProcAddToRetx(cell, hqP);
13895 #endif /* LTEMAC_SPS */
/* Non-msg4/CCCH proc: hand off to eMTC or regular DL scheduler */
13897 if((TRUE == cell->emtcEnable)
13898 && (TRUE == hqP->hqE->ue->isEmtcUe))
13900 cellSch->apisEmtcDl->rgSCHDlProcAddToRetx(cell, hqP);
13905 cellSch->apisDl->rgSCHDlProcAddToRetx(cell, hqP);
13913 * @brief This function performs RI validation and
13914 * updates it to the ueCb.
13918 * Function: rgSCHCmnDlSetUeRi
13919 * Purpose: This function performs RI validation and
13920 * updates it to the ueCb.
13922 * Invoked by: rgSCHCmnDlCqiInd
13924 * @param[in] RgSchCellCb *cell
13925 * @param[in] RgSchUeCb *ue
13926 * @param[in] uint8_t ri
13927 * @param[in] Bool isPeriodic
13931 static Void rgSCHCmnDlSetUeRi(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t ri,Bool isPer)
13933 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
13934 RgSchCmnUeInfo *ueSchCmn = RG_SCH_CMN_GET_CMN_UE(ue);
13937 RgSchUePCqiCb *cqiCb = RG_SCH_GET_UE_CELL_CQI_CB(ue,cell);
13942 /* FIX for RRC Reconfiguration issue */
13943 /* ccpu00140894- During Tx Mode transition RI report will not entertained for
13944 * specific during which SCH expecting UE can complete TX mode transition*/
13945 if (ue->txModeTransCmplt == FALSE)
13950 /* Restrict the Number of TX layers to cell->numTxAntPorts.
13951 * Protection from invalid RI values. */
13952 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
13954 /* Special case of converting PMI to sane value when
13955 * there is a switch in RI from 1 to 2 and PMI reported
13956 * for RI=1 is invalid for RI=2 */
13957 if ((cell->numTxAntPorts == 2) && (ue->mimoInfo.txMode == RGR_UE_TM_4))
13959 if ((ri == 2) && ( ueDl->mimoInfo.ri == 1))
13961 ueDl->mimoInfo.pmi = (ueDl->mimoInfo.pmi < 2)? 1:2;
13965 /* Restrict the Number of TX layers according to the UE Category */
13966 ueDl->mimoInfo.ri = RGSCH_MIN(ri, rgUeCatTbl[ueSchCmn->ueCat].maxTxLyrs);
/* Stats bookkeeping; the two index forms (RG_SCH_CELLINDEX vs 0) are
 * conditionally-compiled variants in the full source */
13968 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].riCnt[ueDl->mimoInfo.ri-1]++;
13969 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
13973 ue->tenbStats->stats.nonPersistent.sch[0].riCnt[ueDl->mimoInfo.ri-1]++;
13974 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
13980 /* If RI is from Periodic CQI report */
13981 cqiCb->perRiVal = ueDl->mimoInfo.ri;
13982 /* Reset at every Periodic RI Reception */
13983 cqiCb->invalidateCqi = FALSE;
13987 /* If RI is from Aperiodic CQI report */
13988 if (cqiCb->perRiVal != ueDl->mimoInfo.ri)
13990 /* if this aperRI is different from last reported
13991 * perRI then invalidate all CQI reports till next
13993 cqiCb->invalidateCqi = TRUE;
13997 cqiCb->invalidateCqi = FALSE;
/* Force/clear transmit diversity based on the accepted RI */
14002 if (ueDl->mimoInfo.ri > 1)
14004 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
14006 else if (ue->mimoInfo.txMode == RGR_UE_TM_3) /* ri == 1 */
14008 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
14016 * @brief This function performs PMI validation and
14017 * updates it to the ueCb.
14021 * Function: rgSCHCmnDlSetUePmi
14022 * Purpose: This function performs PMI validation and
14023 * updates it to the ueCb.
14025 * Invoked by: rgSCHCmnDlCqiInd
14027 * @param[in] RgSchCellCb *cell
14028 * @param[in] RgSchUeCb *ue
14029 * @param[in] uint8_t pmi
14033 static S16 rgSCHCmnDlSetUePmi(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t pmi)
14035 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Ignore PMI reports while a TX-mode transition is still in progress */
14037 if (ue->txModeTransCmplt == FALSE)
14042 if (cell->numTxAntPorts == 2)
14048 if (ueDl->mimoInfo.ri == 2)
14050 /*ccpu00118150 - MOD - changed pmi value validation from 0 to 2*/
14051 /* PMI 2 and 3 are invalid incase of 2 TxAnt and 2 Layered SM */
14052 if (pmi == 2 || pmi == 3)
/* RI=2 reports index 0/1; stored form is offset by one */
14056 ueDl->mimoInfo.pmi = pmi+1;
14060 ueDl->mimoInfo.pmi = pmi;
14063 else if (cell->numTxAntPorts == 4)
14069 ueDl->mimoInfo.pmi = pmi;
14071 /* Reset the No PMI Flag in forceTD */
14072 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
14077 * @brief This function Updates the DL CQI on PUCCH for the UE.
14081 * Function: rgSCHCmnDlProcCqiMode10
14083 * This function updates the DL CQI on PUCCH for the UE.
14085 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14087 * Processing Steps:
14089 * @param[in] RgSchCellCb *cell
14090 * @param[in] RgSchUeCb *ue
14091 * @param[in] TfuDlCqiRpt *dlCqiRpt
14096 #ifdef RGR_CQI_REPT
14097 static inline Void rgSCHCmnDlProcCqiMode10
14101 TfuDlCqiPucch *pucchCqi,
14105 static inline Void rgSCHCmnDlProcCqiMode10
14109 TfuDlCqiPucch *pucchCqi
14113 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14115 if (pucchCqi->u.mode10Info.type == TFU_RPT_CQI)
14117 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14118 /* Checking whether the decoded CQI is a value between 1 and 15*/
14119 if((pucchCqi->u.mode10Info.u.cqi) && (pucchCqi->u.mode10Info.u.cqi
14120 < RG_SCH_CMN_MAX_CQI))
14122 ueDl->cqiFlag = TRUE;
/* Mode 1-0 is wideband, single codeword: mirror CQI onto both CWs */
14123 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode10Info.u.cqi;
14124 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14125 /* ccpu00117452 - MOD - Changed macro name from
14126 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14127 #ifdef RGR_CQI_REPT
14128 *isCqiAvail = TRUE;
14136 else if (pucchCqi->u.mode10Info.type == TFU_RPT_RI)
14138 if ( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode10Info.u.ri) )
/* Periodic RI: validate and store via the common RI path */
14140 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode10Info.u.ri,
14145 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14146 pucchCqi->u.mode10Info.u.ri,ue->ueId);
14153 * @brief This function Updates the DL CQI on PUCCH for the UE.
14157 * Function: rgSCHCmnDlProcCqiMode11
14159 * This function updates the DL CQI on PUCCH for the UE.
14161 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14163 * Processing Steps:
14164 * Process CQI MODE 11
14165 * @param[in] RgSchCellCb *cell
14166 * @param[in] RgSchUeCb *ue
14167 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* rgSCHCmnDlProcCqiMode11:
 * Handles PUCCH periodic reporting mode 1-1 feedback: wideband CQI with
 * PMI (and optional wideband differential CQI for the second codeword),
 * or an RI report. NOTE(review): braces and #else/#endif arms are elided
 * in this excerpt; comments apply to the visible statements only. */
14172 #ifdef RGR_CQI_REPT
14173 static inline Void rgSCHCmnDlProcCqiMode11
14177 TfuDlCqiPucch *pucchCqi,
/* Second-codeword availability flag — only in the RGR_CQI_REPT build. */
14179 Bool *is2ndCwCqiAvail
14182 static inline Void rgSCHCmnDlProcCqiMode11
14186 TfuDlCqiPucch *pucchCqi
14190 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14192 if (pucchCqi->u.mode11Info.type == TFU_RPT_CQI)
/* A fresh PUCCH mode 1-1 CQI supersedes any PUSCH (aperiodic) feedback. */
14194 ue->mimoInfo.puschFdbkVld = FALSE;
14195 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14196 if((pucchCqi->u.mode11Info.u.cqi.cqi) &&
14197 (pucchCqi->u.mode11Info.u.cqi.cqi < RG_SCH_CMN_MAX_CQI))
14199 ueDl->cqiFlag = TRUE;
14200 /* ccpu00117452 - MOD - Changed macro name from
14201 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14202 #ifdef RGR_CQI_REPT
14203 *isCqiAvail = TRUE;
14205 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode11Info.u.cqi.cqi;
/* If the wideband differential CQI is present, derive codeword-2 CQI
 * from codeword-1 CQI via RG_SCH_UPDT_CW2_CQI. */
14206 if (pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.pres)
14208 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
14209 ueDl->mimoInfo.cwInfo[1].cqi, \
14210 pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.val);
14211 #ifdef RGR_CQI_REPT
14212 /* ccpu00117259 - ADD - Considering second codeword CQI info
14213 incase of MIMO for CQI Reporting */
14214 *is2ndCwCqiAvail = TRUE;
/* Mode 1-1 also carries a PMI; forward it for precoder selection. */
14222 rgSCHCmnDlSetUePmi(cell, ue, \
14223 pucchCqi->u.mode11Info.u.cqi.pmi);
14225 else if (pucchCqi->u.mode11Info.type == TFU_RPT_RI)
14227 if( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode11Info.u.ri))
14229 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode11Info.u.ri,
14234 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14235 pucchCqi->u.mode11Info.u.ri,ue->ueId);
14242 * @brief This function Updates the DL CQI on PUCCH for the UE.
14246 * Function: rgSCHCmnDlProcCqiMode20
14248 * This function updates the DL CQI on PUCCH for the UE.
14250 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14252 * Processing Steps:
14253 * Process CQI MODE 20
14254 * @param[in] RgSchCellCb *cell
14255 * @param[in] RgSchUeCb *ue
14256 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* rgSCHCmnDlProcCqiMode20:
 * Handles PUCCH periodic reporting mode 2-0 feedback: UE-selected subband
 * reporting without PMI. Only the wideband CQI part is consumed here (the
 * non-wideband/subband branch, if any, is elided in this excerpt along
 * with braces and #else/#endif arms). */
14261 #ifdef RGR_CQI_REPT
14262 static inline Void rgSCHCmnDlProcCqiMode20
14266 TfuDlCqiPucch *pucchCqi,
14270 static inline Void rgSCHCmnDlProcCqiMode20
14274 TfuDlCqiPucch *pucchCqi
14278 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14280 if (pucchCqi->u.mode20Info.type == TFU_RPT_CQI)
/* Only the wideband part of the mode 2-0 report is handled below. */
14282 if (pucchCqi->u.mode20Info.u.cqi.isWideband)
14284 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14285 if((pucchCqi->u.mode20Info.u.cqi.u.wideCqi) &&
14286 (pucchCqi->u.mode20Info.u.cqi.u.wideCqi < RG_SCH_CMN_MAX_CQI))
14288 ueDl->cqiFlag = TRUE;
/* Same CQI applied to both codewords: mode 2-0 has no per-CW info. */
14289 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode20Info.u.cqi.\
14291 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14292 /* ccpu00117452 - MOD - Changed macro name from
14293 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14294 #ifdef RGR_CQI_REPT
14295 *isCqiAvail = TRUE;
/* RI branch: validate before updating the stored rank. */
14304 else if (pucchCqi->u.mode20Info.type == TFU_RPT_RI)
14306 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode20Info.u.ri))
14308 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode20Info.u.ri,
14313 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14314 pucchCqi->u.mode20Info.u.ri,ue->ueId);
14322 * @brief This function Updates the DL CQI on PUCCH for the UE.
14326 * Function: rgSCHCmnDlProcCqiMode21
14328 * This function updates the DL CQI on PUCCH for the UE.
14330 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14332 * Processing Steps:
14333 * Process CQI MODE 21
14334 * @param[in] RgSchCellCb *cell
14335 * @param[in] RgSchUeCb *ue
14336 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* rgSCHCmnDlProcCqiMode21:
 * Handles PUCCH periodic reporting mode 2-1 feedback: UE-selected subband
 * reporting with PMI. The visible code consumes the wideband CQI (with
 * optional differential CQI for codeword 2) and the associated PMI, or an
 * RI report. NOTE(review): braces, the subband branch and #else/#endif
 * arms are elided in this excerpt. */
14341 #ifdef RGR_CQI_REPT
14342 static inline Void rgSCHCmnDlProcCqiMode21
14346 TfuDlCqiPucch *pucchCqi,
14348 Bool *is2ndCwCqiAvail
14351 static inline Void rgSCHCmnDlProcCqiMode21
14355 TfuDlCqiPucch *pucchCqi
14359 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14361 if (pucchCqi->u.mode21Info.type == TFU_RPT_CQI)
/* A fresh PUCCH mode 2-1 CQI supersedes any PUSCH (aperiodic) feedback. */
14363 ue->mimoInfo.puschFdbkVld = FALSE;
14364 if (pucchCqi->u.mode21Info.u.cqi.isWideband)
14366 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14367 if((pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi) &&
14368 (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi < RG_SCH_CMN_MAX_CQI))
14370 ueDl->cqiFlag = TRUE;
14371 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode21Info.u.cqi.\
/* Derive codeword-2 CQI from codeword-1 plus the differential value. */
14373 if (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.pres)
14375 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
14376 ueDl->mimoInfo.cwInfo[1].cqi, \
14377 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.val);
14378 #ifdef RGR_CQI_REPT
14379 /* ccpu00117259 - ADD - Considering second codeword CQI info
14380 incase of MIMO for CQI Reporting */
14381 *is2ndCwCqiAvail = TRUE;
14384 /* ccpu00117452 - MOD - Changed macro name from
14385 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14386 #ifdef RGR_CQI_REPT
14387 *isCqiAvail = TRUE;
/* Wideband PMI accompanies the mode 2-1 wideband report. */
14394 rgSCHCmnDlSetUePmi(cell, ue, \
14395 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.pmi);
14398 else if (pucchCqi->u.mode21Info.type == TFU_RPT_RI)
14400 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode21Info.u.ri))
14402 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode21Info.u.ri,
14407 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14408 pucchCqi->u.mode21Info.u.ri,ue->ueId);
14416 * @brief This function Updates the DL CQI on PUCCH for the UE.
14420 * Function: rgSCHCmnDlCqiOnPucchInd
14422 * This function updates the DL CQI on PUCCH for the UE.
14424 * Invoked by: rgSCHCmnDlCqiInd
14426 * Processing Steps:
14427 * - Depending on the reporting mode of the PUCCH, the CQI/PMI/RI values
14428 * are updated and stored for each UE
14430 * @param[in] RgSchCellCb *cell
14431 * @param[in] RgSchUeCb *ue
14432 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* rgSCHCmnDlCqiOnPucchInd:
 * Dispatcher for periodic (PUCCH) DL CQI reports. Records the reporting
 * mode into ueCqiRept (RGR_CQI_REPT build) and fans out to the per-mode
 * handler (1-0/1-1/2-0/2-1); unknown modes are logged and leave
 * *isCqiAvail FALSE. NOTE(review): braces, break statements and
 * #else/#endif arms are elided in this excerpt. */
14437 #ifdef RGR_CQI_REPT
14438 static Void rgSCHCmnDlCqiOnPucchInd
14442 TfuDlCqiPucch *pucchCqi,
14443 RgrUeCqiRept *ueCqiRept,
14445 Bool *is2ndCwCqiAvail
14448 static Void rgSCHCmnDlCqiOnPucchInd
14452 TfuDlCqiPucch *pucchCqi
14456 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14458 /* ccpu00117452 - MOD - Changed
14459 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14460 #ifdef RGR_CQI_REPT
14461 /* Save CQI mode information in the report */
14462 ueCqiRept->cqiMode = pucchCqi->mode;
/* Fan out on the configured PUCCH periodic reporting mode. Each case
 * calls the matching per-mode handler; the non-RGR_CQI_REPT call form
 * (without the availability flags) sits in the elided #else arm. */
14465 switch(pucchCqi->mode)
14467 case TFU_PUCCH_CQI_MODE10:
14468 #ifdef RGR_CQI_REPT
14469 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail);
14471 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi);
14473 ueDl->cqiFlag = TRUE;
14475 case TFU_PUCCH_CQI_MODE11:
14476 #ifdef RGR_CQI_REPT
14477 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail,
14480 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi);
14482 ueDl->cqiFlag = TRUE;
14484 case TFU_PUCCH_CQI_MODE20:
14485 #ifdef RGR_CQI_REPT
14486 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail);
14488 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi);
14490 ueDl->cqiFlag = TRUE;
14492 case TFU_PUCCH_CQI_MODE21:
14493 #ifdef RGR_CQI_REPT
14494 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail,
14497 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi);
14499 ueDl->cqiFlag = TRUE;
/* Default case: unknown mode — log and report no usable CQI. */
14503 DU_LOG("\nERROR --> SCH : Unknown CQI Mode %d of UE %d",
14504 pucchCqi->mode,ue->ueId);
14505 /* ccpu00117452 - MOD - Changed macro name from
14506 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14507 #ifdef RGR_CQI_REPT
14508 *isCqiAvail = FALSE;
14515 } /* rgSCHCmnDlCqiOnPucchInd */
14519 * @brief This function Updates the DL CQI on PUSCH for the UE.
14523 * Function: rgSCHCmnDlCqiOnPuschInd
14525 * This function updates the DL CQI on PUSCH for the UE.
14527 * Invoked by: rgSCHCmnDlCqiInd
14529 * Processing Steps:
14530 * - Depending on the reporting mode of the PUSCH, the CQI/PMI/RI values
14531 * are updated and stored for each UE
14533 * @param[in] RgSchCellCb *cell
14534 * @param[in] RgSchUeCb *ue
14535 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* rgSCHCmnDlCqiOnPuschInd:
 * Handler for aperiodic (PUSCH) DL CQI reports. Processes an optional RI,
 * then switches on the aperiodic reporting mode (2-0, 3-0, 1-2, 2-2, 3-1)
 * to update per-codeword CQIs, PMI and PUSCH feedback state. For TM4 the
 * pre-report RI is saved so it can be restored if the subsequent PMI
 * update fails or the mode is unknown (rank/PMI consistency).
 * NOTE(review): braces, break statements and #else/#endif arms are elided
 * in this excerpt; comments apply to the visible statements only. */
14540 #ifdef RGR_CQI_REPT
14541 static Void rgSCHCmnDlCqiOnPuschInd
14545 TfuDlCqiPusch *puschCqi,
14546 RgrUeCqiRept *ueCqiRept,
14548 Bool *is2ndCwCqiAvail
14551 static Void rgSCHCmnDlCqiOnPuschInd
14555 TfuDlCqiPusch *puschCqi
14559 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14560 uint32_t prevRiVal = 0;
/* Optional RI precedes the CQI content in the aperiodic report. */
14561 if (puschCqi->ri.pres == PRSNT_NODEF)
14563 if (RG_SCH_CMN_IS_RI_VALID(puschCqi->ri.val))
14565 /* Saving the previous ri value to revert back
14566 in case PMI update failed */
14567 if (RGR_UE_TM_4 == ue->mimoInfo.txMode ) /* Cheking for TM4. TM8 check later */
14569 prevRiVal = ueDl->mimoInfo.ri;
14571 rgSCHCmnDlSetUeRi(cell, ue, puschCqi->ri.val, FALSE);
14575 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14576 puschCqi->ri.val,ue->ueId);
/* Invalidate earlier PUSCH feedback until this report proves usable. */
14580 ue->mimoInfo.puschFdbkVld = FALSE;
14581 /* ccpu00117452 - MOD - Changed macro name from
14582 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14583 #ifdef RGR_CQI_REPT
14584 /* Save CQI mode information in the report */
14585 ueCqiRept->cqiMode = puschCqi->mode;
14586 /* ccpu00117259 - DEL - removed default setting of isCqiAvail to TRUE */
14589 switch(puschCqi->mode)
/* Mode 2-0: wideband CQI only, no PMI — mirror CQI onto both codewords. */
14591 case TFU_PUSCH_CQI_MODE_20:
14592 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14593 /* Checking whether the decoded CQI is a value between 1 and 15*/
14594 if((puschCqi->u.mode20Info.wideBandCqi) &&
14595 (puschCqi->u.mode20Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
14597 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode20Info.wideBandCqi;
14598 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14599 /* ccpu00117452 - MOD - Changed macro name from
14600 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14601 #ifdef RGR_CQI_REPT
14602 *isCqiAvail = TRUE;
/* Mode 3-0: wideband + subband CQI, no PMI — same wideband handling. */
14610 case TFU_PUSCH_CQI_MODE_30:
14611 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14612 if((puschCqi->u.mode30Info.wideBandCqi) &&
14613 (puschCqi->u.mode30Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
14615 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode30Info.wideBandCqi;
14616 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14617 /* ccpu00117452 - MOD - Changed macro name from
14618 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14619 #ifdef RGR_CQI_REPT
14620 *isCqiAvail = TRUE;
/* NOTE(review): local declared mid-switch; its use is elided here. */
14624 uint32_t gACqiRcvdCount;
/* Mode 1-2: per-codeword wideband CQI with multiple PMIs. */
14635 case TFU_PUSCH_CQI_MODE_12:
14636 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14637 if((puschCqi->u.mode12Info.cqiIdx[0]) &&
14638 (puschCqi->u.mode12Info.cqiIdx[0] < RG_SCH_CMN_MAX_CQI))
14640 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode12Info.cqiIdx[0];
14641 /* ccpu00117452 - MOD - Changed macro name from
14642 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14643 #ifdef RGR_CQI_REPT
14644 *isCqiAvail = TRUE;
14651 if((puschCqi->u.mode12Info.cqiIdx[1]) &&
14652 (puschCqi->u.mode12Info.cqiIdx[1] < RG_SCH_CMN_MAX_CQI))
14654 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode12Info.cqiIdx[1];
14655 /* ccpu00117452 - MOD - Changed macro name from
14656 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14657 #ifdef RGR_CQI_REPT
14658 /* ccpu00117259 - ADD - Considering second codeword CQI info
14659 incase of MIMO for CQI Reporting */
14660 *is2ndCwCqiAvail = TRUE;
/* Mode 1-2 carries PMI info; latch it as valid PUSCH feedback. */
14667 ue->mimoInfo.puschFdbkVld = TRUE;
14668 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_12;
14669 ue->mimoInfo.puschPmiInfo.u.mode12Info = puschCqi->u.mode12Info;
14670 /* : resetting this is time based. Make use of CQI reporting
14671 * periodicity, DELTA's in determining the exact time at which this
14672 * need to be reset. */
/* Mode 2-2: per-codeword wideband CQI plus a single wideband PMI. */
14674 case TFU_PUSCH_CQI_MODE_22:
14675 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14676 if((puschCqi->u.mode22Info.wideBandCqi[0]) &&
14677 (puschCqi->u.mode22Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
14679 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode22Info.wideBandCqi[0];
14680 /* ccpu00117452 - MOD - Changed macro name from
14681 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14682 #ifdef RGR_CQI_REPT
14683 *isCqiAvail = TRUE;
14690 if((puschCqi->u.mode22Info.wideBandCqi[1]) &&
14691 (puschCqi->u.mode22Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
14693 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode22Info.wideBandCqi[1];
14694 /* ccpu00117452 - MOD - Changed macro name from
14695 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14696 #ifdef RGR_CQI_REPT
14697 /* ccpu00117259 - ADD - Considering second codeword CQI info
14698 incase of MIMO for CQI Reporting */
14699 *is2ndCwCqiAvail = TRUE;
14706 rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode22Info.wideBandPmi);
14707 ue->mimoInfo.puschFdbkVld = TRUE;
14708 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_22;
14709 ue->mimoInfo.puschPmiInfo.u.mode22Info = puschCqi->u.mode22Info;
/* Mode 3-1: wideband CQI per codeword + subbands, single PMI. */
14711 case TFU_PUSCH_CQI_MODE_31:
14712 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14713 if((puschCqi->u.mode31Info.wideBandCqi[0]) &&
14714 (puschCqi->u.mode31Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
14716 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode31Info.wideBandCqi[0];
14717 /* ccpu00117452 - MOD - Changed macro name from
14718 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14719 #ifdef RGR_CQI_REPT
14720 *isCqiAvail = TRUE;
/* Second-codeword CQI is only meaningful when the reported rank > 1. */
14723 if (ueDl->mimoInfo.ri > 1)
14725 if((puschCqi->u.mode31Info.wideBandCqi[1]) &&
14726 (puschCqi->u.mode31Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
14728 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode31Info.wideBandCqi[1];
14729 /* ccpu00117452 - MOD - Changed macro name from
14730 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14731 #ifdef RGR_CQI_REPT
14732 /* ccpu00117259 - ADD - Considering second codeword CQI info
14733 incase of MIMO for CQI Reporting */
14734 *is2ndCwCqiAvail = TRUE;
/* PMI rejected: roll back the RI update performed above (TM4 only). */
14738 if (rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode31Info.pmi) != ROK)
14740 /* To avoid Rank and PMI inconsistency */
14741 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
14742 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
14744 ueDl->mimoInfo.ri = prevRiVal;
14747 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_31;
14748 ue->mimoInfo.puschPmiInfo.u.mode31Info = puschCqi->u.mode31Info;
/* Default: unknown mode — log, undo any RI update, report no CQI. */
14752 DU_LOG("\nERROR --> SCH : Unknown CQI Mode %d CRNTI:%d",
14753 puschCqi->mode,ue->ueId);
14754 /* CQI decoding failed revert the RI to previous value */
14755 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
14756 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
14758 ueDl->mimoInfo.ri = prevRiVal;
14760 /* ccpu00117452 - MOD - Changed macro name from
14761 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14762 #ifdef RGR_CQI_REPT
14763 *isCqiAvail = FALSE;
14764 /* ccpu00117259 - ADD - Considering second codeword CQI info
14765 incase of MIMO for CQI Reporting */
14766 *is2ndCwCqiAvail = FALSE;
14773 } /* rgSCHCmnDlCqiOnPuschInd */
14777 * @brief This function Updates the DL CQI for the UE.
14781 * Function: rgSCHCmnDlCqiInd
14782 * Purpose: Updates the DL CQI for the UE
14786 * @param[in] RgSchCellCb *cell
14787 * @param[in] RgSchUeCb *ue
14788 * @param[in] TfuDlCqiRpt *dlCqi
/* rgSCHCmnDlCqiInd:
 * Top-level DL CQI indication entry point. Routes the report to the PUCCH
 * or PUSCH handler (selector elided in this excerpt — presumably
 * isPucchInfo), optionally clamps sudden CQI drops (CQI_CONFBITMASK_DROP),
 * forwards a consolidated report to the RRM power-control collector
 * (RGR_CQI_REPT), refreshes allocation limits, and notifies DLFS/SPS and
 * the specific scheduler. NOTE(review): braces and #else/#endif arms are
 * elided; comments apply to the visible statements only. */
14792 Void rgSCHCmnDlCqiInd
14798 CmLteTimingInfo timingInfo
14801 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14802 /* ccpu00117452 - MOD - Changed macro name from
14803 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14804 #ifdef RGR_CQI_REPT
14805 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14806 RgrUeCqiRept ueCqiRept = {{0}};
14807 Bool isCqiAvail = FALSE;
14808 /* ccpu00117259 - ADD - Considering second codeword CQI info
14809 incase of MIMO for CQI Reporting */
14810 Bool is2ndCwCqiAvail = FALSE;
/* Dispatch: periodic (PUCCH) vs aperiodic (PUSCH) report; the
 * surrounding if/else on the report source is elided in this excerpt. */
14814 #ifdef RGR_CQI_REPT
14817 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
14821 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
14826 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi);
14830 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi);
/* Optional smoothing when CQI confidence bit is not set: discard
 * out-of-range values and limit a downward step to 3, never below 6. */
14834 #ifdef CQI_CONFBITMASK_DROP
14835 if(!ue->cqiConfBitMask)
14837 if (ueDl->mimoInfo.cwInfo[0].cqi >15)
14839 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
14840 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
14842 else if ( ueDl->mimoInfo.cwInfo[0].cqi >= ue->prevCqi)
14844 ue->prevCqi = ueDl->mimoInfo.cwInfo[0].cqi;
14848 uint8_t dlCqiDeltaPrev = 0;
14849 dlCqiDeltaPrev = ue->prevCqi - ueDl->mimoInfo.cwInfo[0].cqi;
14850 if (dlCqiDeltaPrev > 3)
14851 dlCqiDeltaPrev = 3;
14852 if ((ue->prevCqi - dlCqiDeltaPrev) < 6)
14858 ue->prevCqi = ue->prevCqi - dlCqiDeltaPrev;
14860 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
14861 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
14867 /* ccpu00117452 - MOD - Changed macro name from
14868 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14869 #ifdef RGR_CQI_REPT
14870 /* ccpu00117259 - ADD - Considering second codeword CQI info
14871 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail\
14872 in 'if' condition*/
/* Forward the collected CQI(s) to RRM only if push-n-CQI reporting is
 * enabled for this UE and at least one codeword CQI was decoded. */
14873 if (RG_SCH_CQIR_IS_PUSHNCQI_ENBLE(ue) && (isCqiAvail || is2ndCwCqiAvail))
14875 ueCqiRept.cqi[0] = ueDl->mimoInfo.cwInfo[0].cqi;
14877 /* ccpu00117259 - ADD - Considering second codeword CQI info
14878 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail
14879 in 'if' condition*/
14880 ueCqiRept.cqi[1] = 0;
14881 if(is2ndCwCqiAvail)
14883 ueCqiRept.cqi[1] = ueDl->mimoInfo.cwInfo[1].cqi;
14885 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, &ueCqiRept);
/* Recompute the UE's DL allocation limits from the new channel state.
 * The *LmtLa/TxScheme pair vs plain *Lmt variants sit under build flags
 * whose #ifdef/#else lines are elided here. */
14890 rgSCHCmnDlSetUeAllocLmtLa(cell, ue);
14891 rgSCHCheckAndSetTxScheme(cell, ue);
14894 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), ue->isEmtcUe);
14896 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), FALSE);
/* Notify frequency-selective scheduler, SPS, then the specific
 * (EMTC or default) DL scheduler. */
14900 if (cellSch->dl.isDlFreqSel)
14902 cellSch->apisDlfs->rgSCHDlfsDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo);
14905 /* Call SPS module to update CQI indication */
14906 rgSCHCmnSpsDlCqiIndHndlr(cell, ue, timingInfo);
14908 /* Call Specific scheduler to process on dlCqiInd */
14910 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
14912 cellSch->apisEmtcDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
14917 cellSch->apisDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
/* Statistics accounting (PFS / scheduler / TeNB stats builds). */
14920 #ifdef RG_PFS_STATS
14921 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].avgCqi +=
14922 ueDl->mimoInfo.cwInfo[0].cqi;
14923 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].totalCqiOcc++;
14927 ueDl->avgCqi += ueDl->mimoInfo.cwInfo[0].cqi;
14928 ueDl->numCqiOccns++;
14929 if (ueDl->mimoInfo.ri == 1)
14940 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
14941 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
14942 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw0Cqi ++;
14943 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw1Cqi ++;
14944 cell->tenbStats->sch.dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
14945 cell->tenbStats->sch.dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
14946 cell->tenbStats->sch.dlNumCw0Cqi ++;
14947 cell->tenbStats->sch.dlNumCw1Cqi ++;
14954 * @brief This function calculates the wideband CQI from SNR
14955 * reported for each RB.
14959 * Function: rgSCHCmnCalcWcqiFrmSnr
14960 * Purpose: Wideband CQI calculation from SNR
14962 * Invoked by: RG SCH
14964 * @param[in] RgSchCellCb *cell
14965 * @param[in] TfuSrsRpt *srsRpt,
14966 * @return Wideband CQI
/* rgSCHCmnCalcWcqiFrmSnr:
 * Maps the first SNR sample of an SRS report onto a wideband CQI using a
 * crude fixed step table (buckets of 50 SNR units). The return statements
 * inside each branch are elided in this excerpt; the default wideCqi is 1.
 * The original authors flag this mapping as primitive/to-be-improved. */
14969 static uint8_t rgSCHCmnCalcWcqiFrmSnr(RgSchCellCb *cell, TfuSrsRpt *srsRpt)
14971 uint8_t wideCqi=1; /*Calculated value from SNR*/
14972 /*Need to map a certain SNR with a WideCQI value.
14973 * The CQI calculation is still primitive. Further, need to
14974 * use a improvized method for calculating WideCQI from SNR*/
/* Bucketise snr[0] in steps of 50; branch bodies (assignments/returns)
 * are elided in this excerpt. */
14975 if (srsRpt->snr[0] <=50)
14979 else if (srsRpt->snr[0]>=51 && srsRpt->snr[0] <=100)
14983 else if (srsRpt->snr[0]>=101 && srsRpt->snr[0] <=150)
14987 else if (srsRpt->snr[0]>=151 && srsRpt->snr[0] <=200)
14991 else if (srsRpt->snr[0]>=201 && srsRpt->snr[0] <=250)
15000 }/*rgSCHCmnCalcWcqiFrmSnr*/
15004 * @brief This function Updates the SRS for the UE.
15008 * Function: rgSCHCmnSrsInd
15009 * Purpose: Updates the UL SRS for the UE
15013 * @param[in] RgSchCellCb *cell
15014 * @param[in] RgSchUeCb *ue
15015 * @param[in] TfuSrsRpt *srsRpt,
/* rgSCHCmnSrsInd:
 * SRS indication handler. Derives the selected UL TX antenna from the
 * report arrival time and the configured SRS periodicity (alternating by
 * period), takes the wideband CQI from the report when present or derives
 * it from SNR otherwise, and feeds it to the UL CQI/antenna selection. */
15019 Void rgSCHCmnSrsInd(RgSchCellCb *cell,RgSchUeCb *ue,TfuSrsRpt *srsRpt,CmLteTimingInfo timingInfo)
15021 uint8_t wideCqi; /*Calculated value from SNR*/
15022 uint32_t recReqTime; /*Received Time in TTI*/
/* Absolute TTI count; antenna alternates every srsCb.peri TTIs. */
15024 recReqTime = (timingInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) + timingInfo.slot;
15025 ue->srsCb.selectedAnt = (recReqTime/ue->srsCb.peri)%2;
15026 if(srsRpt->wideCqiPres)
15028 wideCqi = srsRpt->wideCqi;
/* No explicit wideband CQI in the report — estimate it from SNR. */
15032 wideCqi = rgSCHCmnCalcWcqiFrmSnr(cell, srsRpt);
15034 rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi);
15036 }/*rgSCHCmnSrsInd*/
15041 * @brief This function is a handler for TA report for an UE.
15045 * Function: rgSCHCmnDlTARpt
15046 * Purpose: Determine based on UE_IDLE_TIME threshold,
15047 * whether UE needs to be Linked to the scheduler's TA list OR
15048 * if it needs a PDCCH Order.
15053 * @param[in] RgSchCellCb *cell
15054 * @param[in] RgSchUeCb *ue
/* rgSCHCmnDlTARpt:
 * Timing-advance report handler. If the UE has a TA timer configured and
 * has been idle beyond the threshold, it is marked PDCCH-order inactive
 * (DL and UL) and the specific schedulers are told once; otherwise the UE
 * is appended to the cell's TA list (EMTC list for eMTC UEs) unless it is
 * already linked. NOTE(review): some braces and #else/#endif lines are
 * elided in this excerpt. */
15058 Void rgSCHCmnDlTARpt(RgSchCellCb *cell,RgSchUeCb *ue)
15060 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15061 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
15062 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15063 CmLListCp poInactvLst;
15066 /* RACHO: If UE idle time is more than threshold, then
15067 * set its poInactv pdcch order inactivity */
15068 /* Fix : syed Ignore if TaTmr is not configured */
15069 if ((ue->dl.taCb.cfgTaTmr) && (rgSCHCmnUeIdleExdThrsld(cell, ue) == ROK))
/* Remember prior masks so the inactivity indication is sent only on the
 * transition from fully-active (mask == 0) to inactive. */
15071 uint32_t prevDlMsk = ue->dl.dlInactvMask;
15072 uint32_t prevUlMsk = ue->ul.ulInactvMask;
15073 ue->dl.dlInactvMask |= RG_PDCCHODR_INACTIVE;
15074 ue->ul.ulInactvMask |= RG_PDCCHODR_INACTIVE;
15075 /* Indicate Specific scheduler for this UEs inactivity */
15076 cmLListInit(&poInactvLst);
15077 cmLListAdd2Tail(&poInactvLst, &ueDl->rachInfo.inActUeLnk);
15078 ueDl->rachInfo.inActUeLnk.node = (PTR)ue;
15079 /* Send inactivate ind only if not already sent */
15080 if (prevDlMsk == 0)
15082 cellSch->apisDl->rgSCHDlInactvtUes(cell, &poInactvLst);
15084 if (prevUlMsk == 0)
15086 cellSch->apisUl->rgSCHUlInactvtUes(cell, &poInactvLst);
15091 /* Fix: ccpu00124009 Fix for loop in the linked list "cellDl->taLst" */
/* Guard against double-linking: a non-NULL node means already queued. */
15092 if (!ue->dlTaLnk.node)
15095 if(cell->emtcEnable)
15099 rgSCHEmtcAddToTaLst(cellDl,ue);
15106 cmLListAdd2Tail(&cellDl->taLst, &ue->dlTaLnk);
15107 ue->dlTaLnk.node = (PTR)ue;
15112 DU_LOG("\nERROR --> SCH : <TA>TA duplicate entry attempt failed: UEID:%u",
15121 * @brief Indication of UL CQI.
15125 * Function : rgSCHCmnFindUlCqiUlTxAnt
15127 * - Finds the Best Tx Antenna amongst the CQIs received
15128 * from Two Tx Antennas.
15130 * @param[in] RgSchCellCb *cell
15131 * @param[in] RgSchUeCb *ue
15132 * @param[in] uint8_t wideCqi
/* rgSCHCmnFindUlCqiUlTxAnt:
 * Records UL CQI per TX antenna and selects the better antenna. In this
 * visible form it only marks antenna 1 as the valid TX antenna; any
 * per-antenna CQI bookkeeping is elided from this excerpt. The wideCqi
 * parameter is not referenced by the visible statements. */
15135 static Void rgSCHCmnFindUlCqiUlTxAnt(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t wideCqi)
15137 ue->validTxAnt = 1;
15139 } /* rgSCHCmnFindUlCqiUlTxAnt */
15143 * @brief Indication of UL CQI.
15147 * Function : rgSCHCmnUlCqiInd
15149 * - Updates uplink CQI information for the UE. Computes and
15150 * stores the lowest CQI of CQIs reported in all subbands.
15152 * @param[in] RgSchCellCb *cell
15153 * @param[in] RgSchUeCb *ue
15154 * @param[in] TfuUlCqiRpt *ulCqiInfo
/* rgSCHCmnUlCqiInd:
 * UL CQI indication handler. Stores the reported wideband CQI, blends it
 * into the link-adaptation iTBS estimate (20/80 IIR), ignoring reports
 * whose implied iTBS deviates by more than 5 from the one in use, then
 * notifies power control, SPS (if enabled) and the specific UL scheduler,
 * and updates statistics. NOTE(review): braces and some #ifdef arms are
 * elided in this excerpt. */
15157 Void rgSCHCmnUlCqiInd(RgSchCellCb *cell,RgSchUeCb *ue,TfuUlCqiRpt *ulCqiInfo)
15159 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15160 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15165 #if (defined(SCH_STATS) || defined(TENB_STATS))
15166 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
15169 /* consider inputs from SRS handlers about SRS occassions
15170 * in determining the UL TX Antenna selection */
15171 ueUl->crntUlCqi[0] = ulCqiInfo->wideCqi;
15173 ueUl->validUlCqi = ueUl->crntUlCqi[0];
15174 ue->validTxAnt = 0;
/* Convert the reported CQI to an iTBS (table indexed by extended-CP
 * flag) and compare against the iTBS currently used by LA. */
15176 iTbsNew = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][ueUl->validUlCqi];
15177 previTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
15179 if (RG_ITBS_DIFF(iTbsNew, previTbs) > 5)
15181 /* Ignore this iTBS report and mark that last iTBS report was */
15182 /* ignored so that subsequently we reset the LA algorithm */
15183 ueUl->ulLaCb.lastiTbsIgnored = TRUE;
15187 if (ueUl->ulLaCb.lastiTbsIgnored != TRUE)
/* Normal case: 20% new / 80% old exponential smoothing (x100 scale). */
15189 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
15190 (80 * ueUl->ulLaCb.cqiBasediTbs))/100;
15194 /* Reset the LA as iTbs in use caught up with the value */
15195 /* reported by UE. */
15196 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
15197 (80 * previTbs * 100))/100;
15198 ueUl->ulLaCb.deltaiTbs = 0;
15199 ueUl->ulLaCb.lastiTbsIgnored = FALSE;
15204 rgSCHPwrUlCqiInd(cell, ue);
15206 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
15208 rgSCHCmnSpsUlCqiInd(cell, ue);
15211 /* Applicable to only some schedulers */
15213 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
15215 cellSch->apisEmtcUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
15220 cellSch->apisUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
/* Statistics accounting (SCH_STATS / TENB_STATS builds). */
15224 ueUl->numCqiOccns++;
15225 ueUl->avgCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15230 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15231 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulNumCqi ++;
15232 cell->tenbStats->sch.ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15233 cell->tenbStats->sch.ulNumCqi ++;
15238 } /* rgSCHCmnUlCqiInd */
15241 * @brief Returns HARQ proc for which data expected now.
15245 * Function: rgSCHCmnUlHqProcForUe
15246 * Purpose: This function returns the harq process for
15247 * which data is expected in the current subframe.
15248 * It does not validate that the HARQ process
15249 * has an allocation.
15253 * @param[in] RgSchCellCb *cell
15254 * @param[in] CmLteTimingInfo frm
15255 * @param[in] RgSchUeCb *ue
15256 * @param[out] RgSchUlHqProcCb **procRef
/* rgSCHCmnUlHqProcForUe:
 * Returns (via *procRef) the UL HARQ process expected to carry data in
 * the subframe given by 'frm'. The two lookup forms (by computed process
 * id vs by time) appear to sit under a build switch whose #ifdef/#else
 * lines are elided in this excerpt. No validation that the process has
 * an allocation is performed (per the original header comment above). */
15259 Void rgSCHCmnUlHqProcForUe
15262 CmLteTimingInfo frm,
15264 RgSchUlHqProcCb **procRef
/* Map timing info to an UL HARQ process index for this cell. */
15268 uint8_t procId = rgSCHCmnGetUlHqProcIdx(&frm, cell);
15271 *procRef = rgSCHUhmGetUlHqProc(cell, ue, procId);
15273 *procRef = rgSCHUhmGetUlProcByTime(cell, ue, frm);
15280 * @brief Update harq process for allocation.
15284 * Function : rgSCHCmnUpdUlHqProc
15286 * This function is invoked when harq process
15287 * control block is now in a new memory location
15288 * thus requiring a pointer/reference update.
15290 * @param[in] RgSchCellCb *cell
15291 * @param[in] RgSchUlHqProcCb *curProc
15292 * @param[in] RgSchUlHqProcCb *oldProc
/* rgSCHCmnUpdUlHqProc:
 * Re-links an UL HARQ process control block after it has moved in memory:
 * points the allocation's hqProc back at the new (current) control block.
 * oldProc is unused by the visible statements. Under ERRCLS_DEBUG the
 * NULL-allocation case is guarded (error-return body elided here). */
15297 S16 rgSCHCmnUpdUlHqProc
15300 RgSchUlHqProcCb *curProc,
15301 RgSchUlHqProcCb *oldProc
15307 #if (ERRCLASS & ERRCLS_DEBUG)
15308 if (curProc->alloc == NULLP)
/* Repair the back-pointer from the allocation to its HARQ process. */
15313 curProc->alloc->hqProc = curProc;
15315 } /* rgSCHCmnUpdUlHqProc */
15318 /*MS_WORKAROUND for CR FIXME */
15320 * @brief Hsndles BSR timer expiry
15324 * Function : rgSCHCmnBsrTmrExpry
15326 * This function is invoked when periodic BSR timer expires for a UE.
15328 * @param[in] RgSchUeCb *ue
/* rgSCHCmnBsrTmrExpry:
 * Periodic BSR timer expiry handler. Treats the expiry like a scheduling
 * request: flags the UE as SR-granted and invokes the (EMTC or default)
 * UL scheduler's SR-received hook so the UE gets an UL grant to report
 * its buffer status. */
15333 S16 rgSCHCmnBsrTmrExpry(RgSchUeCb *ueCb)
15335 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ueCb->cell);
15338 ueCb->isSrGrant = TRUE;
/* EMTC statistics counter (EMTC stats build). */
15341 emtcStatsUlBsrTmrTxp++;
15345 if(ueCb->cell->emtcEnable)
15349 cellSch->apisEmtcUl->rgSCHSrRcvd(ueCb->cell, ueCb);
15356 cellSch->apisUl->rgSCHSrRcvd(ueCb->cell, ueCb);
15363 * @brief Short BSR update.
15367 * Function : rgSCHCmnUpdBsrShort
15369 * This functions does requisite updates to handle short BSR reporting.
15371 * @param[in] RgSchCellCb *cell
15372 * @param[in] RgSchUeCb *ue
15373 * @param[in] RgSchLcgCb *ulLcg
15374 * @param[in] uint8_t bsr
15375 * @param[out] RgSchErrInfo *err
/* rgSCHCmnUpdBsrShort:
 * Short BSR handler. A short BSR reports buffer status for exactly one
 * LCG, so every other LCG's buffer state is zeroed (including this LCG on
 * a zero BSR with no busy HARQ procs), QCI active-LC accounting is
 * adjusted, the reported index is mapped through the (extended) BSR size
 * table, clamped by GBR/AMBR limits, and the UL schedulers (including
 * active SCells under UL CA) are notified. Returns an error when the LCG
 * is not configured. NOTE(review): braces, RETVALUE/return lines and
 * #else/#endif arms are elided in this excerpt. */
15380 S16 rgSCHCmnUpdBsrShort
15391 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15393 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15394 RgSchCmnLcg *cmnLcg = NULLP;
15400 if (!RGSCH_LCG_ISCFGD(ulLcg))
15402 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
/* Clear buffer state of every LCG other than the reported one (and of
 * the reported one too on a zero BSR with no pending HARQ activity). */
15405 for (lcgCnt=0; lcgCnt<4; lcgCnt++)
15408 /* Set BS of all other LCGs to Zero.
15409 If Zero BSR is reported in Short BSR include this LCG too */
15410 if ((lcgCnt != ulLcg->lcgId) ||
15411 (!bsr && !ueUl->hqEnt.numBusyHqProcs))
15413 /* If old BO is zero do nothing */
15414 if(((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs != 0)
/* Deactivate each LC of the emptied LCG in the per-QCI UE counts. */
15416 for(idx = 0; idx < ue->ul.lcgArr[lcgCnt].numLch; idx++)
15418 if((ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount) &&
15419 (ue->ulActiveLCs & (1 <<
15420 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1))))
15423 ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount--;
15424 ue->ulActiveLCs &= ~(1 <<
15425 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1));
15431 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgCnt]))
15433 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs = 0;
15434 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->reportedBs = 0;
/* Reported LCG transitions from empty to non-empty: activate its LCs. */
15439 if(ulLcg->lcgId && bsr && (((RgSchCmnLcg *)(ulLcg->sch))->bs == 0))
15441 for(idx = 0; idx < ulLcg->numLch; idx++)
15444 if (!(ue->ulActiveLCs & (1 << (ulLcg->lcArray[idx]->qciCb->qci -1))))
15446 ulLcg->lcArray[idx]->qciCb->ulUeCount++;
15447 ue->ulActiveLCs |= (1 << (ulLcg->lcArray[idx]->qciCb->qci -1));
15452 /* Resetting the nonGbrLcgBs info here */
15453 ue->ul.nonGbrLcgBs = 0;
15454 ue->ul.nonLcg0Bs = 0;
/* Translate the BSR index to bytes via the (extended) BSR size table. */
15456 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
15458 if (TRUE == ue->ul.useExtBSRSizes)
15460 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
15464 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
/* Clamp buffer state: GBR bearers by GBR+deltaMBR, LCG0 unclamped,
 * remaining (non-GBR) LCGs by the UE's effective AMBR. */
15466 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15468 /* TBD check for effGbr != 0 */
15469 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15471 else if (0 == ulLcg->lcgId)
15473 /* This is added for handling LCG0 */
15474 cmnLcg->bs = cmnLcg->reportedBs;
15478 /* Update non GBR LCG's BS*/
15479 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
15480 cmnLcg->bs = ue->ul.nonGbrLcgBs;
15482 ue->ul.totalBsr = cmnLcg->bs;
/* Zero BSR: stop any running periodic BSR timer for this UE. */
15485 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (bsr == 0))
15487 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
15491 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
15493 rgSCHCmnSpsBsrRpt(cell, ue, ulLcg);
15496 rgSCHCmnUpdUlCompEffBsr(ue);
/* Notify the (EMTC or default) UL scheduler of the updated BSR. */
15499 if(cell->emtcEnable)
15503 cellSch->apisEmtcUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
15510 cellSch->apisUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
/* UL CA: propagate the BSR update to each configured/active SCell. */
15514 if (ue->ul.isUlCaEnabled && ue->numSCells)
15516 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
15518 #ifndef PAL_ENABLE_UL_CA
15519 if((ue->cellInfo[sCellIdx] != NULLP) &&
15520 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
15522 if(ue->cellInfo[sCellIdx] != NULLP)
15525 cellSch->apisUl->rgSCHUpdBsrShort(ue->cellInfo[sCellIdx]->cell,
15536 * @brief Truncated BSR update.
15540 * Function : rgSCHCmnUpdBsrTrunc
15542 * This functions does required updates to handle truncated BSR report.
15545 * @param[in] RgSchCellCb *cell
15546 * @param[in] RgSchUeCb *ue
15547 * @param[in] RgSchLcgCb *ulLcg
15548 * @param[in] uint8_t bsr
15549 * @param[out] RgSchErrInfo *err
/* rgSCHCmnUpdBsrTrunc:
 * Truncated BSR handler. A truncated BSR carries the highest-priority
 * LCG's buffer status only; all higher-priority (lower-id) LCGs are
 * zeroed, lower-priority LCGs keep their (possibly stale) state, the
 * reported value is mapped through the BSR size table and clamped by
 * GBR/AMBR limits, and the total BSR is recomputed before notifying the
 * UL schedulers (including active SCells under UL CA). Returns an error
 * when the LCG is not configured. NOTE(review): braces, return lines and
 * #else/#endif arms are elided in this excerpt. */
15554 S16 rgSCHCmnUpdBsrTrunc
15563 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15564 RgSchCmnLcg *cmnLcg = NULLP;
15571 if (!RGSCH_LCG_ISCFGD(ulLcg))
15573 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
15576 /* set all higher prio lcgs bs to 0 and update this lcgs bs and
15577 total bsr= sumofall lcgs bs */
/* Zero buffer state of all higher-priority LCGs (ids below this one),
 * adjusting per-QCI active-LC accounting as they empty. */
15580 for (cnt = ulLcg->lcgId-1; cnt >= 0; cnt--)
15583 /* If Existing BO is zero the don't do anything */
15584 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs != 0)
15586 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
15589 if((ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount) &&
15590 (ue->ulActiveLCs & (1 <<
15591 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
15593 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount--;
15594 ue->ulActiveLCs &= ~(1 <<
15595 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
15600 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs = 0;
15601 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->reportedBs = 0;
/* For this and lower-priority LCGs with data pending, make sure their
 * LCs are marked active in the per-QCI accounting (LCG0 skipped). */
15606 for (cnt = ulLcg->lcgId; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
15608 if (ulLcg->lcgId == 0)
15612 /* If Existing BO is zero the don't do anything */
15613 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs == 0)
15615 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
15618 if (!(ue->ulActiveLCs & (1 <<
15619 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
15621 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount++;
15622 ue->ulActiveLCs |= (1 <<
15623 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
15629 ue->ul.nonGbrLcgBs = 0;
15630 ue->ul.nonLcg0Bs = 0;
/* Map the BSR index to bytes and clamp (GBR / LCG0 / AMBR) exactly as
 * in the short-BSR path. */
15631 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
15632 if (TRUE == ue->ul.useExtBSRSizes)
15634 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
15638 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
15640 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15642 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15644 else if(ulLcg->lcgId == 0)
15646 /* This is for handeling LCG0 */
15647 cmnLcg->bs = cmnLcg->reportedBs;
15651 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
15652 cmnLcg->bs = ue->ul.nonGbrLcgBs;
/* Total = this LCG plus the retained state of lower-priority LCGs. */
15654 ue->ul.totalBsr = cmnLcg->bs;
15656 for (cnt = ulLcg->lcgId+1; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
15658 /* TODO: The bs for the other LCGs may be stale because some or all of
15659 * the part of bs may have been already scheduled/data received. Please
15660 * consider this when truncated BSR is tested/implemented */
15661 ue->ul.totalBsr += ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs;
15664 rgSCHCmnUpdUlCompEffBsr(ue);
/* Notify the (EMTC or default) UL scheduler of the updated BSR. */
15667 if(cell->emtcEnable)
15671 cellSch->apisEmtcUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
15678 cellSch->apisUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
/* UL CA: propagate the BSR update to each configured/active SCell. */
15682 if (ue->ul.isUlCaEnabled && ue->numSCells)
15684 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
15686 #ifndef PAL_ENABLE_UL_CA
15687 if((ue->cellInfo[sCellIdx] != NULLP) &&
15688 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
15690 if(ue->cellInfo[sCellIdx] != NULLP)
15693 cellSch->apisUl->rgSCHUpdBsrTrunc(ue->cellInfo[sCellIdx]->cell, ue, ulLcg, bsr);
15703 * @brief Long BSR update.
15707 * Function : rgSCHCmnUpdBsrLong
15709 * - Update BSRs for all configured LCGs.
15710 * - Update priority of LCGs if needed.
15711 * - Update UE's position within/across uplink scheduling queues.
15714 * @param[in] RgSchCellCb *cell
15715 * @param[in] RgSchUeCb *ue
15716 * @param[in] uint8_t bsArr[]
15717 * @param[out] RgSchErrInfo *err
/* Long BSR handling: refreshes buffer status of all four LCGs from the
 * reported BSR index array, activates logical channels that newly have
 * data, recomputes the UE's total/effective BSR, stops the BSR timer
 * when everything is empty, informs UL SPS when applicable, and finally
 * notifies the specific UL scheduler (EMTC / active SCells included). */
S16 rgSCHCmnUpdBsrLong
RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
uint32_t tmpBsArr[4] = {0, 0, 0, 0}; /* per-LCG capped buffer status */
uint32_t nonGbrBs = 0;               /* sum of non-GBR LCG reported BS */
/* Activate LCs of each LCG whose previous buffer status was zero */
for(idx1 = 1; idx1 < RGSCH_MAX_LCG_PER_UE; idx1++)
   /* If old BO is non zero then do nothing */
   if ((((RgSchCmnLcg *)(ue->ul.lcgArr[idx1].sch))->bs == 0)
      for(idx2 = 0; idx2 < ue->ul.lcgArr[idx1].numLch; idx2++)
         /* Mark the LC active and bump its per-QCI UL UE count once */
         if (!(ue->ulActiveLCs & (1 <<
            (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1))))
            ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->ulUeCount++;
            ue->ulActiveLCs |= (1 <<
            (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1));
ue->ul.nonGbrLcgBs = 0;
ue->ul.nonLcg0Bs = 0;
/* LCG0 buffer status is taken as reported (no cap) */
if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[0]))
   if (TRUE == ue->ul.useExtBSRSizes)
      ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnExtBsrTbl[bsArr[0]];
      ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnExtBsrTbl[bsArr[0]];
      tmpBsArr[0] = rgSchCmnExtBsrTbl[bsArr[0]];
      ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnBsrTbl[bsArr[0]];
      ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnBsrTbl[bsArr[0]];
      tmpBsArr[0] = rgSchCmnBsrTbl[bsArr[0]];
/* LCG1..3: GBR LCGs capped by effGbr+effDeltaMbr, non-GBR by effAmbr */
for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
   if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
      RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
      if (TRUE == ue->ul.useExtBSRSizes)
         cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsArr[lcgId]];
         cmnLcg->reportedBs = rgSchCmnBsrTbl[bsArr[lcgId]];
      if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
         cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
         tmpBsArr[lcgId] = cmnLcg->bs;
         nonGbrBs += cmnLcg->reportedBs;
         tmpBsArr[lcgId] = cmnLcg->reportedBs;
         cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
/* The shared non-GBR pool is capped once by the effective AMBR */
ue->ul.nonGbrLcgBs = RGSCH_MIN(nonGbrBs,ue->ul.effAmbr);
ue->ul.totalBsr = tmpBsArr[0] + tmpBsArr[1] + tmpBsArr[2] + tmpBsArr[3];
/* Nothing buffered anywhere: the pending BSR timer can be stopped */
if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (ue->ul.totalBsr == 0))
   rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE) /* SPS_FIX */
   if(ue->ul.totalBsr - tmpBsArr[1] == 0)
   {/* Updating the BSR to SPS only if LCG1 BS is present in sps active state */
      rgSCHCmnSpsBsrRpt(cell, ue, &ue->ul.lcgArr[1]);
/* Reconcile against allocations issued after the BSR snapshot */
rgSCHCmnUpdUlCompEffBsr(ue);
if(cell->emtcEnable)
   cellSch->apisEmtcUl->rgSCHUpdBsrLong(cell, ue, bsArr);
cellSch->apisUl->rgSCHUpdBsrLong(cell, ue, bsArr);
/* Propagate the BSR to the UL scheduler of every applicable SCell */
if (ue->ul.isUlCaEnabled && ue->numSCells)
   for(uint8_t idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
#ifndef PAL_ENABLE_UL_CA
      if((ue->cellInfo[idx] != NULLP) &&
         (ue->cellInfo[idx]->sCellState == RG_SCH_SCELL_ACTIVE))
      if(ue->cellInfo[idx] != NULLP)
         cellSch->apisUl->rgSCHUpdBsrLong(ue->cellInfo[idx]->cell, ue, bsArr);
15863 * @brief PHR update.
15867 * Function : rgSCHCmnUpdExtPhr
15869 * Updates extended power headroom information for an UE.
15871 * @param[in] RgSchCellCb *cell
15872 * @param[in] RgSchUeCb *ue
15873 * @param[in] uint8_t phr
15874 * @param[out] RgSchErrInfo *err
/* Extended PHR handling: walks the UE's recorded UL allocations from
 * the most recent, finds the allocation whose timing matches the MAC
 * CE report time, and hands the extended power-headroom CE plus that
 * allocation record to the power module. If UL SPS is active for the
 * UE, the SPS module is informed of the PHR as well. */
S16 rgSCHCmnUpdExtPhr
RgInfExtPhrCEInfo *extPhr,
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
RgSchCmnAllocRecord *allRcd;
CmLList *node = ueUl->ulAllocLst.last; /* newest allocation record */
RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
allRcd = (RgSchCmnAllocRecord *)node->node;
/* Apply the PHR only against the allocation the CE was reported in */
if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
rgSCHPwrUpdExtPhr(cell, ue, extPhr, allRcd);
if(ulSpsUe->isUlSpsActv)
rgSCHCmnSpsPhrInd(cell,ue);
} /* rgSCHCmnUpdExtPhr */
15921 * @brief PHR update.
15925 * Function : rgSCHCmnUpdPhr
15927 * Updates power headroom information for an UE.
15929 * @param[in] RgSchCellCb *cell
15930 * @param[in] RgSchUeCb *ue
15931 * @param[in] uint8_t phr
15932 * @param[out] RgSchErrInfo *err
/* Body of rgSCHCmnUpdPhr (signature line not visible in this
 * extraction — TODO confirm against the full file): locates the
 * recorded UL allocation matching the MAC CE report time and applies
 * the legacy PHR against it using the configured max power; informs
 * UL SPS when SPS is active for the UE. */
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
RgSchCmnAllocRecord *allRcd;
CmLList *node = ueUl->ulAllocLst.last; /* newest allocation record */
RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
allRcd = (RgSchCmnAllocRecord *)node->node;
/* Apply the PHR only against the allocation the CE was reported in */
if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
rgSCHPwrUpdPhr(cell, ue, phr, allRcd, RG_SCH_CMN_PWR_USE_CFG_MAX_PWR);
if(ulSpsUe->isUlSpsActv)
rgSCHCmnSpsPhrInd(cell,ue);
} /* rgSCHCmnUpdPhr */
15976 * @brief UL grant for contention resolution.
15980 * Function : rgSCHCmnContResUlGrant
 * Add UE to another queue specifically for CRNTI based contention resolution.
15986 * @param[in] RgSchUeCb *ue
15987 * @param[out] RgSchErrInfo *err
/* Dispatches the UE to the specific UL scheduler so that an UL grant
 * can be issued for CRNTI-based contention resolution; the EMTC UL
 * scheduler is used when the cell is EMTC-enabled. */
S16 rgSCHCmnContResUlGrant
RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
if(cell->emtcEnable)
cellSch->apisEmtcUl->rgSCHContResUlGrant(cell, ue);
cellSch->apisUl->rgSCHContResUlGrant(cell, ue);
16019 * @brief SR reception handling.
16023 * Function : rgSCHCmnSrRcvd
16025 * - Update UE's position within/across uplink scheduling queues
16026 * - Update priority of LCGs if needed.
16028 * @param[in] RgSchCellCb *cell
16029 * @param[in] RgSchUeCb *ue
16030 * @param[in] CmLteTimingInfo frm
16031 * @param[out] RgSchErrInfo *err
/* SR reception (body of rgSCHCmnSrRcvd; the signature line is not
 * visible in this extraction — TODO confirm): advances the SR timing
 * by one TTI, checks whether an allocation already exists for that
 * subframe, marks the UE for an SR grant, and hands the indication to
 * the specific UL scheduler (EMTC scheduler for EMTC cells). */
CmLteTimingInfo frm,
RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
CmLList *node = ueUl->ulAllocLst.last; /* most recent UL allocation */
emtcStatsUlTomSrInd++;
RGSCH_INCR_SUB_FRAME(frm, 1); /* 1 TTI after the time SR was sent */
RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)node->node;
/* An allocation already covering the SR subframe? */
if (RGSCH_TIMEINFO_SAME(frm, allRcd->allocTime))
//TODO_SID Need to check when it is getting triggered
ue->isSrGrant = TRUE;
if(cell->emtcEnable)
cellSch->apisEmtcUl->rgSCHSrRcvd(cell, ue);
cellSch->apisUl->rgSCHSrRcvd(cell, ue);
16083 * @brief Returns first uplink allocation to send reception
16088 * Function: rgSCHCmnFirstRcptnReq(cell)
16089 * Purpose: This function returns the first uplink allocation
16090 * (or NULLP if there is none) in the subframe
16091 * in which is expected to prepare and send reception
16096 * @param[in] RgSchCellCb *cell
16097 * @return RgSchUlAlloc*
/* Returns the first UL allocation of the subframe indexed by
 * rcpReqIdx for which a reception request must be prepared, or NULLP
 * when the index is invalid or the subframe holds no allocation. */
RgSchUlAlloc *rgSCHCmnFirstRcptnReq(RgSchCellCb *cell)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
RgSchUlAlloc* alloc = NULLP;
if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
RgSchUlSf* sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
alloc = rgSCHUtlUlAllocFirst(sf);
/* NOTE(review): only ONE allocation without a HARQ process is
 * skipped here ('if'), whereas rgSCHCmnFirstHqFdbkAlloc loops with
 * 'while' — confirm whether a single skip is intentional */
if (alloc && alloc->hqProc == NULLP)
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16121 * @brief Returns first uplink allocation to send reception
16126 * Function: rgSCHCmnNextRcptnReq(cell)
16127 * Purpose: This function returns the next uplink allocation
16128 * (or NULLP if there is none) in the subframe
16129 * in which is expected to prepare and send reception
16134 * @param[in] RgSchCellCb *cell
16135 * @return RgSchUlAlloc*
/* Returns the UL allocation following 'alloc' in the subframe indexed
 * by rcpReqIdx for which a reception request must be prepared, or
 * NULLP when the index is invalid or no further allocation exists. */
RgSchUlAlloc *rgSCHCmnNextRcptnReq(RgSchCellCb *cell,RgSchUlAlloc *alloc)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
//RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* Skip an allocation that has no HARQ process attached (single skip,
 * mirroring rgSCHCmnFirstRcptnReq) */
if (alloc && alloc->hqProc == NULLP)
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16162 * @brief Collates DRX enabled UE's scheduled in this SF
16166 * Function: rgSCHCmnDrxStrtInActvTmrInUl(cell)
16167 * Purpose: This function collates the link
16168 * of UE's scheduled in this SF who
16169 * have drx enabled. It then calls
16170 * DRX specific function to start/restart
16171 * inactivity timer in Ul
16175 * @param[in] RgSchCellCb *cell
/* Collects every DRX-enabled UE that received a fresh UL grant in the
 * current scheduling subframe (excluding retransmissions, SR grants
 * and UL SPS occasions) into a temporary list and restarts their DRX
 * inactivity timer in one call to the DRX module. */
Void rgSCHCmnDrxStrtInActvTmrInUl(RgSchCellCb *cell)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
RgSchUlSf *sf = &(cellUl->ulSfArr[cellUl->schdIdx]);
RgSchUlAlloc *alloc = rgSCHUtlUlAllocFirst(sf);
cmLListInit(&ulUeLst);
/* Only new (non-retx), non-SR-grant allocations qualify */
if (!(alloc->grnt.isRtx) && ueCb->isDrxEnabled && !(ueCb->isSrGrant)
/* ccpu00139513- DRX inactivity timer should not be started for
 * UL SPS occasions */
&& (alloc->hqProc->isSpsOccnHqP == FALSE)
cmLListAdd2Tail(&ulUeLst,&(ueCb->ulDrxInactvTmrLnk));
ueCb->ulDrxInactvTmrLnk.node = (PTR)ueCb;
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* Start/restart the UL DRX inactivity timer for all collected UEs */
(Void)rgSCHDrxStrtInActvTmr(cell,&ulUeLst,RG_SCH_DRX_UL);
16218 * @brief Returns first uplink allocation to send HARQ feedback
16223 * Function: rgSCHCmnFirstHqFdbkAlloc
16224 * Purpose: This function returns the first uplink allocation
16225 * (or NULLP if there is none) in the subframe
16226 * for which it is expected to prepare and send HARQ
16231 * @param[in] RgSchCellCb *cell
16232 * @param[in] uint8_t idx
16233 * @return RgSchUlAlloc*
/* Returns the first UL allocation of the HARQ-feedback subframe
 * selected by hqFdbkIdx[idx] that has a HARQ process attached (HARQ
 * feedback is only prepared for such allocations), or NULLP when the
 * index is invalid or no qualifying allocation exists. */
RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc(RgSchCellCb *cell,uint8_t idx)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
RgSchUlAlloc *alloc = NULLP;
if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
alloc = rgSCHUtlUlAllocFirst(sf);
/* Skip every allocation without a HARQ process */
while (alloc && (alloc->hqProc == NULLP))
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16257 * @brief Returns next allocation to send HARQ feedback for.
16261 * Function: rgSCHCmnNextHqFdbkAlloc(cell)
16262 * Purpose: This function returns the next uplink allocation
16263 * (or NULLP if there is none) in the subframe
16264 * for which HARQ feedback needs to be sent.
16268 * @param[in] RgSchCellCb *cell
16269 * @return RgSchUlAlloc*
/* Returns the UL allocation following 'alloc' in the HARQ-feedback
 * subframe selected by hqFdbkIdx[idx] that has a HARQ process
 * attached, or NULLP when the index is invalid or none remains. */
RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc(RgSchCellCb *cell,RgSchUlAlloc *alloc,uint8_t idx)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* Skip every allocation without a HARQ process */
while (alloc && (alloc->hqProc == NULLP))
alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16292 /***********************************************************
16294 * Func : rgSCHCmnUlGetITbsFrmIMcs
16296 * Desc : Returns the Itbs that is mapped to an Imcs
16297 * for the case of uplink.
16305 **********************************************************/
16306 uint8_t rgSCHCmnUlGetITbsFrmIMcs(uint8_t iMcs)
16308 return (rgUlIMcsTbl[iMcs].iTbs);
16311 /***********************************************************
16313 * Func : rgSCHCmnUlGetIMcsFrmITbs
16315 * Desc : Returns the Imcs that is mapped to an Itbs
16316 * for the case of uplink.
16320 * Notes: For iTbs 19, iMcs is dependant on modulation order.
16321 * Refer to 36.213, Table 8.6.1-1 and 36.306 Table 4.1-2
16322 * for UE capability information
16326 **********************************************************/
/* Maps an UL iTbs back to iMcs; for iTbs 19 the result depends on
 * whether the UE is 64QAM-capable (category 5), per 36.213 Table
 * 8.6.1-1 and 36.306 Table 4.1-2.
 * NOTE(review): the per-branch return values are not visible in this
 * extraction — confirm against the full file. */
uint8_t rgSCHCmnUlGetIMcsFrmITbs(uint8_t iTbs,CmLteUeCategory ueCtg)
/* a higher layer can force a 64QAM UE to transmit at 16QAM.
 * We currently do not support this. Once the support for such
 * is added, ueCtg should be replaced by current transmit
 * modulation configuration. Refer to 36.213 -8.6.1
 */
else if ( iTbs < 19 )
else if ((iTbs == 19) && (ueCtg != CM_LTE_UE_CAT_5))
/* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
   was seen when IMCS exceeds 20 on T2k TDD */
16365 /***********************************************************
16367 * Func : rgSCHCmnUlMinTbBitsForITbs
16369 * Desc : Returns the minimum number of bits that can
16370 * be given as grant for a specific CQI.
16378 **********************************************************/
16379 uint32_t rgSCHCmnUlMinTbBitsForITbs(RgSchCmnUlCell *cellUl,uint8_t iTbs)
16382 RGSCH_ARRAY_BOUND_CHECK(0, rgTbSzTbl[0], iTbs);
16384 return (rgTbSzTbl[0][iTbs][cellUl->sbSize-1]);
16387 /***********************************************************
16389 * Func : rgSCHCmnUlSbAlloc
16391 * Desc : Given a required 'number of subbands' and a hole,
16392 * returns a suitable alloc such that the subband
16393 * allocation size is valid
16397 * Notes: Does not assume either passed numSb or hole size
16398 * to be valid for allocation, and hence arrives at
16399 * an acceptable value.
16402 **********************************************************/
/* Produces a valid UL subband allocation from 'hole': both the
 * requested numSb and the hole size are normalized to the scheduler's
 * 2^a*3^b*5^c allocation-size grid, then either the complete hole or
 * a partial hole is taken. Neither input is assumed pre-validated. */
RgSchUlAlloc *rgSCHCmnUlSbAlloc
uint8_t holeSz; /* valid hole size */
RgSchUlAlloc *alloc;
/* Hole is already a valid (235-grid) size: round the request up and
 * take the whole hole when it is covered, else carve a partial hole */
if ((holeSz = rgSchCmnMult235Tbl[hole->num].prvMatch) == hole->num)
numSb = rgSchCmnMult235Tbl[numSb].match;
if (numSb >= holeSz)
alloc = rgSCHUtlUlAllocGetCompHole(sf, hole);
alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
/* Hole size itself is not on the grid: round the request up when it
 * fits, otherwise down, and allocate accordingly */
if (numSb < holeSz)
numSb = rgSchCmnMult235Tbl[numSb].match;
numSb = rgSchCmnMult235Tbl[numSb].prvMatch;
if ( numSb >= holeSz )
alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
16446 * @brief To fill the RgSchCmnUeUlAlloc structure of UeCb.
16450 * Function: rgSCHCmnUlUeFillAllocInfo
16451 * Purpose: Specific scheduler to call this API to fill the alloc
16454 * Invoked by: Scheduler
16456 * @param[in] RgSchCellCb *cell
16457 * @param[out] RgSchUeCb *ue
/* Finalizes the UE's committed UL allocation: fills TPC and n-DMRS,
 * links the HARQ process, writes the PDCCH contents, records the
 * allocation for later BSR/PHR reconciliation and, for a fresh
 * transmission, debits the UE's outstanding buffer status. */
Void rgSCHCmnUlUeFillAllocInfo(RgSchCellCb *cell,RgSchUeCb *ue)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
RgSchCmnUeUlAlloc *ulAllocInfo;
RgSchCmnUlUe *ueUl;
ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
ulAllocInfo = &ueUl->alloc;
/* Fill alloc structure */
rgSCHCmnUlAllocFillTpc(cell, ue, ulAllocInfo->alloc);
rgSCHCmnUlAllocFillNdmrs(cellUl, ulAllocInfo->alloc);
rgSCHCmnUlAllocLnkHqProc(ue, ulAllocInfo->alloc, ulAllocInfo->alloc->hqProc,
ulAllocInfo->alloc->hqProc->isRetx);
/* Populate the PDCCH carrying this grant */
rgSCHCmnUlFillPdcchWithAlloc(ulAllocInfo->alloc->pdcch,
ulAllocInfo->alloc, ue);
/* Recording information about this allocation */
rgSCHCmnUlRecordUeAlloc(cell, ue);
/* Update the UE's outstanding allocation (new transmissions only;
 * retransmissions carry no new data) */
if (!ulAllocInfo->alloc->hqProc->isRetx)
rgSCHCmnUlUpdOutStndAlloc(cell, ue, ulAllocInfo->allocdBytes);
16491 * @brief Update the UEs outstanding alloc based on the BSR report's timing.
16496 * Function: rgSCHCmnUpdUlCompEffBsr
16497 * Purpose: Clear off all the allocations from outstanding allocation that
16498 * are later than or equal to BSR timing information (stored in UEs datIndTime).
16500 * Invoked by: Scheduler
16502 * @param[in] RgSchUeCb *ue
/* Reconciles the UE's reported buffer status with grants issued since
 * the BSR was generated: sums the outstanding allocations newer than
 * the MAC CE report time, debits LCG0 first and the non-LCG0 pool
 * next, then recomputes effBsr = capped non-LCG0 BS + LCG0 BS. */
static Void rgSCHCmnUpdUlCompEffBsr(RgSchUeCb *ue)
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,ue->cell);
CmLList *node = ueUl->ulAllocLst.last;
RgSchCmnAllocRecord *allRcd;
uint32_t outStndAlloc=0;          /* bytes granted after the BSR snapshot */
uint32_t nonLcg0OutStndAllocBs=0; /* part of that debiting LCG1..3 */
uint32_t nonLcg0Bsr=0;
RgSchCmnLcg *cmnLcg = NULLP;
/* Walk back to the first record at/before the MAC CE report time */
allRcd = (RgSchCmnAllocRecord *)node->node;
if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
/* Accumulate all allocations made after the BSR snapshot */
allRcd = (RgSchCmnAllocRecord *)node->node;
outStndAlloc += allRcd->alloc;
cmnLcg = (RgSchCmnLcg *)(ue->ul.lcgArr[0].sch);
/* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
if (cmnLcg->bs > outStndAlloc)
cmnLcg->bs -= outStndAlloc;
ue->ul.minReqBytes = cmnLcg->bs;
/* LCG0 fully served: the remainder debits the non-LCG0 buffer */
nonLcg0OutStndAllocBs = outStndAlloc - cmnLcg->bs;
/* Rebuild the non-LCG0 buffer status: GBR LCGs individually, the
 * non-GBR pool as a single AMBR-capped figure */
for(lcgId = 1;lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
if(RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
cmnLcg = ((RgSchCmnLcg *) (ue->ul.lcgArr[lcgId].sch));
if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
nonLcg0Bsr += cmnLcg->bs;
nonLcg0Bsr += ue->ul.nonGbrLcgBs;
if (nonLcg0OutStndAllocBs > nonLcg0Bsr)
nonLcg0Bsr -= nonLcg0OutStndAllocBs;
ue->ul.nonLcg0Bs = nonLcg0Bsr;
/* Cap effBsr with nonLcg0Bsr and append lcg0 bs.
 * nonLcg0Bsr limit applies only to lcg1,2,3 */
/* better be handled in individual scheduler */
ue->ul.effBsr = nonLcg0Bsr +\
((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
16577 * @brief Records information about the current allocation.
16581 * Function: rgSCHCmnUlRecordUeAlloc
 * Purpose: Records information about the current allocation.
16583 * This includes the allocated bytes, as well
16584 * as some power information.
16586 * Invoked by: Scheduler
16588 * @param[in] RgSchCellCb *cell
16589 * @param[in] RgSchUeCb *ue
/* Records the UE's current UL allocation in its bounded allocation
 * history (the oldest record is recycled to the tail): stores the
 * grant's timing (offset to when the UE can actually report a BSR),
 * size in bytes, RB count, the UL CQI used and the TPC issued; also
 * feeds the RB count to the power module and the cell UL byte
 * counter. */
Void rgSCHCmnUlRecordUeAlloc(RgSchCellCb *cell,RgSchUeCb *ue)
RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
CmLListCp *lst = &ueUl->ulAllocLst;
CmLList *node = ueUl->ulAllocLst.first;
/* Recycle the oldest record from the head of the list */
RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
RgSchCmnUeUlAlloc *ulAllocInfo = &ueUl->alloc;
CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
cmLListDelFrm(lst, &allRcd->lnk);
/* To the crntTime, add the MIN time at which UE will
 * actually send the BSR i.e DELTA+4 */
allRcd->allocTime = cell->crntTime;
/*ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
if(ue->isEmtcUe == TRUE)
RGSCH_INCR_SUB_FRAME_EMTC(allRcd->allocTime,
(TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
RGSCH_INCR_SUB_FRAME(allRcd->allocTime,
(TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
allRcd->allocTime = cellUl->schdTime;
cmLListAdd2Tail(lst, &allRcd->lnk);
/* Filling in the parameters to be recorded */
allRcd->alloc = ulAllocInfo->allocdBytes;
//allRcd->numRb = ulAllocInfo->alloc->grnt.numRb;
allRcd->numRb = (ulAllocInfo->alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/*Recording the UL CQI derived from the maxUlCqi */
allRcd->cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
allRcd->tpc = ulAllocInfo->alloc->grnt.tpc;
/* Power module tracks RB usage for PHR/TPC decisions */
rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
cell->measurements.ulBytesCnt += ulAllocInfo->allocdBytes;
16642 /** PHR handling for MSG3
16643 * @brief Records allocation information of msg3 in the the UE.
16647 * Function: rgSCHCmnUlRecMsg3Alloc
16648 * Purpose: Records information about msg3 allocation.
16649 * This includes the allocated bytes, as well
16650 * as some power information.
16652 * Invoked by: Scheduler
16654 * @param[in] RgSchCellCb *cell
16655 * @param[in] RgSchUeCb *ue
16656 * @param[in] RgSchRaCb *raCb
/* Records the Msg3 allocation in the UE's allocation history (PHR
 * handling for MSG3): recycles the oldest record, stamps it with the
 * Msg3 allocation time, grant size, RB count, CCCH CQI and TPC, and
 * feeds the RB count to the power module. */
Void rgSCHCmnUlRecMsg3Alloc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchRaCb *raCb)
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
CmLListCp *lst = &ueUl->ulAllocLst;
CmLList *node = ueUl->ulAllocLst.first;
/* Recycle the oldest record from the head of the list */
RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
/* Stack Crash problem for TRACE5 changes */
cmLListDelFrm(lst, node);
allRcd->allocTime = raCb->msg3AllocTime;
cmLListAdd2Tail(lst, node);
/* Filling in the parameters to be recorded */
allRcd->alloc = raCb->msg3Grnt.datSz;
allRcd->numRb = raCb->msg3Grnt.numRb;
allRcd->cqi = raCb->ccchCqi;
allRcd->tpc = raCb->msg3Grnt.tpc;
rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
16683 * @brief Keeps track of the most recent RG_SCH_CMN_MAX_ALLOC_TRACK
16684 * allocations to track. Adds this allocation to the ueUl's ulAllocLst.
16689 * Function: rgSCHCmnUlUpdOutStndAlloc
16690 * Purpose: Recent Allocation shall be at First Pos'n.
16691 * Remove the last node, update the fields
16692 * with the new allocation and add at front.
16694 * Invoked by: Scheduler
16696 * @param[in] RgSchCellCb *cell
16697 * @param[in] RgSchUeCb *ue
16698 * @param[in] uint32_t alloc
/* Debits a fresh UL grant of 'alloc' bytes from the UE's buffer
 * status: LCG0 is served first, any remainder is taken from the
 * non-LCG0 pool; effBsr is then recomputed. When the UE becomes
 * empty, the pending BSR timer is stopped and (unless this was an SR
 * grant) the periodic BSR timer is started. */
Void rgSCHCmnUlUpdOutStndAlloc(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t alloc)
uint32_t nonLcg0Alloc=0; /* part of the grant debiting LCG1..3 */
/* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
if (((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs > alloc)
((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs -= alloc;
/* LCG0 fully served: the remainder debits the non-LCG0 buffer */
nonLcg0Alloc = alloc - ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = 0;
if (nonLcg0Alloc >= ue->ul.nonLcg0Bs)
ue->ul.nonLcg0Bs = 0;
ue->ul.nonLcg0Bs -= nonLcg0Alloc;
/* Cap effBsr with effAmbr and append lcg0 bs.
 * effAmbr limit applies only to lcg1,2,3 non GBR LCG's*/
/* better be handled in individual scheduler */
ue->ul.effBsr = ue->ul.nonLcg0Bs +\
((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
if (ue->ul.effBsr == 0)
if (ue->bsrTmr.tmrEvnt != TMR_NONE)
rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
/* SR grants must not (re)arm the periodic BSR timer */
if (FALSE == ue->isSrGrant)
if (ue->ul.bsrTmrCfg.isPrdBsrTmrPres)
rgSCHTmrStartTmr(cell, ue, RG_SCH_TMR_BSR,
ue->ul.bsrTmrCfg.prdBsrTmr);
/* Resetting UEs lower Cap */
ue->ul.minReqBytes = 0;
16757 * @brief Returns the "Itbs" for a given UE.
16761 * Function: rgSCHCmnUlGetITbs
16762 * Purpose: This function returns the "Itbs" for a given UE.
16764 * Invoked by: Scheduler
16766 * @param[in] RgSchUeCb *ue
/* Returns the UL iTbs for a UE: derives a working CQI (capped at
 * maxUlCqi for non-category-5 UEs), blends the CQI-based iTbs with the
 * link-adaptation delta, clamps it to the valid range and the cell's
 * configured maximum, and otherwise falls back to a direct
 * CQI-to-iTbs table lookup. */
uint8_t rgSCHCmnUlGetITbs
RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* CQI will be capped to maxUlCqi for 16qam UEs */
CmLteUeCategory ueCtgy = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* Upper bound implied by the UE's maximum supported UL CQI */
uint8_t maxiTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ueUl->maxUlCqi];
/* #ifdef RG_SCH_CMN_EXT_CP_SUP For ECP pick index 1 */
if ( (ueCtgy != CM_LTE_UE_CAT_5) &&
(ueUl->validUlCqi > ueUl->maxUlCqi)
cqi = ueUl->maxUlCqi;
cqi = ueUl->validUlCqi;
/* Link adaptation: cqiBasediTbs and deltaiTbs are maintained in
 * hundredths of an iTbs step */
iTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
RG_SCH_CHK_ITBS_RANGE(iTbs, maxiTbs);
iTbs = RGSCH_MIN(iTbs, ue->cell->thresholds.maxUlItbs);
/* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
   was seen when IMCS exceeds 20 on T2k TDD */
/* Fallback path: direct lookup from the current UL CQI */
if ( (ueCtgy != CM_LTE_UE_CAT_5) && (ueUl->crntUlCqi[0] > ueUl->maxUlCqi ))
cqi = ueUl->maxUlCqi;
cqi = ueUl->crntUlCqi[0];
return (rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][cqi]);
16830 * @brief This function adds the UE to DLRbAllocInfo TX lst.
16834 * Function: rgSCHCmnDlRbInfoAddUeTx
16835 * Purpose: This function adds the UE to DLRbAllocInfo TX lst.
16837 * Invoked by: Common Scheduler
16839 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16840 * @param[in] RgSchUeCb *ue
16841 * @param[in] RgSchDlHqProcCb *hqP
/* Adds a HARQ process to the DL allocation-info TX list: delegated to
 * the DLFS scheduler when frequency-selective scheduling is enabled,
 * otherwise appended to the dedicated TX list. The reqLnk node check
 * guards against adding the same process twice. */
static Void rgSCHCmnDlRbInfoAddUeTx
RgSchCmnDlRbAllocInfo *allocInfo,
RgSchDlHqProcCb *hqP
RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Only add if not already linked into a request list */
if (hqP->reqLnk.node == NULLP)
if (cellSch->dl.isDlFreqSel)
cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
&allocInfo->dedAlloc.txHqPLst, hqP);
cmLListAdd2Tail(&allocInfo->dedAlloc.txHqPLst, &hqP->reqLnk);
hqP->reqLnk.node = (PTR)hqP;
16875 * @brief This function adds the UE to DLRbAllocInfo RETX lst.
16879 * Function: rgSCHCmnDlRbInfoAddUeRetx
16880 * Purpose: This function adds the UE to DLRbAllocInfo RETX lst.
16882 * Invoked by: Common Scheduler
16884 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16885 * @param[in] RgSchUeCb *ue
16886 * @param[in] RgSchDlHqProcCb *hqP
/* Adds a HARQ process to the DL allocation-info RETX list: delegated
 * to the DLFS scheduler when frequency-selective scheduling is
 * enabled, otherwise appended to the dedicated RETX list. */
static Void rgSCHCmnDlRbInfoAddUeRetx
RgSchCmnDlRbAllocInfo *allocInfo,
RgSchDlHqProcCb *hqP
RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
if (cellSch->dl.isDlFreqSel)
cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
&allocInfo->dedAlloc.retxHqPLst, hqP);
/* checking UE's presence in this lst is unnecessary */
cmLListAdd2Tail(&allocInfo->dedAlloc.retxHqPLst, &hqP->reqLnk);
hqP->reqLnk.node = (PTR)hqP;
16916 * @brief This function adds the UE to DLRbAllocInfo TX-RETX lst.
16920 * Function: rgSCHCmnDlRbInfoAddUeRetxTx
16921 * Purpose: This adds the UE to DLRbAllocInfo TX-RETX lst.
16923 * Invoked by: Common Scheduler
16925 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16926 * @param[in] RgSchUeCb *ue
16927 * @param[in] RgSchDlHqProcCb *hqP
/* Adds a HARQ process to the DL allocation-info combined TX-RETX
 * list: delegated to the DLFS scheduler when frequency-selective
 * scheduling is enabled, otherwise appended to the dedicated list. */
static Void rgSCHCmnDlRbInfoAddUeRetxTx
RgSchCmnDlRbAllocInfo *allocInfo,
RgSchDlHqProcCb *hqP
RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
if (cellSch->dl.isDlFreqSel)
cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
&allocInfo->dedAlloc.txRetxHqPLst, hqP);
cmLListAdd2Tail(&allocInfo->dedAlloc.txRetxHqPLst, &hqP->reqLnk);
hqP->reqLnk.node = (PTR)hqP;
16956 * @brief This function adds the UE to DLRbAllocInfo NonSchdRetxLst.
16960 * Function: rgSCHCmnDlAdd2NonSchdRetxLst
16961 * Purpose: During RB estimation for RETX, if allocation fails
16962 * then appending it to NonSchdRetxLst, the further
16963 * action is taken as part of Finalization in
16964 * respective schedulers.
16966 * Invoked by: Common Scheduler
16968 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16969 * @param[in] RgSchUeCb *ue
16970 * @param[in] RgSchDlHqProcCb *hqP
/* Appends a HARQ process whose RETX RB estimation failed to the
 * non-scheduled RETX list (handled later during finalization by the
 * specific scheduler). DL SPS HARQ processes are excluded. */
static Void rgSCHCmnDlAdd2NonSchdRetxLst
RgSchCmnDlRbAllocInfo *allocInfo,
RgSchDlHqProcCb *hqP
CmLList *schdLnkNode;
/* SPS HARQ processes are managed by the SPS module, not queued here */
if ( (hqP->sch != (RgSchCmnDlHqProc *)NULLP) &&
(RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP)))
schdLnkNode = &hqP->schdLstLnk;
RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
cmLListAdd2Tail(&allocInfo->dedAlloc.nonSchdRetxHqPLst, schdLnkNode);
17002 * @brief This function adds the UE to DLRbAllocInfo NonSchdTxRetxLst.
17006 * Function: rgSCHCmnDlAdd2NonSchdTxRetxLst
17007 * Purpose: During RB estimation for TXRETX, if allocation fails
17008 * then appending it to NonSchdTxRetxLst, the further
17009 * action is taken as part of Finalization in
17010 * respective schedulers.
17012 * Invoked by: Common Scheduler
17014 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
17015 * @param[in] RgSchUeCb *ue
17016 * @param[in] RgSchDlHqProcCb *hqP
17022 * @brief This function handles the initialisation of DL HARQ/ACK feedback
 * timing information for each DL subframe.
17027 * Function: rgSCHCmnDlANFdbkInit
17028 * Purpose: Each DL subframe stores the sfn and subframe
17029 * information of UL subframe in which it expects
17030 * HARQ ACK/NACK feedback for this subframe.It
17031 * generates the information based on Downlink
17032 * Association Set Index table.
17034 * Invoked by: Scheduler
17036 * @param[in] RgSchCellCb* cell
/* TDD HARQ ACK/NACK feedback initialization: for every DL subframe,
 * precomputes the UL subframe (sfn offset + subframe number + index m)
 * in which its HARQ feedback is expected, derived from the Downlink
 * Association Set Index (K) table for the cell's UL/DL configuration.
 * Subsequent radio frames copy the pattern of the first. */
static S16 rgSCHCmnDlANFdbkInit(RgSchCellCb *cell)
uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
uint8_t maxDlSubfrms = cell->numDlSubfrms;
uint8_t calcSfnOffset;
uint8_t ulSfCnt =0;
RgSchTddSubfrmInfo ulSubfrmInfo;
uint8_t maxUlSubfrms;
ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
/* Generate HARQ ACK/NACK feedback information for each DL sf in a radio frame
 * Calculate this information based on DL Association set Index table */
for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance to the next UL subframe of the configuration */
while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
RG_SCH_TDD_UL_SUBFRAME)
sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Each UL subframe acknowledges numFdbkSubfrms DL subframes */
for(idx=0; idx < rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
numFdbkSubfrms; idx++)
/* DL subframe = UL subframe minus K (may wrap to a prior frame) */
calcSfNum = sfNum - rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
calcSfnOffset = RGSCH_CEIL(-calcSfNum, RGSCH_NUM_SUB_FRAMES);
calcSfNum = ((RGSCH_NUM_SUB_FRAMES * calcSfnOffset) + calcSfNum)\
% RGSCH_NUM_SUB_FRAMES;
/* Map the absolute subframe number to the DL-subframe array index,
 * accounting for the UL subframes that precede it */
if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
else if((ulSubfrmInfo.switchPoints == 2) && (calcSfNum <= \
RG_SCH_CMN_SPL_SUBFRM_6))
dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
dlIdx = calcSfNum - maxUlSubfrms;
cell->subFrms[dlIdx]->dlFdbkInfo.subframe = sfNum;
cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = calcSfnOffset;
cell->subFrms[dlIdx]->dlFdbkInfo.m = idx;
sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* DL subframes in the subsequent radio frames are initialized
 * with the previous radio frames */
for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;\
sfNum = dlIdx - rgSchTddNumDlSubfrmTbl[ulDlCfgIdx]\
[RGSCH_NUM_SUB_FRAMES-1];
cell->subFrms[dlIdx]->dlFdbkInfo.subframe = \
cell->subFrms[sfNum]->dlFdbkInfo.subframe;
cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = \
cell->subFrms[sfNum]->dlFdbkInfo.sfnOffset;
cell->subFrms[dlIdx]->dlFdbkInfo.m = cell->subFrms[sfNum]->dlFdbkInfo.m;
17124 * @brief This function handles the initialization of uplink association
17125 * set information for each DL subframe.
17130 * Function: rgSCHCmnDlKdashUlAscInit
17131 * Purpose: Each DL sf stores the sfn and sf information of UL sf
17132 * in which it expects HQ ACK/NACK trans. It generates the information
17133 * based on k` in UL association set index table.
17135 * Invoked by: Scheduler
17137 * @param[in] RgSchCellCb* cell
/* TDD UL association set (k') initialization: for every DL subframe,
 * precomputes the UL subframe (sfn offset + subframe number) in which
 * the corresponding HARQ ACK/NACK transmission occurs, using the k'
 * column of the UL Association Set Index table. DL indices never
 * covered by any UL subframe are marked RGSCH_INVALID_INFO, and
 * subsequent radio frames copy the pattern of the first. */
static S16 rgSCHCmnDlKdashUlAscInit(RgSchCellCb *cell)
uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
uint8_t maxDlSubfrms = cell->numDlSubfrms;
uint8_t ulSfCnt =0;
RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
uint8_t dlPres = 0; /* bitmask of DL indices that received ulAscInfo */
/* Generate ACK/NACK offset information for each DL subframe in a radio frame
 * Calculate this information based on K` in UL Association Set table */
for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance to the next UL subframe of the configuration */
while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
RG_SCH_TDD_UL_SUBFRAME)
sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* DL subframe = UL subframe minus k' (may wrap to a prior frame) */
calcSfNum = (sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum] + \
RGSCH_NUM_SUB_FRAMES) % RGSCH_NUM_SUB_FRAMES;
calcSfnOffset = sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum];
if(calcSfnOffset < 0)
calcSfnOffset = RGSCH_CEIL(-calcSfnOffset, RGSCH_NUM_SUB_FRAMES);
/* Map the absolute subframe number to the DL-subframe array index */
if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
else if((ulSubfrmInfo.switchPoints == 2) &&
(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
dlIdx = calcSfNum - maxUlSubfrms;
cell->subFrms[dlIdx]->ulAscInfo.subframe = sfNum;
cell->subFrms[dlIdx]->ulAscInfo.sfnOffset = calcSfnOffset;
/* set dlIdx for which ulAscInfo is updated */
dlPres = dlPres | (1 << dlIdx);
sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Set Invalid information for which ulAscInfo is not present */
sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
/* If dlPres is 0, ulAscInfo is not present in that DL index */
if(! ((dlPres >> sfCount)&0x01))
cell->subFrms[sfCount]->ulAscInfo.sfnOffset =
RGSCH_INVALID_INFO;
cell->subFrms[sfCount]->ulAscInfo.subframe =
RGSCH_INVALID_INFO;
/* DL subframes in the subsequent radio frames are initialized
 * with the previous radio frames */
for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;
rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
cell->subFrms[dlIdx]->ulAscInfo.subframe =
cell->subFrms[sfNum]->ulAscInfo.subframe;
cell->subFrms[dlIdx]->ulAscInfo.sfnOffset =
cell->subFrms[sfNum]->ulAscInfo.sfnOffset;
17233 * @brief This function initialises the 'Np' value for 'p'
17237 * Function: rgSCHCmnDlNpValInit
17238 * Purpose: To initialise the 'Np' value for each 'p'. It is used
17239 * to find the mapping between nCCE and 'p' and used in
17240 * HARQ ACK/NACK reception.
17242 * Invoked by: Scheduler
17244 * @param[in] RgSchCellCb* cell
static S16 rgSCHCmnDlNpValInit(RgSchCellCb *cell)
   /* Always Np is 0 for p=0 */
   cell->rgSchTddNpValTbl[0] = 0;
   /* Cache Np per 'p' so nCCE->p mapping for HARQ ACK/NACK reception is a
    * simple table lookup later (see rgSCHCmnGetPValFrmCCE).
    * NOTE(review): formula BW*(p*numSubcar - 4)/36 presumably follows the
    * 36.211 Np definition - confirm against the spec. */
   for(idx=1; idx < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; idx++)
      np = cell->bwCfg.dlTotalBw * (idx * RG_SCH_CMN_NUM_SUBCAR - 4);
      cell->rgSchTddNpValTbl[idx] = (uint8_t) (np/36);
17266 * @brief This function handles the creation of RACH preamble
17267 * list to queue the preambles and process at the scheduled
17272 * Function: rgSCHCmnDlCreateRachPrmLst
17273 * Purpose: To create RACH preamble list based on RA window size.
17274 * It is used to queue the preambles and process it at the
17277 * Invoked by: Scheduler
17279 * @param[in] RgSchCellCb* cell
static S16 rgSCHCmnDlCreateRachPrmLst(RgSchCellCb *cell)
   /* raArrSz: number of radio frames of queueing needed to cover the
    * configured RA response window */
   RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);

   /* One list head per (frame, subframe, RA-RNTI) slot */
   lstSize = raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES;

   cell->raInfo.maxRaSize = raArrSz;
   /* Allocate the flat array of RACH-preamble request lists */
   ret = rgSCHUtlAllocSBuf(cell->instIdx,
      (Data **)(&cell->raInfo.raReqLst), (Size)(lstSize * sizeof(CmLListCp)));

   cell->raInfo.lstSize = lstSize;
17308 * @brief This function handles the initialization of RACH Response
17309 * information at each DL subframe.
17313 * Function: rgSCHCmnDlRachInfoInit
17314 * Purpose: Each DL subframe stores the sfn and subframe information of
17315 * possible RACH response allowed for UL subframes. It generates
17316 * the information based on PRACH configuration.
17318 * Invoked by: Scheduler
17320 * @param[in] RgSchCellCb* cell
static S16 rgSCHCmnDlRachInfoInit(RgSchCellCb *cell)
   uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
   uint8_t ulSfCnt =0;
   uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
                                      [RGSCH_NUM_SUB_FRAMES-1];
   /* Scratch table: RAR transmission opportunities per (sfn, subframe),
    * copied into the cell at the end via rgSCHCmnDlCpyRachInfo() */
   RgSchTddRachRspLst rachRspLst[3][RGSCH_NUM_SUB_FRAMES];
   uint8_t endSubfrmIdx;
   uint8_t startSubfrmIdx;
   RgSchTddRachDelInfo *delInfo;
   uint8_t numSubfrms;

   memset(rachRspLst, 0, sizeof(rachRspLst));

   RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);

   /* Include Special subframes */
   maxUlSubfrms = maxUlSubfrms + \
                  rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx].switchPoints;
   for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
      /* Skip pure-DL subframes: PRACH arrives only on UL/special subframes */
      while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] ==
            RG_SCH_TDD_DL_SUBFRAME)
         sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
      /* RA response window for a preamble on 'sfNum': starts after the
       * processing wait period plus RAR lead time, spans raWinSize sfs */
      startWin = (sfNum + RG_SCH_CMN_RARSP_WAIT_PRD + \
            ((RgSchCmnCell *)cell->sc.sch)->dl.numRaSubFrms);
      endWin = (startWin + cell->rachCfg.raWinSize - 1);
         rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][startWin%RGSCH_NUM_SUB_FRAMES];
      /* Find the next DL subframe starting from Subframe 0 */
      if((startSubfrmIdx % RGSCH_NUM_SUB_FRAMES) == 0)
         startWin = RGSCH_CEIL(startWin, RGSCH_NUM_SUB_FRAMES);
         startWin = startWin * RGSCH_NUM_SUB_FRAMES;
         rgSchTddLowDlSubfrmIdxTbl[ulDlCfgIdx][endWin%RGSCH_NUM_SUB_FRAMES];
      endWin = (endWin/RGSCH_NUM_SUB_FRAMES) * RGSCH_NUM_SUB_FRAMES \
      if(startWin > endWin)
      /* Find all the possible RACH Response transmission
       * time within the RA window size */
      startSubfrmIdx = startWin%RGSCH_NUM_SUB_FRAMES;
      for(sfnIdx = startWin/RGSCH_NUM_SUB_FRAMES;
            sfnIdx <= endWin/RGSCH_NUM_SUB_FRAMES; sfnIdx++)
         /* Last radio frame of the window may end mid-frame */
         if(sfnIdx == endWin/RGSCH_NUM_SUB_FRAMES)
            endSubfrmIdx = endWin%RGSCH_NUM_SUB_FRAMES;
            endSubfrmIdx = RGSCH_NUM_SUB_FRAMES-1;
         /* Find all the possible RACH Response transmission
          * time within radio frame */
         for(subfrmIdx = startSubfrmIdx;
               subfrmIdx <= endSubfrmIdx; subfrmIdx++)
            if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][subfrmIdx] ==
                  RG_SCH_TDD_UL_SUBFRAME)
            subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
            /* Find the next DL subframe starting from Subframe 0 */
            if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
            RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx], subfrmIdx);
               rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
            rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset = sfnIdx;
            rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].subframe[numSubfrms]
            rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms++;
         startSubfrmIdx = RG_SCH_CMN_SUBFRM_0;
      /* Update the subframes to be deleted at this subframe */
      /* Get the subframe after the end of RA window size */
      sfnOffset = endWin/RGSCH_NUM_SUB_FRAMES;
      sfnOffset += raArrSz;
      sfnIdx = (endWin/RGSCH_NUM_SUB_FRAMES) % raArrSz;
      RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx],endSubfrmIdx-1);
      /* Pick the DL subframe right after the window in which stale RA
       * requests for 'sfNum' can be purged; wrap to subframe 0 if needed */
      if((endSubfrmIdx == RGSCH_NUM_SUB_FRAMES) ||
            (rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx] ==
             RGSCH_NUM_SUB_FRAMES))
         rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][RG_SCH_CMN_SUBFRM_0];
         subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx];
      delInfo = &rachRspLst[sfnIdx][subfrmIdx].delInfo;
      delInfo->sfnOffset = sfnOffset;
      delInfo->subframe[delInfo->numSubfrms] = sfNum;
      delInfo->numSubfrms++;
      sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
   /* Persist the computed RAR opportunity table into the cell control block */
   ret = rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz);
17465 * @brief This function handles the initialization of PHICH information
17466 * for each DL subframe based on PHICH table.
17470 * Function: rgSCHCmnDlPhichOffsetInit
17471 * Purpose: Each DL subf stores the sfn and subf information of UL subframe
17472 * for which it trnsmts PHICH in this subframe. It generates the information
17473 * based on PHICH table.
17475 * Invoked by: Scheduler
17477 * @param[in] RgSchCellCb* cell
static S16 rgSCHCmnDlPhichOffsetInit(RgSchCellCb *cell)
   uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
   uint8_t maxDlSubfrms = cell->numDlSubfrms;
   uint8_t dlPres = 0;
   uint8_t calcSfnOffset;
   uint8_t ulSfCnt =0;
   RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
   uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
                                      [RGSCH_NUM_SUB_FRAMES-1];

   /* Generate PHICH offset information for each DL subframe in a radio frame
    * Calculate this information based on K in PHICH table */
   for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
      /* Advance sfNum to the next UL subframe of the UL/DL config pattern */
      while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
            RG_SCH_TDD_UL_SUBFRAME)
         sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
      /* DL subframe (and SFN offset) carrying PHICH for UL sf 'sfNum',
       * i.e. sfNum + K per the TDD PHICH K table */
      calcSfNum = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) % \
                  RGSCH_NUM_SUB_FRAMES;
      calcSfnOffset = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) / \
                      RGSCH_NUM_SUB_FRAMES;
      /* Map absolute subframe number to DL subframe array index, skipping
       * UL subframes that have no subFrms[] entry */
      if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
      else if((ulSubfrmInfo.switchPoints == 2) &&
            (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
         dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
         dlIdx = calcSfNum - maxUlSubfrms;
      cell->subFrms[dlIdx]->phichOffInfo.subframe = sfNum;
      cell->subFrms[dlIdx]->phichOffInfo.numSubfrms = 1;
      cell->subFrms[dlIdx]->phichOffInfo.sfnOffset = calcSfnOffset;

      /* set dlIdx for which phich offset is updated */
      dlPres = dlPres | (1 << dlIdx);
      sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;

   /* Set Invalid information for which phich offset is not present */
      sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
      /* If dlPres is 0, phich offset is not present in that DL index */
      if(! ((dlPres >> sfCount)&0x01))
         cell->subFrms[sfCount]->phichOffInfo.sfnOffset =
            RGSCH_INVALID_INFO;
         cell->subFrms[sfCount]->phichOffInfo.subframe =
            RGSCH_INVALID_INFO;
         cell->subFrms[sfCount]->phichOffInfo.numSubfrms = 0;

   /* DL subframes in the subsequent radio frames are
    * initialized with the previous radio frames */
   for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms;
         dlIdx < maxDlSubfrms; dlIdx++)
         rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
      cell->subFrms[dlIdx]->phichOffInfo.subframe =
         cell->subFrms[sfNum]->phichOffInfo.subframe;
      cell->subFrms[dlIdx]->phichOffInfo.sfnOffset =
         cell->subFrms[sfNum]->phichOffInfo.sfnOffset;
17572 * @brief Updation of Sch vars per TTI.
17576 * Function: rgSCHCmnUpdVars
17577 * Purpose: Updation of Sch vars per TTI.
17579 * @param[in] RgSchCellCb *cell
Void rgSCHCmnUpdVars(RgSchCellCb *cell)
   RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
   CmLteTimingInfo timeInfo;
   uint8_t ulSubframe;
   uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
   uint8_t msg3Subfrm;

   /* ccpu00132654-ADD- Initializing all the indices in every subframe*/
   rgSCHCmnInitVars(cell);

   idx = (cell->crntTime.slot + TFU_ULCNTRL_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
   /* Calculate the UL scheduling subframe idx based on the
   if(rgSchTddPuschTxKTbl[ulDlCfgIdx][idx] != 0)
      /* PUSCH transmission is based on offset from DL
       * PDCCH scheduling */
      RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
      ulSubframe = rgSchTddPuschTxKTbl[ulDlCfgIdx][timeInfo.subframe];
      /* Add the DCI-0 to PUSCH time to get the time of UL subframe */
      RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, ulSubframe);
      cellUl->schdTti = timeInfo.sfn * 10 + timeInfo.subframe;
      /* Fetch the corresponding UL subframe Idx in UL sf array */
      cellUl->schdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
      /* Fetch the corresponding UL Harq Proc ID */
      cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
      cellUl->schdTime = timeInfo;
   /* Non-zero M value => PHICH/HI-DCI0 occasion at this DL subframe */
   Mval = rgSchTddPhichMValTbl[ulDlCfgIdx][idx];
      /* Fetch the tx time for DL HIDCI-0 */
      RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
      /* Fetch the corresponding n-k tx time of PUSCH */
      cellUl->hqFdbkIdx[0] = rgSCHCmnGetPhichUlSfIdx(&timeInfo, cell);
      /* Retx will happen according to the Pusch k table */
      cellUl->reTxIdx[0] = cellUl->schdIdx;
      if(ulDlCfgIdx == 0)
         /* Calculate the ReTxIdx corresponding to hqFdbkIdx[0] */
         cellUl->reTxIdx[0] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
               cellUl->hqFdbkIdx[0]);
      /* At Idx 1 store the UL SF adjacent(left) to the UL SF
      cellUl->hqFdbkIdx[1] = (cellUl->hqFdbkIdx[0]-1 +
            cellUl->numUlSubfrms) % cellUl->numUlSubfrms;
      /* Calculate the ReTxIdx corresponding to hqFdbkIdx[1] */
      cellUl->reTxIdx[1] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
            cellUl->hqFdbkIdx[1]);
   /* Reception request index: UL data expected TFU_RECPREQ_DLDELTA ahead */
   idx = (cell->crntTime.slot + TFU_RECPREQ_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
   if (rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] == RG_SCH_TDD_UL_SUBFRAME)
      RGSCHCMNADDTOCRNTTIME(cell->crntTime, timeInfo, TFU_RECPREQ_DLDELTA)
      cellUl->rcpReqIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
   idx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) % RGSCH_NUM_SUB_FRAMES;

   /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
     special subframe */
   if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] != RG_SCH_TDD_UL_SUBFRAME)
      /* Msg3 scheduling index: RAR time plus the Msg3 subframe offset */
      RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
      msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
      RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, msg3Subfrm);
      cellUl->msg3SchdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
      cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
   if(!rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][idx])
      cellUl->spsUlRsrvIdx = RGSCH_INVALID_INFO;
      /* introduce some reuse with above code? */
      RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
      //offst = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
      offst = rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][timeInfo.subframe];
      RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, offst);
      cellUl->spsUlRsrvIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
      /* The harq proc continues to be accessed and used the same delta before
       * actual data occurance, and hence use the same idx */
      cellUl->spsUlRsrvHqProcIdx = cellUl->schdHqProcIdx;

   /* RACHO: update cmn sched specific RACH variables,
    * mainly the prachMaskIndex */
   rgSCHCmnUpdRachParam(cell);
17690 * @brief To get 'p' value from nCCE.
17694 * Function: rgSCHCmnGetPValFrmCCE
17695 * Purpose: Gets 'p' value for HARQ ACK/NACK reception from CCE.
17697 * @param[in] RgSchCellCb *cell
17698 * @param[in] uint8_t cce
uint8_t rgSCHCmnGetPValFrmCCE(RgSchCellCb *cell,uint8_t cce)
   /* Scan the per-cell Np table (built in rgSCHCmnDlNpValInit): 'p' is the
    * first index whose Np value exceeds the given CCE number */
   for(i=1; i < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; i++)
      if(cce < cell->rgSchTddNpValTbl[i])
17717 /***********************************************************
17719 * Func : rgSCHCmnUlAdapRetx
17721 * Desc : Adaptive retransmission for an allocation.
17729 **********************************************************/
static Void rgSCHCmnUlAdapRetx(RgSchUlAlloc *alloc,RgSchUlHqProcCb *proc)
   /* Attach the HARQ process to this adaptive-retransmission allocation */
   rgSCHUhmRetx(proc, alloc);
   if (proc->rvIdx != 0)
      /* Non-zero redundancy version: convey RV via the RV->MCS mapping */
      alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[proc->rvIdx];
      /* RV 0: reuse the originally granted MCS */
      alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
17748 * @brief Scheduler invocation per TTI.
17752 * Function: rgSCHCmnHdlUlInactUes
17755 * Invoked by: Common Scheduler
17757 * @param[in] RgSchCellCb *cell
static Void rgSCHCmnHdlUlInactUes(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   CmLListCp ulInactvLst;
   /* Get a List of Inactv UEs for UL*/
   cmLListInit(&ulInactvLst);

   /* Trigger Spfc Schedulers with Inactive UEs */
   /* Collect UEs made UL-inactive by measurement gaps / ACK-NACK repetition */
   rgSCHMeasGapANRepGetUlInactvUe (cell, &ulInactvLst);
   /* take care of this in UL retransmission */
   cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInactvLst);
17776 * @brief Scheduler invocation per TTI.
17780 * Function: rgSCHCmnHdlDlInactUes
17783 * Invoked by: Common Scheduler
17785 * @param[in] RgSchCellCb *cell
static Void rgSCHCmnHdlDlInactUes(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   CmLListCp dlInactvLst;
   /* Get a List of Inactv UEs for DL */
   cmLListInit(&dlInactvLst);

   /* Trigger Spfc Schedulers with Inactive UEs */
   /* Collect UEs made DL-inactive by measurement gaps / ACK-NACK repetition
    * and hand them to the DL specific scheduler */
   rgSCHMeasGapANRepGetDlInactvUe (cell, &dlInactvLst);
   cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInactvLst);
17802 /* RACHO: Rach handover functions start here */
17803 /***********************************************************
17805 * Func : rgSCHCmnUeIdleExdThrsld
17807 * Desc : RETURN ROK if UE has been idle more
17816 **********************************************************/
static S16 rgSCHCmnUeIdleExdThrsld(RgSchCellCb *cell,RgSchUeCb *ue)
   /* Time difference in subframes */
   uint32_t sfDiff = RGSCH_CALC_SF_DIFF(cell->crntTime, ue->ul.ulTransTime);

   /* UE counts as idle once the gap since its last UL transmission
    * exceeds the per-UE idle threshold */
   if (sfDiff > (uint32_t)RG_SCH_CMN_UE_IDLE_THRSLD(ue))
17834 * @brief Scheduler processing for Ded Preambles on cell configuration.
17838 * Function : rgSCHCmnCfgRachDedPrm
17840 * This function does requisite initialisation
17841 * for RACH Ded Preambles.
17844 * @param[in] RgSchCellCb *cell
static Void rgSCHCmnCfgRachDedPrm(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
   uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;

   /* Nothing to do when no dedicated preamble set is configured */
   if (cell->macPreambleSet.pres == NOTPRSNT)
   cellSch->rachCfg.numDedPrm = cell->macPreambleSet.size;
   cellSch->rachCfg.dedPrmStart = cell->macPreambleSet.start;
   /* Initialize handover List */
   cmLListInit(&cellSch->rachCfg.hoUeLst);
   /* Initialize pdcch Order List */
   cmLListInit(&cellSch->rachCfg.pdcchOdrLst);

   /* Intialize the rapId to UE mapping structure */
   for (cnt = 0; cnt<cellSch->rachCfg.numDedPrm; cnt++)
      cellSch->rachCfg.rapIdMap[cnt].rapId = cellSch->rachCfg.dedPrmStart + \
      cmLListInit(&cellSch->rachCfg.rapIdMap[cnt].assgndUes);
   /* Perform Prach Mask Idx, remDedPrm, applFrm initializations */
   /* Set remDedPrm as numDedPrm */
   cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
   /* Initialize applFrm */
   cellSch->rachCfg.prachMskIndx = 0;
   /* applFrm.sfn: next SFN of the parity class (even/odd/any) on which a
    * PRACH occasion may occur, at or after the current SFN */
   if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_EVEN)
      cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + \
            (cell->crntTime.sfn % 2)) % RGSCH_MAX_SFN;
   else if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ODD)
      if((cell->crntTime.sfn%2) == 0)
         cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + 1)\
      cellSch->rachCfg.applFrm.sfn = cell->crntTime.sfn;
   /* Initialize cellSch->rachCfg.applFrm as >= crntTime.
    * This is because of RGSCH_CALC_SF_DIFF logic */
   if (cellSch->rachCfg.applFrm.sfn == cell->crntTime.sfn)
      /* Find the first PRACH occasion subframe still ahead in this frame */
      while (cellSch->rachCfg.prachMskIndx < cell->rachCfg.raOccasion.size)
         if (cell->crntTime.slot <\
               cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx])
         cellSch->rachCfg.prachMskIndx++;
      /* All occasions of this frame already passed: move to the next
       * applicable SFN (next frame for ANY, +2 for EVEN/ODD) */
      if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size)
         if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
            cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) %\
            cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) %\
         cellSch->rachCfg.prachMskIndx = 0;
      cellSch->rachCfg.applFrm.slot = \
         cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
      cellSch->rachCfg.applFrm.slot = \
         cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
   /* Note first param to this macro should always be the latest in time */
   sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
   /* Keep advancing applFrm until it is at least 'gap' subframes ahead */
   while (sfDiff <= gap)
      rgSCHCmnUpdNxtPrchMskIdx(cell);
      sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
17944 * @brief Updates the PRACH MASK INDEX.
17948 * Function: rgSCHCmnUpdNxtPrchMskIdx
17949 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
17950 * CFG is always >= "n"+"DELTA", where "n" is the crntTime
17951 * of the cell. If not, applFrm is updated to the next avl
17952 * PRACH oppurtunity as per the PRACH Cfg Index configuration.
17955 * Invoked by: Common Scheduler
17957 * @param[in] RgSchCellCb *cell
static Void rgSCHCmnUpdNxtPrchMskIdx(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);

   /* Determine the next prach mask Index */
   if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size - 1)
      /* PRACH within applFrm.sfn are done, go to next AVL sfn */
      cellSch->rachCfg.prachMskIndx = 0;
      if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
         cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) % \
      else/* RGR_SFN_EVEN or RGR_SFN_ODD */
         /* Even/odd-only occasions recur every second frame */
         cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) % \
      cellSch->rachCfg.applFrm.slot = cell->rachCfg.raOccasion.\
   else /* applFrm.sfn is still valid */
      cellSch->rachCfg.prachMskIndx += 1;
      if ( cellSch->rachCfg.prachMskIndx < RGR_MAX_SUBFRAME_NUM )
         cellSch->rachCfg.applFrm.slot = \
            cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
17995 * @brief Updates the Ded preamble RACH parameters
18000 * Function: rgSCHCmnUpdRachParam
18001 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
18002 * CFG is always >= "n"+"6"+"DELTA", where "n" is the crntTime
18003 * of the cell. If not, applFrm is updated to the next avl
18004 * PRACH oppurtunity as per the PRACH Cfg Index configuration,
18005 * accordingly the "remDedPrm" is reset to "numDedPrm" and
18006 * "prachMskIdx" field is updated as per "applFrm".
18009 * Invoked by: Common Scheduler
18011 * @param[in] RgSchCellCb *cell
static Void rgSCHCmnUpdRachParam(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
   uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;

   /* No dedicated preambles configured: nothing to maintain */
   if (cell->macPreambleSet.pres == NOTPRSNT)
   /* How far ahead the stored PRACH opportunity still is */
   sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, \
   /* applFrm is still a valid next Prach Oppurtunity */
      /* Opportunity too close/passed: advance to the next one and
       * replenish the dedicated preamble pool */
      rgSCHCmnUpdNxtPrchMskIdx(cell);
      /* Reset remDedPrm as numDedPrm */
      cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
18040 * @brief Dedicated Preamble allocation function.
18044 * Function: rgSCHCmnAllocPOParam
18045 * Purpose: Allocate pdcch, rapId and PrachMskIdx.
18046 * Set mapping of UE with the allocated rapId.
18048 * Invoked by: Common Scheduler
18050 * @param[in] RgSchCellCb *cell
18051 * @param[in] RgSchDlSf *dlSf
18052 * @param[in] RgSchUeCb *ue
18053 * @param[out] RgSchPdcch **pdcch
18054 * @param[out] uint8_t *rapId
18055 * @param[out] uint8_t *prachMskIdx
static S16 rgSCHCmnAllocPOParam
RgSchPdcch **pdcch,
uint8_t *prachMskIdx
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
   RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);

   if (cell->macPreambleSet.pres == PRSNT_NODEF)
      /* No dedicated preamble left for the current opportunity */
      if (cellSch->rachCfg.remDedPrm == 0)
      /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
      if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
      /* The stored prachMskIdx is the index of PRACH Oppurtunities in
       * raOccasions.subframes[].
       * Converting the same to the actual PRACHMskIdx to be transmitted. */
      *prachMskIdx = cellSch->rachCfg.prachMskIndx + 1;
      /* Distribution starts from dedPrmStart till dedPrmStart + numDedPrm */
      *rapId = cellSch->rachCfg.dedPrmStart +
         cellSch->rachCfg.numDedPrm - cellSch->rachCfg.remDedPrm;
      cellSch->rachCfg.remDedPrm--;
      /* Map UE with the allocated RapId */
      ueDl->rachInfo.asgnOppr = cellSch->rachCfg.applFrm;
      RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, cellSch->rachCfg.rapIdMap, (*rapId - cellSch->rachCfg.dedPrmStart));
      cmLListAdd2Tail(&cellSch->rachCfg.rapIdMap[*rapId - cellSch->rachCfg.dedPrmStart].assgndUes,
            &ueDl->rachInfo.rapIdLnk);
      ueDl->rachInfo.rapIdLnk.node = (PTR)ue;
      ueDl->rachInfo.poRapId = *rapId;
   else /* if dedicated preambles not configured */
      /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
      if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
18115 * @brief Dowlink Scheduling Handler.
18119 * Function: rgSCHCmnGenPdcchOrder
18120 * Purpose: For each UE in PO Q, grab a PDCCH,
18121 * get an available ded RapId and fill PDCCH
18122 * with PO information.
18124 * Invoked by: Common Scheduler
18126 * @param[in] RgSchCellCb *cell
18127 * @param[in] RgSchDlSf *dlSf
static Void rgSCHCmnGenPdcchOrder(RgSchCellCb *cell,RgSchDlSf *dlSf)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   CmLList *node = cellSch->rachCfg.pdcchOdrLst.first;
   uint8_t prachMskIdx;
   RgSchPdcch *pdcch = NULLP;

      ue = (RgSchUeCb *)node->node;
      /* Skip sending for this subframe is Measuring or inActive in UL due
       * to MeasGap or inactie due to DRX
       */
      if ((ue->measGapCb.isMeasuring == TRUE) ||
          (ue->ul.ulInactvMask & RG_MEASGAP_INACTIVE) ||
          (ue->isDrxEnabled &&
           ue->dl.dlInactvMask & RG_DRX_INACTIVE)
      /* Try to grab a PDCCH plus a dedicated rapId/PRACH mask for this UE */
      if (rgSCHCmnAllocPOParam(cell, dlSf, ue, &pdcch, &rapId,\
               &prachMskIdx) != ROK)
         /* No More rapIds left for the valid next avl Oppurtunity.
          * Unsatisfied UEs here would be given a chance, when the
          * prach Mask Index changes as per rachUpd every TTI */

         /* PDDCH can also be ordered with rapId=0, prachMskIdx=0
          * so that UE triggers a RACH procedure with non-dedicated preamble.
          * But the implementation here does not do this. Instead, the "break"
          * here implies, that PDCCH Odr always given with valid rapId!=0,
          * prachMskIdx!=0 if dedicated preambles are configured.
          * If not configured, then trigger a PO with rapId=0,prchMskIdx=0*/
      /* Fill pdcch with pdcch odr information */
      rgSCHCmnFillPdcchOdr2Sf(cell, ue, pdcch, rapId, prachMskIdx);
      /* Remove this UE from the PDCCH ORDER QUEUE */
      rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
      /* Reset UE's power state */
      rgSCHPwrUeReset(cell, ue);
18181 * @brief This function add UE to PdcchOdr Q if not already present.
18185 * Function: rgSCHCmnDlAdd2PdcchOdrQ
18188 * Invoked by: CMN Scheduler
18190 * @param[in] RgSchCellCb* cell
18191 * @param[in] RgSchUeCb* ue
static Void rgSCHCmnDlAdd2PdcchOdrQ(RgSchCellCb *cell,RgSchUeCb *ue)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);

   /* NULL link node => UE is not queued yet; avoids double insertion */
   if (ueDl->rachInfo.poLnk.node == NULLP)
      cmLListAdd2Tail(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
      ueDl->rachInfo.poLnk.node = (PTR)ue;
18211 * @brief This function rmvs UE to PdcchOdr Q if not already present.
18215 * Function: rgSCHCmnDlRmvFrmPdcchOdrQ
18218 * Invoked by: CMN Scheduler
18220 * @param[in] RgSchCellCb* cell
18221 * @param[in] RgSchUeCb* ue
static Void rgSCHCmnDlRmvFrmPdcchOdrQ(RgSchCellCb *cell,RgSchUeCb *ue)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);

   /* Unlink from the PDCCH order queue and clear the back-pointer so the
    * UE can be re-queued later */
   cmLListDelFrm(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
   ueDl->rachInfo.poLnk.node = NULLP;
18236 * @brief Fill pdcch with PDCCH order information.
18240 * Function: rgSCHCmnFillPdcchOdr2Sf
18241 * Purpose: Fill PDCCH with PDCCH order information,
18243 * Invoked by: Common Scheduler
18245 * @param[in] RgSchUeCb *ue
18246 * @param[in] RgSchPdcch *pdcch
18247 * @param[in] uint8_t rapId
18248 * @param[in] uint8_t prachMskIdx
static Void rgSCHCmnFillPdcchOdr2Sf
uint8_t prachMskIdx
   RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);

   /* Populate the DCI format 1A payload as a PDCCH order carrying the
    * assigned preamble index and PRACH mask index */
   pdcch->rnti = ue->ueId;
   pdcch->dci.dciFormat = TFU_DCI_FORMAT_1A;
   pdcch->dci.u.format1aInfo.isPdcchOrder = TRUE;
   pdcch->dci.u.format1aInfo.t.pdcchOrder.preambleIdx = rapId;
   pdcch->dci.u.format1aInfo.t.pdcchOrder.prachMaskIdx = prachMskIdx;

   /* Request for APer CQI immediately after PDCCH Order */
   /* CR ccpu00144525 */
   if(ue->dl.ueDlCqiCfg.aprdCqiCfg.pres)
      ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
      acqiCb->aCqiTrigWt = 0;
18284 * @brief UE deletion for scheduler.
18288 * Function : rgSCHCmnDelRachInfo
18290 * This functions deletes all scheduler information
18291 * pertaining to an UE.
18293 * @param[in] RgSchCellCb *cell
18294 * @param[in] RgSchUeCb *ue
static Void rgSCHCmnDelRachInfo(RgSchCellCb *cell,RgSchUeCb *ue)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);

   /* Detach the UE from the PDCCH order queue, if queued */
   if (ueDl->rachInfo.poLnk.node)
      rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
   /* Detach the UE from the handover list, if queued */
   if (ueDl->rachInfo.hoLnk.node)
      cmLListDelFrm(&cellSch->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
      ueDl->rachInfo.hoLnk.node = NULLP;
   /* Release any rapId->UE association still held for this UE */
   if (ueDl->rachInfo.rapIdLnk.node)
      rapIdIdx = ueDl->rachInfo.poRapId - cellSch->rachCfg.dedPrmStart;
      cmLListDelFrm(&cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes,
            &ueDl->rachInfo.rapIdLnk);
      ueDl->rachInfo.rapIdLnk.node = NULLP;
18324 * @brief This function retrieves the ue which has sent this raReq
18325 * and it allocates grant for UEs undergoing (for which RAR
18326 * is being generated) HandOver/PdcchOrder.
18331 * Function: rgSCHCmnHdlHoPo
18332 * Purpose: This function retrieves the ue which has sent this raReq
18333 * and it allocates grant for UEs undergoing (for which RAR
18334 * is being generated) HandOver/PdcchOrder.
18336 * Invoked by: Common Scheduler
18338 * @param[in] RgSchCellCb *cell
18339 * @param[out] CmLListCp *raRspLst
18340 * @param[in] RgSchRaReqInfo *raReq
static Void rgSCHCmnHdlHoPo
(
CmLListCp *raRspLst,
RgSchRaReqInfo *raReq
   RgSchUeCb *ue = raReq->ue;

   /* Let DRX know a dedicated RA is in progress for this UE */
   if ( ue->isDrxEnabled )
      rgSCHDrxDedRa(cell,ue);
   /* Grant allocation for a UE doing handover / PDCCH-ordered RACH */
   rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq);
18362 * @brief This function retrieves the UE which has sent this raReq
18363 * for handover case.
18368 * Function: rgSCHCmnGetHoUe
18369 * Purpose: This function retrieves the UE which has sent this raReq
18370 * for handover case.
18372 * Invoked by: Common Scheduler
18374 * @param[in] RgSchCellCb *cell
18375 * @param[in] RgSchRaReqInfo *raReq
18376 * @return RgSchUeCb*
RgSchUeCb* rgSCHCmnGetHoUe(RgSchCellCb *cell,uint16_t rapId)
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
   RgSchCmnDlUe *ueDl;

   /* Linear search of the handover UE list for the UE assigned this
    * dedicated preamble (rapId) */
   ueLst = &cellSch->rachCfg.hoUeLst;
   node = ueLst->first;
      ue = (RgSchUeCb *)node->node;
      ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
      if (ueDl->rachInfo.hoRapId == rapId)
static Void rgSCHCmnDelDedPreamble(RgSchCellCb *cell,uint8_t preambleId)
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
   RgSchCmnDlUe *ueDl;

   /* Find the handover UE holding this dedicated preamble and unlink it
    * from the handover list */
   ueLst = &cellSch->rachCfg.hoUeLst;
   node = ueLst->first;
      ue = (RgSchUeCb *)node->node;
      ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
      if (ueDl->rachInfo.hoRapId == preambleId)
         cmLListDelFrm(ueLst, &ueDl->rachInfo.hoLnk);
         ueDl->rachInfo.hoLnk.node = (PTR)NULLP;
18426 * @brief This function retrieves the UE which has sent this raReq
18427 * for PDCCh Order case.
18432 * Function: rgSCHCmnGetPoUe
18433 * Purpose: This function retrieves the UE which has sent this raReq
18434 * for PDCCH Order case.
18436 * Invoked by: Common Scheduler
18438 * @param[in] RgSchCellCb *cell
18439 * @param[in] RgSchRaReqInfo *raReq
18440 * @return RgSchUeCb*
RgSchUeCb* rgSCHCmnGetPoUe
(
CmLteTimingInfo timingInfo
   RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
   RgSchCmnDlUe *ueDl;

   /* Walk the list of UEs assigned this dedicated rapId */
   rapIdIdx = rapId -cellSch->rachCfg.dedPrmStart;
   ueLst = &cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes;
   node = ueLst->first;
      ue = (RgSchUeCb *)node->node;
      ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
      /* Remove UEs irrespective.
       * Old UE associations are removed.*/
      cmLListDelFrm(ueLst, &ueDl->rachInfo.rapIdLnk);
      ueDl->rachInfo.rapIdLnk.node = (PTR)NULLP;
      /* Match only if the UE's assigned PRACH opportunity equals the
       * occasion this RA request was received on */
      if (RGSCH_TIMEINFO_SAME(ueDl->rachInfo.asgnOppr, timingInfo))
18480 * @brief This function returns the valid UL cqi for a given UE.
18484 * Function: rgSCHCmnUlGetCqi
18485 * Purpose: This function returns the "valid UL cqi" for a given UE
18486 * based on UE category
18488 * Invoked by: Scheduler
18490 * @param[in] RgSchUeCb *ue
18491 * @param[in] uint8_t ueCtgy
uint8_t rgSCHCmnUlGetCqi
(
CmLteUeCategory ueCtgy
   RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);

   /* Start from the UE's maximum supported UL CQI; for non-CAT5 UEs the
    * reported CQI is capped at maxUlCqi, CAT5 UEs take it as-is */
   cqi = ueUl->maxUlCqi;
   if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
         (ueUl->validUlCqi > ueUl->maxUlCqi)))
      cqi = ueUl->validUlCqi;
   if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
         (ueUl->crntUlCqi[0] > ueUl->maxUlCqi )))
      cqi = ueUl->crntUlCqi[0];
}/* End of rgSCHCmnUlGetCqi */
18522 /***********************************************************
18524 * Func : rgSCHCmnUlRbAllocForPoHoUe
18526 * Desc : Do uplink RB allocation for a HO/PO UE.
18527 * Computes iTbs/iMcs from the UE's UL CQI (capped at the
18528 * msg3 maximum MCS), sizes the allocation in subbands from
18529 * the requested bytes, and books the grant on the msg3 HARQ proc.
18530 * Notes: Note that as of now, for retx, maxRb
18531 * is not considered. Alternatives, such
18532 * as dropping retx if it crosses maxRb
18533 * could be considered.
18535 * Ret : ROK on success; presumably RFAILED when no hole or no
18536 * alloc is available — TODO confirm elided returns
18537 **********************************************************/
18538 static S16 rgSCHCmnUlRbAllocForPoHoUe
18546 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18547 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18548 uint8_t sbSize = cellUl->sbSize;
/* Per-TTI cap configured in bytes; converted to bits for comparison */
18549 uint32_t maxBits = ue->ul.maxBytesPerUePerTti*8;
18551 RgSchUlAlloc *alloc;
/* msg3 is scheduled on a dedicated HARQ process index */
18561 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->msg3SchdHqProcIdx];
18562 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* No free hole in the UL subframe => nothing to allocate */
18564 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
18568 /*MS_WORKAROUND for HO ccpu00121116*/
18569 cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
18570 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend], cqi);
/* CQI -> iTbs -> iMcs; table row selected by normal/extended UL CP */
18571 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
18572 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* msg3 MCS is bounded; step down (elided: presumably cqi--) until
 * within RG_SCH_CMN_MAX_MSG3_IMCS */
18573 while(iMcs > RG_SCH_CMN_MAX_MSG3_IMCS)
18576 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
18577 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg);
18579 /* Filling the modorder in the grant structure*/
18580 RG_SCH_UL_MCS_TO_MODODR(iMcs,modOdr);
/* Spectral-efficiency lookup differs for normal vs extended CP */
18581 if (!cell->isCpUlExtend)
18583 eff = rgSchCmnNorUlEff[0][iTbs];
18587 eff = rgSchCmnExtUlEff[0][iTbs];
18590 bits = ueUl->alloc.reqBytes * 8;
18592 #if (ERRCLASS & ERRCLS_DEBUG)
/* Request smaller than the minimum TB for this iTbs: round up to one
 * subband's worth of PRBs */
18599 if (bits < rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs))
18602 nPrb = numSb * sbSize;
/* Request exceeds the per-TTI cap: size PRBs from maxBits instead.
 * eff is in units of bits*1024 per RE — hence the *1024 scaling. */
18606 if (bits > maxBits)
18609 nPrb = bits * 1024 / eff / RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
18614 numSb = nPrb / sbSize;
18618 /*ccpu00128775:MOD-Change to get upper threshold nPrb*/
18619 nPrb = RGSCH_CEIL((RGSCH_CEIL(bits * 1024, eff)),
18620 RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl));
18625 numSb = RGSCH_DIV_ROUND(nPrb, sbSize);
/* Subband count is further capped by the per-UE maximum */
18630 alloc = rgSCHCmnUlSbAlloc(sf, (uint8_t)RGSCH_MIN(numSb, cellUl->maxSbPerUe),\
18632 if (alloc == NULLP)
18634 DU_LOG("\nERROR --> SCH : rgSCHCmnUlRbAllocForPoHoUe(): Could not get UlAlloc");
18637 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
18639 /* Filling the modorder in the grant structure start*/
18640 alloc->grnt.modOdr = (TfuModScheme) modOdr;
18641 alloc->grnt.iMcs = iMcs;
18642 alloc->grnt.iMcsCrnt = iMcsCrnt;
18643 alloc->grnt.hop = 0;
18644 /* Fix for ccpu00123915*/
18645 alloc->forMsg3 = TRUE;
18646 alloc->hqProc = proc;
18647 alloc->hqProc->ulSfIdx = cellUl->msg3SchdIdx;
18649 alloc->rnti = ue->ueId;
18650 /* updating initNumRbs in case of HO */
18652 ue->initNumRbs = alloc->grnt.numRb;
18654 ueUl->alloc.alloc = alloc;
/* Recompute data size from the final iMcs/numRb (1-layer TB size table,
 * bits -> bytes) */
18655 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
18656 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
18657 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
18658 /* MS_WORKAROUND for HO ccpu00121124*/
18659 /*[Adi temp change] Need to fil modOdr */
18660 RG_SCH_UL_MCS_TO_MODODR(alloc->grnt.iMcsCrnt,alloc->grnt.modOdr);
/* Start a fresh transmission on the msg3 HARQ process */
18661 rgSCHUhmNewTx(proc, ueUl->hqEnt.maxHqRetx, alloc);
18662 /* No grant attr recorded now */
18667 * @brief This function allocates grant for UEs undergoing (for which RAR
18668 * is being generated) HandOver/PdcchOrder.
18673 * Function: rgSCHCmnAllocPoHoGrnt
18674 * Purpose: This function allocates grant for UEs undergoing (for which RAR
18675 * is being generated) HandOver/PdcchOrder.
18676 * Clears any previous allocations, performs the UL RB
18677 * allocation, copies the grant into ue->ul.rarGrnt and queues
18678 * the UE on the RAR response list.
18677 * Invoked by: Common Scheduler
18679 * @param[in] RgSchCellCb *cell
18680 * @param[out] CmLListCp *raRspLst,
18681 * @param[in] RgSchUeCb *ue
18682 * @param[in] RgSchRaReqInfo *raReq
18686 static Void rgSCHCmnAllocPoHoGrnt
18689 CmLListCp *raRspLst,
18691 RgSchRaReqInfo *raReq
18694 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18695 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* msg3 is granted in the UL subframe indexed for msg3 scheduling */
18697 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
18700 /* Clearing previous allocs if any*/
18701 rgSCHCmnUlUeDelAllocs(cell, ue);
18702 /* Fix : syed allocs are limited */
18703 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Minimum grant size for HO/PDCCH-order msg3 */
18707 ueUl->alloc.reqBytes = RG_SCH_MIN_GRNT_HOPO;
18708 if (rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, RGSCH_MAX_UL_RB) != ROK)
18713 /* Fill grant information */
18714 grnt = &ueUl->alloc.alloc->grnt;
18719 DU_LOG("\nERROR --> SCH : Failed to get"
18720 "the grant for HO/PDCCH Order. CRNTI:%d",ue->ueId);
/* Copy the computed grant fields into the RAR grant carried to the UE */
18723 ue->ul.rarGrnt.rapId = raReq->raReq.rapId;
18724 ue->ul.rarGrnt.hop = grnt->hop;
18725 ue->ul.rarGrnt.rbStart = grnt->rbStart;
18726 ue->ul.rarGrnt.numRb = grnt->numRb;
18727 ue->ul.rarGrnt.tpc = grnt->tpc;
18728 ue->ul.rarGrnt.iMcsCrnt = grnt->iMcsCrnt;
18729 ue->ul.rarGrnt.ta.pres = TRUE;
18730 ue->ul.rarGrnt.ta.val = raReq->raReq.ta;
18731 ue->ul.rarGrnt.datSz = grnt->datSz;
/* Piggyback an aperiodic CQI request if the subframe still has ACQI
 * budget and one is pending for this UE */
18732 if((sf->numACqiCount < RG_SCH_MAX_ACQI_PER_ULSF) && (RG_SCH_APCQI_NO != ue->dl.reqForCqi))
18736 /* Send two bits cqireq field if more than one cells are configured else one*/
18737 for (idx = 1;idx < CM_LTE_MAX_CELLS;idx++)
18739 if (ue->cellInfo[idx] != NULLP)
18741 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
/* Loop ran to completion => single-cell UE */
18745 if (idx == CM_LTE_MAX_CELLS)
18748 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
18750 ue->dl.reqForCqi = RG_SCH_APCQI_NO;
18751 sf->numACqiCount++;
18755 ue->ul.rarGrnt.cqiReqBit = 0;
18757 /* Attach Ho/Po allocation to RAR Rsp cont free Lst */
18758 cmLListAdd2Tail(raRspLst, &ue->ul.rarGrnt.raRspLnk);
18759 ue->ul.rarGrnt.raRspLnk.node = (PTR)ue;
18765 * @brief This is a utility function to set the fields in
18766 * an UL harq proc which is identified for non-adaptive retx
18770 * Function: rgSCHCmnUlNonadapRetx
18771 * Purpose: Sets the fields in UL Harq proc for non-adaptive retx
18772 * (same resources as the previous transmission, no PDCCH)
18773 * @param[in] RgSchCmnUlCell *cellUl
18774 * @param[out] RgSchUlAlloc *alloc
18775 * @param[in] uint8_t idx
18780 static Void rgSCHCmnUlNonadapRetx
18782 RgSchCmnUlCell *cellUl,
18783 RgSchUlAlloc *alloc,
/* Advance the HARQ state machine for a retransmission */
18787 rgSCHUhmRetx(alloc->hqProc, alloc);
18789 /* Update alloc to retx */
18790 alloc->hqProc->isRetx = TRUE;
18791 alloc->hqProc->ulSfIdx = cellUl->reTxIdx[idx];
/* Non-zero RV index maps to a reserved MCS value per 36.213; RV0 keeps
 * the original MCS */
18793 if (alloc->hqProc->rvIdx != 0)
18795 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[alloc->hqProc->rvIdx];
18799 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
18801 alloc->grnt.isRtx = TRUE;
/* Non-adaptive retx carries no PDCCH */
18802 alloc->pdcch = NULLP;
18806 * @brief Check if 2 allocs overlap
18810 * Function : rgSCHCmnUlAllocsOvrLap
18812 * - Return TRUE if alloc1 and alloc2 overlap.
18813 * Overlap test on subband ranges [sbStart, sbStart+numSb-1]:
18813b * either alloc's start falls inside the other's range.
18814 * @param[in] RgSchUlAlloc *alloc1
18815 * @param[in] RgSchUlAlloc *alloc2
18818 static Bool rgSCHCmnUlAllocsOvrLap(RgSchUlAlloc *alloc1,RgSchUlAlloc *alloc2)
18821 if (((alloc1->sbStart >= alloc2->sbStart) &&
18822 (alloc1->sbStart <= alloc2->sbStart + alloc2->numSb-1)) ||
18823 ((alloc2->sbStart >= alloc1->sbStart) &&
18824 (alloc2->sbStart <= alloc1->sbStart + alloc1->numSb-1)))
18831 * @brief Copy allocation Info from src to dst.
18835 * Function : rgSCHCmnUlCpyAllocInfo
18837 * - Copy allocation Info from src to dst.
18838 * Also repoints back-references (hqProc->alloc, ueUl->alloc.alloc,
18838b * SPS crntAlloc) at dstAlloc so no stale pointer remains at srcAlloc.
18839 * @param[in] RgSchUlAlloc *srcAlloc
18840 * @param[in] RgSchUlAlloc *dstAlloc
18843 static Void rgSCHCmnUlCpyAllocInfo(RgSchCellCb *cell,RgSchUlAlloc *srcAlloc,RgSchUlAlloc *dstAlloc)
18845 RgSchCmnUlUe *ueUl;
18847 dstAlloc->grnt = srcAlloc->grnt;
18848 dstAlloc->hqProc = srcAlloc->hqProc;
18849 /* Fix : syed During UE context release, hqProc->alloc
18850 * was pointing to srcAlloc instead of dstAlloc and
18851 * freeing from incorrect sf->allocDb was
18852 * corrupting the list. */
18853 /* In case of SPS Occasion Allocation is done in advance and
18854 at a later time Hq Proc is linked. Hence HqProc
18855 pointer in alloc shall be NULL */
18857 if (dstAlloc->hqProc)
18860 dstAlloc->hqProc->alloc = dstAlloc;
18862 dstAlloc->ue = srcAlloc->ue;
18863 dstAlloc->rnti = srcAlloc->rnti;
18864 dstAlloc->forMsg3 = srcAlloc->forMsg3;
18865 dstAlloc->raCb = srcAlloc->raCb;
18866 dstAlloc->pdcch = srcAlloc->pdcch;
18867 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
18870 ueUl = RG_SCH_CMN_GET_UL_UE(dstAlloc->ue,cell);
18871 ueUl->alloc.alloc = dstAlloc;
/* If UL SPS is enabled and its current alloc was the source, retarget
 * it at the destination to keep the SPS bookkeeping consistent */
18873 if (dstAlloc->ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
18875 if((dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc != NULLP)
18876 && (dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc == srcAlloc))
18878 dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc = dstAlloc;
18887 * @brief Update TX and RETX subframe's allocation
18892 * Function : rgSCHCmnUlInsAllocFrmNewSf2OldSf
18894 * - Release all preassigned allocations of newSf and merge
18895 * one alloc (srcAlloc) of newSf into oldSf.
18896 * - If alloc of newSf collide with one or more allocs of oldSf
18897 * - mark all such allocs of oldSf for Adaptive Retx.
18898 * - Swap the alloc and hole DB references of oldSf and newSf.
18900 * @param[in] RgSchCellCb *cell
18901 * @param[in] RgSchUlSf *newSf
18902 * @param[in] RgSchUlSf *oldSf
18903 * @param[in] RgSchUlAlloc *srcAlloc
18906 static Void rgSCHCmnUlInsAllocFrmNewSf2OldSf
18911 RgSchUlAlloc *srcAlloc
18914 RgSchUlAlloc *alloc, *dstAlloc, *nxtAlloc;
18916 /* MS_WORKAROUND ccpu00120827 */
18917 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
/* Pass 1: release every alloc of oldSf that collides with srcAlloc,
 * marking each for adaptive retransmission */
18920 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
18924 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
18925 /* If there is an overlap between alloc and srcAlloc
18926 * then alloc is marked for Adaptive retx and it is released
18928 if (rgSCHCmnUlAllocsOvrLap(alloc, srcAlloc) == TRUE)
18930 rgSCHCmnUlUpdAllocRetx(cell, alloc);
18931 rgSCHUtlUlAllocRls(oldSf, alloc);
18933 /* No further allocs spanning the srcAlloc subbands */
18934 if (srcAlloc->sbStart + srcAlloc->numSb - 1 <= alloc->sbStart)
18938 } while ((alloc = nxtAlloc) != NULLP);
18941 /* After freeing all the colliding allocs, request for an allocation
18942 * specifying the start and numSb with in txSf. This function should
18943 * always return positively with a nonNULL dstAlloc */
18944 /* MS_WORKAROUND ccpu00120827 */
18945 remAllocs = schCmnCell->ul.maxAllocPerUlSf - *oldSf->allocCountRef;
18948 /* Fix : If oldSf already has max Allocs then release the
18949 * old RETX alloc to make space for new alloc of newSf.
18950 * newSf allocs(i.e new Msg3s) are given higher priority
18951 * over retx allocs. */
18952 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
18956 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
/* Skip allocs already merged as new TX; only evict RETX allocs */
18957 if (!alloc->mrgdNewTxAlloc)
18959 /* If alloc is for RETX */
18960 /* TODO: Incase of this ad also in case of choosing
18961 * and alloc for ADAP RETX, we need to send ACK for
18962 * the corresponding alloc in PHICH */
18963 #ifndef EMTC_ENABLE
18964 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc);
18966 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc,FALSE);
18970 }while((alloc = nxtAlloc) != NULLP);
/* Reserve the exact subband span of srcAlloc inside oldSf */
18973 dstAlloc = rgSCHUtlUlGetSpfcAlloc(oldSf, srcAlloc->sbStart, srcAlloc->numSb);
18975 /* This should never happen */
18976 if (dstAlloc == NULLP)
18978 DU_LOG("\nERROR --> SCH : CRNTI:%d "
18979 "rgSCHUtlUlGetSpfcAlloc failed in rgSCHCmnUlInsAllocFrmNewSf2OldSf",
18984 /* Copy the srcAlloc's state information in to dstAlloc */
18985 rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc);
18986 /* Set new Tx merged Alloc Flag to TRUE, indicating that this
18987 * alloc shall not be processed for non-adaptive retransmission */
18988 dstAlloc->mrgdNewTxAlloc = TRUE;
18992 * @brief Merge all allocations of newSf to oldSf.
18996 * Function : rgSCHCmnUlMergeSfAllocs
18998 * - Merge all allocations of newSf to oldSf.
18999 * - If newSf's alloc collides with oldSf's alloc
19000 * then oldSf's alloc is marked for adaptive Retx
19001 * and is released from oldSf to create space for
19002 * the new alloc.
19004 * @param[in] RgSchCellCb *cell
19005 * @param[in] RgSchUlSf *oldSf
19006 * @param[in] RgSchUlSf *newSf
19009 static Void rgSCHCmnUlMergeSfAllocs(RgSchCellCb *cell,RgSchUlSf *oldSf,RgSchUlSf *newSf)
19011 RgSchUlAlloc *alloc, *nxtAlloc;
19014 /* Merge each alloc of newSf in to oldSf
19015 * and release it from newSf */
19016 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
/* nxtAlloc is captured before the merge releases the current node,
 * keeping the traversal safe */
19020 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
19021 rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, alloc);
19022 rgSCHUtlUlAllocRls(newSf, alloc);
19023 } while((alloc = nxtAlloc) != NULLP);
19028 * @brief Swap Hole/Alloc DB context of newSf and oldSf.
19032 * Function : rgSCHCmnUlSwapSfAllocs
19034 * - Swap Hole/Alloc DB context of newSf and oldSf.
19035 * After the swap, newSf carries the merged allocation state.
19036 * @param[in] RgSchCellCb *cell
19037 * @param[in] RgSchUlSf *oldSf
19038 * @param[in] RgSchUlSf *newSf
19041 static Void rgSCHCmnUlSwapSfAllocs(RgSchCellCb *cell,RgSchUlSf *oldSf,RgSchUlSf *newSf)
/* Classic three-step pointer swap via temporaries */
19043 RgSchUlAllocDb *tempAllocDb = newSf->allocDb;
19044 RgSchUlHoleDb *tempHoleDb = newSf->holeDb;
19045 uint8_t tempAvailSbs = newSf->availSubbands;
19049 newSf->allocDb = oldSf->allocDb;
19050 newSf->holeDb = oldSf->holeDb;
19051 newSf->availSubbands = oldSf->availSubbands;
19053 oldSf->allocDb = tempAllocDb;
19054 oldSf->holeDb = tempHoleDb;
19055 oldSf->availSubbands = tempAvailSbs;
19057 /* Fix ccpu00120610*/
/* allocCountRef must track each subframe's own (now swapped) allocDb */
19058 newSf->allocCountRef = &newSf->allocDb->count;
19059 oldSf->allocCountRef = &oldSf->allocDb->count;
19063 * @brief Perform non-adaptive RETX for non-colliding allocs.
19067 * Function : rgSCHCmnUlPrcNonAdptRetx
19069 * - Perform non-adaptive RETX for non-colliding allocs.
19070 * Merged new-TX allocs are skipped (flag reset only).
19071 * @param[in] RgSchCellCb *cell
19072 * @param[in] RgSchUlSf *newSf
19073 * @param[in] uint8_t idx
19076 static Void rgSCHCmnUlPrcNonAdptRetx(RgSchCellCb *cell,RgSchUlSf *newSf,uint8_t idx)
19078 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19079 RgSchUlAlloc *alloc, *nxtAlloc;
19081 /* perform non-adaptive retx allocation(adjustment) */
19082 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
19086 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
19087 /* A merged new TX alloc, reset the state and skip */
19088 if (alloc->mrgdNewTxAlloc)
19090 alloc->mrgdNewTxAlloc = FALSE;
/* Remaining allocs are prior transmissions: retransmit on the same
 * resources without a new grant */
19095 rgSCHCmnUlNonadapRetx(cellUl, alloc, idx);
19097 } while((alloc = nxtAlloc) != NULLP);
19103 * @brief Update TX and RETX subframe's allocation
19108 * Function : rgSCHCmnUlPrfmSfMerge
19110 * - Release all preassigned allocations of newSf and merge
19111 * them into oldSf.
19112 * - If alloc of newSf collide with one or more allocs of oldSf
19113 * - mark all such allocs of oldSf for Adaptive Retx.
19114 * - Swap the alloc and hole DB references of oldSf and newSf.
19115 * - The allocs which did not collide with pre-assigned msg3
19116 * allocs are marked for non-adaptive RETX.
19118 * @param[in] RgSchCellCb *cell
19119 * @param[in] RgSchUlSf *oldSf
19120 * @param[in] RgSchUlSf *newSf
19121 * @param[in] uint8_t idx
19124 static Void rgSCHCmnUlPrfmSfMerge
/* Orchestrates the three-phase merge: merge, swap, then non-adaptive
 * retx pass — see the called helpers for details */
19132 /* Preassigned resources for msg3 in newSf.
19133 * Hence do adaptive retx for all NACKED TXs */
19134 rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf);
19135 /* swap alloc and hole DBs of oldSf and newSf. */
19136 rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf);
19137 /* Here newSf has the resultant merged allocs context */
19138 /* Perform non-adaptive RETX for non-colliding allocs */
19139 rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx);
19145 * @brief Update TX and RETX subframe's allocation
19150 * Function : rgSCHCmnUlRmvCmpltdAllocs
19152 * - Free all Transmission which are ACKED
19153 * OR for which MAX retransmission have
19154 * been reached; queue the rest for adaptive retx.
19157 * @param[in] RgSchCellCb *cell,
19158 * @param[in] RgSchUlSf *sf
19161 static Void rgSCHCmnUlRmvCmpltdAllocs(RgSchCellCb *cell,RgSchUlSf *sf)
19163 RgSchUlAlloc *alloc, *nxtAlloc;
19165 if ((alloc = rgSCHUtlUlAllocFirst(sf)) == NULLP)
19171 nxtAlloc = rgSCHUtlUlAllocNxt(sf, alloc);
19173 DU_LOG("\nDEBUG --> SCH : rgSCHCmnUlRmvCmpltdAllocs:time(%d %d) alloc->hqProc->remTx %d hqProcId(%d) \n",cell->crntTime.sfn,cell->crntTime.slot,alloc->hqProc->remTx, alloc->grnt.hqProcId);
/* NOTE(review): rcvdCrcInd is unconditionally set TRUE here, which makes
 * the first half of the following condition always hold — presumably
 * guarded by elided compile-time/conditional lines; confirm against the
 * full source */
19175 alloc->hqProc->rcvdCrcInd = TRUE;
19176 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
19179 /* SR_RACH_STATS : MSG 3 MAX RETX FAIL*/
19180 if ((alloc->forMsg3 == TRUE) && (alloc->hqProc->remTx == 0))
19182 rgNumMsg3FailMaxRetx++;
19184 cell->tenbStats->sch.msg3Fail++;
19188 #ifdef MAC_SCH_STATS
19189 if(alloc->ue != NULLP)
19191 /* access from ulHarqProc*/
19192 RgSchUeCb *ueCb = alloc->ue;
19193 RgSchCmnUe *cmnUe = (RgSchCmnUe*)ueCb->sch;
19194 RgSchCmnUlUe *ulUe = &(cmnUe->ul);
19195 uint8_t cqi = ulUe->crntUlCqi[0];
19196 uint16_t numUlRetx = ueCb->ul.hqEnt.maxHqRetx - alloc->hqProc->remTx;
/* Per-CQI HARQ retx histogram; bucket by number of retransmissions */
19198 hqRetxStats.ulCqiStat[(cqi - 1)].mcs = alloc->grnt.iMcs;
19203 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1++;
19206 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2++;
19209 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3++;
19212 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4++;
19215 hqRetxStats.ulCqiStat[(cqi - 1)].totalTx = \
19216 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1 + \
19217 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2 * 2) + \
19218 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3 * 3) + \
19219 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4 * 4);
19222 #endif /*MAC_SCH_STATS*/
/* Transmission complete (CRC ok or retx budget exhausted): free it */
19223 rgSCHCmnUlFreeAllocation(cell, sf, alloc);
19225 /*ccpu00106104 MOD added check for AckNackRep */
19226 /*added check for acknack so that adaptive retx considers ue
19227 inactivity due to ack nack repetition*/
19228 else if((alloc->ue != NULLP) && (TRUE != alloc->forMsg3))
/* Incomplete non-msg3 UE alloc: mark for adaptive retx and release */
19230 rgSCHCmnUlUpdAllocRetx(cell, alloc);
19231 rgSCHUtlUlAllocRls(sf, alloc);
19233 } while ((alloc = nxtAlloc) != NULLP);
19239 * @brief Update an uplink subframe.
19243 * Function : rgSCHCmnRlsUlSf
19245 * For each allocation
19246 * - if no more tx needed
19247 * - Release allocation
19249 * - Perform retransmission
19251 * @param[in] RgSchUlSf *sf
19252 * @param[in] uint8_t idx
19255 Void rgSCHCmnRlsUlSf(RgSchCellCb *cell,uint8_t idx)
19258 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Only process if a valid HARQ-feedback subframe index exists for idx */
19259 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
19261 RgSchUlSf *oldSf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
19263 /* Initialize the reTxLst of UL HqProcs for RETX subframe */
/* Empty subframe: nothing to release (elided early-out presumably) */
19264 if (rgSCHUtlUlAllocFirst(oldSf) == NULLP)
19268 /* Release all completed TX allocs from sf */
19269 rgSCHCmnUlRmvCmpltdAllocs(cell, oldSf);
/* Reset the aperiodic-CQI budget for the recycled subframe */
19271 oldSf->numACqiCount = 0;
19277 * @brief Handle uplink allocation for retransmission.
19281 * Function : rgSCHCmnUlUpdAllocRetx
19283 * - Perform adaptive retransmission: snapshot the grant into the
19284 * HARQ proc's reTxAlloc and queue the proc on the cell reTxLst.
19285 * @param[in] RgSchUlSf *sf
19286 * @param[in] RgSchUlAlloc *alloc
19289 static Void rgSCHCmnUlUpdAllocRetx(RgSchCellCb *cell,RgSchUlAlloc *alloc)
19291 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
/* Copy everything needed to rebuild the grant later, since the alloc
 * itself is about to be released */
19293 alloc->hqProc->reTxAlloc.rnti = alloc->rnti;
19294 alloc->hqProc->reTxAlloc.numSb = alloc->numSb;
19295 alloc->hqProc->reTxAlloc.iMcs = alloc->grnt.iMcs;
19297 alloc->hqProc->reTxAlloc.dciFrmt = alloc->grnt.dciFrmt;
19298 alloc->hqProc->reTxAlloc.numLyr = alloc->grnt.numLyr;
19299 alloc->hqProc->reTxAlloc.vrbgStart = alloc->grnt.vrbgStart;
19300 alloc->hqProc->reTxAlloc.numVrbg = alloc->grnt.numVrbg;
19301 alloc->hqProc->reTxAlloc.modOdr = alloc->grnt.modOdr;
19303 //iTbs = rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs);
19304 //iTbs = alloc->grnt.iMcs;
19305 //RGSCH_ARRAY_BOUND_CHECK( 0, rgTbSzTbl[0], iTbs);
19306 alloc->hqProc->reTxAlloc.tbSz = alloc->grnt.datSz;
19307 //rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1]/8;
19308 alloc->hqProc->reTxAlloc.ue = alloc->ue;
19309 alloc->hqProc->reTxAlloc.forMsg3 = alloc->forMsg3;
19310 alloc->hqProc->reTxAlloc.raCb = alloc->raCb;
19312 /* Set as retransmission is pending */
19313 alloc->hqProc->isRetx = TRUE;
/* Detach the proc from the (soon-to-be-freed) alloc and its subframe */
19314 alloc->hqProc->alloc = NULLP;
19315 alloc->hqProc->ulSfIdx = RGSCH_INVALID_INFO;
19317 DU_LOG("\nDEBUG --> SCH : Adding Harq Proc Id in the retx list hqProcId %d \n",alloc->grnt.hqProcId);
19319 cmLListAdd2Tail(&cmnUlCell->reTxLst, &alloc->hqProc->reTxLnk);
19320 alloc->hqProc->reTxLnk.node = (PTR)alloc->hqProc;
19325 * @brief Attempts allocation for msg3s for which ADAP retransmissions
19326 * are required.
19330 * Function : rgSCHCmnUlAdapRetxAlloc
19332 * Attempts allocation for msg3s for which ADAP retransmissions
19333 * are required: fetches a PDCCH, carves a new UL alloc out of the
19334 * hole, rebuilds the grant from proc->reTxAlloc, fills DCI format 0.
19335 * @param[in] RgSchCellCb *cell
19336 * @param[in] RgSchUlSf *sf
19337 * @param[in] RgSchUlHqProcCb *proc;
19338 * @param[in] RgSchUlHole *hole;
19339 * @return Bool TRUE on successful allocation, FALSE otherwise
19341 static Bool rgSCHCmnUlAdapRetxAlloc
19345 RgSchUlHqProcCb *proc,
19349 uint8_t numSb = proc->reTxAlloc.numSb;
19350 uint8_t iMcs = proc->reTxAlloc.iMcs;
19351 CmLteTimingInfo frm = cell->crntTime;
19352 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19355 RgSchUlAlloc *alloc;
19357 /* Fetch PDCCH for msg3 */
19358 /* ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
19359 /* Introduced timing delta for UL control */
19360 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
19361 dlSf = rgSCHUtlSubFrmGet(cell, frm);
19362 pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
/* No PDCCH => adaptive retx cannot be signalled this TTI */
19363 if (pdcch == NULLP)
19368 /* Fetch UL Alloc for msg3 */
19369 if (numSb <= hole->num)
19371 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
/* Alloc failure: return the PDCCH before bailing out */
19376 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
19377 DU_LOG("\nERROR --> SCH : UL Alloc fail for msg3 retx for rnti: %d\n",
19378 proc->reTxAlloc.rnti);
19382 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
19383 alloc->grnt.iMcs = iMcs;
19384 alloc->grnt.datSz = proc->reTxAlloc.tbSz;
19387 //RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
19389 /* Fill UL Alloc for msg3 */
19390 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
19391 alloc->grnt.nDmrs = 0;
19392 alloc->grnt.hop = 0;
19393 alloc->grnt.delayBit = 0;
19394 alloc->grnt.isRtx = TRUE;
19395 proc->ulSfIdx = cellUl->schdIdx;
19397 proc->schdTime = cellUl->schdTime;
/* Restore grant parameters captured when the retx was queued
 * (5GTF-specific fields — presumably compiled under a 5GTF flag) */
19398 alloc->grnt.hqProcId = proc->procId;
19399 alloc->grnt.dciFrmt = proc->reTxAlloc.dciFrmt;
19400 alloc->grnt.numLyr = proc->reTxAlloc.numLyr;
19401 alloc->grnt.vrbgStart = proc->reTxAlloc.vrbgStart;
19402 alloc->grnt.numVrbg = proc->reTxAlloc.numVrbg;
19403 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
19404 alloc->grnt.modOdr = proc->reTxAlloc.modOdr;
19406 /* TODO : Hardcoding these as of now */
19407 alloc->grnt.hop = 0;
19408 alloc->grnt.SCID = 0;
19409 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
19410 alloc->grnt.PMI = 0;
19411 alloc->grnt.uciOnxPUSCH = 0;
19413 alloc->rnti = proc->reTxAlloc.rnti;
19414 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
19415 alloc->ue = proc->reTxAlloc.ue;
19416 alloc->pdcch = pdcch;
19417 alloc->forMsg3 = proc->reTxAlloc.forMsg3;
19418 alloc->raCb = proc->reTxAlloc.raCb;
19419 alloc->hqProc = proc;
19420 alloc->isAdaptive = TRUE;
19422 sf->totPrb += alloc->grnt.numRb;
19424 /* FIX : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* msg3 (RA) path: record the grant and its reception time on the raCb */
19427 alloc->raCb->msg3Grnt= alloc->grnt;
19429 /* To the crntTime, add the time at which UE will
19430 * actually send MSG3 */
19431 alloc->raCb->msg3AllocTime = cell->crntTime;
19432 RGSCH_INCR_SUB_FRAME(alloc->raCb->msg3AllocTime, RG_SCH_CMN_MIN_RETXMSG3_RECP_INTRVL);
19434 alloc->raCb->msg3AllocTime = cellUl->schdTime;
19436 rgSCHCmnUlAdapRetx(alloc, proc);
19437 /* Fill PDCCH with alloc info */
19438 pdcch->rnti = alloc->rnti;
19439 pdcch->dci.dciFormat = TFU_DCI_FORMAT_0;
19440 pdcch->dci.u.format0Info.hoppingEnbld = alloc->grnt.hop;
19441 pdcch->dci.u.format0Info.rbStart = alloc->grnt.rbStart;
19442 pdcch->dci.u.format0Info.numRb = alloc->grnt.numRb;
19443 pdcch->dci.u.format0Info.mcs = alloc->grnt.iMcsCrnt;
19444 pdcch->dci.u.format0Info.ndi = alloc->hqProc->ndi;
19445 pdcch->dci.u.format0Info.nDmrs = alloc->grnt.nDmrs;
19446 pdcch->dci.u.format0Info.tpcCmd = alloc->grnt.tpc;
19450 /* ulIdx setting for cfg 0 shall be appropriately fixed thru ccpu00109015 */
19451 pdcch->dci.u.format0Info.ulIdx = RG_SCH_ULIDX_MSB;
19452 pdcch->dci.u.format0Info.dai = RG_SCH_MAX_DAI_IDX;
19455 pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_0];
/* Non-msg3 UE path (elided branch header): update the UE context */
19459 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue,cell);
19461 alloc->ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
19464 ue->ul.nPrb = alloc->grnt.numRb;
19466 ueUl->alloc.alloc = alloc;
19467 /* FIx: Removed the call to rgSCHCmnUlAdapRetx */
19468 rgSCHCmnUlUeFillAllocInfo(cell, alloc->ue);
19469 /* Setting csireq as false for Adaptive Retx*/
19470 ueUl->alloc.alloc->pdcch->dci.u.format0Info.cqiReq = RG_SCH_APCQI_NO;
19471 pdcch->dciNumOfBits = alloc->ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
19473 /* Reset as retransmission is done */
19474 proc->isRetx = FALSE;
19476 else /* Intg fix */
/* Hole too small for the required subbands: release PDCCH and fail */
19478 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
19479 DU_LOG("\nERROR --> SCH : Num SB not suffiecient for adap retx for rnti: %d",
19480 proc->reTxAlloc.rnti);
19486 /* Fix: syed Adaptive Msg3 Retx crash. */
19488 * @brief Releases all Adaptive Retx HqProcs which failed for
19489 * allocations in this scheduling occassion.
19493 * Function : rgSCHCmnUlSfRlsRetxProcs
19494 * Drains the cell's reTxLst, unlinking each queued HARQ proc.
19496 * @param[in] RgSchCellCb *cell
19497 * @param[in] RgSchUlSf *sf
19501 static Void rgSCHCmnUlSfRlsRetxProcs(RgSchCellCb *cell,RgSchUlSf *sf)
19505 RgSchUlHqProcCb *proc;
19506 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19508 cp = &(cellUl->reTxLst);
/* Loop header elided in this view; each iteration unlinks one proc */
19512 proc = (RgSchUlHqProcCb *)node->node;
19514 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
19515 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
19516 proc->reTxLnk.node = (PTR)NULLP;
19523 * @brief Attempts allocation for UEs for which retransmissions
19524 * are required.
19528 * Function : rgSCHCmnUlSfReTxAllocs
19530 * Attempts allocation for UEs for which retransmissions
19531 * are required, walking the cell's adaptive-retx list. UEs in
19532 * measurement gap or ACK/NACK repetition are skipped this TTI.
19533 * @param[in] RgSchCellCb *cell
19534 * @param[in] RgSchUlSf *sf
19537 static Void rgSCHCmnUlSfReTxAllocs(RgSchCellCb *cell,RgSchUlSf *sf)
19541 RgSchUlHqProcCb *proc;
19544 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
19545 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19547 cp = &(cellUl->reTxLst);
19551 proc = (RgSchUlHqProcCb *)node->node;
19552 ue = proc->reTxAlloc.ue;
19554 /*ccpu00106104 MOD added check for AckNackRep */
19555 /*added check for acknack so that adaptive retx considers ue
19556 inactivity due to ack nack repetition*/
/* Skip (presumably 'continue') UEs that cannot transmit this TTI */
19557 if((ue != NULLP) &&
19558 ((ue->measGapCb.isMeasuring == TRUE)||
19559 (ue->ackNakRepCb.isAckNakRep == TRUE)))
19563 /* Fix for ccpu00123917: Check if maximum allocs per UL sf have been exhausted */
19564 if (((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
19565 || (sf->allocDb->count == schCmnCell->ul.maxAllocPerUlSf))
19567 /* No more UL BW then return */
19570 /* perform adaptive retx for UE's */
/* On failure keep the proc queued for a later occasion */
19571 if (rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole) == FALSE)
19575 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
19576 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
19577 /* Fix: syed Adaptive Msg3 Retx crash. */
19578 proc->reTxLnk.node = (PTR)NULLP;
19584 * @brief Handles RB allocation for downlink.
19588 * Function : rgSCHCmnDlRbAlloc
19590 * Invoking Module Processing:
19591 * - This function is invoked for DL RB allocation
19593 * Processing Steps:
19594 * - If cell is frequency selecive,
19595 * - Call rgSCHDlfsAllocRb().
19596 * - Else,
19597 * - Call rgSCHCmnNonDlfsRbAlloc().
19599 * @param[in] RgSchCellCb *cell
19600 * @param[in] RgSchDlRbAllocInfo *allocInfo
19604 static Void rgSCHCmnDlRbAlloc(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
19606 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Dispatch on configured DL frequency-selective scheduling */
19608 if (cellSch->dl.isDlFreqSel)
19610 DU_LOG("\nINFO --> SCH : 5GTF_ERROR DLFS SCH Enabled\n");
/* DLFS plugin hook: allocation delegated via the apisDlfs vtable */
19611 cellSch->apisDlfs->rgSCHDlfsAllocRb(cell, allocInfo);
19615 rgSCHCmnNonDlfsRbAlloc(cell, allocInfo);
19623 * @brief Determines number of RBGs and RBG subset sizes for the given DL
19624 * bandwidth and rbgSize
19627 * Function : rgSCHCmnDlGetRbgInfo
19630 * Processing Steps:
19631 * - Fill-up rbgInfo data structure for given DL bandwidth and rbgSize
19632 * (numRbgs, lastRbgSize, numRbs; RA type-1 subset sizes only
19633 * under RGSCH_SPS_UNUSED)
19633 * @param[in] uint8_t dlTotalBw
19634 * @param[in] uint8_t dlSubsetBw
19635 * @param[in] uint8_t maxRaType1SubsetBw
19636 * @param[in] uint8_t rbgSize
19637 * @param[out] RgSchBwRbgInfo *rbgInfo
19640 Void rgSCHCmnDlGetRbgInfo
19643 uint8_t dlSubsetBw,
19644 uint8_t maxRaType1SubsetBw,
19646 RgSchBwRbgInfo *rbgInfo
19649 #ifdef RGSCH_SPS_UNUSED
19651 uint8_t lastRbgIdx = ((dlTotalBw + rbgSize - 1)/rbgSize) - 1;
19652 uint8_t currRbgSize = rbgSize;
19653 uint8_t subsetSizeIdx = 0;
19654 uint8_t subsetSize[RG_SCH_NUM_RATYPE1_SUBSETS] = {0};
/* Last RBG may be smaller when bandwidth is not a multiple of rbgSize */
19655 uint8_t lastRbgSize = rbgSize - (dlTotalBw - ((dlTotalBw/rbgSize) * rbgSize));
19656 uint8_t numRaType1Rbgs = (maxRaType1SubsetBw + rbgSize - 1)/rbgSize;
19659 /* Compute maximum number of SPS RBGs for the cell */
19660 rbgInfo->numRbgs = ((dlSubsetBw + rbgSize - 1)/rbgSize);
19662 #ifdef RGSCH_SPS_UNUSED
19663 /* Distribute RBGs across subsets except last RBG */
19664 for (;idx < numRaType1Rbgs - 1; ++idx)
19666 subsetSize[subsetSizeIdx] += currRbgSize;
/* Round-robin RBGs over the P (=rbgSize) RA type-1 subsets */
19667 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
19670 /* Computation for last RBG */
19671 if (idx == lastRbgIdx)
19673 currRbgSize = lastRbgSize;
19675 subsetSize[subsetSizeIdx] += currRbgSize;
19676 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
19679 /* Update the computed sizes */
19680 #ifdef RGSCH_SPS_UNUSED
19681 rbgInfo->lastRbgSize = currRbgSize;
19683 rbgInfo->lastRbgSize = rbgSize -
19684 (dlSubsetBw - ((dlSubsetBw/rbgSize) * rbgSize));
19685 #ifdef RGSCH_SPS_UNUSED
19686 memcpy(rbgInfo->rbgSubsetSize, subsetSize, 4 * sizeof(uint8_t));
/* numRbs is clipped so numRbgs*rbgSize never exceeds total bandwidth */
19688 rbgInfo->numRbs = (rbgInfo->numRbgs * rbgSize > dlTotalBw) ?
19689 dlTotalBw:(rbgInfo->numRbgs * rbgSize);
19690 rbgInfo->rbgSize = rbgSize;
19694 * @brief Handles RB allocation for Resource allocation type 0
19698 * Function : rgSCHCmnDlRaType0Alloc
19700 * Invoking Module Processing:
19701 * - This function is invoked for DL RB allocation for resource allocation
19702 * type 0
19704 * Processing Steps:
19705 * - Determine the available positions in the rbgMask.
19706 * - Allocate RBGs in the available positions.
19707 * - Update RA Type 0, RA Type 1 and RA type 2 masks.
19709 * @param[in] RgSchDlSfAllocInfo *allocedInfo
19710 * @param[in] uint8_t rbsReq
19711 * @param[in] RgSchBwRbgInfo *rbgInfo
19712 * @param[out] uint8_t *numAllocRbs
19713 * @param[out] RgSchDlSfAllocInfo *resAllocInfo
19714 * @param[in] Bool isPartialAlloc When FALSE, allocate only if the
19715 * full rbsReq is available; when TRUE take what exists
19716 * @return uint8_t Number of RBGs allocated
19719 uint8_t rgSCHCmnDlRaType0Alloc
19721 RgSchDlSfAllocInfo *allocedInfo,
19723 RgSchBwRbgInfo *rbgInfo,
19724 uint8_t *numAllocRbs,
19725 RgSchDlSfAllocInfo *resAllocInfo,
19726 Bool isPartialAlloc
19729 /* Note: This function atttempts allocation only full allocation */
19730 uint32_t remNumRbs, rbgPosInRbgMask, ueRaType2Mask;
19731 uint8_t type2MaskIdx, cnt, rbIdx;
19732 uint8_t maskSize, rbg;
19733 uint8_t bestNumAvailRbs = 0;
19734 uint8_t usedRbs = 0;
19735 uint8_t numAllocRbgs = 0;
19736 uint8_t rbgSize = rbgInfo->rbgSize;
19737 uint32_t *rbgMask = &(resAllocInfo->raType0Mask);
19738 #ifdef RGSCH_SPS_UNUSED
19740 uint32_t ueRaType1Mask;
19741 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
19742 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
19744 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
19746 uint32_t allocedMask = allocedInfo->raType0Mask;
19748 maskSize = rbgInfo->numRbgs;
/* Count RBGs already taken; if all are set there is nothing left */
19751 RG_SCH_CMN_DL_COUNT_ONES(allocedMask, maskSize, &usedRbs);
19752 if (maskSize == usedRbs)
19754 /* All RBGs are allocated, including the last one */
/* Free full-size RBGs; the (possibly short) last RBG is handled below.
 * NOTE(review): the "-1" excludes the last RBG despite the "removed
 * minus 1" remark — confirm intent against the full source */
19759 remNumRbs = (maskSize - usedRbs - 1) * rbgSize; /* vamsee: removed minus 1 */
19761 /* If last RBG is available, add last RBG size */
19762 if (!(allocedMask & (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(maskSize - 1))))
19764 remNumRbs += rbgInfo->lastRbgSize;
19768 /* If complete allocation is needed, check if total requested RBs are available else
19769 * check the best available RBs */
19770 if (!isPartialAlloc)
19772 if (remNumRbs >= rbsReq)
19774 bestNumAvailRbs = rbsReq;
19779 bestNumAvailRbs = remNumRbs > rbsReq ? rbsReq : remNumRbs;
19782 /* Allocate for bestNumAvailRbs */
19783 if (bestNumAvailRbs)
/* Pass over all full-size RBGs (last RBG excluded) */
19785 for (rbg = 0; rbg < maskSize - 1; ++rbg)
19787 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
19788 if (!(allocedMask & rbgPosInRbgMask))
19790 /* Update RBG mask */
19791 *rbgMask |= rbgPosInRbgMask;
19793 /* Compute RB index of the first RB of the RBG allocated */
19794 rbIdx = rbg * rbgSize;
/* Mirror the allocation into the RA type 1/2 masks per RB */
19796 for (cnt = 0; cnt < rbgSize; ++cnt)
19798 #ifdef RGSCH_SPS_UNUSED
19799 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
19801 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
19802 #ifdef RGSCH_SPS_UNUSED
19803 /* Update RBG mask for RA type 1 */
19804 raType1Mask[rbgSubset] |= ueRaType1Mask;
19805 raType1UsedRbs[rbgSubset]++;
19807 /* Update RA type 2 mask */
19808 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
19811 *numAllocRbs += rbgSize;
19812 remNumRbs -= rbgSize;
19814 if (*numAllocRbs >= bestNumAvailRbs)
19820 /* If last RBG available and allocation is not completed, allocate
19821 * the last (possibly short) RBG */
19822 if (*numAllocRbs < bestNumAvailRbs)
19824 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
19825 *rbgMask |= rbgPosInRbgMask;
19826 *numAllocRbs += rbgInfo->lastRbgSize;
19828 /* Compute RB index of the first RB of the last RBG */
19829 rbIdx = ((rbgInfo->numRbgs - 1 ) * rbgSize ); /* removed minus 1 vamsee */
19831 for (cnt = 0; cnt < rbgInfo->lastRbgSize; ++cnt)
19833 #ifdef RGSCH_SPS_UNUSED
19834 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
19836 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
19837 #ifdef RGSCH_SPS_UNUSED
19838 /* Update RBG mask for RA type 1 */
19839 raType1Mask[rbgSubset] |= ueRaType1Mask;
19840 raType1UsedRbs[rbgSubset]++;
19842 /* Update RA type 2 mask */
19843 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
19846 remNumRbs -= rbgInfo->lastRbgSize;
19849 /* Note: this should complete allocation, not checking for the
19850 * request being fully satisfied */
19853 return (numAllocRbgs);
19856 #ifdef RGSCH_SPS_UNUSED
19858 * @brief Handles RB allocation for Resource allocation type 1
19862 * Function : rgSCHCmnDlRaType1Alloc
19864 * Invoking Module Processing:
19865 * - This function is invoked for DL RB allocation for resource allocation
19868 * Processing Steps:
19869 * - Determine the available positions in the subsets.
19870 * - Allocate RB in the available subset.
19871 * - Update RA Type1, RA type 0 and RA type 2 masks.
19873 * @param[in] RgSchDlSfAllocInfo *allocedInfo
19874 * @param[in] uint8_t rbsReq
19875 * @param[in] RgSchBwRbgInfo *rbgInfo
19876 * @param[in] uint8_t startRbgSubset
19877 * @param[in] uint8_t *allocRbgSubset
19878 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
19879 * @param[in] Bool isPartialAlloc
19882 * Number of allocated RBs
/* Allocates DL RBs using Resource Allocation Type 1 (RBG-subset bitmaps,
 * TS 36.213 7.1.6.2). First pass: pick the RBG subset (starting from
 * startRbgSubset, round-robin over rbgSize subsets) with enough free RBs
 * (exactly rbsReq, or the best available when isPartialAlloc). Second pass:
 * set bits in that subset's type-1 mask and mirror each allocated RB into the
 * type-0 and type-2 masks. Returns the number of RBs allocated; the chosen
 * subset is written to *allocRbgSubset.
 * NOTE(review): lines are elided in this copy (braces, rbsReq param, break
 * statements, counter increments around line 19976 not visible). */
19885 uint8_t rgSCHCmnDlRaType1Alloc
19887 RgSchDlSfAllocInfo *allocedInfo,
19889 RgSchBwRbgInfo *rbgInfo,
19890 uint8_t startRbgSubset,
19891 uint8_t *allocRbgSubset,
19892 RgSchDlSfAllocInfo *resAllocInfo,
19893 Bool isPartialAlloc
19896 /* Note: This function attempts only full allocation */
19897 uint8_t *rbgSubsetSzArr;
19898 uint8_t type2MaskIdx, subsetIdx, rbIdx, rbInSubset, rbgInSubset;
19899 uint8_t offset, rbg, maskSize, bestSubsetIdx;
19900 uint8_t startPos = 0;
19901 uint8_t bestNumAvailRbs = 0;
19902 uint8_t numAllocRbs = 0;
19903 uint32_t ueRaType2Mask, ueRaType0Mask, rbPosInSubset;
19904 uint32_t remNumRbs, allocedMask;
19905 uint8_t usedRbs = 0;
19906 uint8_t rbgSize = rbgInfo->rbgSize;
19907 uint8_t rbgSubset = startRbgSubset;
19908 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
19909 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
19910 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
19911 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
19912 uint32_t *allocMask = allocedInfo->raType1Mask;
19914 /* Initialize the subset size Array */
19915 rbgSubsetSzArr = rbgInfo->rbgSubsetSize;
19917 /* Perform allocation for RA type 1 */
19918 for (subsetIdx = 0;subsetIdx < rbgSize; ++subsetIdx)
19920 allocedMask = allocMask[rbgSubset];
19921 maskSize = rbgSubsetSzArr[rbgSubset];
19923 /* Determine number of available RBs in the subset */
/* NOTE(review): mask/size above are indexed by rbgSubset but usedRbs is
 * indexed by subsetIdx (the loop counter) — looks inconsistent; confirm
 * against upstream whether this should be raType1UsedRbs[rbgSubset]. */
19924 usedRbs = allocedInfo->raType1UsedRbs[subsetIdx];
19925 remNumRbs = maskSize - usedRbs;
19927 if (remNumRbs >= rbsReq)
19929 bestNumAvailRbs = rbsReq;
19930 bestSubsetIdx = rbgSubset;
/* Track the largest-capacity subset seen so far for partial allocation */
19933 else if (isPartialAlloc && (remNumRbs > bestNumAvailRbs))
19935 bestNumAvailRbs = remNumRbs;
19936 bestSubsetIdx = rbgSubset;
19939 rbgSubset = (rbgSubset + 1) % rbgSize;
19940 } /* End of for (each rbgsubset) */
19942 if (bestNumAvailRbs)
19944 /* Initialize alloced mask and subsetSize depending on the RBG
19945 * subset of allocation */
19946 uint8_t startIdx = 0;
19947 maskSize = rbgSubsetSzArr[bestSubsetIdx];
19948 allocedMask = allocMask[bestSubsetIdx];
19949 RG_SCH_CMN_DL_GET_START_POS(allocedMask, maskSize,
/* Stride through the subset rbgSize positions at a time, shifting the
 * starting offset each outer iteration to cover every position */
19951 for (; startIdx < rbgSize; ++startIdx, ++startPos)
19953 for (rbInSubset = startPos; rbInSubset < maskSize;
19954 rbInSubset = rbInSubset + rbgSize)
19956 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
19957 if (!(allocedMask & rbPosInSubset))
19959 raType1Mask[bestSubsetIdx] |= rbPosInSubset;
19960 raType1UsedRbs[bestSubsetIdx]++;
19962 /* Compute RB index value for the RB being allocated */
19963 rbgInSubset = rbInSubset /rbgSize;
19964 offset = rbInSubset % rbgSize;
19965 rbg = (rbgInSubset * rbgSize) + bestSubsetIdx;
19966 rbIdx = (rbg * rbgSize) + offset;
19968 /* Update RBG mask for RA type 0 allocation */
19969 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
19970 *rbgMask |= ueRaType0Mask;
19972 /* Update RA type 2 mask */
19973 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
19974 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
19976 /* Update the counters */
/* (numAllocRbs increment elided in this copy) */
19979 if (numAllocRbs == bestNumAvailRbs)
19984 } /* End of for (each position in the subset mask) */
19985 if (numAllocRbs == bestNumAvailRbs)
19989 } /* End of for startIdx = 0 to rbgSize */
19991 *allocRbgSubset = bestSubsetIdx;
19992 } /* End of if (bestNumAvailRbs) */
19994 return (numAllocRbs);
19998 * @brief Handles RB allocation for Resource allocation type 2
20002 * Function : rgSCHCmnDlRaType2Alloc
20004 * Invoking Module Processing:
20005 * - This function is invoked for DL RB allocation for resource allocation
20008 * Processing Steps:
20009 * - Determine the available positions in the mask
20010 * - Allocate best-fit consecutive RBs.
20011 * - Update RA Type2, RA type 1 and RA type 0 masks.
20013 * @param[in] RgSchDlSfAllocInfo *allocedInfo
20014 * @param[in] uint8_t rbsReq
20015 * @param[in] RgSchBwRbgInfo *rbgInfo
20016 * @param[out] uint8_t *rbStart
20017 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
20018 * @param[in] Bool isPartialAlloc
20021 * Number of allocated RBs
/* Allocates DL RBs using Resource Allocation Type 2 (contiguous, localized;
 * TS 36.213 7.1.6.3). Delegates hole-finding to rgSCHCmnDlGetBestFitHole,
 * which fills the type-2 mask and sets *rbStart/numAllocRbs; then mirrors each
 * allocated RB into the type-0 (and, under RGSCH_SPS_UNUSED, type-1) masks.
 * Returns the number of contiguous RBs allocated (0 if none).
 * NOTE(review): lines are elided in this copy (braces, #endif lines, rbsReq
 * and rbStart parameter lines, the per-RB loop header near line 20060). */
20024 uint8_t rgSCHCmnDlRaType2Alloc
20026 RgSchDlSfAllocInfo *allocedInfo,
20028 RgSchBwRbgInfo *rbgInfo,
20030 RgSchDlSfAllocInfo *resAllocInfo,
20031 Bool isPartialAlloc
20034 uint8_t numAllocRbs = 0;
20036 uint8_t rbgSize = rbgInfo->rbgSize;
20037 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
20038 #ifdef RGSCH_SPS_UNUSED
20039 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
20041 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
20042 #ifdef RGSCH_SPS_UNUSED
20043 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
20045 uint32_t *allocedMask = allocedInfo->raType2Mask;
20047 /* Note: This function attempts only full allocation */
20048 rgSCHCmnDlGetBestFitHole(allocedMask, rbgInfo->numRbs,
20049 raType2Mask, rbsReq, rbStart, &numAllocRbs, isPartialAlloc);
20052 /* Update the allocation in RA type 0 and RA type 1 masks */
20053 uint8_t rbCnt = numAllocRbs;
20054 #ifdef RGSCH_SPS_UNUSED
20056 uint32_t ueRaType1Mask;
20058 uint32_t ueRaType0Mask;
/* Loop over each allocated RB (loop header elided in this copy) */
20063 /* Update RBG mask for RA type 0 allocation */
20064 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
20065 *rbgMask |= ueRaType0Mask;
20067 #ifdef RGSCH_SPS_UNUSED
20068 /* Update RBG mask for RA type 1 */
20069 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
20070 raType1Mask[rbgSubset] |= ueRaType1Mask;
20071 raType1UsedRbs[rbgSubset]++;
20073 /* Update the counters */
20079 return (numAllocRbs);
20083 * @brief Determines RA type 0 mask from given RB index.
20087 * Function : rgSCHCmnGetRaType0Mask
20090 * Processing Steps:
20091 * - Determine RA Type 0 mask for given rbIdex and rbg size.
20093 * @param[in] uint8_t rbIdx
20094 * @param[in] uint8_t rbgSize
20095 * @return uint32_t RA type 0 mask
20097 static uint32_t rgSCHCmnGetRaType0Mask(uint8_t rbIdx,uint8_t rbgSize)
20100 uint32_t rbgPosInRbgMask = 0;
20102 rbg = rbIdx/rbgSize;
20103 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
20105 return (rbgPosInRbgMask);
20108 #ifdef RGSCH_SPS_UNUSED
20110 * @brief Determines RA type 1 mask from given RB index.
20114 * Function : rgSCHCmnGetRaType1Mask
20117 * Processing Steps:
20118 * - Determine RA Type 1 mask for given rbIdex and rbg size.
20120 * @param[in] uint8_t rbIdx
20121 * @param[in] uint8_t rbgSize
20122 * @param[out] uint8_t *type1Subset
20123 * @return uint32_t RA type 1 mask
20125 static uint32_t rgSCHCmnGetRaType1Mask(uint8_t rbIdx,uint8_t rbgSize,uint8_t *type1Subset)
20127 uint8_t rbg, rbgSubset, rbgInSubset, offset, rbInSubset;
20128 uint32_t rbPosInSubset;
20130 rbg = rbIdx/rbgSize;
20131 rbgSubset = rbg % rbgSize;
20132 rbgInSubset = rbg/rbgSize;
20133 offset = rbIdx % rbgSize;
20134 rbInSubset = rbgInSubset * rbgSize + offset;
20135 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
20137 *type1Subset = rbgSubset;
20138 return (rbPosInSubset);
20140 #endif /* RGSCH_SPS_UNUSED */
20142 * @brief Determines RA type 2 mask from given RB index.
20146 * Function : rgSCHCmnGetRaType2Mask
20149 * Processing Steps:
20150 * - Determine RA Type 2 mask for given rbIdx and rbg size.
20152 * @param[in] uint8_t rbIdx
20153 * @param[out] uint8_t *maskIdx
20154 * @return uint32_t RA type 2 mask
20156 static uint32_t rgSCHCmnGetRaType2Mask(uint8_t rbIdx,uint8_t *maskIdx)
20158 uint32_t rbPosInType2;
20160 *maskIdx = rbIdx / 32;
20161 rbPosInType2 = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbIdx % 32);
20163 return (rbPosInType2);
20167 * @brief Performs resource allocation for a non-SPS UE in SPS bandwidth
20171 * Function : rgSCHCmnAllocUeInSpsBw
20174 * Processing Steps:
20175 * - Determine allocation for the UE.
20176 * - Use resource allocation type 0, 1 and 2 for allocation
20177 * within maximum SPS bandwidth.
20179 * @param[in] RgSchDlSf *dlSf
20180 * @param[in] RgSchCellCb *cell
20181 * @param[in] RgSchUeCb *ue
20182 * @param[in] RgSchDlRbAlloc *rbAllocInfo
20183 * @param[in] Bool isPartialAlloc
/* Allocates DL resources for a non-SPS UE inside the reserved SPS bandwidth.
 * Dispatches to RA type 0/1/2 allocation against cell->spsBwRbgInfo, then
 * allocates a PDCCH, fills tbInfo[].bytesAlloc from rgTbSzTbl, records the
 * per-RA-type allocation details in rbAllocInfo, merges the per-allocation
 * masks into the subframe's dlSfAllocInfo, and advances dlSf->spsAllocdBw.
 * Returns a Bool; TRUE is also used when only the PDCCH failed so the caller
 * may try another UE (see note below).
 * NOTE(review): several return statements and braces are elided in this copy
 * (e.g. the early-exit paths around lines 20209-20225). */
20188 Bool rgSCHCmnAllocUeInSpsBw
20193 RgSchDlRbAlloc *rbAllocInfo,
20194 Bool isPartialAlloc
20197 uint8_t rbgSize = cell->rbgSize;
20198 uint8_t numAllocRbs = 0;
20199 uint8_t numAllocRbgs = 0;
20200 uint8_t rbStart = 0;
20201 uint8_t idx, noLyr, iTbs;
20202 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
20203 RgSchDlSfAllocInfo *dlSfAlloc = &rbAllocInfo->dlSf->dlSfAllocInfo;
20204 RgSchBwRbgInfo *spsRbgInfo = &cell->spsBwRbgInfo;
20206 /* SPS_FIX : Check if this Hq proc is scheduled */
20207 if ((0 == rbAllocInfo->tbInfo[0].schdlngForTb) &&
20208 (0 == rbAllocInfo->tbInfo[1].schdlngForTb))
20213 /* Check if the requirement can be accommodated in SPS BW */
20214 if (dlSf->spsAllocdBw == spsRbgInfo->numRbs)
20216 /* SPS Bandwidth has been exhausted: no further allocations possible */
20219 if (!isPartialAlloc)
/* Full allocation requested: fail fast if the request does not fit */
20221 if((dlSf->spsAllocdBw + rbAllocInfo->rbsReq) > spsRbgInfo->numRbs)
20227 /* Perform allocation for RA type 0 if rbsReq is multiple of RBG size (also
20228 * if RBG size = 1) */
20229 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Round the request up to a whole number of RBGs */
20231 rbAllocInfo->rbsReq += (rbgSize - rbAllocInfo->rbsReq % rbgSize);
20232 numAllocRbgs = rgSCHCmnDlRaType0Alloc(dlSfAlloc,
20233 rbAllocInfo->rbsReq, spsRbgInfo, &numAllocRbs,
20234 &rbAllocInfo->resAllocInfo, isPartialAlloc);
20236 #ifdef RGSCH_SPS_UNUSED
20237 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
20239 /* If no RBS could be allocated, attempt RA TYPE 1 */
20241 numAllocRbs = rgSCHCmnDlRaType1Alloc(dlSfAlloc,
20242 rbAllocInfo->rbsReq, spsRbgInfo, (uint8_t)dlSfAlloc->nxtRbgSubset,
20243 &rbAllocInfo->allocInfo.raType1.rbgSubset,
20244 &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* Round-robin the starting subset for the next type-1 allocation */
20248 dlSfAlloc->nxtRbgSubset =
20249 (rbAllocInfo->allocInfo.raType1.rbgSubset + 1 ) % rbgSize;
20253 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20255 numAllocRbs = rgSCHCmnDlRaType2Alloc(dlSfAlloc,
20256 rbAllocInfo->rbsReq, spsRbgInfo,
20257 &rbStart, &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* RBs secured; now a PDCCH is needed to signal the grant */
20264 if (!(rbAllocInfo->pdcch =
20265 rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi,\
20266 rbAllocInfo->dciFormat, FALSE)))
20268 /* Note: Returning TRUE since PDCCH might be available for another UE */
20272 /* Update Tb info for each scheduled TB */
20273 iTbs = rbAllocInfo->tbInfo[0].iTbs;
20274 noLyr = rbAllocInfo->tbInfo[0].noLyr;
20275 rbAllocInfo->tbInfo[0].bytesAlloc =
20276 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
20278 if (rbAllocInfo->tbInfo[1].schdlngForTb)
20280 iTbs = rbAllocInfo->tbInfo[1].iTbs;
20281 noLyr = rbAllocInfo->tbInfo[1].noLyr;
20282 rbAllocInfo->tbInfo[1].bytesAlloc =
20283 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
20286 /* Update rbAllocInfo with the allocation information */
20287 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
20289 rbAllocInfo->allocInfo.raType0.dlAllocBitMask =
20290 rbAllocInfo->resAllocInfo.raType0Mask;
20291 rbAllocInfo->allocInfo.raType0.numDlAlloc = numAllocRbgs;
20293 #ifdef RGSCH_SPS_UNUSED
20294 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
20296 rbAllocInfo->allocInfo.raType1.dlAllocBitMask =
20297 rbAllocInfo->resAllocInfo.raType1Mask[rbAllocInfo->allocInfo.raType1.rbgSubset];
20298 rbAllocInfo->allocInfo.raType1.numDlAlloc = numAllocRbs;
20299 rbAllocInfo->allocInfo.raType1.shift = 0;
20302 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20304 rbAllocInfo->allocInfo.raType2.isLocal = TRUE;
20305 rbAllocInfo->allocInfo.raType2.rbStart = rbStart;
20306 rbAllocInfo->allocInfo.raType2.numRb = numAllocRbs;
20309 rbAllocInfo->rbsAlloc = numAllocRbs;
20310 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
20312 /* Update allocation masks for RA types 0, 1 and 2 in DL SF */
20314 /* Update type 0 allocation mask */
20315 dlSfAlloc->raType0Mask |= rbAllocInfo->resAllocInfo.raType0Mask;
20316 #ifdef RGSCH_SPS_UNUSED
20317 /* Update type 1 allocation masks */
20318 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
20320 dlSfAlloc->raType1Mask[idx] |= rbAllocInfo->resAllocInfo.raType1Mask[idx];
20321 dlSfAlloc->raType1UsedRbs[idx] +=
20322 rbAllocInfo->resAllocInfo.raType1UsedRbs[idx];
20325 /* Update type 2 allocation masks */
20326 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
20328 dlSfAlloc->raType2Mask[idx] |= rbAllocInfo->resAllocInfo.raType2Mask[idx];
/* Book the consumed SPS bandwidth for this subframe */
20331 dlSf->spsAllocdBw += numAllocRbs;
20335 /***********************************************************
20337 * Func : rgSCHCmnDlGetBestFitHole
20340 * Desc : Converts the best fit hole into allocation and returns the
20341 * allocation information.
20351 **********************************************************/
/* Finds the best-fit run ("hole") of free contiguous RBs in allocMask and
 * converts it into an allocation in crntAllocMask. allocMask is MSB-first per
 * 32-bit word (bit 31-maskPos). On an exact fit of rbsReq RBs the search stops
 * and tmpMask is committed; otherwise, when isPartialAlloc, the largest hole
 * seen (tracked in bestMask/bestStartPos) is committed. *allocStart receives
 * the starting RB index and *allocNumRbs the allocated count (0 if nothing).
 * NOTE(review): lines are elided in this copy — S16 startPos declaration,
 * rbsReq parameter line, numAvailRbs++ and the hole-reset of startPos/
 * numAvailRbs around lines 20394/20410 are not visible. */
20352 static Void rgSCHCmnDlGetBestFitHole
20354 uint32_t *allocMask,
20355 uint8_t numMaskRbs,
20356 uint32_t *crntAllocMask,
20358 uint8_t *allocStart,
20359 uint8_t *allocNumRbs,
20360 Bool isPartialAlloc
20363 uint8_t maskSz = (numMaskRbs + 31)/32;
20364 uint8_t maxMaskPos = (numMaskRbs % 32);
20365 uint8_t maskIdx, maskPos;
20366 uint8_t numAvailRbs = 0;
20367 uint8_t bestAvailNumRbs = 0;
20368 S8 bestStartPos = -1;
20370 uint32_t tmpMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
20371 uint32_t bestMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
20373 *allocNumRbs = numAvailRbs;
20376 for (maskIdx = 0; maskIdx < maskSz; ++maskIdx)
/* Last word may be partially populated; clamp the bit scan accordingly */
20379 if (maskIdx == (maskSz - 1))
20381 if (numMaskRbs % 32)
20383 maxMaskPos = numMaskRbs % 32;
20386 for (maskPos = 0; maskPos < maxMaskPos; ++maskPos)
/* MSB-first bit ordering: bit (31 - maskPos) represents this RB */
20388 if (!(allocMask[maskIdx] & (1 << (31 - maskPos))))
20390 tmpMask[maskIdx] |= (1 << (31 - maskPos));
/* Record the start of a new hole */
20391 if (startPos == -1)
20393 startPos = maskIdx * 32 + maskPos;
20396 if (numAvailRbs == rbsReq)
20398 *allocStart = (uint8_t)startPos;
20399 *allocNumRbs = rbsReq;
/* Hole closed before satisfying the request: remember it if it is the
 * biggest so far, then reset the running hole state */
20405 if (numAvailRbs > bestAvailNumRbs)
20407 bestAvailNumRbs = numAvailRbs;
20408 bestStartPos = startPos;
20409 memcpy(bestMask, tmpMask, 4 * sizeof(uint32_t));
20413 memset(tmpMask, 0, 4 * sizeof(uint32_t));
20416 if (*allocNumRbs == rbsReq)
20422 if (*allocNumRbs == rbsReq)
20424 /* Convert the hole into allocation */
20425 memcpy(crntAllocMask, tmpMask, 4 * sizeof(uint32_t));
20430 if (bestAvailNumRbs && isPartialAlloc)
20432 /* Partial allocation could have been done */
20433 *allocStart = (uint8_t)bestStartPos;
20434 *allocNumRbs = bestAvailNumRbs;
20435 /* Convert the hole into allocation */
20436 memcpy(crntAllocMask, bestMask, 4 * sizeof(uint32_t));
20442 #endif /* LTEMAC_SPS */
20444 /***************************************************************************
20446 * NON-DLFS Allocation functions
20448 * *************************************************************************/
20452 * @brief Function to find out code rate
20456 * Function : rgSCHCmnFindCodeRate
20458 * Processing Steps:
20460 * @param[in] RgSchCellCb *cell
20461 * @param[in] RgSchDlSf *dlSf
20462 * @param[in,out] RgSchDlRbAlloc *allocInfo
/* Finds the effective code rate for a DL allocation.
 * NOTE(review): the parameter list and body of this function are almost
 * entirely elided in this copy (only the allocInfo parameter and a trailing
 * comment survive) — restore from upstream rg_sch_cmn.c before building. */
20466 static Void rgSCHCmnFindCodeRate
20470 RgSchDlRbAlloc *allocInfo,
20479 /* Adjust the Imcs and bytes allocated also with respect to the adjusted
20480 RBs - Here we will find out the Imcs by identifying first Highest
20481 number of bits compared to the original bytes allocated. */
20483 * @brief Adjust IMCS according to tbSize and ITBS
20487 * Function : rgSCHCmnNonDlfsPbchTbImcsAdj
20489 * Processing Steps:
20490 * - Adjust Imcs according to tbSize and ITBS.
20492 * @param[in,out] RgSchDlRbAlloc *allocInfo
20493 * @param[in] uint8_t *idx
/* Adjusts iTbs/iMcs for TB index idx so the TB size over the actually
 * granted RBs (noRbs) does not exceed the originally requested byte count.
 * Steps down tbs until rgTbSzTbl fits, then writes back bytesReq, iTbs and
 * the recomputed iMcs.
 * NOTE(review): lines are elided in this copy — the cell/idx/rbsReq parameter
 * lines, the tbs/noRbs declarations and the tbs-- inside the while loop are
 * not visible; verify against upstream. */
20496 static Void rgSCHCmnNonDlfsPbchTbImcsAdj
20499 RgSchDlRbAlloc *allocInfo,
20504 uint8_t noLyrs = 0;
20506 uint32_t origBytesReq;
20507 uint8_t noRbgs = 0;
20509 RgSchDlSf *dlSf = allocInfo->dlSf;
/* Translate the TB's current MCS to its iTbs index */
20511 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
20512 noLyrs = allocInfo->tbInfo[idx].noLyr;
20514 if((allocInfo->raType == RG_SCH_CMN_RA_TYPE0))
/* Type 0 grants whole RBGs; account for the last-RBG deficit */
20516 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + dlSf->lstRbgDfct), cell->rbgSize);
20517 noRbs = (noRbgs * cell->rbgSize) - dlSf->lstRbgDfct;
20521 noRbs = allocInfo->rbsReq;
20524 /* This line will help in case if tbs is zero and reduction in MCS is not possible */
20525 if (allocInfo->rbsReq == 0 )
20529 origBytesReq = rgTbSzTbl[noLyrs - 1][tbs][rbsReq - 1]/8;
20531 /* Find out the ITbs & Imcs by identifying first Highest
20532 number of bits compared to the original bytes allocated.*/
/* Only step down if even iTbs 0 would not already be below the target */
20535 if(((rgTbSzTbl[noLyrs - 1][0][noRbs - 1])/8) < origBytesReq)
20537 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyrs - 1], tbs);
20538 while(((rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1])/8) > origBytesReq)
20547 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1]/8;
20548 allocInfo->tbInfo[idx].iTbs = tbs;
20549 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
20554 /* Added funcion to adjust TBSize*/
20556 * @brief Function to adjust the tbsize in case of subframe 0 & 5 when
20557 * we were not able to do RB alloc adjustment by adding extra required Rbs
20561 * Function : rgSCHCmnNonDlfsPbchTbSizeAdj
20563 * Processing Steps:
20565 * @param[in,out] RgSchDlRbAlloc *allocInfo
20566 * @param[in] uint8_t numOvrlapgPbchRb
20567 * @param[in] uint8_t idx
20568 * @param[in] uint8_t pbchSsRsSym
/* Reduces the TB size for TB index idx when extra RBs could not be added to
 * compensate for PBCH/PSS/SSS overlap: subtracts the byte-equivalent of the
 * resource elements lost in the overlapping RBs (numOvrlapgPbchRb RBs x
 * pbchSsRsSym symbols x 6, in bits) from bytesReq, then steps iTbs down until
 * rgTbSzTbl yields a size at or below that reduced target, finally writing
 * back bytesReq/iTbs/iMcs.
 * NOTE(review): lines are elided in this copy — the idx/bytesReq parameter
 * lines, the tbs declaration and the tbs-- in the while loop are not
 * visible. */
20571 static Void rgSCHCmnNonDlfsPbchTbSizeAdj
20573 RgSchDlRbAlloc *allocInfo,
20574 uint8_t numOvrlapgPbchRb,
20575 uint8_t pbchSsRsSym,
20580 uint32_t reducedTbs = 0;
20581 uint8_t noLyrs = 0;
20584 noLyrs = allocInfo->tbInfo[idx].noLyr;
20586 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
/* Bits lost to PBCH/PSS/SSS: overlapped RBs x reserved REs x 6 bits, /8 for bytes */
20588 reducedTbs = bytesReq - (((uint32_t)numOvrlapgPbchRb * (uint32_t)pbchSsRsSym * 6)/8);
20590 /* find out the ITbs & Imcs by identifying first Highest
20591 number of bits compared with reduced bits considering the bits that are
20592 reserved for PBCH/PSS/SSS */
20593 if(((rgTbSzTbl[noLyrs - 1][0][allocInfo->rbsReq - 1])/8) < reducedTbs)
20595 while(((rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1])/8) > reducedTbs)
20604 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1]/8;
20605 allocInfo->tbInfo[idx].iTbs = tbs;
20606 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
20611 /* Added this function to find num of ovrlapping PBCH rb*/
20613 * @brief Function to find out how many additional rbs are available
20614 * in the entire bw which can be allocated to a UE
20617 * Function : rgSCHCmnFindNumAddtlRbsAvl
20619 * Processing Steps:
20620 * - Calculates the number of additional RBs available
20622 * @param[in] RgSchCellCb *cell
20623 * @param[in] RgSchDlSf *dlSf
20624 * @param[in,out] RgSchDlRbAlloc *allocInfo
20625 * @param[out] uint8_t addtlRbsAvl
20628 static uint8_t rgSCHCmnFindNumAddtlRbsAvl(RgSchCellCb *cell,RgSchDlSf *dlSf,RgSchDlRbAlloc *allocInfo)
20630 uint8_t addtlRbsAvl = 0;
20631 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
20633 addtlRbsAvl = (((dlSf->type0End - dlSf->type2End + 1)*\
20634 cell->rbgSize) - dlSf->lstRbgDfct) - allocInfo->rbsReq;
20636 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20638 addtlRbsAvl = (dlSf->bw - dlSf->bwAlloced) - allocInfo->rbsReq;
20641 return (addtlRbsAvl);
20644 /* Added this function to find num of ovrlapping PBCH rb*/
20646 * @brief Function to find out how many of the requested RBs are
20647 * falling in the center 6 RBs of the downlink bandwidth.
20650 * Function : rgSCHCmnFindNumPbchOvrlapRbs
20652 * Processing Steps:
20653 * - Calculates number of overlapping rbs
20655 * @param[in] RgSchCellCb *cell
20656 * @param[in] RgSchDlSf *dlSf
20657 * @param[in,out] RgSchDlRbAlloc *allocInfo
20658 * @param[out] uint8_t* numOvrlapgPbchRb
20661 static Void rgSCHCmnFindNumPbchOvrlapRbs
20665 RgSchDlRbAlloc *allocInfo,
20666 uint8_t *numOvrlapgPbchRb
20669 *numOvrlapgPbchRb = 0;
20670 /*Find if we have already crossed the start boundary for PBCH 6 RBs,
20671 * if yes then lets find the number of RBs which are getting overlapped
20672 * with this allocation.*/
20673 if(dlSf->bwAlloced <= (cell->pbchRbStart))
20675 /*We have not crossed the start boundary of PBCH RBs. Now we need
20676 * to know that if take this allocation then how much PBCH RBs
20677 * are overlapping with this allocation.*/
20678 /* Find out the overlapping RBs in the centre 6 RBs */
20679 if((dlSf->bwAlloced + allocInfo->rbsReq) > cell->pbchRbStart)
20681 *numOvrlapgPbchRb = (dlSf->bwAlloced + allocInfo->rbsReq) - (cell->pbchRbStart);
20682 if(*numOvrlapgPbchRb > 6)
20683 *numOvrlapgPbchRb = 6;
20686 else if ((dlSf->bwAlloced > (cell->pbchRbStart)) &&
20687 (dlSf->bwAlloced < (cell->pbchRbEnd)))
20689 /*We have already crossed the start boundary of PBCH RBs.We need to
20690 * find that if we take this allocation then how much of the RBs for
20691 * this allocation will overlap with PBCH RBs.*/
20692 /* Find out the overlapping RBs in the centre 6 RBs */
20693 if(dlSf->bwAlloced + allocInfo->rbsReq < (cell->pbchRbEnd))
20695 /*If we take this allocation then also we are not crossing the
20696 * end boundary of PBCH 6 RBs.*/
20697 *numOvrlapgPbchRb = allocInfo->rbsReq;
20701 /*If we take this allocation then we are crossing the
20702 * end boundary of PBCH 6 RBs.*/
20703 *numOvrlapgPbchRb = (cell->pbchRbEnd) - dlSf->bwAlloced;
20710 * @brief Performs RB allocation adjustment if the requested RBs are
20711 * falling in the center 6 RBs of the downlink bandwidth.
20714 * Function : rgSCHCmnNonDlfsPbchRbAllocAdj
20716 * Processing Steps:
20717 * - Allocate consecutively available RBs.
20719 * @param[in] RgSchCellCb *cell
20720 * @param[in,out] RgSchDlRbAlloc *allocInfo
20721 * @param[in] uint8_t pbchSsRsSym
/* Compensates a DL allocation that overlaps the centre-6 PBCH/PSS/SSS RBs.
 * Computes extra RBs needed (lost REs / total REs per RB, rounded up), grows
 * rbsReq via RG_SCH_CMN_UPD_RBS_TO_ADD, iterates once more when the added RBs
 * themselves fall in the PBCH window, and finally either (a) reverts the add
 * for tbs==0, (b) shrinks the TB size when no RB could be added, (c) mixes
 * iMcs adjustment + TB shrink when only some RBs were added, or (d) adjusts
 * iMcs alone when all required RBs were added.
 * NOTE(review): elided in this copy — the cell/isBcchPcch parameter lines,
 * declarations of totSym/divResult/tbs/noLyr/bytesReq, the divResult++
 * rounding increments, and several braces. */
20724 static Void rgSCHCmnNonDlfsPbchRbAllocAdj
20727 RgSchDlRbAlloc *allocInfo,
20728 uint8_t pbchSsRsSym,
20732 RgSchDlSf *dlSf = allocInfo->dlSf;
20733 uint8_t numOvrlapgPbchRb = 0;
20734 uint8_t numOvrlapgAdtlPbchRb = 0;
20736 uint8_t addtlRbsReq = 0;
20737 uint8_t moreAddtlRbsReq = 0;
20738 uint8_t addtlRbsAdd = 0;
20739 uint8_t moreAddtlRbsAdd = 0;
20741 uint8_t origRbsReq = 0;
20749 origRbsReq = allocInfo->rbsReq;
20750 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
/* Total OFDM symbols per subframe depends on cyclic prefix length */
20752 totSym = (cell->isCpDlExtend) ? RGSCH_TOT_NUM_SYM_EXTCP : RGSCH_TOT_NUM_SYM_NORCP;
20754 /* Additional RBs are allocated by considering the loss due to
20755 the reserved symbols for CFICH, PBCH, PSS, SSS and cell specific RS */
20757 divResult = (numOvrlapgPbchRb * pbchSsRsSym)/totSym;
/* Round up when there is a remainder (divResult++ elided in this copy) */
20758 if((numOvrlapgPbchRb * pbchSsRsSym) % totSym)
20762 addtlRbsReq = divResult;
20764 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, addtlRbsReq, addtlRbsAdd)
20766 /*Now RBs required is original requested RBs + these additional RBs to make
20767 * up for PSS/SSS/BCCH.*/
20768 allocInfo->rbsReq = allocInfo->rbsReq + addtlRbsAdd;
20770 /*Check if with these additional RBs we have taken up, these are also falling
20771 * under PBCH RBs range, if yes then we would need to account for
20772 * PSS/SSS/BCCH for these additional RBs too.*/
20773 if(addtlRbsAdd && ((dlSf->bwAlloced + allocInfo->rbsReq - addtlRbsAdd) < (cell->pbchRbEnd)))
20775 if((dlSf->bwAlloced + allocInfo->rbsReq) <= (cell->pbchRbEnd))
20777 /*With additional RBs taken into account, we are not crossing the
20778 * PBCH RB end boundary. Thus here we need to account just for
20779 * overlapping PBCH RBs for these additional RBs.*/
20780 divResult = (addtlRbsAdd * pbchSsRsSym)/totSym;
20781 if((addtlRbsAdd * pbchSsRsSym) % totSym)
20786 moreAddtlRbsReq = divResult;
20788 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
20790 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
20795 /*Here we have crossed the PBCH RB end boundary, thus we need to take
20796 * into account the overlapping RBs for additional RBs which will be
20797 * subset of addtlRbs.*/
20798 numOvrlapgAdtlPbchRb = (cell->pbchRbEnd) - ((dlSf->bwAlloced + allocInfo->rbsReq) - addtlRbsAdd);
20800 divResult = (numOvrlapgAdtlPbchRb * pbchSsRsSym)/totSym;
20801 if((numOvrlapgAdtlPbchRb * pbchSsRsSym) % totSym)
20806 moreAddtlRbsReq = divResult;
20808 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
20810 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
/* BCCH/PCCH uses fixed coding — no iMcs/TB-size rework below applies */
20813 if (isBcchPcch == TRUE)
20818 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
/* tbs == 0 branch (condition elided): cannot lower MCS further */
20821 /* This case might be for Imcs value 6 and NPrb = 1 case - Not
20822 Adjusting either RBs or Imcs or Bytes Allocated */
20823 allocInfo->rbsReq = allocInfo->rbsReq - addtlRbsAdd - moreAddtlRbsAdd;
20825 else if(tbs && ((0 == addtlRbsAdd) && (moreAddtlRbsAdd == 0)))
20827 /*In case of a situation where the entire bandwidth is already occupied
20828 * and we dont have room to add additional Rbs then in order to decrease the
20829 * code rate we reduce the tbsize such that we reduce the present calculated
20830 * tbsize by number of bytes that would be occupied by PBCH/PSS/SSS in overlapping
20831 * rbs and find the nearest tbsize which would be less than this deduced value*/
20833 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
20835 noLyr = allocInfo->tbInfo[0].noLyr;
20836 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyr - 1], tbs);
20837 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
20839 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,bytesReq);
20841 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20843 noLyr = allocInfo->tbInfo[1].noLyr;
20844 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
20845 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,bytesReq);
20849 else if(tbs && ((addtlRbsAdd != addtlRbsReq) ||
20850 (addtlRbsAdd && (moreAddtlRbsReq != moreAddtlRbsAdd))))
20852 /*In case of a situation where we were not able to add required number of
20853 * additional RBs then we adjust the Imcs based on original RBs requested.
20854 * Doing this would compensate for the few extra Rbs we have added but in order
20855 * to compensate for the number of RBs we couldnt add we again do the TBSize adjustment*/
20857 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
20859 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20861 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
20864 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
/* Only the overlap NOT already compensated by added RBs remains */
20865 numOvrlapgPbchRb = numOvrlapgPbchRb - (addtlRbsAdd + moreAddtlRbsAdd);
20867 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,allocInfo->tbInfo[0].bytesReq);
20869 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20871 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,allocInfo->tbInfo[1].bytesReq);
20877 /*We hit this code when we were able to add the required additional RBs
20878 * hence we should adjust the IMcs based on the original RBs requested*/
20880 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
20882 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20884 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
20889 } /* end of rgSCHCmnNonDlfsPbchRbAllocAdj */
20893 * @brief Performs RB allocation for frequency non-selective cell.
20897 * Function : rgSCHCmnNonDlfsCmnRbAlloc
20899 * Processing Steps:
20900 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
20902 * @param[in] RgSchCellCb *cell
20903 * @param[in, out] RgSchDlRbAlloc *allocInfo
20908 static S16 rgSCHCmnNonDlfsCmnRbAlloc(RgSchCellCb *cell,RgSchDlRbAlloc *allocInfo)
20913 uint8_t pbchSsRsSym = 0;
20914 uint8_t pbchFrame = 0;
20916 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
20918 RgSchDlSf *dlSf = allocInfo->dlSf;
20920 uint8_t rbStart = 0;
20921 uint8_t spsRbsAlloc = 0;
20922 RgSchDlSfAllocInfo *dlSfAlloc = &allocInfo->dlSf->dlSfAllocInfo;
20925 allocInfo->tbInfo[0].noLyr = 1;
20928 /* Note: Initialize the masks to 0, this might not be needed since alloInfo
20929 * is initialized to 0 at the beginning of allcoation */
20930 allocInfo->resAllocInfo.raType0Mask = 0;
20931 memset(allocInfo->resAllocInfo.raType1Mask, 0,
20932 RG_SCH_NUM_RATYPE1_32BIT_MASK * sizeof (uint32_t));
20933 memset(allocInfo->resAllocInfo.raType2Mask, 0,
20934 RG_SCH_NUM_RATYPE2_32BIT_MASK * sizeof (uint32_t));
20936 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
20937 (dlSf->bwAlloced == dlSf->bw))
20939 if(dlSf->bwAlloced == dlSf->bw)
20945 if (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced))
20948 if ((allocInfo->tbInfo[0].imcs < 29) && (dlSf->bwAlloced < dlSf->bw))
20950 if(allocInfo->tbInfo[0].imcs < 29)
20953 /* set the remaining RBs for the requested UE */
20954 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
20955 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
20956 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[0][tbs][allocInfo->rbsReq - 1]/8;
20961 /* Attempt RA Type 2 allocation in SPS Bandwidth */
20962 if (dlSf->spsAllocdBw < cell->spsBwRbgInfo.numRbs)
20965 rgSCHCmnDlRaType2Alloc(dlSfAlloc,
20966 allocInfo->rbsReq, &cell->spsBwRbgInfo, &rbStart,
20967 &allocInfo->resAllocInfo, FALSE);
20968 /* rbsAlloc assignment moved from line 16671 to here to avoid
20969 * compilation error. Recheck */
20970 dlSf->spsAllocdBw += spsRbsAlloc;
20973 #endif /* LTEMAC_SPS */
20981 /* Update allocation information */
20982 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
20983 if (allocInfo->pdcch == NULLP)
20987 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
20988 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
20989 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
20990 allocInfo->allocInfo.raType2.isLocal = TRUE;
20994 allocInfo->allocInfo.raType2.rbStart = rbStart;
20995 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
20996 allocInfo->rbsAlloc = allocInfo->rbsReq;
21007 if(!(dlSf->sfNum == 5))
21009 /* case for subframes 1 to 9 except 5 */
21011 allocInfo->allocInfo.raType2.rbStart = rbStart;
21013 /*Fix for ccpu00123918*/
21014 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21019 pbchFrame = 1; /* case for subframe 5 */
21020 /* In subframe 5, symbols are reserved for PSS and SSS and CFICH
21021 and Cell Specific Reference Signals */
21022 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PSS_SSS_SYM) *
21023 RGSCH_NUM_SC_IN_RB + cell->numCellRSPerSf);
21029 /* In subframe 0, symbols are reserved for PSS, SSS, PBCH, CFICH and
21030 and Cell Specific Reference signals */
21031 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PBCH_SYM +
21032 RGSCH_NUM_PSS_SSS_SYM) * RGSCH_NUM_SC_IN_RB +
21033 cell->numCellRSPerSf);
21034 } /* end of outer else */
21037 (((dlSf->bwAlloced + allocInfo->rbsReq) - cell->pbchRbStart) > 0)&&
21038 (dlSf->bwAlloced < cell->pbchRbEnd))
21040 if(allocInfo->tbInfo[0].imcs < 29)
21042 rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo, pbchSsRsSym, TRUE);
21054 /*Fix for ccpu00123918*/
21055 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21056 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21057 allocInfo->rbsAlloc = allocInfo->rbsReq;
21059 /* LTE_ADV_FLAG_REMOVED_START */
21061 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
21063 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
21064 allocInfo->allocInfo.raType2.rbStart, \
21065 allocInfo->allocInfo.raType2.numRb);
21070 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
21071 allocInfo->allocInfo.raType2.rbStart, \
21072 allocInfo->allocInfo.raType2.numRb);
21078 /* LTE_ADV_FLAG_REMOVED_END */
21079 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21086 /* Update type 0, 1 and 2 masks */
21087 dlSfAlloc->raType0Mask |= allocInfo->resAllocInfo.raType0Mask;
21088 #ifdef RGSCH_SPS_UNUSED
21089 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
21091 dlSfAlloc->raType1Mask[idx] |=
21092 allocInfo->resAllocInfo.raType1Mask[idx];
21093 dlSfAlloc->raType1UsedRbs[idx] +=
21094 allocInfo->resAllocInfo.raType1UsedRbs[idx];
21097 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
21099 dlSfAlloc->raType2Mask[idx] |=
21100 allocInfo->resAllocInfo.raType2Mask[idx];
21110 * @brief Performs RB allocation for frequency non-selective cell.
21114 * Function : rgSCHCmnNonDlfsCmnRbAllocRar
21116 * Processing Steps:
21117 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
21119 * @param[in] RgSchCellCb *cell
21120 * @param[in, out] RgSchDlRbAlloc *allocInfo
21125 static S16 rgSCHCmnNonDlfsCmnRbAllocRar(RgSchCellCb *cell,RgSchDlRbAlloc *allocInfo)
21127 RgSchDlSf *dlSf = allocInfo->dlSf;
21129 if(dlSf->bwAlloced == dlSf->bw)
21134 allocInfo->tbInfo[0].noLyr = 1;
21136 /* Update allocation information */
21137 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
21138 if (allocInfo->pdcch == NULLP)
21142 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
21143 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
21144 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
21145 allocInfo->allocInfo.raType2.isLocal = TRUE;
21147 /*Fix for ccpu00123918*/
21148 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21149 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21150 allocInfo->rbsAlloc = allocInfo->rbsReq;
21152 /* LTE_ADV_FLAG_REMOVED_END */
21153 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21156 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, NULLP, dlSf, 13, TFU_DCI_FORMAT_B1, FALSE);
21157 if (allocInfo->pdcch == NULLP)
21161 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
21162 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
21164 DU_LOG("\nINFO --> SCH : 5GTF_ERROR vrbg allocated > 25\n");
21168 allocInfo->tbInfo[0].cmnGrnt.vrbgStart = beamInfo->vrbgStart;
21169 allocInfo->tbInfo[0].cmnGrnt.numVrbg = allocInfo->vrbgReq;
21171 /* Update allocation information */
21172 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
21174 allocInfo->tbInfo[0].cmnGrnt.xPDSCHRange = 1;
21175 allocInfo->tbInfo[0].cmnGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
21176 allocInfo->tbInfo[0].cmnGrnt.vrbgStart, allocInfo->tbInfo[0].cmnGrnt.numVrbg);
21178 allocInfo->tbInfo[0].cmnGrnt.rbStrt = (allocInfo->tbInfo[0].cmnGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
21179 allocInfo->tbInfo[0].cmnGrnt.numRb = (allocInfo->tbInfo[0].cmnGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
21181 beamInfo->vrbgStart += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
21182 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
21183 allocInfo->tbInfo[0].cmnGrnt.rv = 0;
21184 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21187 DU_LOG("\nINFO --> SCH : [%s],allocInfo->tbInfo[0].bytesAlloc:%u,vrbgReq:%u\n",
21188 __func__,allocInfo->tbInfo[0].bytesAlloc,allocInfo->vrbgReq);
21194 /* LTE_ADV_FLAG_REMOVED_START */
21197 * @brief To check if DL BW available for non-DLFS allocation.
21201 * Function : rgSCHCmnNonDlfsSFRBwAvlbl
21203 * Processing Steps:
21204 * - Determine availability based on RA Type.
21206 * @param[in] RgSchCellCb *cell
21207 * @param[in] RgSchDlSf *dlSf
21208 * @param[in] RgSchDlRbAlloc *allocInfo
/* Checks whether DL bandwidth is available for a UE under SFR (soft frequency
 * reuse): walks the cell-centre (CC) and cell-edge (CE) pool lists and returns
 * the pool that can satisfy (or partially satisfy) allocInfo->rbsReq.
 * A cell-centre UE may fall back to the CE pool; a cell-edge UE may not use
 * the CC pool (except for the explicit RETX boundary adjustments below).
 * NOTE(review): chunk is elided — many braces/returns are not visible; the
 * comments below only describe what the visible statements do. */
21215 static Bool rgSCHCmnNonDlfsSFRBwAvlbl
21218 RgSchSFRPoolInfo **sfrpoolInfo,
21220 RgSchDlRbAlloc *allocInfo,
21228 RgSchSFRPoolInfo *sfrPool;
21229 RgSchSFRPoolInfo *sfrCEPool;
/* Best fallback pool if no pool can fully satisfy the request. */
21233 RgSchSFRPoolInfo *poolWithMaxAvlblBw = NULLP;
21234 uint32_t bwAvlbl = 0;
21235 uint32_t addtnlPRBs = 0;
/* Fast-fail guards: whole subframe, CC pool, or (for CE UEs) CE pool full. */
21237 if (dlSf->bw <= dlSf->bwAlloced)
21239 DU_LOG("\nERROR --> SCH : BW is fully allocated for subframe (%d) CRNTI:%d", dlSf->sfNum,allocInfo->rnti);
21243 if (dlSf->sfrTotalPoolInfo.ccBwFull == TRUE)
21245 DU_LOG("\nERROR --> SCH : BW is fully allocated for CC Pool CRNTI:%d",allocInfo->rnti);
21249 if ((dlSf->sfrTotalPoolInfo.ceBwFull == TRUE) && (isUeCellEdge))
21251 DU_LOG("\nERROR --> SCH : BW is fully allocated for CE Pool CRNTI:%d",allocInfo->rnti);
21255 /* We first check if the ue scheduled is a cell edge or cell centre and accordingly check the avaialble
21256 memory in their pool. If the cell centre UE doesnt have Bw available in its pool, then it will check
21257 Bw availability in cell edge pool but the other way around is NOT possible. */
/* Pick the starting pool list by UE type: CE UE -> cePool, else ccPool. */
21260 l = &dlSf->sfrTotalPoolInfo.cePool;
21264 l = &dlSf->sfrTotalPoolInfo.ccPool;
21267 n = cmLListFirst(l);
21271 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
21273 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21275 /* MS_FIX for ccpu00123919 : Number of RBs in case of RETX should be same as that of initial transmission. */
/* Non-zero txCntr => this is a retransmission. */
21276 if(allocInfo->tbInfo[0].tbCb->txCntr)
21278 /* If RB assignment is being done for RETX. Then if reqRbs are a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
21279 * not a multiple of rbgSize then check if lsgRbgDfct exists */
21280 if (allocInfo->rbsReq % cell->rbgSize == 0)
21282 if ((sfrPool->type2End == dlSf->type2End) && dlSf->lstRbgDfct)
21284 /* In this scenario we are wasting the last RBG for this dlSf */
21285 sfrPool->type0End--;
/* Account for the short last RBG we are giving up. */
21286 sfrPool->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21288 dlSf->lstRbgDfct = 0;
21290 /*ABHINAV To check if these variables need to be taken care of*/
21292 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21297 if (dlSf->lstRbgDfct)
21299 /* Check if type0 allocation can cater to this RETX requirement */
21300 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
21306 if (sfrPool->type2End != dlSf->type2End) /*Search again for some pool which has the END RBG of the BandWidth*/
21314 /* cannot allocate same number of required RBs */
21320 /*rg002.301 ccpu00120391 MOD condition is modified approprialtely to find if rbsReq is less than available RBS*/
/* Type-0 capacity of this pool = whole RBGs between type2End and type0End,
 * minus the last-RBG deficit. */
21321 if(allocInfo->rbsReq <= (((sfrPool->type0End - sfrPool->type2End + 1)*\
21322 cell->rbgSize) - dlSf->lstRbgDfct))
21324 *sfrpoolInfo = sfrPool;
/* Pool too full to host even one more RBG: advance to the next pool. */
21329 if (sfrPool->bw <= sfrPool->bwAlloced + cell->rbgSize)
21331 n = cmLListNext(l);
21332 /* If the ue is cell centre then it will simply check the memory available in next pool.
21333 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
21335 if((!isUeCellEdge) && (!n->node))
21337 l = &dlSf->sfrTotalPoolInfo.cePool;
21338 n = cmLListFirst(l);
21344 /* MS_FIX: Number of RBs in case of RETX should be same as that of initial transmission */
/* New transmission only: shrink the request to what the pool can give and
 * recompute bytesReq from the TBS table for the reduced RB count. */
21345 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21347 /*rg002.301 ccpu00120391 MOD setting the remaining RBs for the requested UE*/
21348 allocInfo->rbsReq = (((sfrPool->type0End - sfrPool->type2End + 1)*\
21349 cell->rbgSize) - dlSf->lstRbgDfct);
21350 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21351 noLyrs = allocInfo->tbInfo[0].noLyr;
21352 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21353 *sfrpoolInfo = sfrPool;
21358 n = cmLListNext(l);
21360 /* If the ue is cell centre then it will simply check the memory available in next pool.
21361 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
21362 if((!isUeCellEdge) && (!n->node))
21364 l = &dlSf->sfrTotalPoolInfo.cePool;
21365 n = cmLListFirst(l);
21374 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
21376 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21377 /* This is a Case where a UE was CC and had more RBs allocated than present in CE pool.
21378 In case this UE whn become CE with retx going on, then BW is not sufficient for Retx */
21379 if ((isUeCellEdge) &&
21380 (allocInfo->tbInfo[0].tbCb->txCntr != 0))
21382 if(allocInfo->rbsReq > (sfrPool->bw - sfrPool->bwAlloced))
21384 /* Adjust CE BW such that Retx alloc is successful */
21385 /* Check if merging CE with adjacent CC pool will be sufficient to process Retx */
21387 /* If no Type 0 allocations are made from this pool */
21388 if (sfrPool->type0End == (((sfrPool->poolendRB + 1) / cell->rbgSize) - 1))
/* Borrow PRBs from the adjacent CC pool only if it sits directly after this
 * CE pool and the combined free space covers the RETX requirement. */
21390 if (sfrPool->adjCCPool &&
21391 (sfrPool->adjCCPool->type2Start == sfrPool->poolendRB + 1) &&
21392 (allocInfo->rbsReq <= ((sfrPool->bw - sfrPool->bwAlloced) +
21393 ((sfrPool->adjCCPool->bw - sfrPool->adjCCPool->bwAlloced)))))
21395 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
21397 /* Adjusting CE Pool Info */
21398 sfrPool->bw += addtnlPRBs;
21399 sfrPool->type0End = ((sfrPool->poolendRB + addtnlPRBs + 1) /
21400 cell->rbgSize) - 1;
21402 /* Adjusting CC Pool Info */
21403 sfrPool->adjCCPool->type2Start += addtnlPRBs;
21404 sfrPool->adjCCPool->type2End = RGSCH_CEIL(sfrPool->adjCCPool->type2Start,
21406 sfrPool->adjCCPool->bw -= addtnlPRBs;
21407 *sfrpoolInfo = sfrPool;
21414 /* Check if CC pool is one of the following:
21415 * 1. |CE| + |CC "CCPool2Exists" = TRUE|
21416 * 2. |CC "CCPool2Exists" = FALSE| + |CE| + |CC "CCPool2Exists" = TRUE|
21418 if(TRUE == sfrPool->CCPool2Exists)
21420 l1 = &dlSf->sfrTotalPoolInfo.cePool;
21421 n1 = cmLListFirst(l1);
21422 sfrCEPool = (RgSchSFRPoolInfo*)(n1->node);
/* Prefer whichever pool alone can satisfy the request: CE first, then CC. */
21423 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced))
21425 *sfrpoolInfo = sfrCEPool;
21428 else if(allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
21430 *sfrpoolInfo = sfrPool;
21433 /* Check if CE and CC boundary has unallocated prbs */
21434 else if ((sfrPool->poolstartRB == sfrPool->type2Start) &&
21435 (sfrCEPool->type0End == ((sfrCEPool->poolendRB + 1) / cell->rbgSize) - 1))
21437 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced) +
21438 (sfrPool->bw - sfrPool->bwAlloced))
21440 /* Checking if BW can be allocated partly from CE pool and partly
21443 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
21444 /* Updating CE and CC type2 parametrs based on the RBs allocated
21445 * from these pools*/
/* Grow the CC pool backwards across the CE/CC boundary by addtnlPRBs. */
21446 sfrPool->type2Start -= addtnlPRBs;
21447 sfrPool->type2End = RGSCH_CEIL(sfrPool->type2Start, cell->rbgSize);
21448 sfrPool->bw += addtnlPRBs;
/* If the CE pool is drained exactly, mark it full for this subframe. */
21449 if (addtnlPRBs == (sfrCEPool->bw - sfrCEPool->bwAlloced))
21451 sfrCEPool->bwAlloced = sfrCEPool->bw;
21452 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21456 sfrCEPool->bw -= addtnlPRBs;
21457 sfrCEPool->type0End = ((sfrCEPool->poolendRB + 1 - addtnlPRBs) / cell->rbgSize) - 1;
21459 *sfrpoolInfo = sfrPool;
21462 else if ( bwAvlbl <
21463 ((sfrCEPool->bw - sfrCEPool->bwAlloced) +
21464 (sfrPool->bw - sfrPool->bwAlloced)))
21466 /* All the Prbs from CE BW shall be allocated */
21467 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
/* Absorb the CE pool's remaining PRBs into the CC pool and mark CE full. */
21469 sfrPool->type2Start = sfrCEPool->type2Start;
21470 sfrPool->bw += sfrCEPool->bw - sfrCEPool->bwAlloced;
21471 sfrCEPool->type2Start = sfrCEPool->poolendRB + 1;
21472 sfrCEPool->bwAlloced = sfrCEPool->bw;
21473 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21475 /* set the remaining RBs for the requested UE */
21476 allocInfo->rbsReq = (sfrPool->bw - sfrPool->bwAlloced);
21477 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21478 noLyrs = allocInfo->tbInfo[0].noLyr;
21479 allocInfo->tbInfo[0].bytesReq =
21480 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21481 *sfrpoolInfo = sfrPool;
21492 /* Checking if no. of RBs required can be allocated from
21494 * 1. If available return the SFR pool.
21495 * 2. Else update the RBs required parameter based on the
21496 * BW available in the pool
21497 * 3. Return FALSE if no B/W is available.
21499 if (allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
21501 *sfrpoolInfo = sfrPool;
21506 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
/* Track the pool with the most free bandwidth as a fallback candidate. */
21508 if (bwAvlbl < sfrPool->bw - sfrPool->bwAlloced)
21512 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21514 bwAvlbl = sfrPool->bw - sfrPool->bwAlloced;
21515 poolWithMaxAvlblBw = sfrPool;
21517 n = cmLListNext(l);
/* CC UE exhausted the CC list: retry from the CE pool list. */
21519 if ((isUeCellEdge == FALSE) && (n == NULLP))
21521 if(l != &dlSf->sfrTotalPoolInfo.cePool)
21523 l = &dlSf->sfrTotalPoolInfo.cePool;
21524 n = cmLListFirst(l);
21534 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21538 dlSf->sfrTotalPoolInfo.ccBwFull = TRUE;
21544 /* set the remaining RBs for the requested UE */
/* No pool fully fits: downsize the request to the best pool's free BW and
 * recompute bytesReq accordingly. */
21545 allocInfo->rbsReq = poolWithMaxAvlblBw->bw -
21546 poolWithMaxAvlblBw->bwAlloced;
21547 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21548 noLyrs = allocInfo->tbInfo[0].noLyr;
21549 allocInfo->tbInfo[0].bytesReq =
21550 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21551 *sfrpoolInfo = poolWithMaxAvlblBw;
21558 n = cmLListNext(l);
21560 if ((isUeCellEdge == FALSE) && (n == NULLP))
21562 if(l != &dlSf->sfrTotalPoolInfo.cePool)
21564 l = &dlSf->sfrTotalPoolInfo.cePool;
21565 n = cmLListFirst(l);
21582 /* LTE_ADV_FLAG_REMOVED_END */
21585 * @brief To check if DL BW available for non-DLFS allocation.
21589 * Function : rgSCHCmnNonDlfsBwAvlbl
21591 * Processing Steps:
21592 * - Determine availability based on RA Type.
21594 * @param[in] RgSchCellCb *cell
21595 * @param[in] RgSchDlSf *dlSf
21596 * @param[in] RgSchDlRbAlloc *allocInfo
/* Checks whether enough DL bandwidth remains in dlSf for allocInfo->rbsReq,
 * per RA type: Type 0 counts whole RBGs between type0End and type2End (taking
 * the short last RBG into account); Type 2 counts contiguous free RBs.
 * For a NEW transmission that does not fit, the request is trimmed to what is
 * available and bytesReq is recomputed; a RETX must get its original size.
 * NOTE(review): chunk is elided — return statements and some braces are not
 * visible; comments below describe only the visible statements. */
21603 static Bool rgSCHCmnNonDlfsBwAvlbl
21607 RgSchDlRbAlloc *allocInfo
/* Set when the short last RBG was written off for a RETX, so the update can
 * be rolled back on the failure paths below. */
21612 uint8_t ignoredDfctRbg = FALSE;
21614 if (dlSf->bw <= dlSf->bwAlloced)
21616 DU_LOG("\nERROR --> SCH : (%d:%d)FAILED CRNTI:%d",
21617 dlSf->bw, dlSf->bwAlloced,allocInfo->rnti);
21620 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
21622 /* Fix for ccpu00123919 : Number of RBs in case of RETX should be same as
21623 * that of initial transmission. */
/* Non-zero txCntr => retransmission. */
21624 if(allocInfo->tbInfo[0].tbCb->txCntr)
21626 /* If RB assignment is being done for RETX. Then if reqRbs are
21627 * a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
21628 * not a multiple of rbgSize then check if lsgRbgDfct exists */
21629 if (allocInfo->rbsReq % cell->rbgSize == 0)
21631 if (dlSf->lstRbgDfct)
21633 /* In this scenario we are wasting the last RBG for this dlSf */
/* Charge the wasted remainder of the short last RBG to bwAlloced. */
21636 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21637 /* Fix: MUE_PERTTI_DL */
21638 dlSf->lstRbgDfct = 0;
21639 ignoredDfctRbg = TRUE;
21645 if (dlSf->lstRbgDfct)
21647 /* Check if type0 allocation can cater to this RETX requirement */
21648 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
21655 /* cannot allocate same number of required RBs */
21661 /* Condition is modified approprialtely to find
21662 * if rbsReq is less than available RBS*/
/* Type-0 capacity = whole RBGs between type2End and type0End pivots,
 * minus the last-RBG deficit. */
21663 if(allocInfo->rbsReq <= (((dlSf->type0End - dlSf->type2End + 1)*\
21664 cell->rbgSize) - dlSf->lstRbgDfct))
21668 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
21669 * allocation in TDD when requested RBs are more than available RBs*/
21672 /* MS_WORKAROUND for ccpu00122022 */
/* Less than one full RBG left: fail (after undoing the RETX adjustment). */
21673 if (dlSf->bw < dlSf->bwAlloced + cell->rbgSize)
21675 /* ccpu00132358- Re-assigning the values which were updated above
21676 * if it is RETX and Last RBG available*/
21677 if(ignoredDfctRbg == TRUE)
21680 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
21681 dlSf->lstRbgDfct = 1;
21687 /* Fix: Number of RBs in case of RETX should be same as
21688 * that of initial transmission. */
/* New (non-LAA) TX: shrink the request to the available type-0 capacity
 * and recompute bytesReq from the TBS table. */
21689 if(allocInfo->tbInfo[0].tbCb->txCntr == 0
21691 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
21695 /* Setting the remaining RBs for the requested UE*/
21696 allocInfo->rbsReq = (((dlSf->type0End - dlSf->type2End + 1)*\
21697 cell->rbgSize) - dlSf->lstRbgDfct);
21698 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21699 noLyrs = allocInfo->tbInfo[0].noLyr;
21700 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21701 /* DwPts Scheduling Changes Start */
/* TDD special subframe carrying data: only ~3/4 of the RBs' capacity counts. */
21703 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
21705 allocInfo->tbInfo[0].bytesReq =
21706 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
21709 /* DwPts Scheduling Changes End */
21713 /* ccpu00132358- Re-assigning the values which were updated above
21714 * if it is RETX and Last RBG available*/
21715 if(ignoredDfctRbg == TRUE)
21718 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
21719 dlSf->lstRbgDfct = 1;
21722 DU_LOG("\nERROR --> SCH : FAILED for CRNTI:%d",
21724 DU_LOG("\nERROR --> SCH : RB Alloc failed for LAA TB type 0\n");
21730 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
/* Type-2: request fits the remaining contiguous bandwidth. */
21732 if (allocInfo->rbsReq <= (dlSf->bw - dlSf->bwAlloced))
21736 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
21737 * allocation in TDD when requested RBs are more than available RBs*/
21740 /* Fix: Number of RBs in case of RETX should be same as
21741 * that of initial transmission. */
21742 if((allocInfo->tbInfo[0].tbCb->txCntr == 0)
21744 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
21748 /* set the remaining RBs for the requested UE */
21749 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
21750 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21751 noLyrs = allocInfo->tbInfo[0].noLyr;
21752 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21753 /* DwPts Scheduling Changes Start */
21755 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
21757 allocInfo->tbInfo[0].bytesReq =
21758 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
21761 /* DwPts Scheduling Changes End */
21765 DU_LOG("\nERROR --> SCH : RB Alloc failed for LAA TB type 2\n");
21766 DU_LOG("\nERROR --> SCH : FAILED for CRNTI:%d",allocInfo->rnti);
21769 /* Fix: Number of RBs in case of RETX should be same as
21770 * that of initial transmission. */
21774 DU_LOG("\nERROR --> SCH : FAILED for CRNTI:%d",allocInfo->rnti);
21778 /* LTE_ADV_FLAG_REMOVED_START */
21781 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21785 * Function : rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
21787 * Processing Steps:
21789 * @param[in] RgSchCellCb *cell
21790 * @param[in] RgSchDlSf *dlSf
21791 * @param[in] uint8_t rbStrt
21792 * @param[in] uint8_t numRb
/* After a Type-2 allocation for a common channel on an SFR cell: advances the
 * subframe-level type2Start/type2End/bwAlloced pivots, then walks the CC pool
 * list (falling through to the CE pool list) updating each pool that the new
 * allocation fully or partially covers.
 * NOTE(review): chunk is elided — loop braces are not visible here. */
21796 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
21806 RgSchSFRPoolInfo *sfrPool;
21808 l = &dlSf->sfrTotalPoolInfo.ccPool;
/* Move the subframe-wide type-2 pivots past the new allocation. */
21810 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21811 dlSf->bwAlloced += numRb;
21812 dlSf->type2Start += numRb;
21813 n = cmLListFirst(l);
21817 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21818 n = cmLListNext(l);
21820 /* If the pool contains some RBs allocated in this allocation, e.g: Pool is [30.50]. Pool->type2Start is 40 , dlSf->type2Start is 45. then update the variables in pool */
21821 if((sfrPool->poolendRB >= dlSf->type2Start) && (sfrPool->type2Start < dlSf->type2Start))
21823 sfrPool->type2End = dlSf->type2End;
/* Pool partially consumed: everything up to the new subframe pivot is used. */
21824 sfrPool->bwAlloced = dlSf->type2Start - sfrPool->poolstartRB;
21825 sfrPool->type2Start = dlSf->type2Start;
21829 /* If the pool contains all RBs allocated in this allocation*/
21830 if(dlSf->type2Start > sfrPool->poolendRB)
/* Pool fully consumed: mark it exhausted. */
21832 sfrPool->type2End = sfrPool->type0End + 1;
21833 sfrPool->bwAlloced = sfrPool->bw;
21834 sfrPool->type2Start = sfrPool->poolendRB + 1;
/* Finished the CC list: continue over the CE pool list once. */
21839 if (l != &dlSf->sfrTotalPoolInfo.cePool)
21841 l = &dlSf->sfrTotalPoolInfo.cePool;
21842 n = cmLListFirst(l);
21852 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21856 * Function : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
21858 * Processing Steps:
21860 * @param[in] RgSchCellCb *cell
21861 * @param[in] RgSchDlSf *dlSf
21862 * @param[in] uint8_t rbStrt
21863 * @param[in] uint8_t numRb
/* DSFR variant of the Type-2 post-allocation update: if the allocated RB span
 * falls inside a CC pool's high-power range, marks the UE as power-high and
 * sets the corresponding bits in the subframe's RNTP bitmap, then advances the
 * subframe type-2 pivots. Returns RFAILED (via visible error paths) when the
 * RNTP bitmap update fails.
 * NOTE(review): chunk is elided — parameter list and some braces are not
 * visible; the one-CC-pool vs two-CC-pool branching is inferred from the
 * visible null checks and should be confirmed against the full source. */
21868 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
21879 RgSchSFRPoolInfo *sfrCCPool1 = NULL;
21880 RgSchSFRPoolInfo *sfrCCPool2 = NULL;
21883 /* Move the type2End pivot forward */
21886 l = &dlSf->sfrTotalPoolInfo.ccPool;
21887 n = cmLListFirst(l);
21890 sfrCCPool1 = (RgSchSFRPoolInfo*)(n->node);
21892 if (sfrCCPool1 == NULLP)
21894 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21895 "sfrCCPool1 is NULL for CRNTI:%d",ue->ueId);
21898 n = cmLListNext(l);
21901 sfrCCPool2 = (RgSchSFRPoolInfo*)(n->node);
21902 n = cmLListNext(l);
/* Two CC pools present: the allocation may lie in either pool's P-High range. */
21904 if((sfrCCPool1) && (sfrCCPool2))
21906 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
21907 if(((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
21908 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb)) ||
21909 ((dlSf->type2Start >= sfrCCPool2->pwrHiCCRange.startRb) &&
21910 (dlSf->type2Start + numRb < sfrCCPool2->pwrHiCCRange.endRb)))
21912 ue->lteAdvUeCb.isCCUePHigh = TRUE;
21914 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
21915 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
21918 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21919 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Single CC pool: same P-High check against pool 1 only. */
21926 if((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
21927 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb))
21929 ue->lteAdvUeCb.isCCUePHigh = TRUE;
21931 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
21932 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
21935 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21936 "rgSCHCmnBuildRntpInfo() function returned RFAILED CRNTI:%d",ue->ueId);
/* Advance subframe-wide type-2 pivots past the new allocation. */
21942 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21944 dlSf->bwAlloced += numRb;
21945 /*MS_FIX for ccpu00123918*/
21946 dlSf->type2Start += numRb;
21952 #endif /* end of ifndef LTE_TDD*/
21953 /* LTE_ADV_FLAG_REMOVED_END */
21955 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21959 * Function : rgSCHCmnNonDlfsUpdTyp2Alloc
21961 * Processing Steps:
21963 * @param[in] RgSchCellCb *cell
21964 * @param[in] RgSchDlSf *dlSf
21965 * @param[in] uint8_t rbStrt
21966 * @param[in] uint8_t numRb
/* Updates subframe allocation state after a plain (non-SFR) Type-2 grant of
 * numRb RBs starting at rbStrt: bumps the type2End RBG pivot, the allocated
 * bandwidth counter, and the contiguous type2Start RB cursor.
 * NOTE(review): parameter list lines are not visible in this chunk. */
21970 static Void rgSCHCmnNonDlfsUpdTyp2Alloc
21978 /* Move the type2End pivot forward */
21979 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21980 //#ifndef LTEMAC_SPS
21981 dlSf->bwAlloced += numRb;
21982 /*Fix for ccpu00123918*/
/* Next Type-2 allocation starts right after this one. */
21983 dlSf->type2Start += numRb;
21989 * @brief To do DL allocation using TYPE0 RA.
21993 * Function : rgSCHCmnNonDlfsType0Alloc
21995 * Processing Steps:
21996 * - Perform TYPE0 allocation using the RBGs between
21997 * type0End and type2End.
21998 * - Build the allocation mask as per RBG positioning.
21999 * - Update the allocation parameters.
22001 * @param[in] RgSchCellCb *cell
22002 * @param[in] RgSchDlSf *dlSf
22003 * @param[in] RgSchDlRbAlloc *allocInfo
/* Performs a Type-0 (RBG bitmap) DL allocation for a UE: computes the number
 * of RBGs needed (absorbing the short last RBG via lstRbgDfct), clamps the
 * grant against subframe capacity and UE-category limits (max TB size/RBs),
 * builds the allocation bitmask backwards from the type0End pivot, and fills
 * in per-TB byte counts from the TBS tables.
 * NOTE(review): chunk is elided — several braces/#ifdef guards are missing;
 * comments describe the visible statements only. */
22008 static Void rgSCHCmnNonDlfsType0Alloc
22012 RgSchDlRbAlloc *allocInfo,
22016 uint32_t dlAllocMsk = 0;
/* Deficit of the short last RBG; folded into the first Type-0 grant. */
22017 uint8_t rbgFiller = dlSf->lstRbgDfct;
22018 uint8_t noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
22019 //uint8_t noRbgs = (allocInfo->rbsReq + rbgFiller)/ cell->rbgSize;
22023 uint32_t tb1BytesAlloc = 0;
22024 uint32_t tb2BytesAlloc = 0;
22025 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22027 //if(noRbgs == 0) noRbgs = 1; /* Not required as ceilling is used above*/
22029 /* Fix for ccpu00123919*/
/* Actual RB count granted by noRbgs groups, net of the last-RBG deficit. */
22030 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Would overshoot the subframe: visible recompute suggests noRbgs is reduced
 * on a hidden line — TODO confirm against the full source. */
22031 if (dlSf->bwAlloced + noRbs > dlSf->bw)
22037 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22040 /* Fix for ccpu00138701: Ceilling is using to derive num of RBGs, Therefore,
22041 * after this operation,checking Max TB size and Max RBs are not crossed
22042 * if it is crossed then decrement num of RBGs. */
22043 //if((noRbs + rbgFiller) % cell->rbgSize)
22044 if((noRbs > allocInfo->rbsReq) &&
22045 (allocInfo->rbsReq + rbgFiller) % cell->rbgSize)
22046 {/* considering ue category limitation
22047 * due to ceiling */
22050 if (rgSCHLaaIsLaaTB(allocInfo)== FALSE)
/* Compute would-be byte sizes for each new-TX TB to test category limits. */
22053 if ((allocInfo->tbInfo[0].schdlngForTb) && (!allocInfo->tbInfo[0].tbCb->txCntr))
22055 iTbs = allocInfo->tbInfo[0].iTbs;
22056 noLyr = allocInfo->tbInfo[0].noLyr;
22057 tb1BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22060 if ((allocInfo->tbInfo[1].schdlngForTb) && (!allocInfo->tbInfo[1].tbCb->txCntr))
22062 iTbs = allocInfo->tbInfo[1].iTbs;
22063 noLyr = allocInfo->tbInfo[1].noLyr;
22064 tb2BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22068 /* Only Check for New Tx No need for Retx */
22069 if (tb1BytesAlloc || tb2BytesAlloc)
/* UE-category caps: aggregate bits, per-TB size, and max RB count. */
22071 if (( ue->dl.aggTbBits >= dlUe->maxTbBits) ||
22072 (tb1BytesAlloc >= dlUe->maxTbSz/8) ||
22073 (tb2BytesAlloc >= dlUe->maxTbSz/8) ||
22074 (noRbs >= dlUe->maxRb))
22080 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22084 /* type0End would have been initially (during subfrm Init) at the bit position
22085 * (cell->noOfRbgs - 1), 0 being the most significant.
22086 * Getting DlAllocMsk for noRbgs and at the appropriate position */
22087 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - dlSf->type0End));
22088 /* Move backwards the type0End pivot */
22089 dlSf->type0End -= noRbgs;
22090 /*Fix for ccpu00123919*/
22091 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
22092 /* Update the bwAlloced field accordingly */
22093 //#ifndef LTEMAC_SPS /* ccpu00129474*/
22094 dlSf->bwAlloced += noRbs;
22096 /* Update Type0 Alloc Info */
22097 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
22098 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
22099 allocInfo->rbsAlloc = noRbs;
22101 /* Update Tb info for each scheduled TB */
22102 iTbs = allocInfo->tbInfo[0].iTbs;
22103 noLyr = allocInfo->tbInfo[0].noLyr;
22104 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
22105 * RETX TB Size is same as Init TX TB Size */
22106 if (allocInfo->tbInfo[0].tbCb->txCntr)
22108 allocInfo->tbInfo[0].bytesAlloc =
22109 allocInfo->tbInfo[0].bytesReq;
22113 allocInfo->tbInfo[0].bytesAlloc =
22114 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22115 /* DwPts Scheduling Changes Start */
/* TDD special subframe carrying data: derate capacity to ~3/4 of the RBs. */
22117 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
22119 allocInfo->tbInfo[0].bytesAlloc =
22120 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
22123 /* DwPts Scheduling Changes End */
/* Second transport block, same RETX-vs-new-TX sizing rules as TB0. */
22126 if (allocInfo->tbInfo[1].schdlngForTb)
22128 iTbs = allocInfo->tbInfo[1].iTbs;
22129 noLyr = allocInfo->tbInfo[1].noLyr;
22130 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
22131 * RETX TB Size is same as Init TX TB Size */
22132 if (allocInfo->tbInfo[1].tbCb->txCntr)
22134 allocInfo->tbInfo[1].bytesAlloc =
22135 allocInfo->tbInfo[1].bytesReq;
22139 allocInfo->tbInfo[1].bytesAlloc =
22140 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22141 /* DwPts Scheduling Changes Start */
22143 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
22145 allocInfo->tbInfo[1].bytesAlloc =
22146 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
22149 /* DwPts Scheduling Changes End */
22153 /* The last RBG which can be smaller than the RBG size is consedered
22154 * only for the first time allocation of TYPE0 UE */
22155 dlSf->lstRbgDfct = 0;
22162 * @brief To prepare RNTP value from the PRB allocation (P-High -> 1 and P-Low -> 0)
22166 * Function : rgSCHCmnBuildRntpInfo
22168 * Processing Steps:
22170 * @param[in] uint8_t *rntpPtr
22171 * @param[in] uint8_t startRb
22172 * @param[in] uint8_t numRb
/* Sets the RNTP (Relative Narrowband Tx Power) bitmap bits covering PRBs
 * [startRb, startRb + nmbRb): P-High PRBs are marked 1, one bit per PRB, LSB
 * of each octet first. Handles spans that cross octet boundaries by filling
 * the current octet and carrying the remainder forward.
 * NOTE(review): chunk is elided — the loop-advance of rbPtrStartIdx and the
 * return statement are on lines not visible here. */
22177 static S16 rgSCHCmnBuildRntpInfo
22186 uint16_t rbPtrStartIdx; /* Start Index of Octete Buffer to be filled */
22187 uint16_t rbPtrEndIdx; /* End Index of Octete Buffer to be filled */
22188 uint16_t rbBitLoc; /* Bit Location to be set as 1 in the current Byte */
22189 uint16_t nmbRbPerByte; /* PRB's to be set in the current Byte (in case of multiple Bytes) */
/* Octet indices of the first and last PRBs (8 PRB flags per octet). */
22192 rbPtrStartIdx = (startRb)/8;
22193 rbPtrEndIdx = (startRb + nmbRb)/8;
22195 if (rntpPtr == NULLP)
22197 DU_LOG("\nERROR --> SCH : rgSCHCmnBuildRntpInfo():"
22198 "rntpPtr can't be NULLP (Memory Allocation Failed)");
22202 while(rbPtrStartIdx <= rbPtrEndIdx)
/* Bit offset of startRb within the current octet. */
22204 rbBitLoc = (startRb)%8;
22206 /* case 1: startRb and endRb lies in same Byte */
22207 if (rbPtrStartIdx == rbPtrEndIdx)
/* Set nmbRb consecutive bits beginning at rbBitLoc. */
22209 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
22210 | (((1<<nmbRb)-1)<<rbBitLoc);
22213 /* case 2: startRb and endRb lies in different Byte */
22214 if (rbPtrStartIdx != rbPtrEndIdx)
/* Fill the rest of this octet, then carry the remaining PRBs forward. */
22216 nmbRbPerByte = 8 - rbBitLoc;
22217 nmbRb = nmbRb - nmbRbPerByte;
22218 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
22219 | (((1<<nmbRbPerByte)-1)<<rbBitLoc);
22220 startRb = startRb + nmbRbPerByte;
22226 /* dsfr_pal_fixes ** 21-March-2013 ** SKS ** Adding Debug logs */
22228 /* dsfr_pal_fixes ** 25-March-2013 ** SKS ** Adding Debug logs to print RNTP */
22234 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
22238 * Function : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
22240 * Processing Steps:
22242 * @param[in] RgSchCellCb *cell
22243 * @param[in] RgSchDlSf *dlSf
22244 * @param[in] uint8_t rbStrt
22245 * @param[in] uint8_t numRb
/* Updates subframe- and SFR-pool-level state after a Type-2 grant from a
 * specific SFR pool; when DSFR is enabled, additionally marks a cell-centre UE
 * as P-High and updates the RNTP bitmap if the grant lies in the pool's
 * high-power range. Visible error paths return RFAILED on bitmap failure.
 * NOTE(review): chunk is elided — parameter list and several braces/returns
 * are not visible here. */
22249 static S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
22254 RgSchSFRPoolInfo *sfrPool,
/* Advance subframe-wide type-2 pivots past the new grant. */
22263 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22264 sfrPool->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22267 dlSf->type2Start += numRb;
22268 dlSf->bwAlloced += numRb;
22270 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
22272 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
/* Only cell-centre UEs get the P-High/RNTP treatment here. */
22273 if(FALSE == ue->lteAdvUeCb.rgrLteAdvUeCfg.isUeCellEdge)
22275 if((sfrPool->type2Start >= sfrPool->pwrHiCCRange.startRb) &&
22276 (sfrPool->type2Start + numRb < sfrPool->pwrHiCCRange.endRb))
22278 ue->lteAdvUeCb.isCCUePHigh = TRUE;
22280 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
22281 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
22284 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
22285 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
22292 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
22293 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
22296 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
22297 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Advance the pool-local Type-2 cursor and usage counter. */
22302 sfrPool->type2Start += numRb;
22303 sfrPool->bwAlloced += numRb;
22310 * @brief To do DL allocation using TYPE0 RA.
22314 * Function : rgSCHCmnNonDlfsSFRPoolType0Alloc
22316 * Processing Steps:
22317 * - Perform TYPE0 allocation using the RBGs between type0End and type2End.
22318 * - Build the allocation mask as per RBG positioning.
22319 * - Update the allocation parameters.
22321 * @param[in] RgSchCellCb *cell
22322 * @param[in] RgSchDlSf *dlSf
22323 * @param[in] RgSchDlRbAlloc *allocInfo
/* NOTE(review): elided listing — braces/else-arms/declarations (e.g. noRbs,
 * iTbs, noLyr) are not visible here. Code left byte-identical. */
22327 static Void rgSCHCmnNonDlfsSFRPoolType0Alloc
22331 RgSchSFRPoolInfo *poolInfo,
22332 RgSchDlRbAlloc *allocInfo
22335 uint32_t dlAllocMsk = 0;
22336 uint8_t rbgFiller = 0;
22337 uint8_t noRbgs = 0;
/* The last-RBG deficit applies only when this pool reaches the end of the
 * system bandwidth and type0End is still at its initial position */
22343 if (poolInfo->poolstartRB + poolInfo->bw == dlSf->bw)
22345 if (poolInfo->type0End == dlSf->bw/4)
22347 rbgFiller = dlSf->lstRbgDfct;
22348 /* The last RBG which can be smaller than the RBG size is consedered
22349 * only for the first time allocation of TYPE0 UE */
22350 dlSf->lstRbgDfct = 0;
22354 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
22356 /* Abhinav to-do start */
22357 /* MS_FIX for ccpu00123919*/
22358 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* presumably noRbgs is clipped here when the request exceeds the remaining
 * subframe bandwidth, then noRbs recomputed — clipping lines are elided */
22359 if (dlSf->bwAlloced + noRbs > dlSf->bw)
22365 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22367 /* Abhinav to-do end */
22371 /* type0End would have been initially (during subfrm Init) at the bit position
22372 * (cell->noOfRbgs - 1), 0 being the most significant.
22373 * Getting DlAllocMsk for noRbgs and at the appropriate position */
22374 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - poolInfo->type0End));
22375 /* Move backwards the type0End pivot */
22376 poolInfo->type0End -= noRbgs;
22377 /*MS_FIX for ccpu00123919*/
22378 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
22379 /* Update the bwAlloced field accordingly */
22380 poolInfo->bwAlloced += noRbs + dlSf->lstRbgDfct;
22381 dlSf->bwAlloced += noRbs + dlSf->lstRbgDfct;
22383 /* Update Type0 Alloc Info */
22384 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
22385 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
22386 allocInfo->rbsAlloc = noRbs;
22388 /* Update Tb info for each scheduled TB */
22389 iTbs = allocInfo->tbInfo[0].iTbs;
22390 noLyr = allocInfo->tbInfo[0].noLyr;
22391 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
22392 * RETX TB Size is same as Init TX TB Size */
22393 if (allocInfo->tbInfo[0].tbCb->txCntr)
22395 allocInfo->tbInfo[0].bytesAlloc =
22396 allocInfo->tbInfo[0].bytesReq;
/* first-transmission TB: size from the TB-size table, bits -> bytes */
22400 allocInfo->tbInfo[0].bytesAlloc =
22401 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22404 if (allocInfo->tbInfo[1].schdlngForTb)
22406 iTbs = allocInfo->tbInfo[1].iTbs;
22407 noLyr = allocInfo->tbInfo[1].noLyr;
22408 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
22409 * RETX TB Size is same as Init TX TB Size */
22410 if (allocInfo->tbInfo[1].tbCb->txCntr)
22412 allocInfo->tbInfo[1].bytesAlloc =
22413 allocInfo->tbInfo[1].bytesReq;
22417 allocInfo->tbInfo[1].bytesAlloc =
22418 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22422 /* The last RBG which can be smaller than the RBG size is consedered
22423 * only for the first time allocation of TYPE0 UE */
22424 dlSf->lstRbgDfct = 0;
22429 * @brief Computes RNTP Info for a subframe.
22433 * Function : rgSCHCmnNonDlfsDsfrRntpComp
22435 * Processing Steps:
22436 * - Computes RNTP info from individual pools.
 *      - ORs this subframe's RNTP bitmap into the cell-level aggregate every
 *        TTI; after RG_SCH_MAX_RNTP_SAMPLES calls the aggregate is sent to
 *        the application as an RGR_SFR load indication and reset.
 * @param[in] RgSchCellCb *cell
22438 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): elided listing — declarations of i/len and several braces
 * are not visible. Code left byte-identical. */
22443 static void rgSCHCmnNonDlfsDsfrRntpComp(RgSchCellCb *cell,RgSchDlSf *dlSf)
/* function-static sample counter: persists across TTIs for this process;
 * NOTE(review): shared across cells if multiple cells call this — confirm */
22445 static uint16_t samples = 0;
22447 uint16_t bwBytes = (dlSf->bw-1)/8;
22448 RgrLoadInfIndInfo *rgrLoadInf;
22450 uint16_t ret = ROK;
/* len = number of bytes needed to hold bw bits (ceil(bw/8)) */
22453 len = (dlSf->bw % 8 == 0) ? dlSf->bw/8 : dlSf->bw/8 + 1;
22455 /* RNTP info is ORed every TTI and the sample is stored in cell control block */
22456 for(i = 0; i <= bwBytes; i++)
22458 cell->rntpAggrInfo.val[i] |= dlSf->rntpInfo.val[i];
22460 samples = samples + 1;
22461 /* After every 1000 ms, the RNTP info will be sent to application to be further sent to all neighbouring eNB
22462 informing them about the load indication for cell edge users */
22463 if(RG_SCH_MAX_RNTP_SAMPLES == samples)
22466 ret = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&rgrLoadInf,
22467 sizeof(RgrLoadInfIndInfo));
22470 DU_LOG("\nERROR --> SCH : Could not "
22471 "allocate memory for sending LoadInfo");
/* indication carries a pointer to the aggregate bitmap, not a copy */
22475 rgrLoadInf->u.rntpInfo.pres = cell->rntpAggrInfo.pres;
22476 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
22477 rgrLoadInf->u.rntpInfo.len = len;
22479 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
22480 rgrLoadInf->u.rntpInfo.val = cell->rntpAggrInfo.val;
22481 rgrLoadInf->cellId = cell->cellId;
22483 /* dsfr_pal_fixes ** 22-March-2013 ** SKS */
22484 rgrLoadInf->bw = dlSf->bw;
22485 rgrLoadInf->type = RGR_SFR;
22487 ret = rgSCHUtlRgrLoadInfInd(cell, rgrLoadInf);
22490 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsDsfrRntpComp():"
22491 "rgSCHUtlRgrLoadInfInd() returned RFAILED");
/* restart aggregation window */
22494 memset(cell->rntpAggrInfo.val,0,len);
22498 /* LTE_ADV_FLAG_REMOVED_END */
22500 /* LTE_ADV_FLAG_REMOVED_START */
22502 * @brief Performs RB allocation per UE from a pool.
22506 * Function : rgSCHCmnSFRNonDlfsUeRbAlloc
22508 * Processing Steps:
22509 * - Allocate consecutively available RBs.
 *      - Select an SFR pool for the UE (cell-edge vs cell-centre), allocate
 *        a PDCCH, then perform TYPE2 or TYPE0 RB allocation from that pool.
22511 * @param[in] RgSchCellCb *cell
22512 * @param[in] RgSchUeCb *ue
22513 * @param[in] RgSchDlSf *dlSf
22514 * @param[out] uint8_t *isDlBwAvail
/* NOTE(review): elided listing — return statements, braces and some
 * declarations (ret, isUECellEdge) are not visible. Code left byte-identical. */
22521 static S16 rgSCHCmnSFRNonDlfsUeRbAlloc
22526 uint8_t *isDlBwAvail
22529 RgSchDlRbAlloc *allocInfo;
22530 RgSchCmnDlUe *dlUe;
22532 RgSchSFRPoolInfo *sfrpoolInfo = NULLP;
22535 isUECellEdge = RG_SCH_CMN_IS_UE_CELL_EDGE(ue);
22537 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22538 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
22539 *isDlBwAvail = TRUE;
22541 /*Find which pool is available for this UE*/
22542 if (rgSCHCmnNonDlfsSFRBwAvlbl(cell, &sfrpoolInfo, dlSf, allocInfo, isUECellEdge) != TRUE)
22544 /* SFR_FIX - If this is CE UE there may be BW available in CC Pool
22545 So CC UEs will be scheduled */
22548 *isDlBwAvail = TRUE;
22552 *isDlBwAvail = FALSE;
22557 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX || dlUe->proc->tbInfo[1].isAckNackDtx)
/* DTX on either TB forces PDCCH allocation in the common search space (TRUE) */
22559 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
22563 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
22566 if (!(allocInfo->pdcch))
22568 /* Returning ROK since PDCCH might be available for another UE and further allocations could be done */
22573 allocInfo->rnti = ue->ueId;
22576 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22578 allocInfo->allocInfo.raType2.isLocal = TRUE;
22579 /* rg004.201 patch - ccpu00109921 fix end */
22580 /* MS_FIX for ccpu00123918*/
/* TYPE2 (localized contiguous) allocation starts at the pool's pivot */
22581 allocInfo->allocInfo.raType2.rbStart = (uint8_t)sfrpoolInfo->type2Start;
22582 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22583 /* rg007.201 - Changes for MIMO feature addition */
22584 /* rg008.201 - Removed dependency on MIMO compile-time flag */
22585 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrpoolInfo, \
22586 allocInfo->allocInfo.raType2.rbStart, \
22587 allocInfo->allocInfo.raType2.numRb);
22588 allocInfo->rbsAlloc = allocInfo->rbsReq;
22589 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22591 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
22593 rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, sfrpoolInfo, allocInfo);
/* code-rate re-evaluation per scheduled TB — guarded by elided #ifdef(s) */
22597 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,0);
22598 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
22600 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,1);
22605 #if defined(LTEMAC_SPS)
22606 /* Update the sub-frame with new allocation */
22607 dlSf->bwAlloced += allocInfo->rbsReq;
22613 /* LTE_ADV_FLAG_REMOVED_END */
22614 #endif /* LTE_TDD */
22617 * @brief Performs RB allocation per UE for frequency non-selective cell.
22621 * Function : rgSCHCmnNonDlfsUeRbAlloc
22623 * Processing Steps:
22624 * - Allocate consecutively available RBs.
 *      - 5GTF path: allocate PDCCH, then assign VRBGs from the UE's beam
 *        starting at the beam's vrbgStart pivot and fill the DL grant.
22626 * @param[in] RgSchCellCb *cell
22627 * @param[in] RgSchUeCb *ue
22628 * @param[in] RgSchDlSf *dlSf
22629 * @param[out] uint8_t *isDlBwAvail
/* NOTE(review): elided listing — returns, braces and some declarations are
 * not visible. Code left byte-identical. */
22635 static S16 rgSCHCmnNonDlfsUeRbAlloc
22640 uint8_t *isDlBwAvail
22643 RgSchDlRbAlloc *allocInfo;
22644 RgSchCmnDlUe *dlUe;
22646 uint32_t dbgRbsReq = 0;
22650 RgSch5gtfUeCb *ue5gtfCb = &(ue->ue5gtfCb);
22651 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[ue5gtfCb->BeamId]);
22653 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22654 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
22655 *isDlBwAvail = TRUE;
/* beam VRBG budget exhausted (MAX_5GTF_VRBG presumably 25 per the log text) */
22657 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
22659 DU_LOG("\nERROR --> SCH : 5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
22661 DU_LOG("\nERROR --> SCH : 5GTF_ERROR vrbg allocated > 25\n");
22665 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX
22666 || dlUe->proc->tbInfo[1].isAckNackDtx)
22668 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
22672 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
22674 if (!(allocInfo->pdcch))
22676 /* Returning ROK since PDCCH might be available for another UE and
22677 * further allocations could be done */
22678 DU_LOG("\nERROR --> SCH : 5GTF_ERROR : PDCCH allocation failed :ue (%u)",
22680 DU_LOG("\nERROR --> SCH : 5GTF_ERROR PDCCH allocation failed\n");
22684 //maxPrb = RGSCH_MIN((allocInfo->vrbgReq * MAX_5GTF_VRBG_SIZE), ue5gtfCb->maxPrb);
22685 //maxPrb = RGSCH_MIN(maxPrb,
22686 //((beamInfo->totVrbgAvail - beamInfo->vrbgStart)* MAX_5GTF_VRBG_SIZE)));
22687 //TODO_SID Need to check for vrbg available after scheduling for same beam.
22688 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
22689 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
22690 //TODO_SID: Setting for max TP
22691 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
/* RIV encodes (vrbgStart, numVrbg) over MAX_5GTF_VRBG resource blocks */
22692 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
22693 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
22694 allocInfo->tbInfo[0].tbCb->dlGrnt.SCID = 0;
22695 allocInfo->tbInfo[0].tbCb->dlGrnt.dciFormat = allocInfo->dciFormat;
22696 //Filling temporarily
22697 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
22698 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* advance per-beam pivots so the next UE on this beam starts after us */
22700 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
22701 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
22702 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22710 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
22714 * Function : rgSCHCmnNonDlfsCcchSduAlloc
22716 * Processing Steps:
22717 * - For each element in the list, Call rgSCHCmnNonDlfsCcchSduRbAlloc().
22718 * - If allocation is successful, add the ueCb to scheduled list of CCCH
22720 * - else, add UeCb to non-scheduled list.
22722 * @param[in] RgSchCellCb *cell
22723 * @param[in, out] RgSchCmnCcchSduRbAlloc *allocInfo
22724 * @param[in] uint8_t isRetx
/* NOTE(review): elided listing (braces, ret declaration, loop tails not
 * visible). Code left byte-identical. isRetx selects retx vs tx lists. */
22728 static Void rgSCHCmnNonDlfsCcchSduAlloc
22731 RgSchCmnCcchSduRbAlloc *allocInfo,
22736 CmLListCp *ccchSduLst = NULLP;
22737 CmLListCp *schdCcchSduLst = NULLP;
22738 CmLListCp *nonSchdCcchSduLst = NULLP;
22739 CmLList *schdLnkNode = NULLP;
22740 CmLList *toBeSchdLnk = NULLP;
22741 RgSchDlSf *dlSf = allocInfo->ccchSduDlSf;
22742 RgSchUeCb *ueCb = NULLP;
22743 RgSchDlHqProcCb *hqP = NULLP;
22747 /* Initialize re-transmitting lists */
22748 ccchSduLst = &(allocInfo->ccchSduRetxLst);
22749 schdCcchSduLst = &(allocInfo->schdCcchSduRetxLst);
22750 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduRetxLst);
22754 /* Initialize transmitting lists */
22755 ccchSduLst = &(allocInfo->ccchSduTxLst);
22756 schdCcchSduLst = &(allocInfo->schdCcchSduTxLst);
22757 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduTxLst);
22760 /* Perform allocaations for the list */
22761 toBeSchdLnk = cmLListFirst(ccchSduLst);
22762 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
22764 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
22765 ueCb = hqP->hqE->ue;
22766 schdLnkNode = &hqP->schdLstLnk;
22767 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
22768 ret = rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf);
22771 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
22772 * list and return */
/* on first failure, every remaining node (including the failed one) is
 * pushed onto the non-scheduled list and the walk terminates */
22775 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
22776 ueCb = hqP->hqE->ue;
22777 schdLnkNode = &hqP->schdLstLnk;
22778 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
22779 cmLListAdd2Tail(nonSchdCcchSduLst, schdLnkNode);
22780 toBeSchdLnk = toBeSchdLnk->next;
22781 } while(toBeSchdLnk);
22785 /* Allocation successful: Add UE to the scheduled list */
22786 cmLListAdd2Tail(schdCcchSduLst, schdLnkNode);
22794 * @brief Performs RB allocation for CcchSdu for frequency non-selective cell.
22798 * Function : rgSCHCmnNonDlfsCcchSduRbAlloc
22800 * Processing Steps:
22802 * - Allocate consecutively available RBs
 *      - Bandwidth check, PDCCH allocation (DCI 1A), then TYPE2 localized
 *        allocation starting at the subframe's type2Start pivot.
22804 * @param[in] RgSchCellCb *cell
22805 * @param[in] RgSchUeCb *ueCb
22806 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): elided listing — the two bandwidth checks below belong to
 * different compile-time variants (SPS vs non-SPS, presumably); braces and
 * returns are not visible. Code left byte-identical. */
22811 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc
22818 RgSchDlRbAlloc *allocInfo;
22819 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
22823 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb,cell);
22825 /* [ccpu00138802]-MOD-If Bw is less than required, return fail
22826 It will be allocated in next TTI */
22828 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
22829 (dlSf->bwAlloced == dlSf->bw))
22831 if((dlSf->bwAlloced == dlSf->bw) ||
22832 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
22837 /* Retrieve PDCCH */
22838 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
22839 if (ueDl->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
22841 /* allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, dlSf, y, ueDl->cqi,
22842 * TFU_DCI_FORMAT_1A, TRUE);*/
22843 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, TRUE);
22847 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE);
22849 if (!(allocInfo->pdcch))
22851 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
22855 /* Update allocation information */
22856 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
22857 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
22858 allocInfo->allocInfo.raType2.isLocal = TRUE;
22860 /*Fix for ccpu00123918*/
22861 /* Push this harq process back to the free queue */
22862 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
22863 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22864 allocInfo->rbsAlloc = allocInfo->rbsReq;
22865 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22866 /* Update the sub-frame with new allocation */
22868 /* LTE_ADV_FLAG_REMOVED_START */
/* SFR enabled: common-channel update must also maintain the SFR pools */
22870 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
22872 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf,
22873 allocInfo->allocInfo.raType2.rbStart,
22874 allocInfo->allocInfo.raType2.numRb);
22877 #endif /* end of ifndef LTE_TDD*/
22879 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf,
22880 allocInfo->allocInfo.raType2.rbStart,
22881 allocInfo->allocInfo.raType2.numRb);
22884 /* LTE_ADV_FLAG_REMOVED_END */
22885 /* ccpu00131941 - bwAlloced is updated from SPS bandwidth */
22893 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
22897 * Function : rgSCHCmnNonDlfsMsg4RbAlloc
22899 * Processing Steps:
22901 * - Allocate consecutively available RBs
 *      - Two build variants are interleaved below: a TYPE2/DCI-1A LTE path
 *        and a 5GTF beam/VRBG path (DCI B1). Braces/#ifdefs are elided.
22903 * @param[in] RgSchCellCb *cell
22904 * @param[in] RgSchRaCb *raCb
22905 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): elided listing — returns and conditional-compilation
 * boundaries are not visible. Code left byte-identical. */
22910 static S16 rgSCHCmnNonDlfsMsg4RbAlloc
22917 RgSchDlRbAlloc *allocInfo;
22920 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_RACB(raCb);
/* 5GTF: Msg4 always uses beam 0 */
22923 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
22924 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
22926 DU_LOG("\nERROR --> SCH : 5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
22928 DU_LOG("\nERROR --> SCH : 5GTF_ERROR vrbg allocated > 25\n");
22933 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
22934 (dlSf->bwAlloced == dlSf->bw))
22936 if((dlSf->bwAlloced == dlSf->bw) ||
22937 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
22944 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
22945 if (raCb->dlHqE->msg4Proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
22947 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, TRUE);
22951 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, FALSE);
22953 if (!(allocInfo->pdcch))
22955 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
22960 /* SR_RACH_STATS : MSG4 TX Failed */
22961 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
22963 /* Update allocation information */
22964 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
22965 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
22966 allocInfo->allocInfo.raType2.isLocal = TRUE;
22969 /*Fix for ccpu00123918*/
22970 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
22971 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22972 /* LTE_ADV_FLAG_REMOVED_START */
22974 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
22976 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
22977 allocInfo->allocInfo.raType2.rbStart, \
22978 allocInfo->allocInfo.raType2.numRb);
22981 #endif /* end of ifndef LTE_TDD */
22983 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
22984 allocInfo->allocInfo.raType2.rbStart, \
22985 allocInfo->allocInfo.raType2.numRb);
22987 /* LTE_ADV_FLAG_REMOVED_END */
22989 allocInfo->rbsAlloc = allocInfo->rbsReq;
22990 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22994 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
/* 5GTF variant: grant is expressed in VRBGs from beam 0's pivot */
22996 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
22997 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
22999 /* Update allocation information */
23000 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23002 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
23003 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
23004 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
23006 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
23007 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
23010 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
23011 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
23012 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23020 * @brief Performs RB allocation for Msg4 lists of frequency non-selective cell.
23024 * Function : rgSCHCmnNonDlfsMsg4Alloc
23026 * Processing Steps:
23027 * - For each element in the list, Call rgSCHCmnNonDlfsMsg4RbAlloc().
23028 * - If allocation is successful, add the raCb to scheduled list of MSG4.
23029 * - else, add RaCb to non-scheduled list.
23031 * @param[in] RgSchCellCb *cell
23032 * @param[in, out] RgSchCmnMsg4RbAlloc *allocInfo
23033 * @param[in] uint8_t isRetx
/* NOTE(review): elided listing; structurally parallel to
 * rgSCHCmnNonDlfsCcchSduAlloc but walking RA control blocks.
 * Code left byte-identical. */
23037 static Void rgSCHCmnNonDlfsMsg4Alloc
23040 RgSchCmnMsg4RbAlloc *allocInfo,
23045 CmLListCp *msg4Lst = NULLP;
23046 CmLListCp *schdMsg4Lst = NULLP;
23047 CmLListCp *nonSchdMsg4Lst = NULLP;
23048 CmLList *schdLnkNode = NULLP;
23049 CmLList *toBeSchdLnk = NULLP;
23050 RgSchDlSf *dlSf = allocInfo->msg4DlSf;
23051 RgSchRaCb *raCb = NULLP;
23052 RgSchDlHqProcCb *hqP = NULLP;
23056 /* Initialize re-transmitting lists */
23057 msg4Lst = &(allocInfo->msg4RetxLst);
23058 schdMsg4Lst = &(allocInfo->schdMsg4RetxLst);
23059 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4RetxLst);
23063 /* Initialize transmitting lists */
23064 msg4Lst = &(allocInfo->msg4TxLst);
23065 schdMsg4Lst = &(allocInfo->schdMsg4TxLst);
23066 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4TxLst);
23069 /* Perform allocaations for the list */
23070 toBeSchdLnk = cmLListFirst(msg4Lst);
23071 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
23073 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23074 raCb = hqP->hqE->raCb;
23075 schdLnkNode = &hqP->schdLstLnk;
23076 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23077 ret = rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf);
23080 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
23081 * list and return */
/* first failure moves the failed node and all remaining ones to the
 * non-scheduled list, then the function returns */
23084 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23085 raCb = hqP->hqE->raCb;
23086 schdLnkNode = &hqP->schdLstLnk;
23087 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23088 cmLListAdd2Tail(nonSchdMsg4Lst, schdLnkNode);
23089 toBeSchdLnk = toBeSchdLnk->next;
23090 } while(toBeSchdLnk);
23094 /* Allocation successful: Add UE to the scheduled list */
23095 cmLListAdd2Tail(schdMsg4Lst, schdLnkNode);
23106 * @brief Performs RB allocation for the list of UEs of a frequency
23107 * non-selective cell.
23111 * Function : rgSCHCmnNonDlfsDedRbAlloc
23113 * Processing Steps:
23114 * - For each element in the list, Call rgSCHCmnNonDlfsUeRbAlloc().
23115 * - If allocation is successful, add the ueCb to scheduled list of UEs.
23116 * - else, add ueCb to non-scheduled list of UEs.
23118 * @param[in] RgSchCellCb *cell
23119 * @param[in, out] RgSchCmnUeRbAlloc *allocInfo
23120 * @param[in] CmLListCp *ueLst,
23121 * @param[in, out] CmLListCp *schdHqPLst,
23122 * @param[in, out] CmLListCp *nonSchdHqPLst
/* NOTE(review): elided listing — ue assignment, ret declaration and branch
 * structure partially missing. Code left byte-identical. */
23126 Void rgSCHCmnNonDlfsDedRbAlloc
23129 RgSchCmnUeRbAlloc *allocInfo,
23131 CmLListCp *schdHqPLst,
23132 CmLListCp *nonSchdHqPLst
23136 CmLList *schdLnkNode = NULLP;
23137 CmLList *toBeSchdLnk = NULLP;
23138 RgSchDlSf *dlSf = allocInfo->dedDlSf;
23139 RgSchUeCb *ue = NULLP;
23140 RgSchDlHqProcCb *hqP = NULLP;
23141 uint8_t isDlBwAvail;
23144 /* Perform allocaations for the list */
23145 toBeSchdLnk = cmLListFirst(ueLst);
23146 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
23148 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23150 schdLnkNode = &hqP->schdLstLnk;
23151 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23153 ret = rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, &isDlBwAvail);
23156 /* Allocation failed: Add remaining UEs to non-scheduled
23157 * list and return */
/* isDlBwAvail == FALSE (presumably the guard here — elided): subframe is
 * out of bandwidth, so every remaining HARQ proc goes non-scheduled */
23160 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23162 schdLnkNode = &hqP->schdLstLnk;
23163 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23164 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
23165 toBeSchdLnk = toBeSchdLnk->next;
23166 } while(toBeSchdLnk);
23172 #if defined (TENB_STATS) && defined (RG_5GTF)
23173 cell->tenbStats->sch.dl5gtfRbAllocPass++;
23175 /* Allocation successful: Add UE to the scheduled list */
23176 cmLListAdd2Tail(schdHqPLst, schdLnkNode);
23180 #if defined (TENB_STATS) && defined (RG_5GTF)
23181 cell->tenbStats->sch.dl5gtfRbAllocFail++;
23183 /* Allocation failed : Add UE to the non-scheduled list */
23184 DU_LOG("\nERROR --> SCH : 5GTF_ERROR Dl rb alloc failed adding nonSchdHqPLst\n");
23185 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
23193 * @brief Handles RB allocation for frequency non-selective cell.
23197 * Function : rgSCHCmnNonDlfsRbAlloc
23199 * Invoking Module Processing:
23200 * - SCH shall invoke this if downlink frequency selective is disabled for
23201 * the cell for RB allocation.
23202 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
23203 * estimate and subframe for each allocation to be made to SCH.
23205 * Processing Steps:
23206 * - Allocate sequentially for common channels.
23207 * - For transmitting and re-transmitting UE list.
23209 * - Perform wide-band allocations for UE in increasing order of
23211 * - Determine Imcs for the allocation.
23212 * - Determine RA type.
23213 * - Determine DCI format.
 *      Allocation order visible below: MSG4 retx, MSG4 tx, CCCH-SDU retx,
 *      CCCH-SDU tx, RAR, then dedicated tx+retx / retx / tx lists.
23215 * @param[in] RgSchCellCb *cell
23216 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): elided listing — braces and some #ifdef boundaries missing.
 * Code left byte-identical. */
23220 Void rgSCHCmnNonDlfsRbAlloc
23223 RgSchCmnDlRbAllocInfo *allocInfo
23226 uint8_t raRspCnt = 0;
23227 RgSchDlRbAlloc *reqAllocInfo;
23229 /* Allocate for MSG4 retransmissions */
23230 if (allocInfo->msg4Alloc.msg4RetxLst.count)
23232 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc RetxLst\n");
23233 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), TRUE);
23236 /* Allocate for MSG4 transmissions */
23237 /* Assuming all the nodes in the list need allocations: rbsReq is valid */
23238 if (allocInfo->msg4Alloc.msg4TxLst.count)
23240 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc txLst\n");
23241 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), FALSE);
23244 /* Allocate for CCCH SDU (received after guard timer expiry)
23245 * retransmissions */
23246 if (allocInfo->ccchSduAlloc.ccchSduRetxLst.count)
23248 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
23249 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), TRUE);
23252 /* Allocate for CCCD SDU transmissions */
23253 /* Allocate for CCCH SDU (received after guard timer expiry) transmissions */
23254 if (allocInfo->ccchSduAlloc.ccchSduTxLst.count)
23256 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
23257 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), FALSE);
23261 /* Allocate for Random access response */
23262 for (raRspCnt = 0; raRspCnt < RG_SCH_CMN_MAX_CMN_PDCCH; ++raRspCnt)
23264 /* Assuming that the requests will be filled in sequentially */
23265 reqAllocInfo = &(allocInfo->raRspAlloc[raRspCnt]);
23266 if (!reqAllocInfo->rbsReq)
/* rbsReq == 0 terminates the RAR scan (requests filled sequentially) */
23270 DU_LOG("\nINFO --> SCH : 5GTF_ERROR calling RAR rgSCHCmnNonDlfsCmnRbAlloc\n");
23271 // if ((rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo)) != ROK)
23272 if ((rgSCHCmnNonDlfsCmnRbAllocRar(cell, reqAllocInfo)) != ROK)
23278 /* Allocate for RETX+TX UEs */
23279 if(allocInfo->dedAlloc.txRetxHqPLst.count)
23281 DU_LOG("\nDEBUG --> SCH : 5GTF_ERROR TX RETX rgSCHCmnNonDlfsDedRbAlloc\n");
23282 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23283 &(allocInfo->dedAlloc.txRetxHqPLst),
23284 &(allocInfo->dedAlloc.schdTxRetxHqPLst),
23285 &(allocInfo->dedAlloc.nonSchdTxRetxHqPLst));
23288 if((allocInfo->dedAlloc.retxHqPLst.count))
23290 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23291 &(allocInfo->dedAlloc.retxHqPLst),
23292 &(allocInfo->dedAlloc.schdRetxHqPLst),
23293 &(allocInfo->dedAlloc.nonSchdRetxHqPLst));
23296 /* Allocate for transmitting UEs */
23297 if((allocInfo->dedAlloc.txHqPLst.count))
23299 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23300 &(allocInfo->dedAlloc.txHqPLst),
23301 &(allocInfo->dedAlloc.schdTxHqPLst),
23302 &(allocInfo->dedAlloc.nonSchdTxHqPLst));
23305 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cell);
/* sanity log only — scheduler selection exceeding maxUePerDlSf is reported,
 * not corrected here */
23306 if ((allocInfo->dedAlloc.txRetxHqPLst.count +
23307 allocInfo->dedAlloc.retxHqPLst.count +
23308 allocInfo->dedAlloc.txHqPLst.count) >
23309 cmnCell->dl.maxUePerDlSf)
23311 #ifndef ALIGN_64BIT
23312 DU_LOG("\nERROR --> SCH : UEs selected by"
23313 " scheduler exceed maximumUePerDlSf(%u)tx-retx %ld retx %ld tx %ld\n",
23314 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
23315 allocInfo->dedAlloc.retxHqPLst.count,
23316 allocInfo->dedAlloc.txHqPLst.count);
23318 DU_LOG("\nERROR --> SCH : UEs selected by"
23319 " scheduler exceed maximumUePerDlSf(%u)tx-retx %d retx %d tx %d\n",
23320 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
23321 allocInfo->dedAlloc.retxHqPLst.count,
23322 allocInfo->dedAlloc.txHqPLst.count);
23327 /* LTE_ADV_FLAG_REMOVED_START */
23328 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
23330 DU_LOG("\nINFO --> SCH : 5GTF_ERROR RETX rgSCHCmnNonDlfsDsfrRntpComp\n");
23331 rgSCHCmnNonDlfsDsfrRntpComp(cell, allocInfo->dedAlloc.dedDlSf);
23333 /* LTE_ADV_FLAG_REMOVED_END */
23334 #endif /* LTE_TDD */
/***********************************************************
*
*     Func : rgSCHCmnCalcRiv
*
*     Desc : Computes the Resource Indication Value (RIV) that encodes a
*            contiguous (localized Type-2) resource-block allocation
*            (rbStart, numRb) over a bandwidth of bw RBs, per the RIV
*            encoding of 3GPP TS 36.213 (resource allocation type 2).
*
*     Ret  : riv - the encoded RIV
*
*     Notes: Callers must guarantee 1 <= numRb and rbStart + numRb <= bw
*            (numRb == 0 would underflow the uint8_t numRbMinus1).
*            The two arithmetic branches keep every RIV value unique so the
*            receiver can recover both rbStart and numRb.
*
*     File : rg_sch_cmn.c
*
**********************************************************/
uint32_t rgSCHCmnCalcRiv
(
uint8_t bw,
uint8_t rbStart,
uint8_t numRb
)
{
   uint8_t  numRbMinus1 = numRb - 1;
   uint32_t riv;

   if (numRbMinus1 <= bw/2)
   {
      /* "short" allocations: RIV = N * (L-1) + RBstart */
      riv = bw * numRbMinus1 + rbStart;
   }
   else
   {
      /* "long" allocations use the complementary encoding */
      riv = bw * (bw - numRbMinus1) + (bw - rbStart - 1);
   }
   return (riv);
} /* rgSCHCmnCalcRiv */
23384 * @brief This function allocates and copies the RACH response scheduling
23385 * related information into cell control block.
23389 * Function: rgSCHCmnDlCpyRachInfo
23390 * Purpose: This function allocates and copies the RACH response
23391 * scheduling related information into cell control block
23392 * for each DL subframe.
23395 * Invoked by: Scheduler
23397 * @param[in] RgSchCellCb* cell
23398 * @param[in] RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES]
23399 * @param[in] uint8_t raArrSz
/* NOTE(review): elided listing — allocation-failure return, loop braces and
 * several assignments are missing. Code left byte-identical. */
23403 static S16 rgSCHCmnDlCpyRachInfo
23406 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
23410 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
23413 uint16_t subfrmIdx;
23415 uint8_t numSubfrms;
23420 /* Allocate RACH response information for each DL
23421 * subframe in a radio frame */
23422 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cell->rachRspLst,
23423 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1] *
23424 sizeof(RgSchTddRachRspLst));
23430 for(sfnIdx=raArrSz-1; sfnIdx>=0; sfnIdx--)
23432 for(subfrmIdx=0; subfrmIdx < RGSCH_NUM_SUB_FRAMES; subfrmIdx++)
/* NOTE(review): subfrmIdx (the loop counter) is overwritten inside the
 * loop body by a table lookup — intentional in the original per the
 * sentinel check below, but worth confirming against the full source */
23434 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
23435 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
23440 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx],subfrmIdx);
23442 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
23444 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddNumDlSubfrmTbl[ulDlCfgIdx],subfrmIdx);
23445 sfNum = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][subfrmIdx]-1;
23446 numRfs = cell->rachRspLst[sfNum].numRadiofrms;
23447 /* For each DL subframe in which RACH response can
23448 * be sent is updated */
23451 cell->rachRspLst[sfNum].rachRsp[numRfs].sfnOffset =
23452 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset;
23453 for(sfcount=0; sfcount < numSubfrms; sfcount++)
23455 cell->rachRspLst[sfNum].rachRsp[numRfs].\
23456 subframe[sfcount] =
23457 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].\
23460 cell->rachRspLst[sfNum].rachRsp[numRfs].numSubfrms =
23461 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
23462 cell->rachRspLst[sfNum].numRadiofrms++;
23465 /* Copy the subframes to be deleted at ths subframe */
23467 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
23470 cell->rachRspLst[sfNum].delInfo.sfnOffset =
23471 rachRspLst[sfnIdx][subfrmIdx].delInfo.sfnOffset;
23472 for(sfcount=0; sfcount < numSubfrms; sfcount++)
23474 cell->rachRspLst[sfNum].delInfo.subframe[sfcount] =
23475 rachRspLst[sfnIdx][subfrmIdx].delInfo.subframe[sfcount];
23477 cell->rachRspLst[sfNum].delInfo.numSubfrms =
23478 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
23486 * @brief This function determines the iTbs based on the new CFI,
23487 * CQI and BLER based delta iTbs
23491 * Function: rgSchCmnFetchItbs
23492 * Purpose: Fetch the new iTbs when CFI changes.
23494 * @param[in] RgSchCellCb *cell
23495 * @param[in] RgSchCmnDlUe *ueDl
23496 * @param[in] uint8_t cqi
 * @return S32 - the selected iTbs, clamped to cell->thresholds.maxDlItbs
/* NOTE(review): two compile-time signature variants appear below (likely
 * TDD vs FDD); bodies/braces are elided. Code left byte-identical. */
23502 static S32 rgSchCmnFetchItbs
23505 RgSchCmnDlUe *ueDl,
23513 static S32 rgSchCmnFetchItbs
23516 RgSchCmnDlUe *ueDl,
23525 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
23530 /* Special Handling for Spl Sf when CFI is 3 as
23531 * CFI in Spl Sf will be max 2 */
23532 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
23534 if((cellDl->currCfi == 3) ||
23535 ((cell->bwCfg.dlTotalBw <= 10) && (cellDl->currCfi == 1)))
23537 /* Use CFI 2 in this case */
/* deltaiTbs is a BLER-based correction scaled by 100, hence the /100 */
23538 iTbs = (ueDl->laCb[cwIdx].deltaiTbs +
23539 ((*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][2]))[cqi])* 100)/100;
23541 RG_SCH_CHK_ITBS_RANGE(iTbs, RGSCH_NUM_ITBS - 1);
/* CFI unchanged: reuse the cached per-layer iTbs */
23545 iTbs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1];
23547 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
23549 else /* CFI Changed. Update with new iTbs Reset the BLER*/
23552 S32 tmpiTbs = (*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][cfi]))[cqi];
23554 iTbs = (ueDl->laCb[cwIdx].deltaiTbs + tmpiTbs*100)/100;
23556 RG_SCH_CHK_ITBS_RANGE(iTbs, tmpiTbs);
23558 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
/* cache for subsequent TTIs and reset the BLER correction */
23560 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1] = iTbs;
23562 ueDl->lastCfi = cfi;
23563 ueDl->laCb[cwIdx].deltaiTbs = 0;
23570 * @brief This function determines the RBs and Bytes required for BO
23571 * transmission for UEs configured with TM 1/2/6/7.
23575 * Function: rgSCHCmnDlAllocTxRb1Tb1Cw
23576 * Purpose: Allocate TB1 on CW1.
23578 * Reference Parameter effBo is filled with alloced bytes.
23579 * Returns RFAILED if BO not satisfied at all.
23581 * Invoked by: rgSCHCmnDlAllocTxRbTM1/2/6/7
23583 * @param[in] RgSchCellCb *cell
23584 * @param[in] RgSchDlSf *subFrm
23585 * @param[in] RgSchUeCb *ue
23586 * @param[in] uint32_t bo
23587 * @param[out] uint32_t *effBo
23588 * @param[in] RgSchDlHqProcCb *proc
23589 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Allocate TB1 on CW1 for a fresh TX (TM 1/2/6/7 UEs). On success the UE is
 * added to the cell-wide TX allocation list and the per-UE alloc info is
 * filled; *effBo returns the bytes actually satisfiable (see header above). */
23593 static Void rgSCHCmnDlAllocTxRb1Tb1Cw
23600 RgSchDlHqProcCb *proc,
23601 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23604 RgSchDlRbAlloc *allocInfo;
23609 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* 5GTF branch: DCI format B2 for rank-2 UEs, B1 otherwise. */
23611 if (ue->ue5gtfCb.rank == 2)
23613 allocInfo->dciFormat = TFU_DCI_FORMAT_B2;
23617 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
/* Legacy branch: DCI format / RA type selected from the UE's TX mode. */
23620 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
23621 allocInfo->raType);
23623 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
23624 bo, &numRb, effBo);
23625 if (ret == RFAILED)
23627 /* If allocation couldn't be made then return */
23630 /* Adding UE to RbAllocInfo TX Lst */
23631 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
23632 /* Fill UE alloc Info */
23633 allocInfo->rbsReq = numRb;
23634 allocInfo->dlSf = subFrm;
/* VRB-group requirement for the 5GTF allocator. */
23636 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
23644 * @brief This function determines the RBs and Bytes required for BO
23645 * retransmission for UEs configured with TM 1/2/6/7.
23649 * Function: rgSCHCmnDlAllocRetxRb1Tb1Cw
23650 * Purpose: Allocate TB1 on CW1.
23652 * Reference Parameter effBo is filled with alloced bytes.
23653 * Returns RFAILED if BO not satisfied at all.
23655 * Invoked by: rgSCHCmnDlAllocRetxRbTM1/2/6/7
23657 * @param[in] RgSchCellCb *cell
23658 * @param[in] RgSchDlSf *subFrm
23659 * @param[in] RgSchUeCb *ue
23660 * @param[in] uint32_t bo
23661 * @param[out] uint32_t *effBo
23662 * @param[in] RgSchDlHqProcCb *proc
23663 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Allocate TB1 on CW1 for a RETX (TM 1/2/6/7 UEs). On failure the UE+proc
 * are queued on the non-scheduled RETX list for the specific scheduler to
 * handle; on success the UE joins the cell-wide RETX list. */
23667 static Void rgSCHCmnDlAllocRetxRb1Tb1Cw
23674 RgSchDlHqProcCb *proc,
23675 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23678 RgSchDlRbAlloc *allocInfo;
23683 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
23686 /* 5GTF: RETX DCI format same as TX */
23687 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
23688 &allocInfo->raType);
23691 /* Get the Allocation in terms of RBs that are required for
23692 * this retx of TB1 */
23693 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
23695 if (ret == RFAILED)
23697 /* Allocation couldn't be made for Retx */
23698 /* Fix : syed If TxRetx allocation failed then add the UE along with the proc
23699 * to the nonSchdTxRetxUeLst and let spfc scheduler take care of it during
23701 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
23704 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
23705 /* Fill UE alloc Info */
23706 allocInfo->rbsReq = numRb;
23707 allocInfo->dlSf = subFrm;
/* VRB-group requirement for the 5GTF allocator. */
23709 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
23717 * @brief This function determines the RBs and Bytes required for BO
23718 * transmission for UEs configured with TM 1.
23722 * Function: rgSCHCmnDlAllocTxRbTM1
23725 * Reference Parameter effBo is filled with alloced bytes.
23726 * Returns RFAILED if BO not satisfied at all.
23728 * Invoked by: rgSCHCmnDlAllocTxRb
23730 * @param[in] RgSchCellCb *cell
23731 * @param[in] RgSchDlSf *subFrm
23732 * @param[in] RgSchUeCb *ue
23733 * @param[in] uint32_t bo
23734 * @param[out] uint32_t *effBo
23735 * @param[in] RgSchDlHqProcCb *proc
23736 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM1 fresh-TX allocation: single TB on single CW — thin wrapper over
 * rgSCHCmnDlAllocTxRb1Tb1Cw(). */
23740 static Void rgSCHCmnDlAllocTxRbTM1
23747 RgSchDlHqProcCb *proc,
23748 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23751 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23757 * @brief This function determines the RBs and Bytes required for BO
23758 * retransmission for UEs configured with TM 1.
23762 * Function: rgSCHCmnDlAllocRetxRbTM1
23765 * Reference Parameter effBo is filled with alloced bytes.
23766 * Returns RFAILED if BO not satisfied at all.
23768 * Invoked by: rgSCHCmnDlAllocRetxRb
23770 * @param[in] RgSchCellCb *cell
23771 * @param[in] RgSchDlSf *subFrm
23772 * @param[in] RgSchUeCb *ue
23773 * @param[in] uint32_t bo
23774 * @param[out] uint32_t *effBo
23775 * @param[in] RgSchDlHqProcCb *proc
23776 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM1 RETX allocation: single TB on single CW — thin wrapper over
 * rgSCHCmnDlAllocRetxRb1Tb1Cw(). */
23780 static Void rgSCHCmnDlAllocRetxRbTM1
23787 RgSchDlHqProcCb *proc,
23788 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23791 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23797 * @brief This function determines the RBs and Bytes required for BO
23798 * transmission for UEs configured with TM 2.
23802 * Function: rgSCHCmnDlAllocTxRbTM2
23805 * Reference Parameter effBo is filled with alloced bytes.
23806 * Returns RFAILED if BO not satisfied at all.
23808 * Invoked by: rgSCHCmnDlAllocTxRb
23810 * @param[in] RgSchCellCb *cell
23811 * @param[in] RgSchDlSf *subFrm
23812 * @param[in] RgSchUeCb *ue
23813 * @param[in] uint32_t bo
23814 * @param[out] uint32_t *effBo
23815 * @param[in] RgSchDlHqProcCb *proc
23816 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM2 fresh-TX allocation: single TB on single CW — thin wrapper over
 * rgSCHCmnDlAllocTxRb1Tb1Cw(). */
23820 static Void rgSCHCmnDlAllocTxRbTM2
23827 RgSchDlHqProcCb *proc,
23828 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23831 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23837 * @brief This function determines the RBs and Bytes required for BO
23838 * retransmission for UEs configured with TM 2.
23842 * Function: rgSCHCmnDlAllocRetxRbTM2
23845 * Reference Parameter effBo is filled with alloced bytes.
23846 * Returns RFAILED if BO not satisfied at all.
23848 * Invoked by: rgSCHCmnDlAllocRetxRb
23850 * @param[in] RgSchCellCb *cell
23851 * @param[in] RgSchDlSf *subFrm
23852 * @param[in] RgSchUeCb *ue
23853 * @param[in] uint32_t bo
23854 * @param[out] uint32_t *effBo
23855 * @param[in] RgSchDlHqProcCb *proc
23856 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM2 RETX allocation: single TB on single CW — thin wrapper over
 * rgSCHCmnDlAllocRetxRb1Tb1Cw(). */
23860 static Void rgSCHCmnDlAllocRetxRbTM2
23867 RgSchDlHqProcCb *proc,
23868 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23871 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23877 * @brief This function determines the RBs and Bytes required for BO
23878 * transmission for UEs configured with TM 3.
23882 * Function: rgSCHCmnDlAllocTxRbTM3
23885 * Reference Parameter effBo is filled with alloced bytes.
23886 * Returns RFAILED if BO not satisfied at all.
23888 * Invoked by: rgSCHCmnDlAllocTxRb
23890 * @param[in] RgSchCellCb *cell
23891 * @param[in] RgSchDlSf *subFrm
23892 * @param[in] RgSchUeCb *ue
23893 * @param[in] uint32_t bo
23894 * @param[out] uint32_t *effBo
23895 * @param[in] RgSchDlHqProcCb *proc
23896 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 fresh-TX allocation: both TBs of the HARQ proc are free, so defer to
 * the TxTx handler which may schedule up to two codewords. */
23900 static Void rgSCHCmnDlAllocTxRbTM3
23907 RgSchDlHqProcCb *proc,
23908 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23913 /* Both TBs free for TX allocation */
23914 rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo,\
23915 proc, cellWdAllocInfo);
23922 * @brief This function determines the RBs and Bytes required for BO
23923 * retransmission for UEs configured with TM 3.
23927 * Function: rgSCHCmnDlAllocRetxRbTM3
23930 * Reference Parameter effBo is filled with alloced bytes.
23931 * Returns RFAILED if BO not satisfied at all.
23933 * Invoked by: rgSCHCmnDlAllocRetxRb
23935 * @param[in] RgSchCellCb *cell
23936 * @param[in] RgSchDlSf *subFrm
23937 * @param[in] RgSchUeCb *ue
23938 * @param[in] uint32_t bo
23939 * @param[out] uint32_t *effBo
23940 * @param[in] RgSchDlHqProcCb *proc
23941 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 RETX allocation dispatcher: both TBs NACKED -> RetxRetx (2-CW retx);
 * otherwise one TB needs retx and the other may carry a fresh TX. */
23945 static Void rgSCHCmnDlAllocRetxRbTM3
23952 RgSchDlHqProcCb *proc,
23953 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23958 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
23959 (proc->tbInfo[1].state == HQ_TB_NACKED))
23962 DU_LOG("\nDEBUG --> SCH : RETX RB TM3 nack for both hqp %d cell %d \n", proc->procId, proc->hqE->cell->cellId);
23964 /* Both TBs require RETX allocation */
23965 rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo,\
23966 proc, cellWdAllocInfo);
23970 /* One of the TBs need RETX allocation. Other TB may/maynot
23971 * be available for new TX allocation. */
23972 rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo,\
23973 proc, cellWdAllocInfo);
23981 * @brief This function performs the DCI format selection in case of
23982 * Transmit Diversity scheme where there can be more
23983 * than 1 option for DCI format selection.
23987 * Function: rgSCHCmnSlctPdcchFrmt
23988 * Purpose: 1. If DLFS is enabled, then choose TM specific
23989 * DCI format for Transmit diversity. All the
23990 * TM Specific DCI Formats support Type0 and/or
23991 * Type1 resource allocation scheme. DLFS
23992 * supports only Type-0&1 Resource allocation.
23993 * 2. If DLFS is not enabled, select a DCI format
23994 * which is of smaller size. Since Non-DLFS
23995 * scheduler supports all Resource allocation
23996 * schemes, selection is based on efficiency.
23998 * Invoked by: DL UE Allocation by Common Scheduler.
24000 * @param[in] RgSchCellCb *cell
24001 * @param[in] RgSchUeCb *ue
24002 * @param[out] uint8_t *raType
24003 * @return TfuDciFormat
/* Select the DCI format and RA type for transmit diversity from the per-TM
 * option table rgSchCmnDciFrmtOptns: the TM-specific ("spfc") entry when DLFS
 * is enabled and TX-mode transition is complete, else the preferred
 * ("prfrd", smaller-size) entry. *raType is an output; returns the format. */
24006 TfuDciFormat rgSCHCmnSlctPdcchFrmt
24013 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
24016 /* ccpu00140894- Selective DCI Format and RA type should be selected only
24017 * after TX Mode transition is completed*/
24018 if ((cellSch->dl.isDlFreqSel) && (ue->txModeTransCmplt))
/* Table is indexed by txMode-1; txMode is assumed >= 1 here. */
24020 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciRAType;
24021 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciFrmt);
24025 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciRAType;
24026 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciFrmt);
24032 * @brief This function handles Retx allocation in case of TM3 UEs
24033 * where both the TBs were NACKED previously.
24037 * Function: rgSCHCmnDlTM3RetxRetx
24038 * Purpose: If forceTD flag enabled
24039 * TD for TB1 on CW1.
24041 * DCI Frmt 2A and RA Type 0
24042 * RI layered SM of both TBs on 2 CWs
24043 * Add UE to cell Alloc Info.
24044 * Fill UE alloc Info.
24047 * Successful allocation is indicated by non-zero effBo value.
24049 * Invoked by: rgSCHCmnDlAllocRbTM3
24051 * @param[in] RgSchCellCb *cell
24052 * @param[in] RgSchDlSf *subFrm
24053 * @param[in] RgSchUeCb *ue
24054 * @param[in] uint32_t bo
24055 * @param[out] uint32_t *effBo
24056 * @param[in] RgSchDlHqProcCb *proc
24057 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 retransmission of BOTH NACKED TBs: always DCI format 2A / RA type 0,
 * two codewords regardless of current rank (ccpu00123927). Success is
 * signalled via a non-zero *effBo. */
24061 static Void rgSCHCmnDlTM3RetxRetx
24068 RgSchDlHqProcCb *proc,
24069 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24073 RgSchDlRbAlloc *allocInfo;
24078 uint8_t precInfoAntIdx;
24082 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24084 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
24086 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
24087 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
24089 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
24091 if (ret == RFAILED)
24093 /* Allocation couldn't be made for Retx */
24094 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24097 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
/* Total layers = sum of both TBs' previously used layer counts. */
24098 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
24099 #ifdef FOUR_TX_ANTENNA
24100 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1 should
24101 * have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
24102 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
24105 proc->cwSwpEnabled = TRUE;
/* getPrecInfoFunc[0][] = TM3 precoding-info derivation, indexed by
 * antenna-port count (numTxAntPorts/2 - 1). */
24108 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24109 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
24113 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24116 /* Adding UE to allocInfo RETX Lst */
24117 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24119 /* Fill UE alloc Info scratch pad */
24120 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24121 precInfo, noTxLyrs, subFrm);
24128 * @brief This function handles Retx allocation in case of TM4 UEs
24129 * where both the TBs were NACKED previously.
24133 * Function: rgSCHCmnDlTM4RetxRetx
24134 * Purpose: If forceTD flag enabled
24135 * TD for TB1 on CW1.
24137 * DCI Frmt 2 and RA Type 0
24139 * 1 layer SM of TB1 on CW1.
24141 * RI layered SM of both TBs on 2 CWs
24142 * Add UE to cell Alloc Info.
24143 * Fill UE alloc Info.
24146 * Successful allocation is indicated by non-zero effBo value.
24148 * Invoked by: rgSCHCmnDlAllocRbTM4
24150 * @param[in] RgSchCellCb *cell
24151 * @param[in] RgSchDlSf *subFrm
24152 * @param[in] RgSchUeCb *ue
24153 * @param[in] uint32_t bo
24154 * @param[out] uint32_t *effBo
24155 * @param[in] RgSchDlHqProcCb *proc
24156 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 retransmission of BOTH NACKED TBs: DCI format 2 / RA type 0, two
 * codewords irrespective of the reported RI. Mirrors rgSCHCmnDlTM3RetxRetx
 * but uses the TM4 precoding-info table (getPrecInfoFunc[1][]). */
24160 static Void rgSCHCmnDlTM4RetxRetx
24167 RgSchDlHqProcCb *proc,
24168 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24172 RgSchDlRbAlloc *allocInfo;
24174 Bool swpFlg = FALSE;
24176 #ifdef FOUR_TX_ANTENNA
24177 uint8_t precInfoAntIdx;
24183 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24185 /* Irrespective of RI Schedule both CWs */
24186 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
24187 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
24189 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
24191 if (ret == RFAILED)
24193 /* Allocation couldn't be made for Retx */
24194 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* Total layers = sum of both TBs' previously used layer counts. */
24197 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
24199 #ifdef FOUR_TX_ANTENNA
24200 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1
24201 * should have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
24202 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
24205 proc->cwSwpEnabled = TRUE;
24207 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24208 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
24212 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24215 /* Adding UE to allocInfo RETX Lst */
24216 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24218 /* Fill UE alloc Info scratch pad */
24219 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24220 precInfo, noTxLyrs, subFrm);
24228 * @brief This function determines Transmission attributes
24229 * incase of Spatial multiplexing for TX and RETX TBs.
24233 * Function: rgSCHCmnDlSMGetAttrForTxRetx
24234 * Purpose: 1. Reached here for a TM3/4 UE's HqP whose one of the TBs is
24235 * NACKED and the other TB is either NACKED or WAITING.
24236 * 2. Select the NACKED TB for RETX allocation.
24237 * 3. Allocation preference for RETX TB by mapping it to a better
24238 * CW (better in terms of efficiency).
24239 * 4. Determine the state of the other TB.
24240 * Determine if swapFlag were to be set.
24241 * Swap flag would be set if Retx TB is cross
24243 * 5. If UE has new data available for TX and if the other TB's state
24244 * is ACKED then set furtherScope as TRUE.
24246 * Invoked by: rgSCHCmnDlTM3[4]TxRetx
24248 * @param[in] RgSchUeCb *ue
24249 * @param[in] RgSchDlHqProcCb *proc
24250 * @param[out] RgSchDlHqTbCb **retxTb
24251 * @param[out] RgSchDlHqTbCb **txTb
24252 * @param[out] Bool *frthrScp
24253 * @param[out] Bool *swpFlg
/* Classify the two TBs of a TM3/TM4 HARQ proc for a Tx+Retx allocation:
 * the NACKED TB becomes *retxTb, the other becomes *txTb. *frthrScp is set
 * from hasNewTxData only when the other TB is ACKED (i.e. fresh TX possible
 * alongside the retx). Codeword swap is currently disabled for TM3/TM4. */
24257 static Void rgSCHCmnDlSMGetAttrForTxRetx
24260 RgSchDlHqProcCb *proc,
24261 RgSchDlHqTbCb **retxTb,
24262 RgSchDlHqTbCb **txTb,
24267 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,proc->hqE->cell);
24268 RgSchDlRbAlloc *allocInfo;
/* Case 1: TB0 is the NACKED (retx) TB. */
24271 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24273 *retxTb = &proc->tbInfo[0];
24274 *txTb = &proc->tbInfo[1];
24275 /* TENB_BRDCM_TM4- Currently disabling swapflag for TM3/TM4, since
24276 * HqFeedback processing does not consider a swapped hq feedback */
24277 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 1))
24280 proc->cwSwpEnabled = TRUE;
24282 if (proc->tbInfo[1].state == HQ_TB_ACKED)
24284 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
24285 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
/* Case 2 (mirror): TB1 is the NACKED (retx) TB. */
24290 *retxTb = &proc->tbInfo[1];
24291 *txTb = &proc->tbInfo[0];
24292 /* TENB_BRDCM_TM4 - Currently disabling swapflag for TM3/TM4, since
24293 * HqFeedback processing does not consider a swapped hq feedback */
24294 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 0))
24297 proc->cwSwpEnabled = TRUE;
24299 if (proc->tbInfo[0].state == HQ_TB_ACKED)
24301 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
24302 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
24310 * @brief Determine Precoding information for TM3 2 TX Antenna.
24314 * Function: rgSCHCmnDlTM3PrecInf2
24317 * Invoked by: rgSCHCmnDlGetAttrForTM3
24319 * @param[in] RgSchUeCb *ue
24320 * @param[in] uint8_t numTxLyrs
24321 * @param[in] Bool bothCwEnbld
/* Derives the DCI 'precoding information' bits for TM3 with 2 TX antennas
 * (presumably per 36.212 Sec 5.3.3.1.5A — TODO confirm against spec). */
24325 static uint8_t rgSCHCmnDlTM3PrecInf2
24339 * @brief Determine Precoding information for TM4 2 TX Antenna.
24343 * Function: rgSCHCmnDlTM4PrecInf2
24344 * Purpose: To determine a logic of deriving precoding index
24345 * information from 36.212 table 5.3.3.1.5-4
24347 * Invoked by: rgSCHCmnDlGetAttrForTM4
24349 * @param[in] RgSchUeCb *ue
24350 * @param[in] uint8_t numTxLyrs
24351 * @param[in] Bool bothCwEnbld
/* Derive the TM4 / 2-TX-antenna precoding-information index (36.212 table
 * 5.3.3.1.5-4, per the header above) from reported RI/PMI vs. the number of
 * layers actually being transmitted. */
24355 static uint8_t rgSCHCmnDlTM4PrecInf2
24363 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* RI matches the scheduled layer count: use the reported PMI directly. */
24367 if (ueDl->mimoInfo.ri == numTxLyrs)
24369 if (ueDl->mimoInfo.ri == 2)
24371 /* PrecInfo corresponding to 2 CW
24373 if (ue->mimoInfo.puschFdbkVld)
24379 precIdx = ueDl->mimoInfo.pmi - 1;
24384 /* PrecInfo corresponding to 1 CW
24386 if (ue->mimoInfo.puschFdbkVld)
24392 precIdx = ueDl->mimoInfo.pmi + 1;
/* Scheduling fewer layers than the reported RI: pick a column of the
 * reported precoding matrix (the MAX-CQI column). */
24396 else if (ueDl->mimoInfo.ri > numTxLyrs)
24398 /* In case of choosing among the columns of a
24399 * precoding matrix, choose the column corresponding
24400 * to the MAX-CQI */
24401 if (ue->mimoInfo.puschFdbkVld)
24407 precIdx = (ueDl->mimoInfo.pmi- 1)* 2 + 1;
24410 else /* if RI < numTxLyrs */
24412 precIdx = (ueDl->mimoInfo.pmi < 2)? 0:1;
24419 * @brief Determine Precoding information for TM3 4 TX Antenna.
24423 * Function: rgSCHCmnDlTM3PrecInf4
24424 * Purpose: To determine a logic of deriving precoding index
24425 * information from 36.212 table 5.3.3.1.5A-2
24427 * Invoked by: rgSCHCmnDlGetAttrForTM3
24429 * @param[in] RgSchUeCb *ue
24430 * @param[in] uint8_t numTxLyrs
24431 * @param[in] Bool bothCwEnbld
/* Derive the TM3 / 4-TX-antenna precoding-information index (36.212 table
 * 5.3.3.1.5A-2, per the header above). */
24435 static uint8_t rgSCHCmnDlTM3PrecInf4
/* Two-codeword transmission: index is a direct function of layer count. */
24448 precIdx = numTxLyrs - 2;
24450 else /* one 1 CW transmission */
24459 * @brief Determine Precoding information for TM4 4 TX Antenna.
24463 * Function: rgSCHCmnDlTM4PrecInf4
24464 * Purpose: To determine a logic of deriving precoding index
24465 * information from 36.212 table 5.3.3.1.5-5
24467 * Invoked by: rgSCHCmnDlGetAttrForTM4
24469 * @param[in] RgSchUeCb *ue
24470 * @param[in] uint8_t numTxLyrs
24471 * @param[in] Bool bothCwEnbld
/* Derive the TM4 / 4-TX-antenna precoding-information index (36.212 table
 * 5.3.3.1.5-5, per the header above). */
24475 static uint8_t rgSCHCmnDlTM4PrecInf4
24483 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24484 uint8_t precInfoBaseIdx, precIdx;
/* Base index: 16 when PUSCH feedback is valid (presumably the "precoding
 * according to latest PMI report" code point — confirm against spec),
 * else the reported PMI itself. */
24487 precInfoBaseIdx = (ue->mimoInfo.puschFdbkVld)? (16):
24488 (ueDl->mimoInfo.pmi);
/* 2-CW case: each extra layer shifts the index by a 17-entry band. */
24491 precIdx = precInfoBaseIdx + (numTxLyrs-2)*17;
24493 else /* one 1 CW transmission */
24495 precInfoBaseIdx += 1;
24496 precIdx = precInfoBaseIdx + (numTxLyrs-1)*17;
24503 * @brief This function determines Transmission attributes
24504 * incase of TM3 scheduling.
24508 * Function: rgSCHCmnDlGetAttrForTM3
24509 * Purpose: Determine retx TB and tx TB based on TB states.
24510 * If forceTD enabled
24511 * perform only retx TB allocation.
24512 * If retxTB == TB2 then DCI Frmt = 2A, RA Type = 0.
24513 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
24515 * perform retxTB allocation on CW1.
24517 * Determine further Scope and Swap Flag attributes
24518 * assuming a 2 CW transmission of RetxTB and new Tx TB.
24519 * If no further scope for new TX allocation
24520 * Allocate only retx TB using 2 layers if
24521 * this TB was previously transmitted using 2 layers AND
24522 * number of Tx antenna ports == 4.
24523 * otherwise do single layer precoding.
24525 * Invoked by: rgSCHCmnDlTM3TxRetx
24527 * @param[in] RgSchUeCb *ue
24528 * @param[in] RgSchDlHqProcCb *proc
24529 * @param[out] uint8_t *numTxLyrs
24530 * @param[out] Bool *isTraDiv
24531 * @param[out] uint8_t *prcdngInf
24532 * @param[out] uint8_t *raType
/* Compute TM3 Tx+Retx transmission attributes (see header above): which TB
 * retransmits, DCI format, RA type, layer count, precoding info, and whether
 * there is further scope (*frthrScp) for a fresh TX on the other TB.
 * SPS procs, forceTD and LAA cells fall back to transmit diversity. */
24536 static Void rgSCHCmnDlGetAttrForTM3
24540 RgSchDlHqProcCb *proc,
24541 uint8_t *numTxLyrs,
24542 TfuDciFormat *dciFrmt,
24543 uint8_t *prcdngInf,
24544 RgSchDlHqTbCb **retxTb,
24545 RgSchDlHqTbCb **txTb,
24551 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24552 uint8_t precInfoAntIdx;
24555 /* Avoiding Tx-Retx for LAA cell as firstSchedTime is associated with
24557 /* Integration_fix: SPS Proc shall always have only one Cw */
24559 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
24560 (ueDl->mimoInfo.forceTD))
24562 ||(TRUE == rgSCHLaaSCellEnabled(cell))
24566 if ((ueDl->mimoInfo.forceTD)
24568 || (TRUE == rgSCHLaaSCellEnabled(cell))
24573 /* Transmit Diversity. Format based on dlfsEnabled
24574 * No further scope */
24575 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24577 *retxTb = &proc->tbInfo[0];
24578 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* TB2 cannot be signalled by the TD formats: force format 2A. */
24582 *retxTb = &proc->tbInfo[1];
24583 *dciFrmt = TFU_DCI_FORMAT_2A;
24584 *raType = RG_SCH_CMN_RA_TYPE0;
24592 /* Determine the 2 TB transmission attributes */
24593 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
24597 /* Prefer allocation of RETX TB over 2 layers rather than combining
24598 * it with a new TX. */
24599 if ((ueDl->mimoInfo.ri == 2)
24600 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
24602 /* Allocate TB on CW1, using 2 Lyrs,
24603 * Format 2A, precoding accordingly */
/* Further-scope path: layers = retx TB layers + new-TX CW layers. */
24609 *numTxLyrs= ((*retxTb)->numLyrs + ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)].noLyr);
/* 3-layer cases need a codeword swap so CW0 carries the 1-layer TB. */
24611 if((*retxTb)->tbIdx == 0 && ((*retxTb)->numLyrs == 2 ) && *numTxLyrs ==3)
24614 proc->cwSwpEnabled = TRUE;
24616 else if((*retxTb)->tbIdx == 1 && ((*retxTb)->numLyrs == 1) && *numTxLyrs ==3)
24619 proc->cwSwpEnabled = TRUE;
24623 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24624 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])\
24625 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
24626 *dciFrmt = TFU_DCI_FORMAT_2A;
24627 *raType = RG_SCH_CMN_RA_TYPE0;
24629 else /* frthrScp == FALSE */
24631 if (cell->numTxAntPorts == 2)
24633 /* Transmit Diversity */
24635 if ((*retxTb)->tbIdx == 0)
24637 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24641 /* If retxTB is TB2 then use format 2A */
24642 *dciFrmt = TFU_DCI_FORMAT_2A;
24643 *raType = RG_SCH_CMN_RA_TYPE0;
24648 else /* NumAntPorts == 4 */
24650 if ((*retxTb)->numLyrs == 2)
24652 /* Allocate TB on CW1, using 2 Lyrs,
24653 * Format 2A, precoding accordingly */
24655 *dciFrmt = TFU_DCI_FORMAT_2A;
24656 *raType = RG_SCH_CMN_RA_TYPE0;
24657 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24658 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, *numTxLyrs, *frthrScp);
24663 /* Transmit Diversity */
24665 if ((*retxTb)->tbIdx == 0)
24667 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24671 /* If retxTB is TB2 then use format 2A */
24672 *dciFrmt = TFU_DCI_FORMAT_2A;
24673 *raType = RG_SCH_CMN_RA_TYPE0;
24687 * @brief This function determines Transmission attributes
24688 * incase of TM4 scheduling.
24692 * Function: rgSCHCmnDlGetAttrForTM4
24693 * Purpose: Determine retx TB and tx TB based on TB states.
24694 * If forceTD enabled
24695 * perform only retx TB allocation.
24696 * If retxTB == TB2 then DCI Frmt = 2, RA Type = 0.
24697 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
24699 * perform retxTB allocation on CW1.
24701 * Determine further Scope and Swap Flag attributes
24702 * assuming a 2 CW transmission of RetxTB and new Tx TB.
24703 * If no further scope for new TX allocation
24704 * Allocate only retx TB using 2 layers if
24705 * this TB was previously transmitted using 2 layers AND
24706 * number of Tx antenna ports == 4.
24707 * otherwise do single layer precoding.
24709 * Invoked by: rgSCHCmnDlTM4TxRetx
24711 * @param[in] RgSchUeCb *ue
24712 * @param[in] RgSchDlHqProcCb *proc
24713 * @param[out] uint8_t *numTxLyrs
24714 * @param[out] Bool *isTraDiv
24715 * @param[out] uint8_t *prcdngInf
24716 * @param[out] uint8_t *raType
/* Compute TM4 Tx+Retx transmission attributes (see header above). Same
 * structure as rgSCHCmnDlGetAttrForTM3 but: DCI format 2 instead of 2A on
 * the SM paths, an explicit RI==1 single-layer branch, and the TM4
 * precoding-info table getPrecInfoFunc[1][]. */
24720 static Void rgSCHCmnDlGetAttrForTM4
24724 RgSchDlHqProcCb *proc,
24725 uint8_t *numTxLyrs,
24726 TfuDciFormat *dciFrmt,
24727 uint8_t *prcdngInf,
24728 RgSchDlHqTbCb **retxTb,
24729 RgSchDlHqTbCb **txTb,
24735 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24736 uint8_t precInfoAntIdx;
24740 /* Integration_fix: SPS Proc shall always have only one Cw */
24742 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
24743 (ueDl->mimoInfo.forceTD))
24745 ||(TRUE == rgSCHLaaSCellEnabled(cell))
24749 if ((ueDl->mimoInfo.forceTD)
24751 || (TRUE == rgSCHLaaSCellEnabled(cell))
24756 /* Transmit Diversity. Format based on dlfsEnabled
24757 * No further scope */
24758 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24760 *retxTb = &proc->tbInfo[0];
24761 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* TB2 cannot be signalled by the TD formats: force format 2. */
24765 *retxTb = &proc->tbInfo[1];
24766 *dciFrmt = TFU_DCI_FORMAT_2;
24767 *raType = RG_SCH_CMN_RA_TYPE0;
/* Rank-1 UE: single-layer closed-loop precoding, never a second CW. */
24775 if (ueDl->mimoInfo.ri == 1)
24777 /* single layer precoding. Format 2.
24778 * No further scope */
24779 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24781 *retxTb = &proc->tbInfo[0];
24785 *retxTb = &proc->tbInfo[1];
24788 *dciFrmt = TFU_DCI_FORMAT_2;
24789 *raType = RG_SCH_CMN_RA_TYPE0;
24791 *prcdngInf = 0; /*When RI= 1*/
24795 /* Determine the 2 TB transmission attributes */
24796 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
24798 *dciFrmt = TFU_DCI_FORMAT_2;
24799 *raType = RG_SCH_CMN_RA_TYPE0;
24802 /* Prefer allocation of RETX TB over 2 layers rather than combining
24803 * it with a new TX. */
24804 if ((ueDl->mimoInfo.ri == 2)
24805 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
24807 /* Allocate TB on CW1, using 2 Lyrs,
24808 * Format 2, precoding accordingly */
24812 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24813 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])
24814 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
24816 else /* frthrScp == FALSE */
24818 if (cell->numTxAntPorts == 2)
24820 /* single layer precoding. Format 2. */
24822 *prcdngInf = (getPrecInfoFunc[1][cell->numTxAntPorts/2 - 1])\
24823 (cell, ue, *numTxLyrs, *frthrScp);
24826 else /* NumAntPorts == 4 */
24828 if ((*retxTb)->numLyrs == 2)
24830 /* Allocate TB on CW1, using 2 Lyrs,
24831 * Format 2, precoding accordingly */
24833 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24834 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
24835 (cell, ue, *numTxLyrs, *frthrScp);
24840 /* Allocate TB with 1 lyr precoding,
24841 * Format 2, precoding info accordingly */
24843 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24844 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
24845 (cell, ue, *numTxLyrs, *frthrScp);
24856 * @brief This function handles Retx allocation in case of TM3 UEs
24857 * where previously one of the TBs was NACKED and the other
24858 * TB is either ACKED/WAITING.
24862 * Function: rgSCHCmnDlTM3TxRetx
24863 * Purpose: Determine the TX attributes for TM3 TxRetx Allocation.
24864 * If further Scope for New Tx Allocation on other TB
24865 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
24866 * Add UE to cell wide RetxTx List.
24868 * Perform only RETX alloc'n on CW1.
24869 * Add UE to cell wide Retx List.
24871 * effBo is set to a non-zero value if allocation is
24874 * Invoked by: rgSCHCmnDlAllocRbTM3
24876 * @param[in] RgSchCellCb *cell
24877 * @param[in] RgSchDlSf *subFrm
24878 * @param[in] RgSchUeCb *ue
24879 * @param[in] uint32_t bo
24880 * @param[out] uint32_t *effBo
24881 * @param[in] RgSchDlHqProcCb *proc
24882 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 Tx+Retx: one TB needs retx, the other may carry a fresh TX. Attributes
 * come from rgSCHCmnDlGetAttrForTM3; with further scope a 2-CW Tx+Retx
 * allocation is attempted, otherwise a 1-CW retx-only allocation. Failures
 * queue the UE on the non-scheduled RETX list. */
24886 static Void rgSCHCmnDlTM3TxRetx
24893 RgSchDlHqProcCb *proc,
24894 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24898 RgSchDlRbAlloc *allocInfo;
24900 RgSchDlHqTbCb *retxTb, *txTb;
24909 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24912 /* Determine the transmission attributes */
24913 rgSCHCmnDlGetAttrForTM3(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
24914 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
24915 &allocInfo->raType);
24920 DU_LOG("\nDEBUG --> SCH : TX RETX called from proc %d cell %d \n",proc->procId, cell->cellId);
/* Further scope: retx TB on one CW, fresh TX on the other. */
24922 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
24924 if (ret == RFAILED)
24926 /* Allocation couldn't be made for Retx */
24927 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24930 /* Adding UE to RbAllocInfo RETX-TX Lst */
24931 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: retx-only allocation on a single CW. */
24935 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
24936 numTxLyrs, &numRb, effBo);
24937 if (ret == RFAILED)
24939 /* Allocation couldn't be made for Retx */
24940 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24944 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24947 /* Adding UE to allocInfo RETX Lst */
24948 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24951 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24952 prcdngInf, numTxLyrs, subFrm);
24959 * @brief This function handles Retx allocation in case of TM4 UEs
24960 * where previously one of the TBs was NACKED and the other
24961 * TB is either ACKED/WAITING.
24965 * Function: rgSCHCmnDlTM4TxRetx
24966 * Purpose: Determine the TX attributes for TM4 TxRetx Allocation.
24967 * If further Scope for New Tx Allocation on other TB
24968 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
24969 * Add UE to cell wide RetxTx List.
24971 * Perform only RETX alloc'n on CW1.
24972 * Add UE to cell wide Retx List.
24974 * effBo is set to a non-zero value if allocation is
24977 * Invoked by: rgSCHCmnDlAllocRbTM4
24979 * @param[in] RgSchCellCb *cell
24980 * @param[in] RgSchDlSf *subFrm
24981 * @param[in] RgSchUeCb *ue
24982 * @param[in] uint32_t bo
24983 * @param[out] uint32_t *effBo
24984 * @param[in] RgSchDlHqProcCb *proc
24985 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 Tx+Retx: same flow as rgSCHCmnDlTM3TxRetx but driven by
 * rgSCHCmnDlGetAttrForTM4. With further scope a 2-CW Tx+Retx allocation is
 * attempted, otherwise a 1-CW retx-only allocation; failures queue the UE
 * on the non-scheduled RETX list for the specific scheduler. */
24989 static Void rgSCHCmnDlTM4TxRetx
24996 RgSchDlHqProcCb *proc,
24997 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25001 RgSchDlRbAlloc *allocInfo;
25003 RgSchDlHqTbCb *retxTb, *txTb;
25011 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25014 /* Determine the transmission attributes */
25015 rgSCHCmnDlGetAttrForTM4(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
25016 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
25017 &allocInfo->raType);
/* Further scope: retx TB on one CW, fresh TX on the other. */
25021 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
25023 if (ret == RFAILED)
25025 /* Fix : syed If TxRetx allocation failed then add the UE along
25026 * with the proc to the nonSchdTxRetxUeLst and let spfc scheduler
25027 * take care of it during finalization. */
25028 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25031 /* Adding UE to RbAllocInfo RETX-TX Lst */
25032 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: retx-only allocation on a single CW. */
25036 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
25037 numTxLyrs, &numRb, effBo);
25038 if (ret == RFAILED)
25040 /* Allocation couldn't be made for Retx */
25041 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25045 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25048 /* Adding UE to allocInfo RETX Lst */
25049 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
25052 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
25053 prcdngInf, numTxLyrs, subFrm)
25060 * @brief This function handles fresh TX allocation in case of TM3 UEs
25061 * where both the TBs are free for new transmission
25066 * Function: rgSCHCmnDlTM3TxTx
25067 * Purpose: Reached here for a TM3 UE's HqP's fresh allocation
25068 * where both the TBs are free for TX scheduling.
25069 * If forceTD flag is set
25070 * perform TD on CW1 with TB1.
25075 * RI layered precoding 2 TB on 2 CW.
25076 * Set precoding info.
25077 * Add UE to cellAllocInfo.
25078 * Fill ueAllocInfo.
25080 * effBo is set to a non-zero value if allocation is
25083 * Invoked by: rgSCHCmnDlAllocRbTM3
25085 * @param[in] RgSchCellCb *cell
25086 * @param[in] RgSchDlSf *subFrm
25087 * @param[in] RgSchUeCb *ue
25088 * @param[in] uint32_t bo
25089 * @param[out] uint32_t *effBo
25090 * @param[in] RgSchDlHqProcCb *proc
25091 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25095 static Void rgSCHCmnDlTM3TxTx
25102 RgSchDlHqProcCb *proc,
25103 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25106 RgSchCmnDlUe *ueDl;
25107 RgSchDlRbAlloc *allocInfo;
25112 uint8_t precInfoAntIdx;
25116 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25117 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25119 /* Integration_fix: SPS Proc shall always have only one Cw */
25121 #ifdef FOUR_TX_ANTENNA
25122 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25123 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
25125 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25126 (ueDl->mimoInfo.forceTD))
25129 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
25132 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
25133 &allocInfo->raType);
25134 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25135 bo, &numRb, effBo);
25136 if (ret == RFAILED)
25138 /* If allocation couldn't be made then return */
25142 precInfo = 0; /* TD */
25144 else /* Precoding */
25146 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
25147 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
25149 /* Spatial Multiplexing using 2 CWs */
25150 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
25151 if (ret == RFAILED)
25153 /* If allocation couldn't be made then return */
25156 noTxLyrs = ueDl->mimoInfo.ri;
25157 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
25158 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, getPrecInfoFunc[0], precInfoAntIdx);
25159 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
25163 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25166 /* Adding UE to RbAllocInfo TX Lst */
25167 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25169 /* Fill UE allocInfo scrath pad */
25170 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
25171 precInfo, noTxLyrs, subFrm);
25178 * @brief This function handles fresh TX allocation in case of TM4 UEs
25179 * where both the TBs were previously ACKED (free for new TX).
25184 * Function: rgSCHCmnDlTM4TxTx
25185 * Purpose: Reached here for a TM4 UE's HqP's fresh allocation
25186 * where both the TBs are free for TX scheduling.
25187 * If forceTD flag is set
25188 * perform TD on CW1 with TB1.
25194 * Single layer precoding of TB1 on CW1.
25195 * Set precoding info.
25197 * RI layered precoding 2 TB on 2 CW.
25198 * Set precoding info.
25199 * Add UE to cellAllocInfo.
25200 * Fill ueAllocInfo.
25202 * effBo is set to a non-zero value if allocation is
25205 * Invoked by: rgSCHCmnDlAllocRbTM4
25207 * @param[in] RgSchCellCb *cell
25208 * @param[in] RgSchDlSf *subFrm
25209 * @param[in] RgSchUeCb *ue
25210 * @param[in] uint32_t bo
25211 * @param[out] uint32_t *effBo
25212 * @param[in] RgSchDlHqProcCb *proc
25213 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25217 static Void rgSCHCmnDlTM4TxTx
25224 RgSchDlHqProcCb *proc,
25225 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25228 RgSchCmnDlUe *ueDl;
25229 RgSchDlRbAlloc *allocInfo;
25233 uint8_t precInfoAntIdx;
25238 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25239 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25241 /* Integration_fix: SPS Proc shall always have only one Cw */
25243 #ifdef FOUR_TX_ANTENNA
25244 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25245 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
25247 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25248 (ueDl->mimoInfo.forceTD))
25251 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
25254 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
25255 &allocInfo->raType);
25257 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25258 bo, &numRb, effBo);
25259 if (ret == RFAILED)
25261 /* If allocation couldn't be made then return */
25265 precInfo = 0; /* TD */
25267 else /* Precoding */
25269 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
25270 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
/* RI==1: closed-loop SM with a single layer, still DCI format 2 */
25272 if (ueDl->mimoInfo.ri == 1)
25274 /* Single Layer SM using FORMAT 2 */
25275 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25276 bo, &numRb, effBo);
25277 if (ret == RFAILED)
25279 /* If allocation couldn't be made then return */
25283 precInfo = 0; /* PrecInfo as 0 for RI=1*/
25287 /* Spatial Multiplexing using 2 CWs */
25288 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
25289 if (ret == RFAILED)
25291 /* If allocation couldn't be made then return */
25294 noTxLyrs = ueDl->mimoInfo.ri;
/* index 1 of getPrecInfoFunc selects the TM4 (closed-loop) precoding-info table.
 * NOTE(review): unlike the TM3 path, no RGSCH_ARRAY_BOUND_CHECK precedes this
 * lookup — confirm precInfoAntIdx cannot exceed the table bounds here. */
25295 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
25296 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
25302 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25305 /* Adding UE to RbAllocInfo TX Lst */
25306 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25309 /* Fill UE allocInfo scratch pad */
25310 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
25311 precInfo, noTxLyrs, subFrm);
25318 * @brief This function determines the RBs and Bytes required for BO
25319 * transmission for UEs configured with TM 4.
25323 * Function: rgSCHCmnDlAllocTxRbTM4
25324 * Purpose: Invokes the functionality particular to the
25325 * current state of the TBs of the "proc".
25327 * Reference Parameter effBo is filled with alloced bytes.
25328 * Returns RFAILED if BO not satisfied at all.
25330 * Invoked by: rgSCHCmnDlAllocTxRb
25332 * @param[in] RgSchCellCb *cell
25333 * @param[in] RgSchDlSf *subFrm
25334 * @param[in] RgSchUeCb *ue
25335 * @param[in] uint32_t bo
25336 * @param[out] uint32_t *effBo
25337 * @param[in] RgSchDlHqProcCb *proc
25338 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25342 static Void rgSCHCmnDlAllocTxRbTM4
25349 RgSchDlHqProcCb *proc,
25350 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Thin dispatcher: fresh TX means both TBs are free, so delegate to TxTx */
25354 /* Both TBs free for TX allocation */
25355 rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo,\
25356 proc, cellWdAllocInfo);
25363 * @brief This function determines the RBs and Bytes required for BO
25364 * retransmission for UEs configured with TM 4.
25368 * Function: rgSCHCmnDlAllocRetxRbTM4
25369 * Purpose: Invokes the functionality particular to the
25370 * current state of the TBs of the "proc".
25372 * Reference Parameter effBo is filled with alloced bytes.
25373 * Returns RFAILED if BO not satisfied at all.
25375 * Invoked by: rgSCHCmnDlAllocRetxRb
25377 * @param[in] RgSchCellCb *cell
25378 * @param[in] RgSchDlSf *subFrm
25379 * @param[in] RgSchUeCb *ue
25380 * @param[in] uint32_t bo
25381 * @param[out] uint32_t *effBo
25382 * @param[in] RgSchDlHqProcCb *proc
25383 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25387 static Void rgSCHCmnDlAllocRetxRbTM4
25394 RgSchDlHqProcCb *proc,
25395 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Dispatch on HARQ TB states: both NACKED -> RetxRetx, else mixed Tx/Retx */
25399 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
25400 (proc->tbInfo[1].state == HQ_TB_NACKED))
25402 /* Both TBs require RETX allocation */
25403 rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo,\
25404 proc, cellWdAllocInfo);
25408 /* One of the TBs need RETX allocation. Other TB may/maynot
25409 * be available for new TX allocation. */
25410 rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo,\
25411 proc, cellWdAllocInfo);
25420 * @brief This function determines the RBs and Bytes required for BO
25421 * transmission for UEs configured with TM 5.
25425 * Function: rgSCHCmnDlAllocTxRbTM5
25428 * Reference Parameter effBo is filled with alloced bytes.
25429 * Returns RFAILED if BO not satisfied at all.
25431 * Invoked by: rgSCHCmnDlAllocTxRb
25433 * @param[in] RgSchCellCb *cell
25434 * @param[in] RgSchDlSf *subFrm
25435 * @param[in] RgSchUeCb *ue
25436 * @param[in] uint32_t bo
25437 * @param[out] uint32_t *effBo
25438 * @param[in] RgSchDlHqProcCb *proc
25439 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25443 static Void rgSCHCmnDlAllocTxRbTM5
25450 RgSchDlHqProcCb *proc,
25451 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM5 is unsupported by this scheduler: reaching here is an error,
 * logged only when the debug error class is compiled in. */
25454 #if (ERRCLASS & ERRCLS_DEBUG)
25455 DU_LOG("\nERROR --> SCH : Invalid TM 5 for CRNTI:%d",ue->ueId);
25462 * @brief This function determines the RBs and Bytes required for BO
25463 * retransmission for UEs configured with TM 5.
25467 * Function: rgSCHCmnDlAllocRetxRbTM5
25470 * Reference Parameter effBo is filled with alloced bytes.
25471 * Returns RFAILED if BO not satisfied at all.
25473 * Invoked by: rgSCHCmnDlAllocRetxRb
25475 * @param[in] RgSchCellCb *cell
25476 * @param[in] RgSchDlSf *subFrm
25477 * @param[in] RgSchUeCb *ue
25478 * @param[in] uint32_t bo
25479 * @param[out] uint32_t *effBo
25480 * @param[in] RgSchDlHqProcCb *proc
25481 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25485 static Void rgSCHCmnDlAllocRetxRbTM5
25492 RgSchDlHqProcCb *proc,
25493 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM5 is unsupported: error stub mirroring rgSCHCmnDlAllocTxRbTM5 */
25496 #if (ERRCLASS & ERRCLS_DEBUG)
25497 DU_LOG("\nERROR --> SCH : Invalid TM 5 for CRNTI:%d",ue->ueId);
25505 * @brief This function determines the RBs and Bytes required for BO
25506 * transmission for UEs configured with TM 6.
25510 * Function: rgSCHCmnDlAllocTxRbTM6
25513 * Reference Parameter effBo is filled with alloced bytes.
25514 * Returns RFAILED if BO not satisfied at all.
25516 * Invoked by: rgSCHCmnDlAllocTxRb
25518 * @param[in] RgSchCellCb *cell
25519 * @param[in] RgSchDlSf *subFrm
25520 * @param[in] RgSchUeCb *ue
25521 * @param[in] uint32_t bo
25522 * @param[out] uint32_t *effBo
25523 * @param[in] RgSchDlHqProcCb *proc
25524 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25528 static Void rgSCHCmnDlAllocTxRbTM6
25535 RgSchDlHqProcCb *proc,
25536 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25539 RgSchDlRbAlloc *allocInfo;
25540 RgSchCmnDlUe *ueDl;
25546 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25547 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* forceTD -> fall back to transmit diversity via DCI 1A; else single-layer
 * closed-loop precoding via DCI 1B with PMI feedback */
25549 if (ueDl->mimoInfo.forceTD)
25551 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25552 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25556 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
25557 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25558 /* Fill precoding information for FORMAT 1B */
25559 /* First 4 least significant bits to indicate PMI.
25560 * 4th most significant corresponds to pmi Confirmation.
25562 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
25563 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
25565 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25566 bo, &numRb, effBo);
25567 if (ret == RFAILED)
25569 /* If allocation couldn't be made then return */
25574 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25577 /* Adding UE to RbAllocInfo TX Lst */
25578 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25580 /* Fill UE alloc Info */
25581 allocInfo->rbsReq = numRb;
25582 allocInfo->dlSf = subFrm;
25588 * @brief This function determines the RBs and Bytes required for BO
25589 * retransmission for UEs configured with TM 6.
25593 * Function: rgSCHCmnDlAllocRetxRbTM6
25596 * Reference Parameter effBo is filled with alloced bytes.
25597 * Returns RFAILED if BO not satisfied at all.
25599 * Invoked by: rgSCHCmnDlAllocRetxRb
25601 * @param[in] RgSchCellCb *cell
25602 * @param[in] RgSchDlSf *subFrm
25603 * @param[in] RgSchUeCb *ue
25604 * @param[in] uint32_t bo
25605 * @param[out] uint32_t *effBo
25606 * @param[in] RgSchDlHqProcCb *proc
25607 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25611 static Void rgSCHCmnDlAllocRetxRbTM6
25618 RgSchDlHqProcCb *proc,
25619 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25622 RgSchDlRbAlloc *allocInfo;
25623 RgSchCmnDlUe *ueDl;
25629 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25630 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Same DCI 1A / 1B selection as the TX path (rgSCHCmnDlAllocTxRbTM6) */
25632 if (ueDl->mimoInfo.forceTD)
25634 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25635 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25639 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
25640 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25641 /* Fill precoding information for FORMAT 1B */
25642 /* First 4 least significant bits to indicate PMI.
25643 * 4th most significant corresponds to pmi Confirmation.
25645 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
25646 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
25649 /* Get the Allocation in terms of RBs that are required for
25650 * this retx of TB1 */
25651 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
25653 if (ret == RFAILED)
25655 /* Allocation couldn't be made for Retx */
25656 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25659 /* Adding UE to allocInfo RETX Lst */
25660 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
25661 /* Fill UE alloc Info */
25662 allocInfo->rbsReq = numRb;
25663 allocInfo->dlSf = subFrm;
25669 * @brief This function determines the RBs and Bytes required for BO
25670 * transmission for UEs configured with TM 7.
25674 * Function: rgSCHCmnDlAllocTxRbTM7
25677 * Reference Parameter effBo is filled with alloced bytes.
25678 * Returns RFAILED if BO not satisfied at all.
25680 * Invoked by: rgSCHCmnDlAllocTxRb
25682 * @param[in] RgSchCellCb *cell
25683 * @param[in] RgSchDlSf *subFrm
25684 * @param[in] RgSchUeCb *ue
25685 * @param[in] uint32_t bo
25686 * @param[out] uint32_t *effBo
25687 * @param[in] RgSchDlHqProcCb *proc
25688 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25692 static Void rgSCHCmnDlAllocTxRbTM7
25699 RgSchDlHqProcCb *proc,
25700 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM7 carries a single TB on a single CW: delegate to the common 1TB/1CW helper */
25703 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
25709 * @brief This function determines the RBs and Bytes required for BO
25710 * retransmission for UEs configured with TM 7.
25714 * Function: rgSCHCmnDlAllocRetxRbTM7
25717 * Reference Parameter effBo is filled with alloced bytes.
25718 * Returns RFAILED if BO not satisfied at all.
25720 * Invoked by: rgSCHCmnDlAllocRetxRb
25722 * @param[in] RgSchCellCb *cell
25723 * @param[in] RgSchDlSf *subFrm
25724 * @param[in] RgSchUeCb *ue
25725 * @param[in] uint32_t bo
25726 * @param[out] uint32_t *effBo
25727 * @param[in] RgSchDlHqProcCb *proc
25728 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25732 static Void rgSCHCmnDlAllocRetxRbTM7
25739 RgSchDlHqProcCb *proc,
25740 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM7 RETX: single TB on single CW, delegate to the common 1TB/1CW RETX helper */
25743 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
25749 * @brief This function invokes the TM specific DL TX RB Allocation routine.
25753 * Function: rgSCHCmnDlAllocTxRb
25754 * Purpose: This function invokes the TM specific
25755 * DL TX RB Allocation routine.
25757 * Invoked by: Specific Schedulers
25759 * @param[in] RgSchCellCb *cell
25760 * @param[in] RgSchDlSf *subFrm
25761 * @param[in] RgSchUeCb *ue
25762 * @param[in] uint32_t bo
25763 * @param[out] uint32_t *effBo
25764 * @param[in] RgSchDlHqProcCb *proc
25765 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25769 S16 rgSCHCmnDlAllocTxRb
25776 RgSchDlHqProcCb *proc,
25777 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* NOTE(review): despite the names, newSchBits/prevSchBits accumulate BYTES
 * (bytesReq); the *8 below converts the delta to bits for aggTbBits. */
25780 uint32_t newSchBits = 0;
25781 uint32_t prevSchBits = 0;
25782 RgSchDlRbAlloc *allocInfo;
/* Reset the per-TTI aggregate when scheduling in a new TTI */
25785 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
25787 ue->dl.aggTbBits = 0;
25791 /* Calculate totals bits previously allocated */
25792 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25793 if (allocInfo->tbInfo[0].schdlngForTb)
25795 prevSchBits += allocInfo->tbInfo[0].bytesReq;
25797 if (allocInfo->tbInfo[1].schdlngForTb)
25799 prevSchBits += allocInfo->tbInfo[1].bytesReq;
25802 /* Call TM specific RB allocation routine */
25803 (dlAllocTxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
25804 proc, cellWdAllocInfo);
25808 /* Calculate totals bits newly allocated */
25809 if (allocInfo->tbInfo[0].schdlngForTb)
25811 newSchBits += allocInfo->tbInfo[0].bytesReq;
25813 if (allocInfo->tbInfo[1].schdlngForTb)
25815 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* Only the growth since the previous call is added to the aggregate */
25817 if (newSchBits > prevSchBits)
25819 ue->dl.aggTbBits += ((newSchBits - prevSchBits) * 8);
25820 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
25827 /* DwPTS Scheduling Changes Start */
25830 * @brief Retransmit decision for TDD. Retx is avoided in below cases
25831 * 1) DL Sf -> Spl Sf
25832 * 2) DL SF -> DL SF 0
25836 * Function: rgSCHCmnRetxAvoidTdd
25837 * Purpose: Avoid allocating RETX for cases 1, 2
25839 * Invoked by: rgSCHCmnRetxAvoidTdd
25841 * @param[in] RgSchDlSf *curSf
25842 * @param[in] RgSchCellCb *cell
25843 * @param[in] RgSchDlHqProcCb *proc
25847 Bool rgSCHCmnRetxAvoidTdd
25851 RgSchDlHqProcCb *proc
25854 RgSchTddSfType txSfType = 0;
25857 /* Get the RBs of TB that will be retransmitted */
25858 if (proc->tbInfo[0].state == HQ_TB_NACKED)
25860 txSfType = proc->tbInfo[0].sfType;
25862 #ifdef XEON_SPECIFIC_CHANGES
25863 #ifndef XEON_TDD_SPCL
25864 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
25865 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
25872 if (proc->tbInfo[1].state == HQ_TB_NACKED)
25874 /* Select the TxSf with the highest num of possible REs
25875 * In ascending order -> 1) SPL SF 2) DL_SF_0 3) DL_SF */
25876 txSfType = RGSCH_MAX(txSfType, proc->tbInfo[1].sfType);
25878 #ifdef XEON_SPECIFIC_CHANGES
25879 #ifndef XEON_TDD_SPCL
25880 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
25881 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
/* Avoid RETX when the current SF type offers fewer REs than the original TX SF */
25889 if (txSfType > curSf->sfType)
25903 * @brief Avoid allocating RETX incase of collision
25904 * with reserved resources for BCH/PSS/SSS occassions.
25908 * Function: rgSCHCmnRetxAllocAvoid
25909 * Purpose: Avoid allocating RETX incase of collision
25910 * with reserved resources for BCH/PSS/SSS occassions
25912 * Invoked by: rgSCHCmnDlAllocRetxRb
25914 * @param[in] RgSchDlSf *subFrm
25915 * @param[in] RgSchUeCb *ue
25916 * @param[in] RgSchDlHqProcCb *proc
25920 Bool rgSCHCmnRetxAllocAvoid
25924 RgSchDlHqProcCb *proc
25930 if (proc->tbInfo[0].state == HQ_TB_NACKED)
25932 reqRbs = proc->tbInfo[0].dlGrnt.numRb;
25936 reqRbs = proc->tbInfo[1].dlGrnt.numRb;
25938 /* Consider the dlGrnt.numRb of the Retransmitting proc->tbInfo
25939 * and current available RBs to determine if this RETX TB
25940 * will collide with the BCH/PSS/SSS occassion */
25941 if (subFrm->sfNum % 5 == 0)
25943 if ((subFrm->bwAssigned < cell->pbchRbEnd) &&
25944 (((subFrm->bwAssigned + reqRbs) - cell->pbchRbStart) > 0))
25956 * @brief This function invokes the TM specific DL RETX RB Allocation routine.
25960 * Function: rgSCHCmnDlAllocRetxRb
25961 * Purpose: This function invokes the TM specific
25962 * DL RETX RB Allocation routine.
25964 * Invoked by: Specific Schedulers
25966 * @param[in] RgSchCellCb *cell
25967 * @param[in] RgSchDlSf *subFrm
25968 * @param[in] RgSchUeCb *ue
25969 * @param[in] uint32_t bo
25970 * @param[out] uint32_t *effBo
25971 * @param[in] RgSchDlHqProcCb *proc
25972 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25976 S16 rgSCHCmnDlAllocRetxRb
25983 RgSchDlHqProcCb *proc,
25984 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* NOTE(review): newSchBits accumulates bytesReq (bytes); *8 converts to bits */
25987 uint32_t newSchBits = 0;
25988 RgSchDlRbAlloc *allocInfo;
/* Reset the per-TTI aggregate when scheduling in a new TTI */
25991 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
25993 ue->dl.aggTbBits = 0;
25997 /* Check for DL BW exhaustion */
25998 if (subFrm->bw <= subFrm->bwAssigned)
26002 /* Call TM specific RB allocation routine */
26003 (dlAllocRetxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
26004 proc, cellWdAllocInfo);
26008 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26009 /* Calculate totals bits newly allocated */
26010 if (allocInfo->tbInfo[0].schdlngForTb)
26012 newSchBits += allocInfo->tbInfo[0].bytesReq;
26014 if (allocInfo->tbInfo[1].schdlngForTb)
26016 newSchBits += allocInfo->tbInfo[1].bytesReq;
26018 ue->dl.aggTbBits += (newSchBits * 8);
26019 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
26027 * @brief This function determines the RBs and Bytes required for
26028 * Transmission on 1 CW.
26032 * Function: rgSCHCmnDlAlloc1CwTxRb
26033 * Purpose: This function determines the RBs and Bytes required
26034 * for Transmission of DL SVC BO on 1 CW.
26035 * Also, takes care of SVC by SVC allocation by tracking
26036 * previous SVCs allocations.
26037 * Returns RFAILED if BO not satisfied at all.
26039 * Invoked by: DL UE Allocation
26041 * @param[in] RgSchCellCb *cell
26042 * @param[in] RgSchDlSf *subFrm
26043 * @param[in] RgSchUeCb *ue
26044 * @param[in] RgSchDlHqTbCb *tbInfo
26045 * @param[in] uint32_t bo
26046 * @param[out] uint8_t *numRb
26047 * @param[out] uint32_t *effBo
26051 static S16 rgSCHCmnDlAlloc1CwTxRb
26056 RgSchDlHqTbCb *tbInfo,
26065 RgSchCmnDlUe *ueDl;
26066 RgSchDlRbAlloc *allocInfo;
26069 /* Correcting wrap around issue.
26070 * This change has been done at mutliple places in this function.*/
26071 uint32_t tempNumRb;
26074 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26075 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* Remember bytes already promised to this UE before adding the new BO */
26076 oldReq = ueDl->outStndAlloc;
26079 //TODO_SID: Currently setting max Tb size wrt to 5GTF TM3
26080 iTbs = ue->ue5gtfCb.mcs;
26081 ueDl->maxTbSz = MAX_5GTF_TB_SIZE * ue->ue5gtfCb.rank;
26082 ueDl->maxRb = MAX_5GTF_PRBS;
26084 ueDl->outStndAlloc += bo;
26085 /* consider Cumulative amount of this BO and bytes so far allocated */
26086 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbSz/8);
26087 /* Get the number of REs needed for this bo. */
26088 //noRes = ((bo * 8 * 1024) / eff);
26090 /* Get the number of RBs needed for this transmission */
26091 /* Number of RBs = No of REs / No of REs per RB */
26092 //tempNumRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
/* 5GTF path: RB count is fixed to the full PRB budget, TB size from the MCS table */
26093 tempNumRb = MAX_5GTF_PRBS;
26094 tbSz = RGSCH_MIN(bo, (rgSch5gtfTbSzTbl[iTbs]/8) * ue->ue5gtfCb.rank);
26096 /* DwPts Scheduling Changes End */
/* Effective BO served this call = newly-added portion only, capped at reqBytes */
26097 *effBo = RGSCH_MIN(tbSz - oldReq, reqBytes);
26100 //RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs);
26105 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbSz, \
26106 iTbs, imcs, tbInfo, ue->ue5gtfCb.rank);
26107 *numRb = (uint8_t) tempNumRb;
26109 /* Update the subframe Allocated BW field */
26110 subFrm->bwAssigned = subFrm->bwAssigned + tempNumRb - allocInfo->rbsReq;
26117 * @brief This function is invoked in the event of any TB's allocation
26118 * being underutilized by the specific scheduler. Here we reduce iMcs
26119 * to increase redundancy and hence increase reception quality at UE.
26123 * Function: rgSCHCmnRdcImcsTxTb
26124 * Purpose: This function shall reduce the iMcs in accordance with
26125 * the total consumed bytes by the UE at allocation
26128 * Invoked by: UE DL Allocation finalization routine
26129 * of specific scheduler.
26131 * @param[in] RgSchDlRbAlloc *allocInfo
26132 * @param[in] uint8_t tbInfoIdx
26133 * @param[in] uint32_t cnsmdBytes
26137 Void rgSCHCmnRdcImcsTxTb
26139 RgSchDlRbAlloc *allocInfo,
26141 uint32_t cnsmdBytes
26145 /*The below functionality is not needed.*/
/* NOTE(review): the code below appears disabled per the comment above —
 * confirm whether an early return (elided in this dump) precedes it. */
26151 iTbs = allocInfo->tbInfo[tbInfoIdx].iTbs;
26152 noLyr = allocInfo->tbInfo[tbInfoIdx].noLyr;
26153 numRb = allocInfo->rbsAlloc;
/* Already an exact fit: keep the current iTbs */
26156 if ((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) == cnsmdBytes)
26161 /* Get iTbs as suitable for the consumed bytes */
26162 while((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) > cnsmdBytes)
26166 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].\
26167 tbCb->dlGrnt.iMcs);
26173 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].tbCb->dlGrnt.iMcs);
26180 * @brief This function determines the RBs and Bytes required for
26181 * Transmission on 2 CWs.
26185 * Function: rgSCHCmnDlAlloc2CwTxRb
26186 * Purpose: This function determines the RBs and Bytes required
26187 * for Transmission of DL SVC BO on 2 CWs.
26188 * Also, takes care of SVC by SVC allocation by tracking
26189 * previous SVCs allocations.
26190 * Returns RFAILED if BO not satisfied at all.
26192 * Invoked by: TM3 and TM4 DL UE Allocation
26194 * @param[in] RgSchCellCb *cell
26195 * @param[in] RgSchDlSf *subFrm
26196 * @param[in] RgSchUeCb *ue
26197 * @param[in] RgSchDlHqProcCb *proc
26198 * @param[in] RgSchDlHqProcCb bo
26199 * @param[out] uint8_t *numRb
26200 * @param[out] uint32_t *effBo
26204 static S16 rgSCHCmnDlAlloc2CwTxRb
26209 RgSchDlHqProcCb *proc,
26216 uint32_t eff1, eff2;
26217 uint32_t tb1Sz, tb2Sz;
26218 uint8_t imcs1, imcs2;
26219 uint8_t noLyr1, noLyr2;
26220 uint8_t iTbs1, iTbs2;
26221 RgSchCmnDlCell *cellDl;
26222 RgSchCmnDlUe *ueDl;
26223 RgSchDlRbAlloc *allocInfo;
26226 /* Fix: MUE_PERTTI_DL */
26228 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
26229 uint8_t cfi = cellSch->dl.currCfi;
26231 uint32_t availBits = 0;
26233 uint32_t boTmp = bo;
26238 cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26239 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26240 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26241 oldReq = ueDl->outStndAlloc;
26244 if (ueDl->maxTbBits > ue->dl.aggTbBits)
26246 availBits = ueDl->maxTbBits - ue->dl.aggTbBits;
26248 /* check if we can further allocate to this UE */
26249 if ((ue->dl.aggTbBits >= ueDl->maxTbBits) ||
26250 (allocInfo->tbInfo[0].bytesReq >= ueDl->maxTbSz/8) ||
26251 (allocInfo->tbInfo[1].bytesReq >= ueDl->maxTbSz/8) ||
26252 (allocInfo->rbsReq >= ueDl->maxRb))
26254 DU_LOG("\nDEBUG --> SCH : rgSCHCmnDlAllocRb(): UEs max allocation exceed");
26258 noLyr1 = ueDl->mimoInfo.cwInfo[0].noLyr;
26259 noLyr2 = ueDl->mimoInfo.cwInfo[1].noLyr;
26261 /* If there is no CFI change, continue to use the BLER based
26263 if (ueDl->lastCfi == cfi)
26265 iTbs1 = ueDl->mimoInfo.cwInfo[0].iTbs[noLyr1 - 1];
26266 iTbs2 = ueDl->mimoInfo.cwInfo[1].iTbs[noLyr2 - 1];
/* CFI changed: re-derive iTbs per CW from CQI (TDD variant takes subFrm) */
26270 uint8_t cqi = ueDl->mimoInfo.cwInfo[0].cqi;
26272 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 0, noLyr1);
26274 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 0, noLyr1);
26277 cqi = ueDl->mimoInfo.cwInfo[1].cqi;
26279 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 1, noLyr2);
26281 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 1, noLyr2);
26285 /*ccpu00131191 and ccpu00131317 - Fix for RRC Reconfig failure
26286 * issue for VoLTE call */
26287 //if ((proc->hasDcch) || (TRUE == rgSCHLaaSCellEnabled(cell)))
26307 else if(!cellSch->dl.isDlFreqSel)
26310 /* for Tdd reduce iTbs only for SF0. SF5 contains only
26311 * SSS and can be ignored */
26312 if (subFrm->sfNum == 0)
26314 (iTbs1 > 1)? (iTbs1 -= 1) : (iTbs1 = 0);
26315 (iTbs2 > 1)? (iTbs2 -= 1) : (iTbs2 = 0);
26317 /* For SF 3 and 8 CRC is getting failed in DL.
26318 Need to do proper fix after the replay from
26320 #ifdef CA_PHY_BRDCM_61765
26321 if ((subFrm->sfNum == 3) || (subFrm->sfNum == 8))
26323 (iTbs1 > 2)? (iTbs1 -= 2) : (iTbs1 = 0);
26324 (iTbs2 > 2)? (iTbs2 -= 2) : (iTbs2 = 0);
/* Special (DwPTS) subframe: use the reduced CFI for its shorter data region */
26332 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26334 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
/* Per-CW spectral efficiency lookup used to split BO between the CWs */
26338 eff1 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr1 - 1][cfi]))[iTbs1];
26339 eff2 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr2 - 1][cfi]))[iTbs2];
26342 bo = RGSCH_MIN(bo,availBits/8);
26343 ueDl->outStndAlloc += bo;
26344 /* consider Cumulative amount of this BO and bytes so far allocated */
26345 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbBits/8);
/* Weighted split of BO across CWs in proportion to eff1:eff2 */
26346 bo = RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff1)/(eff1+eff2)),
26348 RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff2)/(eff1+eff2)),
26349 (ueDl->maxTbSz)/8) +
26350 1; /* Add 1 to adjust the truncation at weighted averaging */
26351 /* Get the number of REs needed for this bo. */
26352 noRes = ((bo * 8 * 1024) / (eff1 + eff2));
26354 /* Get the number of RBs needed for this transmission */
26355 /* Number of RBs = No of REs / No of REs per RB */
26356 numRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
26357 /* Cannot exceed the maximum number of RBs per UE */
26358 if (numRb > ueDl->maxRb)
26360 numRb = ueDl->maxRb;
26365 if(RFAILED == rgSCHLaaCmn2CwAdjustPrb(allocInfo, boTmp, &numRb, ueDl, noLyr1, noLyr2, iTbs1, iTbs2))
/* Grow numRb until the two TBs together cover the BO or a cap is hit */
26368 while ((numRb <= ueDl->maxRb) &&
26369 (rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1] <= ueDl->maxTbSz) &&
26370 (rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1] <= ueDl->maxTbSz) &&
26371 ((rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8 +
26372 rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8) <= bo))
26378 availBw = subFrm->bw - subFrm->bwAssigned;
26379 /* Cannot exceed the total number of RBs in the cell */
26380 if ((S16)(numRb - allocInfo->rbsReq) > availBw)
26382 numRb = availBw + allocInfo->rbsReq;
26384 tb1Sz = rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8;
26385 tb2Sz = rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8;
26386 /* DwPts Scheduling Changes Start */
26388 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26390 /* Max Rb for Special Sf is approximated as 4/3 of maxRb */
26391 rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, (uint8_t*)&numRb, ueDl->maxRb*4/3,
26392 &iTbs1, &iTbs2, noLyr1,
26393 noLyr2, &tb1Sz, &tb2Sz, cfi);
26394 /* Check for available Bw */
26395 if ((S16)numRb - allocInfo->rbsReq > availBw)
26397 numRb = availBw + allocInfo->rbsReq;
26398 tb1Sz = rgTbSzTbl[noLyr1-1][iTbs1][RGSCH_MAX(numRb*3/4,1)-1]/8;
26399 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs2][RGSCH_MAX(numRb*3/4,1)-1]/8;
26403 /* DwPts Scheduling Changes End */
26404 /* Update the subframe Allocated BW field */
26405 subFrm->bwAssigned = subFrm->bwAssigned + numRb - \
/* Effective BO served this call = newly-added portion only, capped at reqBytes */
26408 *effBo = RGSCH_MIN((tb1Sz + tb2Sz) - oldReq, reqBytes);
26411 if (ROK != rgSCHLaaCmn2TBPrbCheck(allocInfo, tb1Sz, tb2Sz, boTmp, effBo, iTbs1, iTbs2, numRb, proc))
26417 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs1, imcs1);
26418 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs2, imcs2);
26419 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tb1Sz, \
26420 iTbs1, imcs1, &proc->tbInfo[0], noLyr1);
26421 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
26422 iTbs2, imcs2, &proc->tbInfo[1], noLyr2);
26423 *numRbRef = (uint8_t)numRb;
26431 * @brief This function determines the RBs and Bytes required for
26432 * Transmission & Retransmission on 2 CWs.
26436 * Function: rgSCHCmnDlAlloc2CwTxRetxRb
26437 * Purpose: This function determines the RBs and Bytes required
26438 * for Transmission & Retransmission on 2 CWs. Allocate
26439 * RETX TB on a better CW and restrict new TX TB by
26441 * Returns RFAILED if BO not satisfied at all.
26443 * Invoked by: TM3 and TM4 DL UE Allocation
26445 * @param[in] RgSchCellCb *cell
26446 * @param[in] RgSchDlSf *subFrm
26447 * @param[in] RgSchUeCb *ue
26448 * @param[in] RgSchDlHqTbCb *reTxTb
26449 * @param[in] RgSchDlHqTbCb *txTb
26450 * @param[out] uint8_t *numRb
26451 * @param[out] uint32_t *effBo
26455 static S16 rgSCHCmnDlAlloc2CwTxRetxRb
26460 RgSchDlHqTbCb *reTxTb,
26461 RgSchDlHqTbCb *txTb,
26466 RgSchCmnDlUe *ueDl;
26467 RgSchDlRbAlloc *allocInfo;
26468 uint8_t imcs1, imcs2;
26471 RgSchCmnDlUeCwInfo *otherCw;
26473 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26474 uint8_t cfi = cellDl->currCfi;
26478 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26479 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* The RETX TB goes on the better CW; the fresh TX TB uses the other one */
26480 otherCw = &ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)];
26483 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26484 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26486 availBw = subFrm->bw - subFrm->bwAssigned;
26487 *numRb = reTxTb->dlGrnt.numRb;
26489 #ifdef XEON_TDD_SPCL
26490 *numRb = (reTxTb->initTxNumRbs);
/* Original TX was on a special SF but this one is normal: scale RBs by 3/4 */
26491 if(reTxTb->sfType == RG_SCH_SPL_SF_DATA && subFrm->sfType != RG_SCH_SPL_SF_DATA)
26493 *numRb = (reTxTb->initTxNumRbs*3/4);
26497 DU_LOG("\nERROR --> SCH : Number of RBs [%d] are less than or equal to 3",*numRb);
26503 if ((S16)*numRb > availBw)
26507 /* Update the subframe Allocated BW field */
26508 subFrm->bwAssigned += *numRb;
26509 noLyr2 = otherCw->noLyr;
26510 RG_SCH_CMN_GET_MCS_FOR_RETX(reTxTb, imcs1);
26512 /* If there is no CFI change, continue to use the BLER based
26514 if (ueDl->lastCfi == cfi)
26516 iTbs = otherCw->iTbs[noLyr2-1];
/* CFI changed: re-derive iTbs for the TX CW (TDD variant takes subFrm) */
26521 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, otherCw->cqi, cfi,
26522 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
26524 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, otherCw->cqi, cfi,
26525 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
26528 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs][*numRb-1]/8;
26529 /* DwPts Scheduling Changes Start */
26532 /* DwPts Scheduling Changes End */
26533 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs2);
/* CW0 carries the RETX TB (iTbs 0 placeholder, retx MCS); CW1 the new TX TB */
26535 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], reTxTb->tbSz, \
26536 0, imcs1, reTxTb, reTxTb->numLyrs);
26538 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
26539 iTbs, imcs2, txTb, noLyr2);
26541 *effBo = reTxTb->tbSz + tb2Sz;
26548 * @brief This function determines the RBs and Bytes required for BO
26549 * Retransmission on 2 CWs.
26553 * Function: rgSCHCmnDlAlloc2CwRetxRb
26554 * Purpose: This function determines the RBs and Bytes required
26555 * for BO Retransmission on 2 CWs. Allocate larger TB
26556 * on a better CW and check if the smaller TB can be
26557 * accommodated on the other CW.
26558 * Returns RFAILED if BO not satisfied at all.
26560 * Invoked by: Common Scheduler
26562 * @param[in] RgSchCellCb *cell
26563 * @param[in] RgSchDlSf *subFrm
26564 * @param[in] RgSchUeCb *ue
26565 * @param[in] RgSchDlHqProcCb *proc
26566 * @param[out] uint8_t *numRb
26567 * @param[out] Bool *swpFlg
26568 * @param[out] uint32_t *effBo
/* Allocates RBs for retransmission of BOTH TBs of a HARQ process on two
 * codewords, reusing each TB's original grant (no RB/TBS recomputation
 * for RETX). Writes numRb and effBo for the caller. */
26572 static S16 rgSCHCmnDlAlloc2CwRetxRb
26577 RgSchDlHqProcCb *proc,
26583 RgSchDlRbAlloc *allocInfo;
26586 RgSchDlHqTbCb *lrgTbInfo, *othrTbInfo;
26589 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26592 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26593 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26595 lrgTbInfo = &proc->tbInfo[0];
26596 othrTbInfo = &proc->tbInfo[1];
26597 *numRb = lrgTbInfo->dlGrnt.numRb;
26598 #ifdef XEON_TDD_SPCL
/* If either TB was first transmitted in a special (DwPTS) subframe, base
 * the RB count on that TB's initial TX, scaled by 3/4 when the current
 * subframe is a normal one */
26599 if((lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA || othrTbInfo->sfType == RG_SCH_SPL_SF_DATA))
26601 if(lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA)
26603 *numRb = (lrgTbInfo->initTxNumRbs);
26607 *numRb = (othrTbInfo->initTxNumRbs);
26610 if(subFrm->sfType != RG_SCH_SPL_SF_DATA)
26612 *numRb = (*numRb)*3/4;
26617 DU_LOG("\nERROR --> SCH : Number of RBs [%d] are less than or equal to 3",*numRb);
/* Fail if the required RBs exceed the subframe's unassigned bandwidth */
26622 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
26626 /* Update the subframe Allocated BW field */
26627 subFrm->bwAssigned += *numRb;
26628 RG_SCH_CMN_GET_MCS_FOR_RETX(lrgTbInfo, imcs1);
26629 RG_SCH_CMN_GET_MCS_FOR_RETX(othrTbInfo, imcs2);
/* iTbs 0 for both TBs: iTbs is irrelevant for retransmissions */
26630 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], lrgTbInfo->tbSz, \
26631 0, imcs1, lrgTbInfo, lrgTbInfo->numLyrs);
26632 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], othrTbInfo->tbSz, \
26633 0, imcs2, othrTbInfo, othrTbInfo->numLyrs);
26634 *effBo = lrgTbInfo->tbSz + othrTbInfo->tbSz;
26643 * @brief This function determines the RBs and Bytes required for BO
26644 * Retransmission on 1 CW.
26648 * Function: rgSCHCmnDlAlloc1CwRetxRb
26649 * Purpose: This function determines the RBs and Bytes required
26650 * for BO Retransmission on 1 CW, the first CW.
26651 * Returns RFAILED if BO not satisfied at all.
26653 * Invoked by: Common Scheduler
26655 * @param[in] RgSchCellCb *cell
26656 * @param[in] RgSchDlSf *subFrm
26657 * @param[in] RgSchUeCb *ue
26658 * @param[in] RgSchDlHqTbCb *tbInfo
26659 * @param[in] uint8_t noLyr
26660 * @param[out] uint8_t *numRb
26661 * @param[out] uint32_t *effBo
/* Allocates RBs for retransmission of a single TB on one codeword,
 * reusing the original grant's RB count, MCS and DCI format. */
26665 static S16 rgSCHCmnDlAlloc1CwRetxRb
26670 RgSchDlHqTbCb *tbInfo,
26676 RgSchDlRbAlloc *allocInfo;
26680 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26683 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26684 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26686 *numRb = tbInfo->dlGrnt.numRb;
/* Fail if the required RBs exceed the subframe's unassigned bandwidth */
26687 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
26691 /* Update the subframe Allocated BW field */
26692 subFrm->bwAssigned += *numRb;
26693 imcs = tbInfo->dlGrnt.iMcs;
26694 allocInfo->dciFormat = tbInfo->dlGrnt.dciFormat;
26695 /* Fix: For a RETX TB the iTbs is irrelevant, hence setting 0 */
26696 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbInfo->tbSz, \
26697 0, imcs, tbInfo, tbInfo->numLyrs);
26698 *effBo = tbInfo->tbSz;
26706 * @brief This function is called to handle Release PDCCH feedback for SPS UE
26710 * Function: rgSCHCmnDlRelPdcchFbk
26711 * Purpose: Invokes SPS module to handle release PDCCH feedback
26715 * @param[in] RgSchCellCb *cell
26716 * @param[in] RgSchUeCb *ue
26717 * @param[in] Bool isAck
/* Delegates SPS release-PDCCH feedback (ACK/NACK) handling to the DL SPS
 * module. */
26721 Void rgSCHCmnDlRelPdcchFbk
26729 rgSCHCmnSpsDlRelPdcchFbk(cell, ue, isAck);
26736 * @brief This function is invoked to handle Ack processing for a HARQ proc.
26740 * Function: rgSCHCmnDlProcAck
26741 * Purpose: DTX processing for HARQ proc
26745 * @param[in] RgSchCellCb *cell
26746 * @param[in] RgSchDlHqProcCb *hqP
/* ACK/DTX processing for a DL HARQ process: forwards to the SPS module
 * only when the HARQ process belongs to an SPS service. */
26750 Void rgSCHCmnDlProcAck
26753 RgSchDlHqProcCb *hqP
26758 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
26760 /* Invoke SPS module if SPS service was scheduled for this HARQ proc */
26761 rgSCHCmnSpsDlProcAck(cell, hqP);
26765 #ifdef RGSCH_SPS_STATS
/* Debug counter: number of C-RNTI CEs received (SPS statistics build only) */
26766 uint32_t rgSchStatCrntiCeRcvCnt;
26769 * @brief This function is invoked to handle CRNTI CE reception for an UE
26773 * Function: rgSCHCmnHdlCrntiCE
26774 * Purpose: Handle CRNTI CE reception
26778 * @param[in] RgSchCellCb *cell
26779 * @param[in] RgSchDlHqProcCb *hqP
/* Handles reception of a C-RNTI CE from a UE: re-activates a UE parked in
 * the PDCCH-order inactivity lists, then resets DL/UL SPS state for the UE
 * (same handling as reception of a UE RESET). */
26783 Void rgSCHCmnHdlCrntiCE
26790 #ifdef RGSCH_SPS_STATS
26791 rgSchStatCrntiCeRcvCnt++;
26794 /* When UL sync lost happened due to TA timer expiry UE is being moved to
26795 PDCCH order inactivity list.But when CRNTI CE received in msg3 from UE
26796 we are not moving UE into active state due to that RRC Reconfiguration is
26798 So here we are moving UE to active list whenever we receive the CRNTI CE and
26800 /* CR ccpu00144525 */
26801 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
26803 /* Activate this UE if it was inactive */
26804 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
26805 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
26808 /* Handling is same as reception of UE RESET for both DL and UL */
26809 if (ue->dl.dlSpsCfg.isDlSpsEnabled)
26811 rgSCHCmnSpsDlUeReset(cell, ue);
26813 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26815 rgSCHCmnSpsUlUeReset(cell, ue);
26823 * @brief This function is called to handle relInd from MAC for a UE
26827 * Function: rgSCHCmnUlSpsRelInd
26828 * Purpose: Invokes SPS module to handle UL SPS release for a UE
26830 * Invoked by: SCH_UTL
26832 * @param[in] RgSchCellCb *cell
26833 * @param[in] RgSchUeCb *ue
26834 * @param[in] Bool isExplRel
/* Forwards a MAC UL SPS release indication (explicit or implicit) to the
 * UL SPS module. */
26838 Void rgSCHCmnUlSpsRelInd
26846 rgSCHCmnSpsUlProcRelInd(cell, ue, isExplRel);
26849 } /* end of rgSCHCmnUlSpsRelInd */
26852 * @brief This function is called to handle SPS Activate Ind from MAC for a UE
26856 * Function: rgSCHCmnUlSpsActInd
26857 * Purpose: Invokes SPS module to handle UL SPS activate for a UE
26859 * Invoked by: SCH_UTL
26861 * @param[in] RgSchCellCb *cell
26862 * @param[in] RgSchUeCb *ue
/* Forwards a MAC UL SPS activate indication (with the triggering SDU size)
 * to the UL SPS module, only when UL SPS is enabled for the UE. */
26866 Void rgSCHCmnUlSpsActInd
26870 uint16_t spsSduSize
26875 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26877 rgSCHCmnSpsUlProcActInd(cell, ue,spsSduSize);
26881 } /* end of rgSCHCmnUlSpsActInd */
26884 * @brief This function is called to handle CRC in UL for UEs
26885 * undergoing SPS release
26889 * Function: rgSCHCmnUlCrcInd
26890 * Purpose: Invokes SPS module to handle CRC in UL for SPS UE
26892 * Invoked by: SCH_UTL
26894 * @param[in] RgSchCellCb *cell
26895 * @param[in] RgSchUeCb *ue
26896 * @param[in] CmLteTimingInfo crcTime
/* Forwards an UL CRC indication (used for UEs undergoing SPS release) to
 * the UL SPS module, only when UL SPS is enabled for the UE. */
26900 Void rgSCHCmnUlCrcInd
26904 CmLteTimingInfo crcTime
26908 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26910 rgSCHCmnSpsUlProcCrcInd(cell, ue, crcTime);
26914 } /* end of rgSCHCmnUlCrcInd */
26917 * @brief This function is called to handle CRC failure in UL
26921 * Function: rgSCHCmnUlCrcFailInd
26922 * Purpose: Invokes SPS module to handle CRC failure in UL for SPS UE
26924 * Invoked by: SCH_UTL
26926 * @param[in] RgSchCellCb *cell
26927 * @param[in] RgSchUeCb *ue
26928 * @param[in] CmLteTimingInfo crcTime
/* Forwards an UL CRC failure (treated as DTX) indication to the UL SPS
 * module, only when UL SPS is enabled for the UE. */
26932 Void rgSCHCmnUlCrcFailInd
26936 CmLteTimingInfo crcTime
26940 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26942 rgSCHCmnSpsUlProcDtxInd(cell, ue, crcTime);
26946 } /* end of rgSCHCmnUlCrcFailInd */
26948 #endif /* LTEMAC_SPS */
26951 * @brief BCH,BCCH,PCCH Downlink Scheduling Handler.
26955 * Function: rgSCHCmnDlBcchPcchAlloc
26956 * Purpose: This function calls common scheduler APIs to
26957 * schedule for BCCH/PCCH.
26958 * It then invokes Allocator for actual RB
26959 * allocations. It processes on the actual resources allocated
26960 * against requested to the allocator module.
26962 * Invoked by: Common Scheduler
26964 * @param[in] RgSchCellCb *cell
/* Entry point for BCH/BCCH/PCCH downlink scheduling for the next subframe:
 * refreshes SI configuration, selects the SI for the current window,
 * schedules BCCH/PCCH, runs RB allocation, and finalizes the result. */
26967 static Void rgSCHCmnDlBcchPcchAlloc(RgSchCellCb *cell)
/* Index of the subframe being scheduled (build-dependent lookahead) */
26970 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
26972 #ifdef LTEMAC_HDFDD
26973 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
26975 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
26978 RgInfSfAlloc *nextsfAlloc = &(cell->sfAllocArr[nextSfIdx]);
26979 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
26980 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
26984 /*Reset the bitmask for BCCH/PCCH*/
26985 rgSCHUtlResetSfAlloc(nextsfAlloc,TRUE,FALSE);
26986 #ifndef DISABLE_MIB_SIB /* Not sending MIB and SIB to CL */
/* Apply any pending SI config update, then pick the SI for this window */
26988 rgSCHChkNUpdSiCfg(cell);
26989 rgSCHSelectSi(cell);
26992 /*Perform the scheduling for BCCH,PCCH*/
26993 rgSCHCmnDlBcchPcch(cell, allocInfo, nextsfAlloc);
26995 /* Call common allocator for RB Allocation */
26996 rgSCHBcchPcchDlRbAlloc(cell, allocInfo);
26998 /* Finalize the allocations: reconcile requested against allocated */
26999 rgSCHCmnDlBcchPcchFnlz(cell, allocInfo);
27000 #endif /* DISABLE_MIB_SIB */
27005 * @brief Handles RB allocation for BCCH/PCCH for downlink.
27009 * Function : rgSCHBcchPcchDlRbAlloc
27011 * Invoking Module Processing:
27012 * - This function is invoked for DL RB allocation of BCCH/PCCH
27014 * Processing Steps:
27015 * - If cell is frequency selecive,
27016 * - Call rgSCHDlfsBcchPcchAllocRb().
27018 * - Do the processing
27020 * @param[in] RgSchCellCb *cell
27021 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* Dispatches BCCH/PCCH DL RB allocation to the frequency-selective (DLFS)
 * allocator or the non-DLFS allocator, based on cell configuration. */
27025 static Void rgSCHBcchPcchDlRbAlloc
27028 RgSchCmnDlRbAllocInfo *allocInfo
27031 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27035 if (cellSch->dl.isDlFreqSel)
27037 cellSch->apisDlfs->rgSCHDlfsBcchPcchAllocRb(cell, allocInfo);
27041 rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo);
27048 * @brief Handles RB allocation for BCCH,PCCH for frequency
27049 * non-selective cell.
27053 * Function : rgSCHCmnNonDlfsBcchPcchRbAlloc
27055 * Invoking Module Processing:
27056 * - SCH shall invoke this if downlink frequency selective is disabled for
27057 * the cell for RB allocation.
27058 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
27059 * estimate and subframe for each allocation to be made to SCH.
27061 * Processing Steps:
27062 * - Allocate sequentially for BCCH,PCCH common channels.
27064 * @param[in] RgSchCellCb *cell
27065 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Non-frequency-selective RB allocation for common channels: allocates
 * sequentially for PCCH first, then BCCH on DL-SCH, whenever RBs were
 * requested for the channel. */
27069 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc
27072 RgSchCmnDlRbAllocInfo *allocInfo
27075 RgSchDlRbAlloc *reqAllocInfo;
27079 /* Allocate for PCCH */
27080 reqAllocInfo = &(allocInfo->pcchAlloc);
27081 if (reqAllocInfo->rbsReq)
27083 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
27085 /* Allocate for BCCH on DLSCH */
27086 reqAllocInfo = &(allocInfo->bcchAlloc);
27087 if (reqAllocInfo->rbsReq)
27089 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
27097 * @brief This function implements the handling to check and
27098 * update the SI cfg at the start of the modification period.
27102 * Function: rgSCHChkNUpdSiCfg
27103 * Purpose: This function implements handling for update of SI Cfg
27104 * at the start of modification period.
27106 * Invoked by: Scheduler
27108 * @param[in] RgSchCellCb* cell
/* At SI modification-period boundaries (and immediately for a PWS-updated
 * SIB1), switches the scheduler's current MIB/SIB1/SI message buffers and
 * SI configuration over to the newly received versions, clearing the
 * corresponding siBitMask update bits as each switch completes. */
27113 static Void rgSCHChkNUpdSiCfg
27118 CmLteTimingInfo pdSchTmInfo;
27122 pdSchTmInfo = cell->crntTime;
27123 #ifdef LTEMAC_HDFDD
27124 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27125 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27126 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27128 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA);
27132 /* Updating the SIB1 for Warning SI message immediately after it is received
27133 * from application. No need to wait for next modification period.
27135 if((pdSchTmInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
27136 && (RGSCH_SIB1_TX_SF_NUM == (pdSchTmInfo.slot % RGSCH_NUM_SUB_FRAMES)))
27138 /*Check whether SIB1 with PWS has been updated*/
27139 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_PWS_UPD)
27141 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
27142 cell->siCb.newSiInfo.sib1Info.sib1);
27143 cell->siCb.crntSiInfo.sib1Info.mcs =
27144 cell->siCb.newSiInfo.sib1Info.mcs;
27145 cell->siCb.crntSiInfo.sib1Info.nPrb =
27146 cell->siCb.newSiInfo.sib1Info.nPrb;
27147 cell->siCb.crntSiInfo.sib1Info.msgLen =
27148 cell->siCb.newSiInfo.sib1Info.msgLen;
27149 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_PWS_UPD;
27153 /*Check if this SFN and SF No marks the start of next modification
27154 period. If current SFN,SF No doesn't marks the start of next
27155 modification period, then return. */
27156 if(!((pdSchTmInfo.sfn % cell->siCfg.modPrd == 0)
27157 && (0 == pdSchTmInfo.slot)))
27158 /*if(!((((pdSchTmInfo.hSfn * 1024) + pdSchTmInfo.sfn) % cell->siCfg.modPrd == 0)
27159 && (0 == pdSchTmInfo.slot)))*/
27164 /*Check whether MIB has been updated*/
27165 if(cell->siCb.siBitMask & RGSCH_SI_MIB_UPD)
27167 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.mib,
27168 cell->siCb.newSiInfo.mib);
27169 cell->siCb.siBitMask &= ~RGSCH_SI_MIB_UPD;
27172 /*Check whether SIB1 has been updated*/
27173 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_UPD)
27175 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
27176 cell->siCb.newSiInfo.sib1Info.sib1);
27177 cell->siCb.crntSiInfo.sib1Info.mcs = cell->siCb.newSiInfo.sib1Info.mcs;
27178 cell->siCb.crntSiInfo.sib1Info.nPrb = cell->siCb.newSiInfo.sib1Info.nPrb;
27179 cell->siCb.crntSiInfo.sib1Info.msgLen =
27180 cell->siCb.newSiInfo.sib1Info.msgLen;
27181 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_UPD;
27184 /*Check whether SIs have been updated*/
27185 if(cell->siCb.siBitMask & RGSCH_SI_SI_UPD)
27189 /*Check if SI cfg have been modified And Check if numSi have
27190 been changed, if yes then we would need to update the
27191 pointers for all the SIs */
27192 if((cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD) &&
27193 (cell->siCfg.numSi != cell->siCb.newSiCfg.numSi))
27195 for(idx = 0;idx < cell->siCb.newSiCfg.numSi;idx++)
27197 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
27198 cell->siCb.newSiInfo.siInfo[idx].si);
27199 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
27200 cell->siCb.siArray[idx].isWarningSi = FALSE;
27202 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
27203 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
27204 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
27207 /*If numSi have been reduced then we need to free the
27208 pointers at the indexes in crntSiInfo which haven't
27209 been exercised. If numSi has increased then nothing
27210 additional is requires as above handling has taken
27212 if(cell->siCfg.numSi > cell->siCb.newSiCfg.numSi)
27214 for(idx = cell->siCb.newSiCfg.numSi;
27215 idx < cell->siCfg.numSi;idx++)
27217 RGSCH_FREE_MSG(cell->siCb.crntSiInfo.siInfo[idx].si);
27218 cell->siCb.siArray[idx].si = NULLP;
27224 /*numSi has not been updated, we just need to update the
27225 pointers for the SIs which are set to NON NULLP */
27226 /*ccpu00118260 - Correct Update of SIB2 */
27227 for(idx = 0;idx < cell->siCfg.numSi;idx++)
27229 if(NULLP != cell->siCb.newSiInfo.siInfo[idx].si)
27231 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
27232 cell->siCb.newSiInfo.siInfo[idx].si);
27234 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
27235 cell->siCb.siArray[idx].isWarningSi = FALSE;
27236 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
27237 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
27238 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
27242 cell->siCb.siBitMask &= ~RGSCH_SI_SI_UPD;
27245 /*Check whether SI cfg have been updated*/
27246 if(cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD)
27248 cell->siCfg = cell->siCb.newSiCfg;
27249 cell->siCb.siBitMask &= ~RGSCH_SI_SICFG_UPD;
27257 * @brief This function implements the selection of the SI
27258 * that is to be scheduled.
27262 * Function: rgSCHSelectSi
27263 * Purpose: This function implements the selection of SI
27264 * that is to be scheduled.
27266 * Invoked by: Scheduler
27268 * @param[in] RgSchCellCb* cell
/* Selects the SI (by window id within the SI set) to transmit in the
 * upcoming SI window and initializes the SI transmission context (siId,
 * remaining retx count, window timing). Runs the selection only at the
 * start of a new SI window; within a window it just counts down. */
27273 static Void rgSCHSelectSi
27278 CmLteTimingInfo crntTmInfo;
27285 crntTmInfo = cell->crntTime;
27286 #ifdef LTEMAC_HDFDD
27287 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27288 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27289 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27291 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA);
27294 siWinSize = cell->siCfg.siWinSize;
27296 /* Select SI only once at the starting of the new window */
27297 if(cell->siCb.inWindow)
27299 if ((crntTmInfo.sfn % cell->siCfg.minPeriodicity) == 0 &&
27300 crntTmInfo.slot == 0)
27302 /* Reinit inWindow at the beginning of every SI window */
27303 cell->siCb.inWindow = siWinSize - 1;
27307 cell->siCb.inWindow--;
27311 else /* New window. Re-init the winSize counter with the window length */
/* Free an untransmitted warning-SI PDU left over from the previous window */
27313 if((cell->siCb.siArray[cell->siCb.siCtx.siId - 1].isWarningSi == TRUE)&&
27314 (cell->siCb.siCtx.retxCntRem != 0))
27316 rgSCHUtlFreeWarningSiPdu(cell);
27317 cell->siCb.siCtx.warningSiFlag = FALSE;
27320 cell->siCb.inWindow = siWinSize - 1;
/* x: id of the SI set containing the current time */
27323 x = rgSCHCmnGetSiSetId(crntTmInfo.sfn, crntTmInfo.slot,
27324 cell->siCfg.minPeriodicity);
27326 /* Window Id within a SI set. This window Id directly maps to a
27328 windowId = (((crntTmInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) +
27329 crntTmInfo.slot) - (x * (cell->siCfg.minPeriodicity * 10)))
27332 if(windowId >= RGR_MAX_NUM_SI)
27335 /* Update the siCtx if there is a valid SI and its periodicity
27337 if (NULLP != cell->siCb.siArray[windowId].si)
27339 /* Warning SI Periodicity is same as SIB2 Periodicity */
27340 if(((cell->siCb.siArray[windowId].isWarningSi == FALSE) &&
27341 (x % (cell->siCfg.siPeriodicity[windowId]
27342 /cell->siCfg.minPeriodicity) == 0)) ||
27343 ((cell->siCb.siArray[windowId].isWarningSi == TRUE) &&
27344 (x % (cell->siCfg.siPeriodicity[0]
27345 /cell->siCfg.minPeriodicity) == 0)))
27347 cell->siCb.siCtx.siId = windowId+1;
27348 cell->siCb.siCtx.retxCntRem = cell->siCfg.retxCnt;
27349 cell->siCb.siCtx.warningSiFlag = cell->siCb.siArray[windowId].
27351 cell->siCb.siCtx.timeToTx.sfn = crntTmInfo.sfn;
27352 cell->siCb.siCtx.timeToTx.slot = crntTmInfo.slot;
27354 RG_SCH_ADD_TO_CRNT_TIME(cell->siCb.siCtx.timeToTx,
27355 cell->siCb.siCtx.maxTimeToTx, (siWinSize - 1))
27359 {/* Update the siCtx with invalid si Id */
27360 cell->siCb.siCtx.siId = 0;
27368 * @brief This function implements scheduler DL allocation for
27373 * Function: rgSCHDlSiSched
27374 * Purpose: This function implements scheduler for DL allocation
27377 * Invoked by: Scheduler
27379 * @param[in] RgSchCellCb* cell
/* DL scheduler for system information: schedules MIB (BCH) at the MIB
 * occasion — patching the SFN bits into the MIB payload — then SIB1 at its
 * repetition occasions, else the SI selected in the current SI context,
 * computing the RB demand and filling allocInfo->bcchAlloc. */
27384 static Void rgSCHDlSiSched
27387 RgSchCmnDlRbAllocInfo *allocInfo,
27388 RgInfSfAlloc *subfrmAlloc
27391 CmLteTimingInfo crntTimInfo;
27397 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27398 /* DwPTS Scheduling Changes Start */
27401 uint8_t cfi = cellDl->currCfi;
27403 /* DwPTS Scheduling Changes End */
27407 crntTimInfo = cell->crntTime;
27408 #ifdef LTEMAC_HDFDD
27409 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27410 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27411 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27413 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA);
27416 /* Compute the subframe for which allocation is being made.
27417 Essentially, we need pointer to the dl frame for this subframe */
27418 sf = rgSCHUtlSubFrmGet(cell, crntTimInfo);
27420 /*Check if scheduling of MIB is required */
27422 /* since we are adding the MIB repetition logic for EMTC UEs, checking if
27423 * emtcEnabled or not, If enabled MIB would be repeated as part of EMTC
27424 * feature, otherwise scheduling at (n,0) */
27425 if(0 == cell->emtcEnable)
27428 if((crntTimInfo.sfn % RGSCH_MIB_PERIODICITY == 0)
27429 && (RGSCH_MIB_TX_SF_NUM == crntTimInfo.slot))
27432 uint8_t sfnOctet, mibOct2 = 0;
27433 uint8_t mibOct1 = 0;
27434 /*If MIB has not been yet setup by Application, return*/
27435 if(NULLP == cell->siCb.crntSiInfo.mib)
27438 SFndLenMsg(cell->siCb.crntSiInfo.mib, &mibLen);
27439 sf->bch.tbSize = mibLen;
27440 /*Fill the interface information */
27441 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, NULLD, NULLD);
27443 /*Set the bits of MIB to reflect SFN */
27444 /*First get the most significant 8 bits of SFN */
27445 sfnOctet = (uint8_t)(crntTimInfo.sfn >> 2);
27446 /*Get the first two octets of MIB, and then update them
27447 using the SFN octet value obtained above.*/
27448 if(ROK != SExamMsg((Data *)(&mibOct1),
27449 cell->siCb.crntSiInfo.mib, 0))
27452 if(ROK != SExamMsg((Data *)(&mibOct2),
27453 cell->siCb.crntSiInfo.mib, 1))
27456 /* ccpu00114572- Fix for improper way of MIB Octet setting for SFN */
27457 mibOct1 = (mibOct1 & 0xFC) | (sfnOctet >> 6);
27458 mibOct2 = (mibOct2 & 0x03) | (sfnOctet << 2);
27459 /* ccpu00114572- Fix ends*/
27461 /*Now, replace the two octets in MIB */
27462 if(ROK != SRepMsg((Data)(mibOct1),
27463 cell->siCb.crntSiInfo.mib, 0))
27466 if(ROK != SRepMsg((Data)(mibOct2),
27467 cell->siCb.crntSiInfo.mib, 1))
27470 /*Copy the MIB msg buff into interface buffer */
27471 SCpyMsgMsg(cell->siCb.crntSiInfo.mib,
27472 rgSchCb[cell->instIdx].rgSchInit.region,
27473 rgSchCb[cell->instIdx].rgSchInit.pool,
27474 &subfrmAlloc->cmnLcInfo.bchInfo.pdu);
27475 /* Added Dl TB count for MIB message transmission
27476 * This counter is incremented 4 times to consider
27477 * the retransmission at the PHY level on PBCH channel*/
27479 cell->dlUlTbCnt.tbTransDlTotalCnt += RG_SCH_MIB_CNT;
27486 allocInfo->bcchAlloc.schdFirst = FALSE;
27487 /*Check if scheduling of SIB1 is required.
27488 Check of (crntTimInfo.sfn % RGSCH_SIB1_PERIODICITY == 0)
27489 is not required here since the below check takes care
27490 of SFNs applicable for this one too.*/
27491 if((crntTimInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
27492 && (RGSCH_SIB1_TX_SF_NUM == crntTimInfo.slot))
27494 /*If SIB1 has not been yet setup by Application, return*/
27495 if(NULLP == (cell->siCb.crntSiInfo.sib1Info.sib1))
27500 allocInfo->bcchAlloc.schdFirst = TRUE;
27501 mcs = cell->siCb.crntSiInfo.sib1Info.mcs;
27502 nPrb = cell->siCb.crntSiInfo.sib1Info.nPrb;
27503 msgLen = cell->siCb.crntSiInfo.sib1Info.msgLen;
27507 /*Check if scheduling of SI can be performed.*/
27508 Bool invalid = FALSE;
27510 if(cell->siCb.siCtx.siId == 0)
27513 /*Check if the Si-Window for the current Si-Context is completed*/
27514 invalid = rgSCHCmnChkPastWin(crntTimInfo, cell->siCb.siCtx.maxTimeToTx);
27517 /* LTE_ADV_FLAG_REMOVED_START */
27518 if(cell->siCb.siCtx.retxCntRem)
27520 DU_LOG("\nERROR --> SCH : rgSCHDlSiSched(): SI not scheduled and window expired");
27522 /* LTE_ADV_FLAG_REMOVED_END */
27523 if(cell->siCb.siCtx.warningSiFlag == TRUE)
27525 rgSCHUtlFreeWarningSiPdu(cell);
27526 cell->siCb.siCtx.warningSiFlag = FALSE;
27531 /*Check the timinginfo of the current SI-Context to see if its
27532 transmission can be scheduled. */
27533 if(FALSE == (rgSCHCmnChkInWin(crntTimInfo,
27534 cell->siCb.siCtx.timeToTx,
27535 cell->siCb.siCtx.maxTimeToTx)))
27540 /*Check if retransmission count has become 0*/
27541 if(0 == cell->siCb.siCtx.retxCntRem)
27546 /* LTE_ADV_FLAG_REMOVED_START */
27547 /* Check if ABS is enabled/configured */
27548 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
27550 /* The pattern type is RGR_ABS_MUTE, then eNB need to blank the subframe */
27551 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
27553 /* Determine next scheduling subframe is ABS or not */
27554 if(RG_SCH_ABS_ENABLED_ABS_SF == (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern
27555 [((crntTimInfo.sfn*RGSCH_NUM_SUB_FRAMES) + crntTimInfo.slot) % RGR_ABS_PATTERN_LEN]))
27557 /* Skip the SI scheduling to next tti */
27562 /* LTE_ADV_FLAG_REMOVED_END */
27564 /*Schedule the transmission of the current SI-Context */
27565 /*Find out the message length for the SI message */
27566 /* warningSiFlag is to differentiate between Warning SI
27568 if((rgSCHUtlGetMcsAndNPrb(cell, &nPrb, &mcs, &msgLen)) != ROK)
27573 cell->siCb.siCtx.i = RGSCH_CALC_SF_DIFF(crntTimInfo,
27574 cell->siCb.siCtx.timeToTx);
27578 /*Get the number of rb required */
27579 /*rgSCHCmnClcRbAllocForFxdTb(cell, msgLen, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: size RBs by walking the lowest TBS row; otherwise use
 * the precomputed bits-per-RB estimate */
27580 if(cellDl->bitsPerRb==0)
27582 while ((rgTbSzTbl[0][0][rb]) < (uint32_t) (msgLen*8))
27590 rb = RGSCH_CEIL((msgLen*8), cellDl->bitsPerRb);
27592 /* DwPTS Scheduling Changes Start */
27594 if (sf->sfType == RG_SCH_SPL_SF_DATA)
27596 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
27598 /* Calculate the less RE's because of DwPTS */
27599 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
27601 /* Increase number of RBs in Spl SF to compensate for lost REs */
27602 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
27605 /* DwPTS Scheduling Changes End */
27606 /*ccpu00115595- end*/
27607 /* Additional check to see if required RBs
27608 * exceeds the available */
27609 if (rb > sf->bw - sf->bwAssigned)
27611 DU_LOG("\nERROR --> SCH : rgSCHDlSiSched(): "
27612 "BW allocation failed CRNTI:%d",RGSCH_SI_RNTI);
27616 /* Update the subframe Allocated BW field */
27617 sf->bwAssigned = sf->bwAssigned + rb;
27619 /*Fill the parameters in allocInfo */
27620 allocInfo->bcchAlloc.rnti = RGSCH_SI_RNTI;
27621 allocInfo->bcchAlloc.dlSf = sf;
27622 allocInfo->bcchAlloc.rbsReq = rb;
27623 /*ccpu00116710- MCS is not getting assigned */
27624 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
27626 /* ccpu00117510 - ADD - Assignment of nPrb and other information */
27627 allocInfo->bcchAlloc.nPrb = nPrb;
27628 allocInfo->bcchAlloc.tbInfo[0].bytesReq = msgLen;
27629 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
27632 #endif /*RGR_SI_SCH*/
27635 /* ccpu00117452 - MOD - Changed macro name from
27636 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
27637 #ifdef RGR_CQI_REPT
27639 * @brief This function Updates the DL CQI for the UE.
27643 * Function: rgSCHCmnUeDlPwrCtColltCqiRept
27644 * Purpose: Manages PUSH N CQI reporting
27645 * Step 1: Store the CQI in collation array
27646 * Step 2: Increment the tracking count
27647 * Step 3: Check if it is time to send the report
27648 * Step 4: if yes, Send StaInd to RRM
27649 * Step 4.1: Fill StaInd for sending collated N CQI reports
27650 * Step 4.2: Call utility function (rgSCHUtlRgrStaInd) to send rpts to RRM
27651 * Step 4.2.1: If sending was not successful, return RFAILED
27652 * Step 4.2.2: If sending was successful, return ROK
27653 * Step 5: If no, return
27654 * Invoked by: rgSCHCmnDlCqiInd
27656 * @param[in] RgSchCellCb *cell
27657 * @param[in] RgSchUeCb *ue
27658 * @param[in] RgrUeCqiRept *ueCqiRpt
/* Collates a received DL CQI report for the UE and, once the configured
 * number of reports (PUSH-N) has been gathered, allocates a StaInd and
 * sends the collated reports to RRM. */
27662 static S16 rgSCHCmnUeDlPwrCtColltCqiRept
27666 RgrUeCqiRept *ueCqiRpt
27669 uint8_t *cqiCount = NULLP;
27671 RgrStaIndInfo *staInfo = NULLP;
27674 /* Step 1: Store the CQI in collation array */
27675 /* Step 2: Increment the tracking count */
27676 cqiCount = &(ue->schCqiInfo.cqiCount);
27677 ue->schCqiInfo.cqiRept[(*cqiCount)++] =
27681 /* Step 3: Check if it is time to send the report */
27682 if(RG_SCH_CQIR_IS_TIMTOSEND_CQIREPT(ue))
27684 /* Step 4: if yes, Send StaInd to RRM */
27685 retVal = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&staInfo,
27686 sizeof(RgrStaIndInfo));
27689 DU_LOG("\nERROR --> SCH : Could not "
27690 "allocate memory for sending StaInd CRNTI:%d",ue->ueId);
27694 /* Step 4.1: Fill StaInd for sending collated N CQI reports */
27697 uint32_t gCqiReptToAppCount;
27698 gCqiReptToAppCount++;
27703 retVal = rgSCHUtlFillSndStaInd(cell, ue, staInfo,
27704 ue->cqiReptCfgInfo.numColltdCqiRept);
27710 } /* End of rgSCHCmnUeDlPwrCtColltCqiRept */
27712 #endif /* End of RGR_CQI_REPT */
27715 * @brief This function checks for the retransmisson
27716 * for a DTX scenario.
27723 * @param[in] RgSchCellCb *cell
27724 * @param[in] RgSchUeCb *ue
/* DTX check for retransmission: when TB0's feedback is DTX, clears the
 * caller-provided retransmission-allowed flag. */
27729 Void rgSCHCmnChkRetxAllowDtx
27733 RgSchDlHqProcCb *proc,
27741 if ((proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX))
27743 *reTxAllwd = FALSE;
27750 * @brief API for calculating the SI Set Id
27754 * Function: rgSCHCmnGetSiSetId
27756 * This API is used for calculating the SI Set Id, as shown below
27758 * siSetId = 0 siSetId = 1
27759 * |******************|******************|---------------->
27760 * (0,0) (8,0) (16,0) (SFN, SF)
27763 * @param[in] uint16_t sfn
27764 * @param[in] uint8_t sf
27765 * @return uint16_t siSetId
/* Maps an (SFN, subframe) pair to its SI set id: absolute subframe number
 * divided by the minimum SI periodicity expressed in subframes. */
27767 uint16_t rgSCHCmnGetSiSetId
27771 uint16_t minPeriodicity
27774 /* 80 is the minimum SI periodicity in sf. Also
27775 * all other SI periodicities are multiples of 80 */
/* minPeriodicity is in frames; * 10 converts to subframes */
27776 return (((sfn * RGSCH_NUM_SUB_FRAMES_5G) + sf) / (minPeriodicity * 10));
27780 * @brief API for calculating the DwPts Rb, Itbs and tbSz
27784 * Function: rgSCHCmnCalcDwPtsTbSz
27786 * @param[in] RgSchCellCb *cell
27787 * @param[in] uint32_t bo
27788 * @param[in/out] uint8_t *rb
27789 * @param[in/out] uint8_t *iTbs
27790 * @param[in] uint8_t lyr
27791 * @param[in] uint8_t cfi
27792 * @return uint32_t tbSz
/* Computes the DwPTS (special subframe) RB count, adjusted iTbs and TB
 * size for one codeword: converts the normal-SF RE budget into DwPTS RBs,
 * applies the static iTbs delta, then grows the RB count until the TB
 * size covers the BO (capped at the per-UE max DL bandwidth). */
27794 static uint32_t rgSCHCmnCalcDwPtsTbSz
27805 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27806 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
27807 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
27810 /* DwPts Rb cannot exceed the cell Bw */
27811 numDwPtsRb = RGSCH_MIN(numDwPtsRb, cellDl->maxDlBwPerUe);
27813 /* Adjust the iTbs for optimum usage of the DwPts region.
27814 * Using the same iTbs adjustment will not work for all
27815 * special subframe configurations and iTbs levels. Hence use the
27816 * static iTbs Delta table for adjusting the iTbs */
27817 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs);
/* TBS lookup uses 3/4 of the DwPTS RBs (effective DwPTS capacity) */
27821 while(rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1] < bo*8 &&
27822 numDwPtsRb < cellDl->maxDlBwPerUe)
27827 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
27831 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
27839 * @brief API for calculating the DwPts Rb, Itbs and tbSz
27843 * Function: rgSCHCmnCalcDwPtsTbSz2Cw
27845 * @param[in] RgSchCellCb *cell
27846 * @param[in] uint32_t bo
27847 * @param[in/out] uint8_t *rb
27848 * @param[in] uint8_t maxRb
27849 * @param[in/out] uint8_t *iTbs1
27850 * @param[in/out] uint8_t *iTbs2
27851 * @param[in] uint8_t lyr1
27852 * @param[in] uint8_t lyr2
27853 * @return[in/out] uint32_t *tb1Sz
27854 * @return[in/out] uint32_t *tb2Sz
27855 * @param[in] uint8_t cfi
/* Two-codeword variant of the DwPTS TB sizing: converts the normal-SF RE
 * budget into DwPTS RBs (capped at maxRb), adjusts both codewords' iTbs
 * via the static delta table, grows the RB count until the combined TB
 * sizes cover the BO, and returns both TB sizes in bytes. */
27857 static Void rgSCHCmnCalcDwPtsTbSz2Cw
27872 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27873 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
27874 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
27877 /* DwPts Rb cannot exceed the cell Bw */
27878 numDwPtsRb = RGSCH_MIN(numDwPtsRb, maxRb);
27880 /* Adjust the iTbs for optimum usage of the DwPts region.
27881 * Using the same iTbs adjustment will not work for all
27882 * special subframe configurations and iTbs levels. Hence use the
27883 * static iTbs Delta table for adjusting the iTbs */
27884 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs1);
27885 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs2);
/* TBS lookups use 3/4 of the DwPTS RBs (effective DwPTS capacity) */
27887 while((rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1] +
27888 rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1])< bo*8 &&
27889 numDwPtsRb < maxRb)
27894 *tb1Sz = rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
27895 *tb2Sz = rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
27905 * @brief Updates the GBR LCGs when datInd is received from MAC
27909 * Function: rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
27910 * Purpose: This function updates the GBR LCGs
27911 * when datInd is received from MAC.
27915 * @param[in] RgSchCellCb *cell
27916 * @param[in] RgSchUeCb *ue
27917 * @param[in] RgInfUeDatInd *datInd
27920 Void rgSCHCmnUpdUeDataIndLcg
/* Updates per-LCG (logical channel group) uplink accounting when a data
 * indication arrives from MAC: consumes effective GBR/MBR budget for GBR
 * LCGs, or AMBR budget for non-GBR LCGs (lcgId != 0), and reduces the
 * reported buffer status by the bytes received. Finally forwards the
 * indication to the UL scheduler (EMTC-specific or default). */
27924 RgInfUeDatInd *datInd
27928 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* NOTE(review): loop bound is RGINF_MAX_LCG_PER_UE - 1, so the last
 * lcgInfo[] entry is never examined -- confirm this is intentional. */
27931 for (idx = 0; (idx < RGINF_MAX_LCG_PER_UE - 1); idx++)
/* Only process entries that actually carried data. */
27933 if (datInd->lcgInfo[idx].bytesRcvd != 0)
27935 uint8_t lcgId = datInd->lcgInfo[idx].lcgId;
27936 uint32_t bytesRcvd = datInd->lcgInfo[idx].bytesRcvd;
/* Skip LCGs that are not configured for this UE. */
27938 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
27940 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
27941 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* Received bytes first drain effGbr; any excess drains effDeltaMbr.
 * All subtractions are clamped at zero to avoid unsigned underflow. */
27943 if(bytesRcvd > cmnLcg->effGbr)
27945 bytesRcvd -= cmnLcg->effGbr;
27946 cmnLcg->effDeltaMbr = (cmnLcg->effDeltaMbr > bytesRcvd) ? \
27947 (cmnLcg->effDeltaMbr - bytesRcvd) : (0);
27948 cmnLcg->effGbr = 0;
27952 cmnLcg->effGbr -= bytesRcvd;
27954 /* To keep BS updated with the amount of data received for the GBR */
27955 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
27956 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
27957 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr+cmnLcg->effDeltaMbr);
/* Non-GBR LCG (excluding LCG 0): drain the UE-wide AMBR budget and the
 * aggregated non-GBR buffer-status counters, again clamped at zero. */
27959 else if(lcgId != 0)
27961 ue->ul.effAmbr = (ue->ul.effAmbr > datInd->lcgInfo[idx].bytesRcvd) ? \
27962 (ue->ul.effAmbr - datInd->lcgInfo[idx].bytesRcvd) : (0);
27963 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
27964 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
27965 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
27966 ue->ul.nonGbrLcgBs = (ue->ul.nonGbrLcgBs > datInd->lcgInfo[idx].bytesRcvd) ? \
27967 (ue->ul.nonGbrLcgBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
27969 ue->ul.nonLcg0Bs = (ue->ul.nonLcg0Bs > datInd->lcgInfo[idx].bytesRcvd) ? \
27970 (ue->ul.nonLcg0Bs - datInd->lcgInfo[idx].bytesRcvd) : (0);
/* Hand the indication to the appropriate UL scheduler hook; failures are
 * logged but deliberately not propagated (best-effort update). */
27979 if(TRUE == ue->isEmtcUe)
27981 if (cellSch->apisEmtcUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
27983 DU_LOG("\nERROR --> SCH : rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure");
27990 if (cellSch->apisUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
27992 DU_LOG("\nERROR --> SCH : rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure");
27998 /** @brief This function initializes DL allocation lists and prepares
28003 * Function: rgSCHCmnInitRbAlloc
28005 * @param [in] RgSchCellCb *cell
28010 static Void rgSCHCmnInitRbAlloc
/* Prepares the DL allocation bookkeeping for the scheduling subframe:
 * resets the RB-allocation info, fetches the DL subframe for the current
 * scheduling time, resets per-beam 5GTF VRBG counters, records the
 * subframe in the dedicated/Msg4/CCCH-SDU allocation slots, evaluates
 * the ABS (almost-blank subframe) pattern, and folds in SPS allocations. */
28015 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
28016 CmLteTimingInfo frm;
28021 /* Initialize the DL RB allocation info structure. */
28022 rgSCHCmnInitDlRbAllocInfo(&cellSch->allocInfo);
28024 frm = cellSch->dl.time;
28026 dlSf = rgSCHUtlSubFrmGet(cell, frm);
/* 5GTF: per-TTI UE grouping limits taken from cell configuration. */
28028 dlSf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
28029 dlSf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
/* Reset per-beam VRBG accounting for this subframe. */
28030 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
28032 dlSf->sfBeamInfo[idx].totVrbgAllocated = 0;
28033 dlSf->sfBeamInfo[idx].totVrbgRequired = 0;
28034 dlSf->sfBeamInfo[idx].vrbgStart = 0;
/* Start the subframe with the full per-SF UE budget. */
28037 dlSf->remUeCnt = cellSch->dl.maxUePerDlSf;
28038 /* Updating the Subframe information in RBAllocInfo */
28039 cellSch->allocInfo.dedAlloc.dedDlSf = dlSf;
28040 cellSch->allocInfo.msg4Alloc.msg4DlSf = dlSf;
28042 /* LTE_ADV_FLAG_REMOVED_START */
28043 /* Determine next scheduling subframe is ABS or not */
28044 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
28046 cell->lteAdvCb.absPatternDlIdx =
28047 ((frm.sfn*RGSCH_NUM_SUB_FRAMES_5G) + frm.slot) % RGR_ABS_PATTERN_LEN;
28048 cell->lteAdvCb.absDlSfInfo = (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern[
28049 cell->lteAdvCb.absPatternDlIdx]);
28054 cell->lteAdvCb.absDlSfInfo = RG_SCH_ABS_DISABLED;
28056 /* LTE_ADV_FLAG_REMOVED_END */
28059 cellSch->allocInfo.ccchSduAlloc.ccchSduDlSf = dlSf;
28062 /* Update subframe-wide allocation information with SPS allocation */
28063 rgSCHCmnSpsDlUpdDlSfAllocWithSps(cell, frm, dlSf);
28072 * @brief Sends the transmission mode (TM) change indication to RRM.
28077 * Function: rgSCHCmnSendTxModeInd(cell, ueUl, newTxMode)
28078 * Purpose: This function sends the TX mode Change
28079 * indication to RRM
28084 * @param[in] RgSchCellCb *cell
28085 * @param[in] RgSchUeCb *ue
28086 * @param[in] uint8_t newTxMode
28089 static Void rgSCHCmnSendTxModeInd
/* Sends a transmission-mode (TM) change indication for a UE to RRM via the
 * RGM SAP, then resets the step-up/step-down hysteresis counters and the
 * link-adaptation iTbs delta so a fresh evaluation window begins. */
28096 RgmTransModeInd *txModeChgInd;
28097 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Only indicate when a TX-mode reconfiguration is not already being
 * forced on this UE. */
28100 if(!(ueDl->mimoInfo.forceTD & RG_SCH_CMN_TD_TXMODE_RECFG))
/* Allocate the indication message from the SAP's region/pool; on
 * allocation failure the indication is silently dropped. */
28103 if(SGetSBuf(cell->rgmSap->sapCfg.sapPst.region,
28104 cell->rgmSap->sapCfg.sapPst.pool, (Data**)&txModeChgInd,
28105 sizeof(RgmTransModeInd)) != ROK)
28109 RG_SCH_FILL_RGM_TRANSMODE_IND(ue->ueId, cell->cellId, newTxMode, txModeChgInd);
28110 RgUiRgmChangeTransModeInd(&(cell->rgmSap->sapCfg.sapPst),
28111 cell->rgmSap->sapCfg.suId, txModeChgInd);
/* Restart mode-change hysteresis from scratch. */
28114 ue->mimoInfo.txModUpChgFactor = 0;
28115 ue->mimoInfo.txModDownChgFactor = 0;
28116 ueDl->laCb[0].deltaiTbs = 0;
28122 * @brief Checks & updates the TM mode change threshold based on CQI iTbs and
28127 * Function: rgSchCheckAndTriggerModeChange(cell, ueUl, iTbsNew)
28128 * Purpose: This function update and check for threashold for TM mode
28133 * @param[in] RgSchCellCb *cell
28134 * @param[in] RgSchUeCb *ue
28135 * @param[in] uint8_t iTbs
28138 Void rgSchCheckAndTriggerModeChange
/* Maintains hysteresis counters comparing the UE's reported iTbs with the
 * previous iTbs and triggers a TM4 -> TM3 step-down or TM3 -> TM4 step-up
 * indication (via rgSCHCmnSendTxModeInd) once the corresponding counter
 * crosses its threshold. Counters are floored at the negated threshold
 * so one good/bad streak cannot bank unlimited credit. */
28142 uint8_t reportediTbs,
28147 RgrTxMode txMode; /*!< UE's Transmission Mode */
28148 RgrTxMode modTxMode; /*!< UE's Transmission Mode */
28151 txMode = ue->mimoInfo.txMode;
28153 /* Check for Step down */
28154 /* Step down only when TM4 is configured. */
28155 if(RGR_UE_TM_4 == txMode)
/* Reported iTbs sufficiently above the previous iTbs counts toward
 * stepping down; otherwise the counter decays. */
28157 if((previTbs <= reportediTbs) && ((reportediTbs - previTbs) >= RG_SCH_MODE_CHNG_STEPDOWN_CHECK_FACTOR))
28159 ue->mimoInfo.txModDownChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
28163 ue->mimoInfo.txModDownChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the counter at -(threshold). */
28166 ue->mimoInfo.txModDownChgFactor =
28167 RGSCH_MAX(ue->mimoInfo.txModDownChgFactor, -(RG_SCH_MODE_CHNG_STEPDOWN_THRSHD));
28169 if(ue->mimoInfo.txModDownChgFactor >= RG_SCH_MODE_CHNG_STEPDOWN_THRSHD)
28171 /* Trigger Mode step down */
28172 modTxMode = RGR_UE_TM_3;
28173 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
28177 /* Check for step up */
28178 /* Step Up only when TM3 is configured, Max possible Mode is TM4*/
28179 if(RGR_UE_TM_3 == txMode)
/* Degrading iTbs, or already pinned at the maximum iTbs, counts toward
 * stepping up; otherwise the counter decays. */
28181 if((previTbs > reportediTbs) || (maxiTbs == previTbs))
28183 ue->mimoInfo.txModUpChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
28187 ue->mimoInfo.txModUpChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the counter at -(threshold). */
28190 ue->mimoInfo.txModUpChgFactor =
28191 RGSCH_MAX(ue->mimoInfo.txModUpChgFactor, -(RG_SCH_MODE_CHNG_STEPUP_THRSHD));
28193 /* Check if TM step up need to be triggered */
28194 if(ue->mimoInfo.txModUpChgFactor >= RG_SCH_MODE_CHNG_STEPUP_THRSHD)
28196 /* Trigger mode change */
28197 modTxMode = RGR_UE_TM_4;
28198 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
28207 * @brief Returns whether CSG UEs have downlink priority at the current time
28211 * Function: rgSCHCmnIsDlCsgPrio (cell)
28212 * Purpose: This function returns if csg UEs are
28213 * having priority at current time
28215 * Invoked by: Scheduler
28217 * @param[in] RgSchCellCb *cell
28218 * @param[in] RgSchUeCb *ue
28219 * @param[in] RgInfUeDatInd *datInd
28222 Bool rgSCHCmnIsDlCsgPrio(RgSchCellCb *cell)
/* Returns whether CSG (closed subscriber group) UEs currently have DL
 * priority. Only relevant for hybrid-access cells: for any other access
 * mode the check short-circuits. Otherwise CSG priority is based on the
 * percentage of DL PRBs already used by non-CSG UEs versus the configured
 * non-CSG minimum. */
28225 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
28227 /* Calculating the percentage resource allocated */
28228 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* NOTE(review): divides by cmnDlCell->totPrbCnt -- confirm it is
 * guaranteed non-zero whenever a hybrid-access cell reaches here. */
28234 if(((cmnDlCell->ncsgPrbCnt * 100) / cmnDlCell->totPrbCnt) < cell->minDlResNonCsg)
28246 * @brief Returns whether CSG UEs have uplink priority at the current time
28250 * Function: rgSCHCmnIsUlCsgPrio (cell)
28251 * Purpose: This function returns if csg UEs are
28252 * having priority at current time
28254 * Invoked by: Scheduler
28256 * @param[in] RgSchCellCb *cell
28257 * @param[in] RgSchUeCb *ue
28258 * @param[in] RgInfUeDatInd *datInd
28261 Bool rgSCHCmnIsUlCsgPrio(RgSchCellCb *cell)
/* UL counterpart of rgSCHCmnIsDlCsgPrio: returns whether CSG UEs currently
 * have UL priority. Only meaningful for hybrid-access cells; otherwise the
 * decision compares the non-CSG UL PRB usage percentage against the
 * configured non-CSG minimum. */
28263 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
28266 /* Calculating the percentage resource allocated */
28267 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* NOTE(review): divides by cmnUlCell->totPrbCnt -- confirm it is
 * guaranteed non-zero whenever a hybrid-access cell reaches here. */
28273 if (((cmnUlCell->ncsgPrbCnt * 100) /cmnUlCell->totPrbCnt) < cell->minUlResNonCsg)
28284 /** @brief DL pre-scheduling: runs the cell-specific pre-scheduler and orders cells by remaining UE count
28288 * Function: rgSchCmnPreDlSch
28290 * @param [in] Inst schInst;
28294 Void rgSchCmnPreDlSch
/* DL pre-scheduling across a list of cells: runs the scheduler-specific
 * pre-scheduling hook for cell[0] (when DL data is allowed and scheduling
 * is not stopped), then fills cellLst[] with the cells ordered by their
 * subframe's remaining-UE count (ascending) via an insertion sort, so that
 * cells with fewer remaining UEs are processed first. If the hook is
 * skipped, cellLst[] is simply a copy of cell[] in the original order. */
28296 RgSchCellCb **cell,
28298 RgSchCellCb **cellLst
28301 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell[0]);
/* Guard against an out-of-range cell count. */
28306 if(nCell > CM_LTE_MAX_CELLS)
28311 if (cell[0]->isDlDataAllwd && (cell[0]->stopDlSch == FALSE))
28313 /* Specific DL scheduler to perform UE scheduling */
28314 cellSch->apisDl->rgSCHDlPreSched(cell[0]);
28316 /* Rearranging the cell entries based on their remueCnt in SF.
28317 * cells will be processed in the order of number of ue scheduled
28319 for (idx = 0; idx < nCell; idx++)
28322 cellSch = RG_SCH_CMN_GET_CELL(cell[idx]);
28323 sf = cellSch->allocInfo.dedAlloc.dedDlSf;
/* Tentatively append, then find the insertion point among the already
 * placed cells and shift the tail right (insertion sort). */
28327 cellLst[idx] = cell[idx];
28331 for(j = 0; j < idx; j++)
28333 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cellLst[j]);
28334 RgSchDlSf *subfrm = cmnCell->allocInfo.dedAlloc.dedDlSf;
28336 if(sf->remUeCnt < subfrm->remUeCnt)
28339 for(k = idx; k > j; k--)
28341 cellLst[k] = cellLst[k-1];
28346 cellLst[j] = cell[idx];
/* Pre-scheduling skipped: preserve the caller's cell ordering. */
28351 for (idx = 0; idx < nCell; idx++)
28353 cellLst[idx] = cell[idx];
28359 /** @brief DL post-scheduling hook: invokes the cell-specific post-scheduler
28362 * Function: rgSchCmnPstDlSch
28364 * @param [in] Inst schInst;
28368 Void rgSchCmnPstDlSch(RgSchCellCb *cell)
/* DL post-scheduling hook for one cell: forwards to the scheduler-specific
 * post-scheduling routine, but only when DL data scheduling is allowed and
 * not currently stopped for the cell. */
28370 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
28373 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
28375 cellSch->apisDl->rgSCHDlPstSched(cell->instIdx);
28379 uint8_t rgSCHCmnCalcPcqiBitSz(RgSchUeCb *ueCb, uint8_t numTxAnt)
/* Computes the periodic CQI (PCQI) payload size in bits for a UE, based on
 * its configured periodic reporting mode, transmission mode (only TM3/TM4
 * use the RI-dependent branches), latest periodic RI value, and the number
 * of eNB TX antennas. Sizes follow the 36.213 periodic reporting modes
 * (1-0, 1-1, 2-0, 2-1). */
28381 uint8_t confRepMode;
28384 RgSchUePCqiCb *cqiCb = ueCb->nPCqiCb;
28387 confRepMode = cqiCb->cqiCfg.cqiSetup.prdModeEnum;
/* RI-based sizing applies only to TM3/TM4. */
28388 if((ueCb->mimoInfo.txMode != RGR_UE_TM_3) &&
28389 (ueCb->mimoInfo.txMode != RGR_UE_TM_4))
/* Latest periodic RI report drives the codeword-dependent sizes below. */
28395 ri = cqiCb->perRiVal;
28397 switch(confRepMode)
28399 case RGR_PRD_CQI_MOD10:
28405 case RGR_PRD_CQI_MOD11:
28418 else if(numTxAnt == 4)
28431 /* This is number of antenna case 1.
28432 * This is not applicable for Mode 1-1.
28433 * So setting it to invalid value */
28439 case RGR_PRD_CQI_MOD20:
/* Mode 2-0 subband report: 4-bit CQI plus the subband label bits. */
28447 pcqiSz = 4 + cqiCb->label;
28452 case RGR_PRD_CQI_MOD21:
28467 else if(numTxAnt == 4)
28480 /* This might be number of antenna case 1.
28481 * For mode 2-1 wideband case only antenna port 2 or 4 is supported.
28482 * So setting invalid value.*/
/* Mode 2-1 subband report: size depends on the reported rank. */
28490 pcqiSz = 4 + cqiCb->label;
28494 pcqiSz = 7 + cqiCb->label;
28507 /** @brief DL scheduler for SPS, and all other downlink data
28511 * Function: rgSCHCmnDlSch
28513 * @param [in] RgSchCellCb *cell
28518 Void rgSCHCmnDlSch(RgSchCellCb *cell)
/* Main per-TTI DL scheduling entry for a cell: bails out early when
 * dynamic TDD has already marked this DL control subframe as UL, then runs
 * the scheduler-specific new-data scheduling, the common RB allocator,
 * allocation finalization, PDCCH-order allocations (lowest preference),
 * and PUCCH group power control. */
28521 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
28523 RgSchDynTddCb *rgSchDynTddInfo = &(rgSchCb[cell->instIdx].rgSchDynTdd);
28524 uint16_t dlCntrlSfIdx;
28528 dlSf = rgSCHUtlSubFrmGet(cell, cellSch->dl.time);
/* Dynamic TDD: if the subframe at DL-delta ahead is already marked UL,
 * count the event (cell 1 only) and log it instead of scheduling DL. */
28530 if (rgSchDynTddInfo->isDynTddEnbld)
28532 RG_SCH_DYN_TDD_GET_SFIDX(dlCntrlSfIdx, rgSchDynTddInfo->crntDTddSfIdx,
28533 RG_SCH_CMN_DL_DELTA);
28534 if(RG_SCH_DYNTDD_DLC_ULD == rgSchDynTddInfo->sfInfo[dlCntrlSfIdx].sfType)
28536 if(1 == cell->cellId)
28538 ul5gtfsidDlAlreadyMarkUl++;
28540 DU_LOG("\nINFO --> SCH : ul5gtfsidDlAlreadyMarkUl: %d, [sfn:sf] [%04d:%02d]\n",
28541 ul5gtfsidDlAlreadyMarkUl, cellSch->dl.time.sfn,
28542 cellSch->dl.time.slot);
28550 /* Specific DL scheduler to perform UE scheduling */
28551 cellSch->apisDl->rgSCHDlNewSched(cell, &cellSch->allocInfo);
28552 /* LTE_ADV_FLAG_REMOVED_END */
28554 /* call common allocator for RB Allocation */
28555 rgSCHCmnDlRbAlloc(cell, &cellSch->allocInfo);
28557 /* Finalize the allocations: requested versus actually allocated */
28558 rgSCHCmnDlAllocFnlz(cell);
28560 /* Perform Pdcch allocations for PDCCH Order Q.
28561 * As of now, giving this the least preference.
28562 * This func call could be moved above other allocations
28564 rgSCHCmnGenPdcchOrder(cell, dlSf);
28566 /* Do group power control for PUCCH */
28567 rgSCHCmnGrpPwrCntrlPucch(cell, dlSf);
28572 /**********************************************************************
28575 **********************************************************************/