1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /************************************************************************
Desc: C source code for entry point functions
29 **********************************************************************/
31 /** @file rg_sch_cmn.c
32 @brief This file implements the schedulers main access to MAC layer code.
36 /* header include files -- defines (.h) */
37 #include "common_def.h"
43 #include "rg_sch_err.h"
44 #include "rg_sch_inf.h"
46 #include "rg_sch_cmn.h"
48 /* header/extern include files (.x) */
49 #include "tfu.x" /* TFU types */
50 #include "lrg.x" /* layer management typedefs for MAC */
51 #include "rgr.x" /* layer management typedefs for MAC */
52 #include "rgm.x" /* layer management typedefs for MAC */
53 #include "rg_sch_inf.x" /* typedefs for Scheduler */
54 #include "rg_sch.x" /* typedefs for Scheduler */
55 #include "rg_sch_cmn.x" /* typedefs for Scheduler */
57 #include "lrg.x" /* Stats Structures */
58 #endif /* MAC_SCH_STATS */
61 #endif /* __cplusplus */
/* eMTC UL statistics counters. NOTE(review): incremented elsewhere in the
 * scheduler; names suggest SR indications via TOM and BSR timer expiries
 * respectively — confirm against the code paths that update them. */
uint32_t emtcStatsUlTomSrInd;
uint32_t emtcStatsUlBsrTmrTxp;
/* Absolute difference of two iTBS values.
 * CAUTION: function-like macro — each argument may be evaluated twice,
 * so do not pass expressions with side effects. */
#define RG_ITBS_DIFF(_x, _y) ((_x) > (_y) ? (_x) - (_y) : (_y) - (_x))
69 Void rgSCHSc1UlInit ARGS((RgUlSchdApis *apis));
70 #ifdef RG_PHASE2_SCHED
71 Void rgSCHRrUlInit ARGS((RgUlSchdApis *apis));
73 Void rgSCHEmtcHqInfoFree ARGS((RgSchCellCb *cell, RgSchDlHqProcCb *hqP));
74 Void rgSCHEmtcRrUlInit ARGS((RgUlSchdApis *apis));
75 Void rgSCHEmtcCmnDlInit ARGS((Void));
76 Void rgSCHEmtcCmnUlInit ARGS((Void));
77 Void rgSCHEmtcCmnUeNbReset ARGS((RgSchUeCb *ueCb));
78 RgSchCmnCqiToTbs *rgSchEmtcCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
80 Void rgSCHMaxciUlInit ARGS((RgUlSchdApis *apis));
81 Void rgSCHPfsUlInit ARGS((RgUlSchdApis *apis));
83 Void rgSCHSc1DlInit ARGS((RgDlSchdApis *apis));
84 #ifdef RG_PHASE2_SCHED
85 Void rgSCHRrDlInit ARGS((RgDlSchdApis *apis));
87 Void rgSCHEmtcRrDlInit ARGS((RgDlEmtcSchdApis *apis));
89 Void rgSCHMaxciDlInit ARGS((RgDlSchdApis *apis));
90 Void rgSCHPfsDlInit ARGS((RgDlSchdApis *apis));
92 Void rgSCHDlfsInit ARGS((RgDlfsSchdApis *apis));
96 Void rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl ARGS((RgSchCellCb *cell));
97 Void rgSCHCmnGetEmtcDciFrmtSizes ARGS((RgSchCellCb *cell));
98 Void rgSCHEmtcRrUlProcRmvFrmRetx ARGS((RgSchCellCb *cell, RgSchUlHqProcCb *proc));
99 S16 rgSCHCmnPrecompEmtcMsg3Vars
101 RgSchCmnUlCell *cellUl,
107 Void rgSCHEmtcCmnUeCcchSduDel
112 Void rgSCHEmtcRmvFrmTaLst
114 RgSchCmnDlCell *cellDl,
117 Void rgSCHEmtcInitTaLst
119 RgSchCmnDlCell *cellDl
121 Void rgSCHEmtcAddToTaLst
123 RgSchCmnDlCell *cellDl,
130 static Void rgSCHDlSiSched ARGS((RgSchCellCb *cell,
131 RgSchCmnDlRbAllocInfo *allocInfo,
132 RgInfSfAlloc *subfrmAlloc));
133 static Void rgSCHChkNUpdSiCfg ARGS((RgSchCellCb *cell));
134 static Void rgSCHSelectSi ARGS((RgSchCellCb *cell));
135 #endif /*RGR_SI_SCH*/
136 /* LTE_ADV_FLAG_REMOVED_START */
139 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
147 static S16 rgSCHCmnBuildRntpInfo (
155 static Void rgSCHCmnNonDlfsType0Alloc
159 RgSchDlRbAlloc *allocInfo,
162 static uint8_t rgSchCmnUlRvIdxToIMcsTbl[4] = {32, 30, 31, 29};
163 static Void rgSCHCmnUlNonadapRetx ARGS((
164 RgSchCmnUlCell *cellUl,
168 static Void rgSCHCmnUlSfRlsRetxProcs ARGS((
174 static S16 rgSCHCmnUlMdfyGrntForCqi ARGS((
181 uint32_t stepDownItbs,
185 static Void rgSCHCmnFillHqPPdcchDciFrmt1 ARGS((
187 RgSchDlRbAlloc *rbAllocInfo,
188 RgSchDlHqProcCb *hqP,
192 static Void rgSCHCmnFillHqPPdcchDciFrmt1A ARGS((
194 RgSchDlRbAlloc *rbAllocInfo,
195 RgSchDlHqProcCb *hqP,
199 static Void rgSCHCmnFillHqPPdcchDciFrmt1B ARGS((
201 RgSchDlRbAlloc *rbAllocInfo,
202 RgSchDlHqProcCb *hqP,
206 static Void rgSCHCmnFillHqPPdcchDciFrmt2 ARGS((
208 RgSchDlRbAlloc *rbAllocInfo,
209 RgSchDlHqProcCb *hqP,
213 static Void rgSCHCmnFillHqPPdcchDciFrmt2A ARGS((
215 RgSchDlRbAlloc *rbAllocInfo,
216 RgSchDlHqProcCb *hqP,
223 Void rgSCHCmnDlSpsSch
227 /* LTE_ADV_FLAG_REMOVED_END */
229 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc ARGS((
231 RgSchCmnDlRbAllocInfo *allocInfo
233 static Void rgSCHBcchPcchDlRbAlloc ARGS((
235 RgSchCmnDlRbAllocInfo *allocInfo
237 static Void rgSCHCmnDlBcchPcchAlloc ARGS((
241 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
244 TfuDlCqiPucch *pucchCqi,
245 RgrUeCqiRept *ueCqiRept,
247 Bool *is2ndCwCqiAvail
249 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
252 TfuDlCqiPusch *puschCqi,
253 RgrUeCqiRept *ueCqiRept,
255 Bool *is2ndCwCqiAvail
258 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
261 TfuDlCqiPucch *pucchCqi
263 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
266 TfuDlCqiPusch *puschCqi
269 /* ccpu00117452 - MOD - Changed macro name from
270 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
272 static S16 rgSCHCmnUeDlPwrCtColltCqiRept ARGS((
275 RgrUeCqiRept *ueCqiRept));
276 #endif /* End of RGR_CQI_REPT */
277 /* Fix: syed align multiple UEs to refresh at same time */
278 static Void rgSCHCmnGetRefreshPer ARGS((
282 static S16 rgSCHCmnApplyUeRefresh ARGS((
286 Void rgSCHCmnDlSetUeAllocLmtLa ARGS
291 static Void rgSCHCheckAndSetTxScheme ARGS
299 static uint32_t rgSCHCmnCalcDwPtsTbSz ARGS
309 static Void rgSCHCmnCalcDwPtsTbSz2Cw ARGS
325 static Void rgSCHCmnInitRbAlloc ARGS
331 #endif /* __cplusplus */
335 RgSchdApis rgSchCmnApis;
336 static RgUlSchdApis rgSchUlSchdTbl[RGSCH_NUM_SCHEDULERS];
337 static RgDlSchdApis rgSchDlSchdTbl[RGSCH_NUM_SCHEDULERS];
339 static RgUlSchdApis rgSchEmtcUlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
340 static RgDlEmtcSchdApis rgSchEmtcDlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
342 #ifdef RG_PHASE2_SCHED
343 static RgDlfsSchdApis rgSchDlfsSchdTbl[RGSCH_NUM_DLFS_SCHEDULERS];
345 RgUlSchdInits rgSchUlSchdInits = RGSCH_ULSCHED_INITS;
346 RgDlSchdInits rgSchDlSchdInits = RGSCH_DLSCHED_INITS;
348 static RgEmtcUlSchdInits rgSchEmtcUlSchdInits = RGSCH_EMTC_ULSCHED_INITS;
349 static RgEmtcDlSchdInits rgSchEmtcDlSchdInits = RGSCH_EMTC_DLSCHED_INITS;
351 #if (defined (RG_PHASE2_SCHED) && defined (TFU_UPGRADE))
352 static RgDlfsSchdInits rgSchDlfsSchdInits = RGSCH_DLFSSCHED_INITS;
355 typedef Void (*RgSchCmnDlAllocRbFunc) ARGS((RgSchCellCb *cell, RgSchDlSf *subFrm,
356 RgSchUeCb *ue, uint32_t bo, uint32_t *effBo, RgSchDlHqProcCb *proc,
357 RgSchCmnDlRbAllocInfo *cellWdAllocInfo));
358 typedef uint8_t (*RgSchCmnDlGetPrecInfFunc) ARGS((RgSchCellCb *cell, RgSchUeCb *ue,
359 uint8_t numLyrs, Bool bothCwEnbld));
360 static Void rgSCHCmnDlAllocTxRbTM1 ARGS((
366 RgSchDlHqProcCb *proc,
367 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
369 static Void rgSCHCmnDlAllocTxRbTM2 ARGS((
375 RgSchDlHqProcCb *proc,
376 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
378 static Void rgSCHCmnDlAllocTxRbTM3 ARGS((
384 RgSchDlHqProcCb *proc,
385 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
387 static Void rgSCHCmnDlAllocTxRbTM4 ARGS((
393 RgSchDlHqProcCb *proc,
394 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
397 static Void rgSCHCmnDlAllocTxRbTM5 ARGS((
403 RgSchDlHqProcCb *proc,
404 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
407 static Void rgSCHCmnDlAllocTxRbTM6 ARGS((
413 RgSchDlHqProcCb *proc,
414 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
416 static Void rgSCHCmnDlAllocTxRbTM7 ARGS((
422 RgSchDlHqProcCb *proc,
423 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
425 static Void rgSCHCmnDlAllocRetxRbTM1 ARGS((
431 RgSchDlHqProcCb *proc,
432 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
434 static Void rgSCHCmnDlAllocRetxRbTM2 ARGS((
440 RgSchDlHqProcCb *proc,
441 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
443 static Void rgSCHCmnDlAllocRetxRbTM3 ARGS((
449 RgSchDlHqProcCb *proc,
450 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
452 static Void rgSCHCmnDlAllocRetxRbTM4 ARGS((
458 RgSchDlHqProcCb *proc,
459 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
462 static Void rgSCHCmnDlAllocRetxRbTM5 ARGS((
468 RgSchDlHqProcCb *proc,
469 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
472 static Void rgSCHCmnDlAllocRetxRbTM6 ARGS((
478 RgSchDlHqProcCb *proc,
479 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
481 static Void rgSCHCmnDlAllocRetxRbTM7 ARGS((
487 RgSchDlHqProcCb *proc,
488 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
492 static uint8_t rgSchGetN1ResCount ARGS ((
496 Bool rgSchCmnChkDataOnlyOnPcell
502 uint8_t rgSCHCmnCalcPcqiBitSz
/* Functions specific to each transmission mode for DL Tx RB Allocation.
 * Indexed by (transmission mode - 1); the NULLP entry at index 4 means
 * no fresh-transmission allocator is provided for TM5 in this build. */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[7] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7};

/* Functions specific to each transmission mode for DL Retx RB Allocation.
 * Same indexing convention as dlAllocTxRbFunc above. */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[7] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7};
/* Functions specific to each transmission mode for DL Tx RB Allocation.
 * 9-entry variant covering TM1..TM9; NULLP for TM5/TM8/TM9 (unsupported).
 * NOTE(review): this re-definition of dlAllocTxRbFunc appears to sit in a
 * different preprocessor branch than the 7-entry variant above — the #if
 * guard lines are not visible in this view; confirm in the full file. */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[9] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7, NULLP, NULLP};

/* Functions specific to each transmission mode for DL Retx RB Allocation
 * (9-entry variant; same indexing as dlAllocTxRbFunc). */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[9] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7, NULLP, NULLP};
532 static uint8_t rgSCHCmnDlTM3PrecInf2 ARGS((
538 static uint8_t rgSCHCmnDlTM3PrecInf4 ARGS((
544 static uint8_t rgSCHCmnDlTM4PrecInf2 ARGS((
550 static uint8_t rgSCHCmnDlTM4PrecInf4 ARGS((
556 /* Functions specific to each transmission mode for DL RB Allocation*/
557 RgSchCmnDlGetPrecInfFunc getPrecInfoFunc[2][2] = {
558 {rgSCHCmnDlTM3PrecInf2, rgSCHCmnDlTM3PrecInf4},
559 {rgSCHCmnDlTM4PrecInf2, rgSCHCmnDlTM4PrecInf4}
562 static S16 rgSCHCmnDlAlloc1CwRetxRb ARGS((
566 RgSchDlHqTbCb *tbInfo,
571 static S16 rgSCHCmnDlAlloc2CwRetxRb ARGS((
575 RgSchDlHqProcCb *proc,
580 static Void rgSCHCmnDlTM3TxTx ARGS((
586 RgSchDlHqProcCb *proc,
587 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
589 static Void rgSCHCmnDlTM3TxRetx ARGS((
595 RgSchDlHqProcCb *proc,
596 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
598 static Void rgSCHCmnDlTM3RetxRetx ARGS((
604 RgSchDlHqProcCb *proc,
605 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
608 static Void rgSCHCmnNonDlfsUpdTyp2Alloc ARGS((
614 /* LTE_ADV_FLAG_REMOVED_START */
616 static Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc ARGS((
623 /* LTE_ADV_FLAG_REMOVED_END */
624 static Void rgSCHCmnDlRbInfoAddUeTx ARGS((
626 RgSchCmnDlRbAllocInfo *allocInfo,
628 RgSchDlHqProcCb *proc
630 static Void rgSCHCmnDlRbInfoAddUeRetx ARGS((
632 RgSchCmnDlRbAllocInfo *allocInfo,
636 static Void rgSCHCmnDlAdd2NonSchdRetxLst ARGS((
637 RgSchCmnDlRbAllocInfo *allocInfo,
639 RgSchDlHqProcCb *proc
641 static S16 rgSCHCmnDlAlloc2CwTxRetxRb ARGS((
645 RgSchDlHqTbCb *reTxTb,
650 static S16 rgSCHCmnDlAlloc2CwTxRb ARGS((
654 RgSchDlHqProcCb *proc,
659 static S16 rgSCHCmnDlAlloc1CwTxRb ARGS((
663 RgSchDlHqTbCb *tbInfo,
669 static Void rgSCHCmnFillHqPTb ARGS((
671 RgSchDlRbAlloc *rbAllocInfo,
677 static Void rgSCHCmnDlGetBestFitHole ARGS((
680 uint32_t *crntAllocMask,
683 uint8_t *allocNumRbs,
686 #ifdef RGSCH_SPS_UNUSED
687 static uint32_t rgSCHCmnGetRaType1Mask ARGS((
693 static uint32_t rgSCHCmnGetRaType0Mask ARGS((
697 static uint32_t rgSCHCmnGetRaType2Mask ARGS((
703 Bool rgSCHCmnRetxAllocAvoid ARGS((
706 RgSchDlHqProcCb *proc
709 uint16_t rgSCHCmnGetSiSetId ARGS((
712 uint16_t minPeriodicity
//TODO_SID: Currently the table is only for 100 PRBs. Need to modify w.r.t. VRBG table 8.1.5.2.1-1 V5G_213
/* 5GTF: transport block size (bits) per MCS index; per the TODO above,
 * valid only for a 100-PRB allocation. */
uint32_t rgSch5gtfTbSzTbl[MAX_5GTF_MCS] =
{1864, 5256, 8776, 13176, 17576, 21976, 26376, 31656, 35176, 39576, 43976, 47496, 52776, 59376, 66392};

/* 5GTF debug/statistics counters. NOTE(review): incremented at various
 * points of the UL/DL scheduling path elsewhere in the scheduler; the
 * names suggest the event each one counts — verify against the code
 * paths that update them before relying on these descriptions. */
uint32_t g5gtfTtiCnt = 0;
uint32_t gUl5gtfSrRecv = 0;
uint32_t gUl5gtfBsrRecv = 0;
uint32_t gUl5gtfUeSchPick = 0;
uint32_t gUl5gtfPdcchSchd = 0;
uint32_t gUl5gtfAllocAllocated = 0;
uint32_t gUl5gtfUeRbAllocDone = 0;
uint32_t gUl5gtfUeRmvFnlzZeroBo = 0;
uint32_t gUl5gtfUeFnlzReAdd = 0;
uint32_t gUl5gtfPdcchSend = 0;
uint32_t gUl5gtfRbAllocFail = 0;
uint32_t ul5gtfsidUlMarkUl = 0;
uint32_t ul5gtfsidDlSchdPass = 0;
uint32_t ul5gtfsidDlAlreadyMarkUl = 0;
uint32_t ul5gtfTotSchdCnt = 0;
737 /* CQI Offset Index to Beta CQI Offset value mapping,
738 * stored as parts per 1000. Reserved is set to 0.
739 * Refer 36.213 sec 8.6.3 Tbl 8.6.3-3 */
uint32_t rgSchCmnBetaCqiOffstTbl[16] = {0, 0, 1125,
1250, 1375, 1625, 1750, 2000, 2250, 2500, 2875,
3125, 3500, 4000, 5000, 6250};
/* HARQ-ACK Offset Index to Beta offset value mapping, stored as parts
 * per 1000. Refer 36.213 sec 8.6.3 Tbl 8.6.3-1 (reserved entry is 0). */
uint32_t rgSchCmnBetaHqOffstTbl[16] = {2000, 2500, 3125,
4000, 5000, 6250, 8000,10000, 12625, 15875, 20000,
31000, 50000,80000,126000,0};
746 uint32_t rgSchCmnBetaRiOffstTbl[16] = {1250, 1625, 2000,
747 2500, 3125, 4000, 5000, 6250, 8000, 10000,12625,
/* 3-bit differential CQI value to signed offset mapping (values 4..7 are
 * negative offsets) — cf. 36.213 Table 7.2-2 spatial differential CQI. */
S8 rgSchCmnDlCqiDiffOfst[8] = {0, 1, 2, 3, -4, -3, -2, -1};
751 /* Include CRS REs while calculating Efficiency */
752 const static uint8_t rgSchCmnAntIdx[5] = {0,0,1,0,2};
753 const static uint8_t rgSchCmnNumResForCrs[5] = {0,6,12,0,16};
754 uint32_t cfiSwitchCnt ;
/* Aperiodic CQI differential offsets. NOTE(review): presumably the first
 * table applies for UE-selected subband reporting and the second for
 * eNB-configured subband reporting — confirm against the users of these
 * tables elsewhere in the scheduler. */
S8 rgSchCmnApUeSelDiffCqi[4] = {1, 2, 3, 4};
S8 rgSchCmnApEnbConfDiffCqi[4] = {0, 1, 2, -1};
764 typedef struct rgSchCmnDlUeDciFrmtOptns
766 TfuDciFormat spfcDciFrmt; /* TM(Transmission Mode) specific DCI format.
767 * Search space : UE Specific by C-RNTI only. */
768 uint8_t spfcDciRAType; /* Resource Alloctn(RA) type for spfcDciFrmt */
769 TfuDciFormat prfrdDciFrmt; /* Preferred DCI format among the available
770 * options for TD (Transmit Diversity) */
771 uint8_t prfrdDciRAType; /* Resource Alloctn(RA) type for prfrdDciFrmt */
772 }RgSchCmnDlUeDciFrmtOptns;
775 /* DCI Format options for each Transmission Mode */
776 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[7] = {
777 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
778 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
779 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
780 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
781 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
782 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
783 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
787 /* DCI Format options for each Transmission Mode */
788 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[9] = {
789 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
790 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
791 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
792 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
793 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
794 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
795 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
800 typedef struct rgSchCmnDlImcsTbl
802 uint8_t modOdr; /* Modulation Order */
803 uint8_t iTbs; /* ITBS */
804 }RgSchCmnDlImcsTbl[29];
806 const struct rgSchCmnMult235Info
808 uint8_t match; /* Closest number satisfying 2^a.3^b.5^c, with a bias
809 * towards the smaller number */
810 uint8_t prvMatch; /* Closest number not greater than array index
811 * satisfying 2^a.3^b.5^c */
812 } rgSchCmnMult235Tbl[110+1] = {
814 {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {6, 6}, {8, 8},
815 {9, 9}, {10, 10}, {10, 10}, {12, 12}, {12, 12}, {15, 12}, {15, 15},
816 {16, 16}, {16, 16}, {18, 18}, {18, 18}, {20, 20}, {20, 20}, {20, 20},
817 {24, 20}, {24, 24}, {25, 25}, {25, 25}, {27, 27}, {27, 27}, {30, 27},
818 {30, 30}, {30, 30}, {32, 32}, {32, 32}, {32, 32}, {36, 32}, {36, 36},
819 {36, 36}, {36, 36}, {40, 36}, {40, 40}, {40, 40}, {40, 40}, {45, 40},
820 {45, 40}, {45, 45}, {45, 45}, {48, 45}, {48, 48}, {48, 48}, {50, 50},
821 {50, 50}, {50, 50}, {54, 50}, {54, 54}, {54, 54}, {54, 54}, {54, 54},
822 {60, 54}, {60, 54}, {60, 60}, {60, 60}, {60, 60}, {64, 60}, {64, 64},
823 {64, 64}, {64, 64}, {64, 64}, {64, 64}, {72, 64}, {72, 64}, {72, 64},
824 {72, 72}, {72, 72}, {75, 72}, {75, 75}, {75, 75}, {75, 75}, {80, 75},
825 {80, 75}, {80, 80}, {81, 81}, {81, 81}, {81, 81}, {81, 81}, {81, 81},
826 {90, 81}, {90, 81}, {90, 81}, {90, 81}, {90, 90}, {90, 90}, {90, 90},
827 {90, 90}, {96, 90}, {96, 90}, {96, 96}, {96, 96}, {96, 96}, {100, 96},
828 {100, 100}, {100, 100}, {100, 100}, {100, 100}, {100, 100}, {108, 100},
829 {108, 100}, {108, 100}, {108, 108}, {108, 108}, {108, 108}
/* Backoff Indicator (BI) index to backoff time (ms) mapping,
 * from 36.321 Table 7.2-1. */
const static S16 rgSchCmnBiTbl[RG_SCH_CMN_NUM_BI_VAL] = {
0, 10, 20, 30,40,60,80,120,160,240,320,480,960};
836 RgSchCmnUlCqiInfo rgSchCmnUlCqiTbl[RG_SCH_CMN_UL_NUM_CQI] = {
838 {RGSCH_CMN_QM_CQI_1,RGSCH_CMN_UL_EFF_CQI_1 },
839 {RGSCH_CMN_QM_CQI_2,RGSCH_CMN_UL_EFF_CQI_2 },
840 {RGSCH_CMN_QM_CQI_3,RGSCH_CMN_UL_EFF_CQI_3 },
841 {RGSCH_CMN_QM_CQI_4,RGSCH_CMN_UL_EFF_CQI_4 },
842 {RGSCH_CMN_QM_CQI_5,RGSCH_CMN_UL_EFF_CQI_5 },
843 {RGSCH_CMN_QM_CQI_6,RGSCH_CMN_UL_EFF_CQI_6 },
844 {RGSCH_CMN_QM_CQI_7,RGSCH_CMN_UL_EFF_CQI_7 },
845 {RGSCH_CMN_QM_CQI_8,RGSCH_CMN_UL_EFF_CQI_8 },
846 {RGSCH_CMN_QM_CQI_9,RGSCH_CMN_UL_EFF_CQI_9 },
847 {RGSCH_CMN_QM_CQI_10,RGSCH_CMN_UL_EFF_CQI_10 },
848 {RGSCH_CMN_QM_CQI_11,RGSCH_CMN_UL_EFF_CQI_11 },
849 {RGSCH_CMN_QM_CQI_12,RGSCH_CMN_UL_EFF_CQI_12 },
850 {RGSCH_CMN_QM_CQI_13,RGSCH_CMN_UL_EFF_CQI_13 },
851 {RGSCH_CMN_QM_CQI_14,RGSCH_CMN_UL_EFF_CQI_14 },
852 {RGSCH_CMN_QM_CQI_15,RGSCH_CMN_UL_EFF_CQI_15 },
856 /* This table maps a (delta_offset * 2 + 2) to a (beta * 8)
857 * where beta is 10^-(delta_offset/10) rounded off to nearest 1/8
859 static uint16_t rgSchCmnUlBeta8Tbl[29] = {
860 6, RG_SCH_CMN_UL_INVALID_BETA8, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23,
861 25, 28, 32, RG_SCH_CMN_UL_INVALID_BETA8, 40, RG_SCH_CMN_UL_INVALID_BETA8,
862 50, RG_SCH_CMN_UL_INVALID_BETA8, 64, RG_SCH_CMN_UL_INVALID_BETA8, 80,
863 RG_SCH_CMN_UL_INVALID_BETA8, 101, RG_SCH_CMN_UL_INVALID_BETA8, 127,
864 RG_SCH_CMN_UL_INVALID_BETA8, 160
/* QCI to service priority mapping; the index is the QCI value and the
 * entry is the scheduling priority (values come from RG_SCH_CMN_QCI_TO_PRIO). */
static uint8_t rgSchCmnDlQciPrio[RG_SCH_CMN_MAX_QCI] = RG_SCH_CMN_QCI_TO_PRIO;
/* The configuration is efficiency measured per 1024 REs. */
/* The first element stands for when CQI is not known. */
/* This table is used to translate CQI to its corresponding */
/* allocation parameters. These are currently from 36.213. */
/* Just this table needs to be edited for modifying */
/* the resource allocation behaviour. */
878 /* ADD CQI to MCS mapping correction
879 * single dimensional array is replaced by 2 dimensions for different CFI*/
/* CQI -> PDSCH coding efficiency (per 1024 REs), one row per CFI value;
 * single-codeword (1-layer) table. Index 0 of each row is "CQI unknown". */
static uint16_t rgSchCmnCqiPdschEff[4][16] = {RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI2,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI3};

/* Same mapping for the two-layer (spatial multiplexing) case. */
static uint16_t rgSchCmn2LyrCqiPdschEff[4][16] = {RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI2, RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI3};
886 /* This configuration determines the transalation of a UEs CQI to its */
887 /* PDCCH coding efficiency. This may be edited based on the installation */
/* DL redundancy version sequence: RV order 0,2,3,1 indexed by
 * (transmission number mod 4) — corrected RVIdx sequence. */
static uint8_t rgSchCmnDlRvTbl[4] = {0, 2, 3, 1};
890 /* Indexed by [DciFrmt].
891 * Considering the following definition in determining the dciFrmt index.
/* DCI format payload sizes in bits, indexed per the DCI-format index
 * mapping described above. NOTE(review): zero-initialized here and
 * presumably computed at cell configuration time — confirm where it is
 * populated. */
static uint16_t rgSchCmnDciFrmtSizes[10];

/* CQI -> PDCCH coding efficiency mapping. */
static uint16_t rgSchCmnCqiPdcchEff[16] = RG_SCH_CMN_CQI_TO_PDCCH_EFF;
912 RgSchTddUlDlSubfrmTbl rgSchTddUlDlSubfrmTbl = {
913 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME},
914 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
915 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
916 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
917 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
918 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
919 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME}
924 uint8_t rgSchTddSpsDlMaxRetxTbl[RGSCH_MAX_TDD_UL_DL_CFG] = {
936 /* Special Subframes in OFDM symbols */
937 /* ccpu00134197-MOD-Correct the number of symbols */
938 RgSchTddSplSubfrmInfoTbl rgSchTddSplSubfrmInfoTbl = {
942 {11, 1, 1, 10, 1, 1},
950 /* PHICH 'm' value Table */
951 RgSchTddPhichMValTbl rgSchTddPhichMValTbl = {
952 {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
953 {0, 1, 0, 0, 1, 0, 1, 0, 0, 1},
954 {0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
955 {1, 0, 0, 0, 0, 0, 0, 0, 1, 1},
956 {0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
957 {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
958 {1, 1, 0, 0, 0, 1, 1, 0, 0, 1}
961 /* PHICH 'K' value Table */
962 RgSchTddKPhichTbl rgSchTddKPhichTbl = {
963 {0, 0, 4, 7, 6, 0, 0, 4, 7, 6},
964 {0, 0, 4, 6, 0, 0, 0, 4, 6, 0},
965 {0, 0, 6, 0, 0, 0, 0, 6, 0, 0},
966 {0, 0, 6, 6, 6, 0, 0, 0, 0, 0},
967 {0, 0, 6, 6, 0, 0, 0, 0, 0, 0},
968 {0, 0, 6, 0, 0, 0, 0, 0, 0, 0},
969 {0, 0, 4, 6, 6, 0, 0, 4, 7, 0}
972 /* Uplink association index 'K' value Table */
973 RgSchTddUlAscIdxKDashTbl rgSchTddUlAscIdxKDashTbl = {
974 {0, 0, 6, 4, 0, 0, 0, 6, 4, 0},
975 {0, 0, 4, 0, 0, 0, 0, 4, 0, 0},
976 {0, 0, 4, 4, 4, 0, 0, 0, 0, 0},
977 {0, 0, 4, 4, 0, 0, 0, 0, 0, 0},
978 {0, 0, 4, 0, 0, 0, 0, 0, 0, 0},
979 {0, 0, 7, 7, 5, 0, 0, 7, 7, 0}
983 /* PUSCH 'K' value Table */
984 RgSchTddPuschTxKTbl rgSchTddPuschTxKTbl = {
985 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
986 {0, 6, 0, 0, 4, 0, 6, 0, 0, 4},
987 {0, 0, 0, 4, 0, 0, 0, 0, 4, 0},
988 {4, 0, 0, 0, 0, 0, 0, 0, 4, 4},
989 {0, 0, 0, 0, 0, 0, 0, 0, 4, 4},
990 {0, 0, 0, 0, 0, 0, 0, 0, 4, 0},
991 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
994 /* PDSCH to PUCCH Table for DL Harq Feed back. Based on the
995 Downlink association set index 'K' table */
996 uint8_t rgSchTddPucchTxTbl[7][10] = {
997 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
998 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
999 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1000 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1001 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1002 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1003 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1006 /* Table to fetch the next DL sf idx for applying the
1007 new CFI. The next Dl sf Idx at which the new CFI
1008 is applied is always the starting Sf of the next ACK/NACK
1011 Ex: In Cfg-2, sf4 and sf9 are the only subframes at which
1012 a new ACK/NACK bundle of DL subframes can start
1014 D S U D D D S U D D D S U D D D S U D D
1017 dlSf Array for Cfg-2:
1018 sfNum: 0 1 3 4 5 6 8 9 0 1 3 4 5 6 8 9
1019 sfIdx: 0 1 2 3 4 5 6 7 8 9 10 11 12 12 14 15
1021 If CFI changes at sf0, nearest DL SF bundle >= 4 TTI is sf4
1022 So at sf4 the new CFI can be applied. To arrive at sf4 from
1023 sf0, the sfIdx has to be increased by 3 */
1025 uint8_t rgSchTddPdcchSfIncTbl[7][10] = {
1026 /* A/N Bundl: 0,1,5,6*/ {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
1027 /* A/N Bundl: 0,4,5,9*/ {2, 2, 0, 0, 3, 2, 2, 0, 0, 3},
1028 /* A/N Bundl: 4,9*/ {3, 6, 0, 5, 4, 3, 6, 0, 5, 4},
1029 /* A/N Bundl: 1,7,9*/ {4, 3, 0, 0, 0, 4, 5, 4, 6, 5},
1030 /* A/N Bundl: 0,6*/ {4, 3, 0, 0, 6, 5, 4, 7, 6, 5},
1031 /* A/N Bundl: 9*/ {8, 7, 0, 6, 5, 4, 12, 11, 10, 9},
1032 /* A/N Bundl: 0,1,5,6,9*/ {2, 1, 0, 0, 0, 2, 2, 0, 0, 3}
1036 /* combine compilation fixes */
1038 /* subframe offset values to be used when twoIntervalsConfig is enabled in UL
1040 RgSchTddSfOffTbl rgSchTddSfOffTbl = {
1041 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1042 {0, 0, 1, -1, 0, 0, 0, 1, -1, 0},
1043 {0, 0, 5, 0, 0, 0, 0, -5, 0, 0},
1044 {0, 0, 1, 1, -2, 0, 0, 0, 0, 0},
1045 {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
1046 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1047 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
/* Table to determine when uplink SPS configured grants should
 * explicitly be reserved in a subframe. When entries are the same
 * as those of Msg3SubfrmTbl, it indicates competition with msg3.
 * As of now, this is same as Msg3SubfrmTbl (leaving out uldlcfg 2),
 * except that all 255s are now zeros. */
1056 RgSchTddSpsUlRsrvTbl rgSchTddSpsUlRsrvTbl = {
1057 {0, 0, 0, 6, 8, 0, 0, 0, 6, 8},
1058 {0, 0, 6, 9, 0, 0, 0, 6, 9, 0},
1059 {0, 0, 10, 0, 0, 0, 0, 10, 0, 0},
1060 {0, 0, 0, 0, 8, 0, 7, 7, 14, 0},
1061 {0, 0, 0, 9, 0, 0, 7, 15, 0, 0},
1062 {0, 0, 10, 0, 0, 0, 16, 0, 0, 0},
1063 {0, 0, 0, 0, 8, 0, 0, 0, 9, 0}
1066 /* Inverse DL Assoc Set index Table */
1067 RgSchTddInvDlAscSetIdxTbl rgSchTddInvDlAscSetIdxTbl = {
1068 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1069 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1070 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1071 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1072 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1073 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1074 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1077 #endif /* (LTEMAC_SPS ) */
/* Number of uplink subframes per radio frame for each TDD
 * UL/DL configuration (index 0..6). */
static uint8_t rgSchTddNumUlSf[] = {6, 4, 2, 3, 2, 1, 5};
/* Uplink HARQ processes Table: number of UL HARQ processes per TDD
 * UL/DL configuration 0..6 (36.213 Table 8-1). NOTE: the previous
 * comment here said "Downlink" — it was swapped with the table below;
 * the values {7,4,2,3,2,1,6} and the type name are the UL counts. */
RgSchTddUlNumHarqProcTbl rgSchTddUlNumHarqProcTbl = { 7, 4, 2, 3, 2, 1, 6};
/* Downlink HARQ processes Table: maximum number of DL HARQ processes
 * per TDD UL/DL configuration 0..6 (36.213 Table 7-1). NOTE: the
 * previous comment here said "Uplink" — it was swapped with the table
 * above; the values {4,7,10,9,12,15,6} and the type name are DL counts. */
RgSchTddDlNumHarqProcTbl rgSchTddDlNumHarqProcTbl = { 4, 7, 10, 9, 12, 15, 6};
1088 /* Downlink association index set 'K' value Table */
1089 RgSchTddDlAscSetIdxKTbl rgSchTddDlAscSetIdxKTbl = {
1090 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1092 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1094 { {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}} },
1096 { {0, {0}}, {0, {0}}, {3, {7, 6, 11}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1098 { {0, {0}}, {0, {0}}, {4, {12, 8, 7, 11}}, {4, {6, 5, 4, 7}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1100 { {0, {0}}, {0, {0}}, {9, {13, 12, 9, 8, 7, 5, 4, 11, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1102 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1105 /* ccpu132282-ADD-the table rgSchTddDlAscSetIdxKTbl is rearranged in
1106 * decreasing order of Km, this is used to calculate the NCE used for
1107 * calculating N1Pucch Resource for Harq*/
1108 RgSchTddDlAscSetIdxKTbl rgSchTddDlHqPucchResCalTbl = {
1109 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1111 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1113 { {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}} },
1115 { {0, {0}}, {0, {0}}, {3, {11, 7, 6}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1117 { {0, {0}}, {0, {0}}, {4, {12, 11, 8, 7}}, {4, {7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1119 { {0, {0}}, {0, {0}}, {9, {13, 12, 11, 9, 8, 7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1121 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
/* Minimum number of ACK/NACK feedback entries to be stored for each
 * TDD UL/DL configuration (index 0..6). */
RgSchTddANFdbkMapTbl rgSchTddANFdbkMapTbl = {4, 4, 2, 3, 2, 1, 5};
1128 /* Uplink switch points and number of UL subframes Table */
1129 RgSchTddMaxUlSubfrmTbl rgSchTddMaxUlSubfrmTbl = {
1130 {2,3,3}, {2,2,2}, {2,1,1}, {1,3,0}, {1,2,0}, {1,1,0}, {2,3,2}
1133 /* Uplink switch points and number of DL subframes Table */
1134 RgSchTddMaxDlSubfrmTbl rgSchTddMaxDlSubfrmTbl = {
1135 {2,2,2}, {2,3,3}, {2,4,4}, {1,7,0}, {1,8,0}, {1,9,0}, {2,2,3}
1138 /* Number of UL subframes present before a particular subframe */
1139 RgSchTddNumUlSubfrmTbl rgSchTddNumUlSubfrmTbl = {
1140 {0, 0, 1, 2, 3, 3, 3, 4, 5, 6},
1141 {0, 0, 1, 2, 2, 2, 2, 3, 4, 4},
1142 {0, 0, 1, 1, 1, 1, 1, 2, 2, 2},
1143 {0, 0, 1, 2, 3, 3, 3, 3, 3, 3},
1144 {0, 0, 1, 2, 2, 2, 2, 2, 2, 2},
1145 {0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
1146 {0, 0, 1, 2, 3, 3, 3, 4, 5, 5}
1149 /* Number of DL subframes present till a particular subframe */
1150 RgSchTddNumDlSubfrmTbl rgSchTddNumDlSubfrmTbl = {
1151 {1, 2, 2, 2, 2, 3, 4, 4, 4, 4},
1152 {1, 2, 2, 2, 3, 4, 5, 5, 5, 6},
1153 {1, 2, 2, 3, 4, 5, 6, 6, 7, 8},
1154 {1, 2, 2, 2, 2, 3, 4, 5, 6, 7},
1155 {1, 2, 2, 2, 3, 4, 5, 6, 7, 8},
1156 {1, 2, 2, 3, 4, 5, 6, 7, 8, 9},
1157 {1, 2, 2, 2, 2, 3, 4, 4, 4, 5}
1161 /* Nearest possible UL subframe Index from UL subframe
1162 * DL Index < UL Index */
1163 RgSchTddLowDlSubfrmIdxTbl rgSchTddLowDlSubfrmIdxTbl = {
1164 {0, 1, 1, 1, 1, 5, 6, 6, 6, 6},
1165 {0, 1, 1, 1, 4, 5, 6, 6, 6, 9},
1166 {0, 1, 1, 3, 4, 5, 6, 6, 8, 9},
1167 {0, 1, 1, 1, 1, 5, 6, 7, 8, 9},
1168 {0, 1, 1, 1, 4, 5, 6, 7, 8, 9},
1169 {0, 1, 1, 3, 4, 5, 6, 7, 8, 9},
1170 {0, 1, 1, 1, 1, 5, 6, 6, 6, 9}
1173 /* Nearest possible DL subframe Index from UL subframe
1174 * DL Index > UL Index
1175 * 10 represents Next SFN low DL Idx */
1176 RgSchTddHighDlSubfrmIdxTbl rgSchTddHighDlSubfrmIdxTbl = {
1177 {0, 1, 5, 5, 5, 5, 6, 10, 10, 10},
1178 {0, 1, 4, 4, 4, 5, 6, 9, 9, 9},
1179 {0, 1, 3, 3, 4, 5, 6, 8, 8, 9},
1180 {0, 1, 5, 5, 5, 5, 6, 7, 8, 9},
1181 {0, 1, 4, 4, 4, 5, 6, 7, 8, 9},
1182 {0, 1, 3, 3, 4, 5, 6, 7, 8, 9},
1183 {0, 1, 5, 5, 5, 5, 6, 9, 9, 9}
1186 /* RACH Message3 related information */
1187 RgSchTddMsg3SubfrmTbl rgSchTddMsg3SubfrmTbl = {
1188 {7, 6, 255, 255, 255, 7, 6, 255, 255, 255},
1189 {7, 6, 255, 255, 8, 7, 6, 255, 255, 8},
1190 {7, 6, 255, 9, 8, 7, 6, 255, 9, 8},
1191 {12, 11, 255, 255, 255, 7, 6, 6, 6, 13},
1192 {12, 11, 255, 255, 8, 7, 6, 6, 14, 13},
1193 {12, 11, 255, 9, 8, 7, 6, 15, 14, 13},
1194 {7, 6, 255, 255, 255, 7, 6, 255, 255, 8}
1197 /* ccpu00132341-DEL Removed rgSchTddRlsDlSubfrmTbl and used Kset table for
1198 * releasing DL HARQs */
1200 /* DwPTS Scheduling Changes Start */
/* Provides the number of Cell Reference Signals in DwPTS,
 * indexed as [special subframe cfg group][antenna port index].
 * Row 0 covers special subframe cfgs 1,2,3,6,7,8; row 1 covers cfg 4. */
static uint8_t rgSchCmnDwptsCrs[2][3] = { /* [Spl Sf cfg][Ant Port] */
   {4, 8, 16},  /* Spl Sf cfg 1,2,3,6,7,8 */
   {6, 12, 20}, /* Spl Sf cfg 4 */
};
1208 static S8 rgSchCmnSplSfDeltaItbs[9] = RG_SCH_DWPTS_ITBS_ADJ;
/* DwPTS Scheduling Changes End */
/* Buffer Size Report index -> buffer size in bytes (64 levels).
 * Values follow 3GPP TS 36.321 Table 6.1.3.1-1; the spec's last level is
 * ">150000", represented here by the implementation value 220000. */
static uint32_t rgSchCmnBsrTbl[64] = {
   0, 10, 12, 14, 17, 19, 22, 26,
   31, 36, 42, 49, 57, 67, 78, 91,
   107, 125, 146, 171, 200, 234, 274, 321,
   376, 440, 515, 603, 706, 826, 967, 1132,
   1326, 1552, 1817, 2127, 2490, 2915, 3413, 3995,
   4677, 5476, 6411, 7505, 8787, 10287, 12043, 14099,
   16507, 19325, 22624, 26487, 31009, 36304, 42502, 49759,
   58255, 68201, 79846, 93479, 109439, 128125, 150000, 220000
};
/* Extended Buffer Size Report index -> buffer size in bytes (64 levels).
 * Values follow 3GPP TS 36.321 Table 6.1.3.1-2; the spec's last level is
 * ">3000000", represented here by the implementation value 3100000. */
static uint32_t rgSchCmnExtBsrTbl[64] = {
   0, 10, 13, 16, 19, 23, 29, 35,
   43, 53, 65, 80, 98, 120, 147, 181,
   223, 274, 337, 414, 509, 625, 769, 945,
   1162, 1429, 1757, 2161, 2657, 3267, 4017, 4940,
   6074, 7469, 9185, 11294, 13888, 17077, 20999, 25822,
   31752, 39045, 48012, 59039, 72598, 89272, 109774, 134986,
   165989, 204111, 250990, 308634, 379519, 466683, 573866, 705666,
   867737, 1067031, 1312097, 1613447, 1984009, 2439678, 3000000, 3100000
};
1235 uint8_t rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_MAX_CP][RG_SCH_CMN_UL_NUM_CQI];
1237 RgSchTbSzTbl rgTbSzTbl = {
1239 {16, 32, 56, 88, 120, 152, 176, 208, 224, 256, 288, 328, 344, 376, 392, 424, 456, 488, 504, 536, 568, 600, 616, 648, 680, 712, 744, 776, 776, 808, 840, 872, 904, 936, 968, 1000, 1032, 1032, 1064, 1096, 1128, 1160, 1192, 1224, 1256, 1256, 1288, 1320, 1352, 1384, 1416, 1416, 1480, 1480, 1544, 1544, 1608, 1608, 1608, 1672, 1672, 1736, 1736, 1800, 1800, 1800, 1864, 1864, 1928, 1928, 1992, 1992, 2024, 2088, 2088, 2088, 2152, 2152, 2216, 2216, 2280, 2280, 2280, 2344, 2344, 2408, 2408, 2472, 2472, 2536, 2536, 2536, 2600, 2600, 2664, 2664, 2728, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 2984, 2984, 3112},
1240 {24, 56, 88, 144, 176, 208, 224, 256, 328, 344, 376, 424, 456, 488, 520, 568, 600, 632, 680, 712, 744, 776, 808, 872, 904, 936, 968, 1000, 1032, 1064, 1128, 1160, 1192, 1224, 1256, 1288, 1352, 1384, 1416, 1416, 1480, 1544, 1544, 1608, 1608, 1672, 1736, 1736, 1800, 1800, 1864, 1864, 1928, 1992, 1992, 2024, 2088, 2088, 2152, 2152, 2216, 2280, 2280, 2344, 2344, 2408, 2472, 2472, 2536, 2536, 2600, 2600, 2664, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008},
1241 {32, 72, 144, 176, 208, 256, 296, 328, 376, 424, 472, 520, 568, 616, 648, 696, 744, 776, 840, 872, 936, 968, 1000, 1064, 1096, 1160, 1192, 1256, 1288, 1320, 1384, 1416, 1480, 1544, 1544, 1608, 1672, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2024, 2088, 2088, 2152, 2216, 2216, 2280, 2344, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2664, 2728, 2792, 2856, 2856, 2856, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968},
1242 {40, 104, 176, 208, 256, 328, 392, 440, 504, 568, 616, 680, 744, 808, 872, 904, 968, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 1992, 2024, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6200, 6456, 6456},
1243 {56, 120, 208, 256, 328, 408, 488, 552, 632, 696, 776, 840, 904, 1000, 1064, 1128, 1192, 1288, 1352, 1416, 1480, 1544, 1608, 1736, 1800, 1864, 1928, 1992, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2600, 2664, 2728, 2792, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992},
1244 {72, 144, 224, 328, 424, 504, 600, 680, 776, 872, 968, 1032, 1128, 1224, 1320, 1384, 1480, 1544, 1672, 1736, 1864, 1928, 2024, 2088, 2216, 2280, 2344, 2472, 2536, 2664, 2728, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4264, 4392, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9528},
1245 {328, 176, 256, 392, 504, 600, 712, 808, 936, 1032, 1128, 1224, 1352, 1480, 1544, 1672, 1736, 1864, 1992, 2088, 2216, 2280, 2408, 2472, 2600, 2728, 2792, 2984, 2984, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3880, 4008, 4136, 4136, 4264, 4392, 4584, 4584, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448},
1246 {104, 224, 328, 472, 584, 712, 840, 968, 1096, 1224, 1320, 1480, 1608, 1672, 1800, 1928, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536},
1247 {120, 256, 392, 536, 680, 808, 968, 1096, 1256, 1384, 1544, 1672, 1800, 1928, 2088, 2216, 2344, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15264},
1248 {136, 296, 456, 616, 776, 936, 1096, 1256, 1416, 1544, 1736, 1864, 2024, 2216, 2344, 2536, 2664, 2856, 2984, 3112, 3368, 3496, 3624, 3752, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 5160, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568},
1249 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080},
1250 {176, 376, 584, 776, 1000, 1192, 1384, 1608, 1800, 2024, 2216, 2408, 2600, 2792, 2984, 3240, 3496, 3624, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152},
1251 {208, 440, 680, 904, 1128, 1352, 1608, 1800, 2024, 2280, 2472, 2728, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 23688, 24496, 24496, 24496, 24496, 25456},
1252 {224, 488, 744, 1000, 1256, 1544, 1800, 2024, 2280, 2536, 2856, 3112, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 26416, 27376, 27376, 27376, 27376, 28336, 28336},
1253 {256, 552, 840, 1128, 1416, 1736, 1992, 2280, 2600, 2856, 3112, 3496, 3752, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 5992, 6200, 6456, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704},
1254 {280, 600, 904, 1224, 1544, 1800, 2152, 2472, 2728, 3112, 3368, 3624, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 6200, 6456, 6712, 6968, 7224, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008},
1255 {328, 632, 968, 1288, 1608, 1928, 2280, 2600, 2984, 3240, 3624, 3880, 4264, 4584, 4968, 5160, 5544, 5992, 6200, 6456, 6712, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 35160},
1256 {336, 696, 1064, 1416, 1800, 2152, 2536, 2856, 3240, 3624, 4008, 4392, 4776, 5160, 5352, 5736, 6200, 6456, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 39232},
1257 {376, 776, 1160, 1544, 1992, 2344, 2792, 3112, 3624, 4008, 4392, 4776, 5160, 5544, 5992, 6200, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14112, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816},
1258 {408, 840, 1288, 1736, 2152, 2600, 2984, 3496, 3880, 4264, 4776, 5160, 5544, 5992, 6456, 6968, 7224, 7736, 8248, 8504, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19080, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888},
1259 {440, 904, 1384, 1864, 2344, 2792, 3240, 3752, 4136, 4584, 5160, 5544, 5992, 6456, 6968, 7480, 7992, 8248, 8760, 9144, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 20616, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 48936, 51024, 51024, 51024},
1260 {488, 1000, 1480, 1992, 2472, 2984, 3496, 4008, 4584, 4968, 5544, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 9912, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056},
1261 {520, 1064, 1608, 2152, 2664, 3240, 3752, 4264, 4776, 5352, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 10296, 10680, 11448, 11832, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 19080, 19080, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256},
1262 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776},
1263 {584, 1192, 1800, 2408, 2984, 3624, 4264, 4968, 5544, 5992, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 22920, 23688, 24496, 25456, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592},
1264 {616, 1256, 1864, 2536, 3112, 3752, 4392, 5160, 5736, 6200, 6968, 7480, 8248, 8760, 9528, 10296, 10680, 11448, 12216, 12576, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592, 68808, 68808, 68808, 71112},
1265 {712, 1480, 2216, 2984, 3752, 4392, 5160, 5992, 6712, 7480, 8248, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 14688, 15264, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376}
1268 {32, 88, 152, 208, 256, 328, 376, 424, 488, 536, 600, 648, 712, 776, 808, 872, 936, 1000, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2088, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 5992, 6200},
1269 {56, 144, 208, 256, 344, 424, 488, 568, 632, 712, 776, 872, 936, 1000, 1064, 1160, 1224, 1288, 1384, 1416, 1544, 1608, 1672, 1736, 1800, 1864, 1992, 2024, 2088, 2152, 2280, 2344, 2408, 2472, 2536, 2600, 2728, 2792, 2856, 2856, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5352, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992},
1270 {72, 176, 256, 328, 424, 520, 616, 696, 776, 872, 968, 1064, 1160, 1256, 1320, 1416, 1544, 1608, 1672, 1800, 1864, 1992, 2088, 2152, 2216, 2344, 2408, 2536, 2600, 2664, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3368, 3496, 3624, 3624, 3752, 3880, 4008, 4008, 4136, 4264, 4264, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912},
1271 {104, 208, 328, 440, 568, 680, 808, 904, 1032, 1160, 1256, 1384, 1480, 1608, 1736, 1864, 1992, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2856, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4392, 4584, 4776, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12576, 12960, 12960},
1272 {120, 256, 408, 552, 696, 840, 1000, 1128, 1288, 1416, 1544, 1736, 1864, 1992, 2152, 2280, 2408, 2600, 2728, 2856, 2984, 3112, 3240, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840},
1273 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19080},
1274 {176, 392, 600, 808, 1032, 1224, 1480, 1672, 1864, 2088, 2280, 2472, 2728, 2984, 3112, 3368, 3496, 3752, 4008, 4136, 4392, 4584, 4776, 4968, 5160, 5352, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7992, 8248, 8248, 8504, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920},
1275 {224, 472, 712, 968, 1224, 1480, 1672, 1928, 2216, 2472, 2664, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376},
1276 {256, 536, 808, 1096, 1384, 1672, 1928, 2216, 2536, 2792, 3112, 3368, 3624, 3880, 4264, 4584, 4776, 4968, 5352, 5544, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576},
1277 {296, 616, 936, 1256, 1544, 1864, 2216, 2536, 2856, 3112, 3496, 3752, 4136, 4392, 4776, 5160, 5352, 5736, 5992, 6200, 6712, 6968, 7224, 7480, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160},
1278 {328, 680, 1032, 1384, 1736, 2088, 2472, 2792, 3112, 3496, 3880, 4264, 4584, 4968, 5352, 5736, 5992, 6200, 6712, 6968, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 21384, 21384, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888},
1279 {376, 776, 1192, 1608, 2024, 2408, 2792, 3240, 3624, 4008, 4392, 4776, 5352, 5736, 5992, 6456, 6968, 7224, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816},
1280 {440, 904, 1352, 1800, 2280, 2728, 3240, 3624, 4136, 4584, 4968, 5544, 5992, 6456, 6712, 7224, 7736, 8248, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 17568, 17568, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024},
1281 {488, 1000, 1544, 2024, 2536, 3112, 3624, 4136, 4584, 5160, 5736, 6200, 6712, 7224, 7736, 8248, 8760, 9144, 9912, 10296, 10680, 11448, 11832, 12216, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336},
1282 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776},
1283 {600, 1224, 1800, 2472, 3112, 3624, 4264, 4968, 5544, 6200, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 23688, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808},
1284 {632, 1288, 1928, 2600, 3240, 3880, 4584, 5160, 5992, 6456, 7224, 7736, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 68808, 71112, 71112, 71112, 71112},
1285 {696, 1416, 2152, 2856, 3624, 4392, 5160, 5736, 6456, 7224, 7992, 8760, 9528, 10296, 10680, 11448, 12216, 12960, 13536, 14688, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 71112, 73712, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 78704},
1286 {776, 1544, 2344, 3112, 4008, 4776, 5544, 6200, 7224, 7992, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 27376, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936},
1287 {840, 1736, 2600, 3496, 4264, 5160, 5992, 6968, 7736, 8504, 9528, 10296, 11064, 12216, 12960, 13536, 14688, 15264, 16416, 16992, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800},
1288 {904, 1864, 2792, 3752, 4584, 5544, 6456, 7480, 8248, 9144, 10296, 11064, 12216, 12960, 14112, 14688, 15840, 16992, 17568, 18336, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 42368, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 97896, 101840, 101840, 101840},
1289 {1000, 1992, 2984, 4008, 4968, 5992, 6968, 7992, 9144, 9912, 11064, 12216, 12960, 14112, 15264, 15840, 16992, 18336, 19080, 19848, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 31704, 32856, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136},
1290 {1064, 2152, 3240, 4264, 5352, 6456, 7480, 8504, 9528, 10680, 11832, 12960, 14112, 15264, 16416, 16992, 18336, 19080, 20616, 21384, 22920, 23688, 24496, 25456, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816},
1291 {1128, 2280, 3496, 4584, 5736, 6968, 7992, 9144, 10296, 11448, 12576, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 45352, 45352, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840,101840,101840,101840,105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496},
1292 {1192, 2408, 3624, 4968, 5992, 7224, 8504, 9912, 11064, 12216, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 42368, 43816, 45352, 46888, 46888, 48936, 51024, 51024, 52752, 52752, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208},
1293 {1256, 2536, 3752, 5160, 6200, 7480, 8760, 10296, 11448, 12576, 14112, 15264, 16416, 17568, 19080, 20616, 21384, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 43816, 43816, 45352, 46888, 48936, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040,115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208, 137792, 137792, 137792, 142248},
1294 {1480, 2984, 4392, 5992, 7480, 8760, 10296, 11832, 13536, 14688, 16416, 17568, 19080, 20616, 22152, 23688, 25456, 26416, 28336, 29296, 30576, 32856, 34008, 35160, 36696, 37888, 40576, 40576, 42368, 43816, 45352, 46888, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 63776, 63776, 66592, 68808, 68808, 71112, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 81176, 84760, 84760, 87936, 87936, 90816, 90816, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 128496, 128496, 128496, 133208, 133208, 133208, 137792, 137792, 137792, 142248, 142248, 142248, 146856, 146856,149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776}
1297 RgSchUlIMcsTbl rgUlIMcsTbl = {
1298 {2, 0}, {2, 1}, {2, 2}, {2, 3}, {2, 4}, {2, 5},
1299 {2, 6}, {2, 7}, {2, 8}, {2, 9}, {2, 10},
1300 {4, 10}, {4, 11}, {4, 12}, {4, 13}, {4, 14},
1301 {4, 15}, {4, 16}, {4, 17}, {4, 18}, {4, 19},
1302 {6, 19}, {6, 20}, {6, 21}, {6, 22}, {6, 23},
1303 {6, 24}, {6, 25}, {6, 26}
1305 RgSchUeCatTbl rgUeCatTbl = {
1306 /*Column1:Maximum number of bits of an UL-SCH
1307 transport block transmitted within a TTI
1309 Column2:Maximum number of bits of a DLSCH
1310 transport block received within a TTI
1312 Column3:Total number of soft channel bits
1314 Column4:Support for 64QAM in UL
1316 Column5:Maximum number of DL-SCH transport
1317 block bits received within a TTI
1319 Column6:Maximum number of supported layers for
1320 spatial multiplexing in DL
1322 {5160, {10296,0}, 250368, FALSE, 10296, 1},
1323 {25456, {51024,0}, 1237248, FALSE, 51024, 2},
1324 {51024, {75376,0}, 1237248, FALSE, 102048, 2},
1325 {51024, {75376,0}, 1827072, FALSE, 150752, 2},
1326 {75376, {149776,0}, 3667200, TRUE, 299552, 4},
1327 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1328 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1329 {149776,{299856,0}, 35982720,TRUE, 2998560, 8}
1332 /* [ccpu00138532]-ADD-The below table stores the min HARQ RTT time
1333 in Downlink for TDD and FDD. Indices 0 to 6 map to tdd UL DL config 0-6.
1334 Index 7 map to FDD */
1335 uint8_t rgSchCmnHarqRtt[8] = {4,7,10,9,12,15,6,8};
1336 /* Number of CFI Switchover Index is equals to 7 TDD Indexes + 1 FDD index */
1337 uint8_t rgSchCfiSwitchOvrWinLen[] = {7, 4, 2, 3, 2, 1, 6, 8};
1339 /* EffTbl is calculated for single layer and two layers.
1340 * CqiToTbs is calculated for single layer and two layers */
1341 RgSchCmnTbSzEff rgSchCmnNorCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1342 RgSchCmnTbSzEff rgSchCmnNorCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1343 /* New variable to store UL effiency values for normal and extended CP*/
1344 RgSchCmnTbSzEff rgSchCmnNorUlEff[1],rgSchCmnExtUlEff[1];
1345 RgSchCmnCqiToTbs rgSchCmnNorCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1346 RgSchCmnCqiToTbs rgSchCmnNorCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1347 RgSchCmnCqiToTbs *rgSchCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
1348 RgSchCmnTbSzEff rgSchCmnExtCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1349 RgSchCmnTbSzEff rgSchCmnExtCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1350 RgSchCmnCqiToTbs rgSchCmnExtCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1351 RgSchCmnCqiToTbs rgSchCmnExtCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1352 /* Include CRS REs while calculating Efficiency */
1353 RgSchCmnTbSzEff *rgSchCmnEffTbl[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_ANT_CONF][RG_SCH_CMN_MAX_CFI];
1354 RgSchCmnTbSzEff *rgSchCmnUlEffTbl[RG_SCH_CMN_MAX_CP];
1356 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3, 1};
1358 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA sub-frames from RA preamble */
1359 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3};
1362 RgUlSchdInits rgSchUlSchdInits;
1363 RgDlSchdInits rgSchDlSchdInits;
1364 RgDlfsSchdInits rgSchDlfsSchdInits;
1366 RgEmtcUlSchdInits rgSchEmtcUlSchdInits;
1367 RgEmtcDlSchdInits rgSchEmtcDlSchdInits;
1371 static S16 rgSCHCmnUeIdleExdThrsld ARGS((
1375 RgSchUeCb* rgSCHCmnGetHoUe ARGS((
1379 static Void rgSCHCmnDelDedPreamble ARGS((
1383 RgSchUeCb* rgSCHCmnGetPoUe ARGS((
1386 CmLteTimingInfo timingInfo
1388 static Void rgSCHCmnDelRachInfo ARGS((
1392 static S16 rgSCHCmnUlRbAllocForPoHoUe ARGS((
1398 static Void rgSCHCmnHdlHoPo ARGS((
1400 CmLListCp *raRspLst,
1401 RgSchRaReqInfo *raReq
1403 static Void rgSCHCmnAllocPoHoGrnt ARGS((
1405 CmLListCp *raRspLst,
1407 RgSchRaReqInfo *raReq
1409 static Void rgSCHCmnFillPdcchOdr2Sf ARGS((
1416 static Void rgSCHCmnDlAdd2PdcchOdrQ ARGS((
1420 static Void rgSCHCmnDlRmvFrmPdcchOdrQ ARGS((
1424 static Void rgSCHCmnUpdNxtPrchMskIdx ARGS((
1427 static Void rgSCHCmnUpdRachParam ARGS((
1430 static S16 rgSCHCmnAllocPOParam ARGS((
1436 uint8_t *prachMskIdx
1438 static Void rgSCHCmnGenPdcchOrder ARGS((
1442 static Void rgSCHCmnCfgRachDedPrm ARGS((
1447 static Void rgSCHCmnHdlUlInactUes ARGS((
1450 static Void rgSCHCmnHdlDlInactUes ARGS((
1453 static Void rgSCHCmnUlInit ARGS((Void
1455 static Void rgSCHCmnDlInit ARGS((Void
1457 static Void rgSCHCmnInitDlRbAllocInfo ARGS((
1458 RgSchCmnDlRbAllocInfo *allocInfo
1460 static Void rgSCHCmnUpdUlCompEffBsr ARGS((
1464 static Void rgSCHCmnUlSetAllUnSched ARGS((
1465 RgSchCmnUlRbAllocInfo *allocInfo
1467 static Void rgSCHCmnUlUpdSf ARGS((
1469 RgSchCmnUlRbAllocInfo *allocInfo,
1472 static Void rgSCHCmnUlHndlAllocRetx ARGS((
1474 RgSchCmnUlRbAllocInfo *allocInfo,
1479 static Void rgSCHCmnGrpPwrCntrlPucch ARGS((
1483 static Void rgSCHCmnGrpPwrCntrlPusch ARGS((
1487 static Void rgSCHCmnDelUeFrmRefreshQ ARGS((
1491 static S16 rgSCHCmnTmrExpiry ARGS((
1492 PTR cb, /* Pointer to timer control block */
1493 S16 tmrEvnt /* Timer Event */
1495 static S16 rgSCHCmnTmrProc ARGS((
1498 static Void rgSCHCmnAddUeToRefreshQ ARGS((
1503 static Void rgSCHCmnDlCcchRetx ARGS((
1505 RgSchCmnDlRbAllocInfo *allocInfo
1507 static Void rgSCHCmnUpdUeMimoInfo ARGS((
1511 RgSchCmnCell *cellSchd
1513 static Void rgSCHCmnUpdUeUlCqiInfo ARGS((
1517 RgSchCmnUe *ueSchCmn,
1518 RgSchCmnCell *cellSchd,
1522 static Void rgSCHCmnDlCcchSduRetx ARGS((
1524 RgSchCmnDlRbAllocInfo *allocInfo
1526 static Void rgSCHCmnDlCcchSduTx ARGS((
1528 RgSchCmnDlRbAllocInfo *allocInfo
1530 static S16 rgSCHCmnCcchSduAlloc ARGS((
1533 RgSchCmnDlRbAllocInfo *allocInfo
1535 static S16 rgSCHCmnCcchSduDedAlloc ARGS((
1539 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc ARGS((
1545 static Void rgSCHCmnInitVars ARGS((
1549 /*ccpu00117180 - DEL - Moved rgSCHCmnUpdVars to .x as its access is now */
1550 static Void rgSCHCmnUlRbAllocForLst ARGS((
1556 CmLListCp *nonSchdLst,
1559 static S16 rgSCHCmnUlRbAllocForUe ARGS((
1566 static Void rgSCHCmnMsg3GrntReq ARGS((
1570 RgSchUlHqProcCb *hqProc,
1571 RgSchUlAlloc **ulAllocRef,
1572 uint8_t *hqProcIdRef
1574 static Void rgSCHCmnDlCcchRarAlloc ARGS((
1577 static Void rgSCHCmnDlCcchTx ARGS((
1579 RgSchCmnDlRbAllocInfo *allocInfo
1581 static Void rgSCHCmnDlBcchPcch ARGS((
1583 RgSchCmnDlRbAllocInfo *allocInfo,
1584 RgInfSfAlloc *subfrmAlloc
1586 Bool rgSCHCmnChkInWin ARGS((
1587 CmLteTimingInfo frm,
1588 CmLteTimingInfo start,
1591 Bool rgSCHCmnChkPastWin ARGS((
1592 CmLteTimingInfo frm,
1595 static Void rgSCHCmnClcAlloc ARGS((
1598 RgSchClcDlLcCb *lch,
1600 RgSchCmnDlRbAllocInfo *allocInfo
1603 static Void rgSCHCmnClcRbAlloc ARGS((
1614 static S16 rgSCHCmnMsg4Alloc ARGS((
1617 RgSchCmnDlRbAllocInfo *allocInfo
1619 static S16 rgSCHCmnMsg4DedAlloc ARGS((
1623 static Void rgSCHCmnDlRaRsp ARGS((
1625 RgSchCmnDlRbAllocInfo *allocInfo
1627 static S16 rgSCHCmnRaRspAlloc ARGS((
1633 RgSchCmnDlRbAllocInfo *allocInfo
1635 static Void rgSCHCmnUlUeDelAllocs ARGS((
1639 static Void rgSCHCmnDlSetUeAllocLmt ARGS((
1644 static S16 rgSCHCmnDlRgrCellCfg ARGS((
1649 static Void rgSCHCmnUlAdapRetx ARGS((
1650 RgSchUlAlloc *alloc,
1651 RgSchUlHqProcCb *proc
1653 static Void rgSCHCmnUlUpdAllocRetx ARGS((
1657 static Void rgSCHCmnUlSfReTxAllocs ARGS((
1661 /* Fix: syed Adaptive Msg3 Retx crash. */
1663 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1667 RgrUeRecfg *ueRecfg,
1671 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1681 * DL RB allocation specific functions
1684 static Void rgSCHCmnDlRbAlloc ARGS((
1686 RgSchCmnDlRbAllocInfo *allocInfo
1688 static Void rgSCHCmnNonDlfsRbAlloc ARGS((
1690 RgSchCmnDlRbAllocInfo *allocInfo
1692 static S16 rgSCHCmnNonDlfsCmnRbAlloc ARGS((
1694 RgSchDlRbAlloc *cmnAllocInfo));
1697 static Void rgSCHCmnNonDlfsPbchRbAllocAdj ARGS((
1699 RgSchDlRbAlloc *cmnAllocInfo,
1700 uint8_t pbchSsRsSym,
1703 /* Added function to adjust TBSize*/
1704 static Void rgSCHCmnNonDlfsPbchTbSizeAdj ARGS((
1705 RgSchDlRbAlloc *allocInfo,
1706 uint8_t numOvrlapgPbchRb,
1707 uint8_t pbchSsRsSym,
1712 /* Added function to find num of overlapping PBCH rb*/
1713 static Void rgSCHCmnFindNumPbchOvrlapRbs ARGS((
1716 RgSchDlRbAlloc *allocInfo,
1717 uint8_t *numOvrlapgPbchRb
1720 static uint8_t rgSCHCmnFindNumAddtlRbsAvl ARGS((
1723 RgSchDlRbAlloc *allocInfo
1727 static Void rgSCHCmnFindCodeRate ARGS((
1730 RgSchDlRbAlloc *allocInfo,
1736 static Void rgSCHCmnNonDlfsMsg4Alloc ARGS((
1738 RgSchCmnMsg4RbAlloc *msg4AllocInfo,
1741 static S16 rgSCHCmnNonDlfsMsg4RbAlloc ARGS((
1747 static S16 rgSCHCmnNonDlfsUeRbAlloc ARGS((
1751 uint8_t *isDlBwAvail
1754 static uint32_t rgSCHCmnCalcRiv ARGS(( uint8_t bw,
1760 static Void rgSCHCmnUpdHqAndDai ARGS((
1761 RgSchDlHqProcCb *hqP,
1763 RgSchDlHqTbCb *tbCb,
1766 static S16 rgSCHCmnUlCalcAvailBw ARGS((
1768 RgrCellCfg *cellCfg,
1770 uint8_t *rbStartRef,
1773 static S16 rgSCHCmnDlKdashUlAscInit ARGS((
1776 static S16 rgSCHCmnDlANFdbkInit ARGS((
1779 static S16 rgSCHCmnDlNpValInit ARGS((
1782 static S16 rgSCHCmnDlCreateRachPrmLst ARGS((
1785 static S16 rgSCHCmnDlCpyRachInfo ARGS((
1787 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
1790 static S16 rgSCHCmnDlRachInfoInit ARGS((
1793 static S16 rgSCHCmnDlPhichOffsetInit ARGS((
1798 static Void rgSCHCmnFindUlCqiUlTxAnt ARGS
1804 static RgSchCmnRank rgSCHCmnComputeRank ARGS
1807 uint32_t *pmiBitMap,
1811 static RgSchCmnRank rgSCHCmnComp2TxMode3 ARGS
1816 static RgSchCmnRank rgSCHCmnComp4TxMode3 ARGS
1821 static RgSchCmnRank rgSCHCmnComp2TxMode4 ARGS
1826 static RgSchCmnRank rgSCHCmnComp4TxMode4 ARGS
1831 static uint8_t rgSCHCmnCalcWcqiFrmSnr ARGS
1838 /* comcodsepa : start */
1841 * @brief This function computes efficiency and stores in a table.
1845 * Function: rgSCHCmnCompEff
1846 * Purpose: this function computes the efficiency as number of
1847 * bytes per 1024 symbols. The CFI table is also filled
1848 * with the same information such that comparison is valid
1850 * Invoked by: Scheduler
1852 * @param[in] uint8_t noPdcchSym
1853 * @param[in] uint8_t cpType
1854 * @param[in] uint8_t txAntIdx
1855 * @param[in] RgSchCmnTbSzEff* effTbl
1859 static Void rgSCHCmnCompEff(uint8_t noPdcchSym,uint8_t cpType,uint8_t txAntIdx,RgSchCmnTbSzEff *effTbl)
1863 uint8_t resOfCrs; /* Effective REs occupied by CRS */
1869 case RG_SCH_CMN_NOR_CP:
1872 case RG_SCH_CMN_EXT_CP:
1876 /* Generate a log error. This case should never be executed */
1880 /* Depending on the Tx Antenna Index, deduct the
1881 * Resource elements for the CRS */
1885 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
1888 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
1891 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
1894 /* Generate a log error. This case should never be executed */
1897 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
1898 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1901 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1903 /* This line computes the coding efficiency per 1024 REs */
1904 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1906 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1911 * @brief This function computes efficiency and stores in a table.
1915 * Function: rgSCHCmnCompUlEff
1916 * Purpose: this function computes the efficiency as number of
1917 * bytes per 1024 symbols. The CFI table is also filled
1918 * with the same information such that comparison is valid
1920 * Invoked by: Scheduler
1922 * @param[in] uint8_t noUlRsSym
1923 * @param[in] uint8_t cpType
1924 * @param[in] uint8_t txAntIdx
1925 * @param[in] RgSchCmnTbSzEff* effTbl
1929 static Void rgSCHCmnCompUlEff(uint8_t noUlRsSym,uint8_t cpType,RgSchCmnTbSzEff *effTbl)
1938 case RG_SCH_CMN_NOR_CP:
1941 case RG_SCH_CMN_EXT_CP:
1945 /* Generate a log error. This case should never be executed */
1949 noResPerRb = ((noSymPerRb - noUlRsSym) * RB_SCH_CMN_NUM_SCS_PER_RB);
1950 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1953 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1955 /* This line computes the coding efficiency per 1024 REs */
1956 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1958 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1964 * @brief This function computes efficiency for 2 layers and stores in a table.
1968 * Function: rgSCHCmn2LyrCompEff
1969 * Purpose: this function computes the efficiency as number of
1970 * bytes per 1024 symbols. The CFI table is also filled
1971 * with the same information such that comparison is valid
1973 * Invoked by: Scheduler
1975 * @param[in] uint8_t noPdcchSym
1976 * @param[in] uint8_t cpType
1977 * @param[in] uint8_t txAntIdx
1978 * @param[in] RgSchCmnTbSzEff* effTbl2Lyr
1982 static Void rgSCHCmn2LyrCompEff(uint8_t noPdcchSym,uint8_t cpType,uint8_t txAntIdx,RgSchCmnTbSzEff *effTbl2Lyr)
1986 uint8_t resOfCrs; /* Effective REs occupied by CRS */
1992 case RG_SCH_CMN_NOR_CP:
1995 case RG_SCH_CMN_EXT_CP:
1999 /* Generate a log error. This case should never be executed */
2003 /* Depending on the Tx Antenna Index, deduct the
2004 * Resource elements for the CRS */
2008 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
2011 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
2014 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
2017 /* Generate a log error. This case should never be executed */
2021 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
2022 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
2024 (*effTbl2Lyr)[i] = 0;
2025 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
2027 /* This line computes the coding efficiency per 1024 REs */
2028 (*effTbl2Lyr)[i] += (rgTbSzTbl[1][i][j] * 1024) / (noResPerRb * (j+1));
2030 (*effTbl2Lyr)[i] /= RG_SCH_CMN_NUM_RBS;
2037 * @brief This function initializes the rgSchCmnDciFrmtSizes table.
2041 * Function: rgSCHCmnGetDciFrmtSizes
2042 * Purpose: This function determines the sizes of all
2043 * the available DCI Formats. The order of
2044 * bits addition for each format is in accordance
2046 * Invoked by: rgSCHCmnRgrCellCfg
2051 static Void rgSCHCmnGetDciFrmtSizes(RgSchCellCb *cell)
2055 /* DCI Format 0 size determination */
2056 rgSchCmnDciFrmtSizes[0] = 1 +
2058 rgSCHUtlLog32bitNbase2((cell->bwCfg.ulTotalBw * \
2059 (cell->bwCfg.ulTotalBw + 1))/2) +
2069 /* DCI Format 1 size determination */
2070 rgSchCmnDciFrmtSizes[1] = 1 +
2071 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2076 4 + 2 + /* HqProc Id and DAI */
2082 /* DCI Format 1A size determination */
2083 rgSchCmnDciFrmtSizes[2] = 1 + /* Flag for format0/format1a differentiation */
2084 1 + /* Localized/distributed VRB assignment flag */
2087 3 + /* Harq process Id */
2089 4 + /* Harq process Id */
2090 2 + /* UL Index or DAI */
2092 1 + /* New Data Indicator */
2095 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2096 (cell->bwCfg.dlTotalBw + 1))/2);
2097 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
2098 Since VRB is local */
2100 /* DCI Format 1B size determination */
2101 rgSchCmnDciFrmtSizes[3] = 1 +
2102 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2103 (cell->bwCfg.dlTotalBw + 1))/2) +
2113 ((cell->numTxAntPorts == 4)? 4:2) +
2116 /* DCI Format 1C size determination */
2117 /* Approximation: NDLVrbGap1 ~= Nprb for DL */
2118 rgSchCmnDciFrmtSizes[4] = (cell->bwCfg.dlTotalBw < 50)? 0:1 +
2119 (cell->bwCfg.dlTotalBw < 50)?
2120 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/2 * \
2121 (cell->bwCfg.dlTotalBw/2 + 1))/2)) :
2122 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/4 * \
2123 (cell->bwCfg.dlTotalBw/4 + 1))/2)) +
2126 /* DCI Format 1D size determination */
2127 rgSchCmnDciFrmtSizes[5] = 1 +
2128 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2129 (cell->bwCfg.dlTotalBw + 1))/2) +
2138 ((cell->numTxAntPorts == 4)? 4:2) +
2141 /* DCI Format 2 size determination */
2142 rgSchCmnDciFrmtSizes[6] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2143 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2151 ((cell->numTxAntPorts == 4)? 6:3);
2153 /* DCI Format 2A size determination */
2154 rgSchCmnDciFrmtSizes[7] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2155 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2163 ((cell->numTxAntPorts == 4)? 2:0);
2165 /* DCI Format 3 size determination */
2166 rgSchCmnDciFrmtSizes[8] = rgSchCmnDciFrmtSizes[0];
2168 /* DCI Format 3A size determination */
2169 rgSchCmnDciFrmtSizes[9] = rgSchCmnDciFrmtSizes[0];
2176 * @brief This function initializes the cmnCell->dciAggrLvl table.
2180 * Function: rgSCHCmnGetCqiDciFrmt2AggrLvl
2181 * Purpose: This function determines the Aggregation level
2182 * for each CQI level against each DCI format.
2183 * Invoked by: rgSCHCmnRgrCellCfg
2188 static Void rgSCHCmnGetCqiDciFrmt2AggrLvl(RgSchCellCb *cell)
2190 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2195 for (i = 0; i < RG_SCH_CMN_MAX_CQI; i++)
2197 for (j = 0; j < 10; j++)
2199 uint32_t pdcchBits; /* Actual number of phy bits needed for a given DCI Format
2200 * for a given CQI Level */
2201 pdcchBits = (rgSchCmnDciFrmtSizes[j] * 1024)/rgSchCmnCqiPdcchEff[i];
2203 if (pdcchBits < 192)
2205 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL2;
2208 if (pdcchBits < 384)
2210 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL4;
2213 if (pdcchBits < 768)
2215 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL8;
2218 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL16;
2225 * @brief This function initializes all the data for the scheduler.
2229 * Function: rgSCHCmnDlInit
2230 * Purpose: This function initializes the following information:
2231 * 1. Efficiency table
2232 * 2. CQI to table index - It is one row for upto 3 RBs
2233 * and another row for greater than 3 RBs
2234 * currently extended prefix is compiled out.
2235 * Invoked by: MAC initialization code..may be ActvInit
2240 static Void rgSCHCmnDlInit()
2246 RgSchCmnTbSzEff *effTbl;
2247 RgSchCmnCqiToTbs *tbsTbl;
2250 /* 0 corresponds to Single layer case, 1 corresponds to 2 layers case*/
2251 /* Init Efficiency table for normal cyclic prefix */
2252 /*Initialize Efficiency table for Layer Index 0 */
2253 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2254 /*Initialize Efficiency table for each of the CFI indices. The
2255 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2256 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[0];
2257 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[0];
2258 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[0];
2259 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[0];
2260 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2261 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[0];
2262 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[0];
2263 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[0];
2264 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[0];
2265 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2266 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[0];
2267 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[0];
2268 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[0];
2269 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[0];
2271 /*Initialize CQI to TBS table for Layer Index 0 for Normal CP */
2272 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[0];
2273 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[0];
2274 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[0];
2275 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[0];
2277 /*Initialize Efficiency table for Layer Index 1 */
2278 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2279 /*Initialize Efficiency table for each of the CFI indices. The
2280 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2281 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[1];
2282 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[1];
2283 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[1];
2284 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[1];
2285 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2286 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[1];
2287 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[1];
2288 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[1];
2289 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[1];
2290 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2291 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[1];
2292 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[1];
2293 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[1];
2294 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[1];
2296 /*Initialize CQI to TBS table for Layer Index 1 for Normal CP */
2297 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[1];
2298 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[1];
2299 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[1];
2300 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[1];
2302 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2304 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2306 /* EfficiencyTbl calculation incase of 2 layers for normal CP */
2307 rgSCHCmnCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx,\
2308 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i]);
2309 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx, \
2310 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i]);
2314 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2316 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2318 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i];
2319 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][i];
2320 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2321 (j >= 0) && (k > 0); --j)
2323 /* ADD CQI to MCS mapping correction
2324 * single dimensional array is replaced by 2 dimensions for different CFI*/
2325 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2327 (*tbsTbl)[k--] = (uint8_t)j;
2334 /* effTbl,tbsTbl calculation incase of 2 layers for normal CP */
2335 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i];
2336 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][i];
2337 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2338 (j >= 0) && (k > 0); --j)
2340 /* ADD CQI to MCS mapping correction
2341 * single dimensional array is replaced by 2 dimensions for different CFI*/
2342 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2344 (*tbsTbl)[k--] = (uint8_t)j;
2354 /* Efficiency Table for Extended CP */
2355 /*Initialize Efficiency table for Layer Index 0 */
2356 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2357 /*Initialize Efficiency table for each of the CFI indices. The
2358 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2359 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[0];
2360 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[0];
2361 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[0];
2362 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[0];
2363 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2364 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[0];
2365 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[0];
2366 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[0];
2367 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[0];
2368 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2369 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[0];
2370 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[0];
2371 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[0];
2372 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[0];
2374 /*Initialize CQI to TBS table for Layer Index 0 for Extended CP */
2375 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[0];
2376 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[0];
2377 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[0];
2378 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[0];
2380 /*Initialize Efficiency table for Layer Index 1 */
2381 /*Initialize Efficiency table for each of the CFI indices. The
2382 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2383 /*Initialize Efficency table for Tx Antenna Port Index 0 */
2384 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[1];
2385 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[1];
2386 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[1];
2387 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[1];
2388 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2389 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[1];
2390 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[1];
2391 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[1];
2392 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[1];
2393 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2394 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[1];
2395 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[1];
2396 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[1];
2397 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[1];
2399 /*Initialize CQI to TBS table for Layer Index 1 for Extended CP */
2400 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[1];
2401 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[1];
2402 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[1];
2403 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[1];
2404 /* Activate this code when extended cp is supported */
2405 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2407 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2409 /* EfficiencyTbl calculation incase of 2 layers for extendedl CP */
2410 rgSCHCmnCompEff( (uint8_t)(i + 1 ), (uint8_t)RG_SCH_CMN_EXT_CP, idx,\
2411 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i]);
2412 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), (uint8_t) RG_SCH_CMN_EXT_CP,idx, \
2413 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i]);
2417 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2419 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2421 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i];
2422 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][i];
2423 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2424 (j >= 0) && (k > 0); --j)
2426 /* ADD CQI to MCS mapping correction
2427 * single dimensional array is replaced by 2 dimensions for different CFI*/
2428 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2430 (*tbsTbl)[k--] = (uint8_t)j;
2437 /* effTbl,tbsTbl calculation incase of 2 layers for extended CP */
2438 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i];
2439 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][i];
2440 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2441 (j >= 0) && (k > 0); --j)
2443 /* ADD CQI to MCS mapping correction
2444 * single dimensional array is replaced by 2 dimensions for different CFI*/
2445 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2447 (*tbsTbl)[k--] = (uint8_t)j;
2460 * @brief This function initializes all the data for the scheduler.
2464 * Function: rgSCHCmnUlInit
2465 * Purpose: This function initializes the following information:
2466 * 1. Efficiency table
2467 * 2. CQI to table index - It is one row for upto 3 RBs
2468 * and another row for greater than 3 RBs
2469 * currently extended prefix is compiled out.
2470 * Invoked by: MAC initialization code - possibly ActvInit
2475 static Void rgSCHCmnUlInit()
/* Builds the UL efficiency tables and the UL CQI -> iTBS mapping for both
 * normal and extended cyclic prefix. For each CP type it computes the
 * efficiency table (via rgSCHCmnCompUlEff) and then, scanning iTBS values
 * from highest to lowest, assigns to each UL CQI the largest iTBS whose
 * efficiency does not exceed that CQI's efficiency. */
2477 uint8_t *mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_NOR_CP][0];
2478 RgSchCmnTbSzEff *effTbl = &rgSchCmnNorUlEff[0];
2479 const RgSchCmnUlCqiInfo *cqiTbl = &rgSchCmnUlCqiTbl[0];
2483 /* Initializing new variable added for UL eff */
2484 rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP] = &rgSchCmnNorUlEff[0];
2485 /* Reason behind using 3 as the number of symbols to rule out for
2486 * efficiency table computation would be that we are using 2 symbols for
2487 * DMRS(1 in each slot) and 1 symbol for SRS*/
2488 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_NOR_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP]);
/* Map each UL CQI (j, counting down) to the best iTBS (i) it supports:
 * the first iTBS whose efficiency fits under the CQI's efficiency wins. */
2490 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2491 i >= 0 && j > 0; --i)
2493 if ((*effTbl)[i] <= cqiTbl[j].eff)
2495 mapTbl[j--] = (uint8_t)i;
/* Repeat the same computation for the extended-CP tables. */
2502 effTbl = &rgSchCmnExtUlEff[0];
2503 mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_EXT_CP][0];
2505 /* Initializing new variable added for UL eff */
2506 rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP] = &rgSchCmnExtUlEff[0];
2507 /* Reason behind using 3 as the number of symbols to rule out for
2508 * efficiency table computation would be that we are using 2 symbols for
2509 * DMRS(1 in each slot) and 1 symbol for SRS*/
2510 rgSCHCmnCompUlEff(3,RG_SCH_CMN_EXT_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP]);
2512 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2513 i >= 0 && j > 0; --i)
2515 if ((*effTbl)[i] <= cqiTbl[j].eff)
2517 mapTbl[j--] = (uint8_t)i;
2529 * @brief This function initializes all the data for the scheduler.
2533 * Function: rgSCHCmnInit
2534 * Purpose: This function initializes the following information:
2535 * 1. Efficiency table
2536 * 2. CQI to table index - It is one row for upto 3 RBs
2537 * and another row for greater than 3 RBs
2538 * currently extended prefix is compiled out.
2539 * Invoked by: MAC initialization code - possibly ActvInit
/* Body of the common-scheduler init (per the header comment above this
 * block; the signature line is not visible in this extract — confirm it is
 * rgSCHCmnInit). Initializes eMTC DL/UL sub-schedulers, wires up the
 * rgSchCmnApis dispatch table, and runs each registered scheduler's init. */
2551 rgSCHEmtcCmnDlInit();
2552 rgSCHEmtcCmnUlInit();
2558 /* Init the function pointers */
/* UE / cell / logical-channel configuration entry points. */
2559 rgSchCmnApis.rgSCHRgrUeCfg = rgSCHCmnRgrUeCfg;
2560 rgSchCmnApis.rgSCHRgrUeRecfg = rgSCHCmnRgrUeRecfg;
2561 rgSchCmnApis.rgSCHFreeUe = rgSCHCmnUeDel;
2562 rgSchCmnApis.rgSCHRgrCellCfg = rgSCHCmnRgrCellCfg;
2563 rgSchCmnApis.rgSCHRgrCellRecfg = rgSCHCmnRgrCellRecfg;
2564 rgSchCmnApis.rgSCHFreeCell = rgSCHCmnCellDel;
2565 rgSchCmnApis.rgSCHRgrLchCfg = rgSCHCmnRgrLchCfg;
2566 rgSchCmnApis.rgSCHRgrLcgCfg = rgSCHCmnRgrLcgCfg;
2567 rgSchCmnApis.rgSCHRgrLchRecfg = rgSCHCmnRgrLchRecfg;
2568 rgSchCmnApis.rgSCHRgrLcgRecfg = rgSCHCmnRgrLcgRecfg;
2569 rgSchCmnApis.rgSCHFreeDlLc = rgSCHCmnFreeDlLc;
2570 rgSchCmnApis.rgSCHFreeLcg = rgSCHCmnLcgDel;
2571 rgSchCmnApis.rgSCHRgrLchDel = rgSCHCmnRgrLchDel;
/* Runtime indication / scheduling entry points. */
2572 rgSchCmnApis.rgSCHActvtUlUe = rgSCHCmnActvtUlUe;
2573 rgSchCmnApis.rgSCHActvtDlUe = rgSCHCmnActvtDlUe;
2574 rgSchCmnApis.rgSCHHdlUlTransInd = rgSCHCmnHdlUlTransInd;
2575 rgSchCmnApis.rgSCHDlDedBoUpd = rgSCHCmnDlDedBoUpd;
2576 rgSchCmnApis.rgSCHUlRecMsg3Alloc = rgSCHCmnUlRecMsg3Alloc;
2577 rgSchCmnApis.rgSCHUlCqiInd = rgSCHCmnUlCqiInd;
2578 rgSchCmnApis.rgSCHPucchDeltaPwrInd = rgSCHPwrPucchDeltaInd;
2579 rgSchCmnApis.rgSCHUlHqProcForUe = rgSCHCmnUlHqProcForUe;
2581 rgSchCmnApis.rgSCHUpdUlHqProc = rgSCHCmnUpdUlHqProc;
/* BSR / PHR power-headroom reporting hooks. */
2583 rgSchCmnApis.rgSCHUpdBsrShort = rgSCHCmnUpdBsrShort;
2584 rgSchCmnApis.rgSCHUpdBsrTrunc = rgSCHCmnUpdBsrTrunc;
2585 rgSchCmnApis.rgSCHUpdBsrLong = rgSCHCmnUpdBsrLong;
2586 rgSchCmnApis.rgSCHUpdPhr = rgSCHCmnUpdPhr;
2587 rgSchCmnApis.rgSCHUpdExtPhr = rgSCHCmnUpdExtPhr;
2588 rgSchCmnApis.rgSCHContResUlGrant = rgSCHCmnContResUlGrant;
2589 rgSchCmnApis.rgSCHSrRcvd = rgSCHCmnSrRcvd;
2590 rgSchCmnApis.rgSCHFirstRcptnReq = rgSCHCmnFirstRcptnReq;
2591 rgSchCmnApis.rgSCHNextRcptnReq = rgSCHCmnNextRcptnReq;
2592 rgSchCmnApis.rgSCHFirstHqFdbkAlloc = rgSCHCmnFirstHqFdbkAlloc;
2593 rgSchCmnApis.rgSCHNextHqFdbkAlloc = rgSCHCmnNextHqFdbkAlloc;
2594 rgSchCmnApis.rgSCHDlProcAddToRetx = rgSCHCmnDlProcAddToRetx;
2595 rgSchCmnApis.rgSCHDlCqiInd = rgSCHCmnDlCqiInd;
/* eMTC-specific UL retransmission hook (guarded by a conditional not
 * visible in this extract). */
2597 rgSchCmnApis.rgSCHUlProcAddToRetx = rgSCHCmnEmtcUlProcAddToRetx;
2600 rgSchCmnApis.rgSCHSrsInd = rgSCHCmnSrsInd;
2602 rgSchCmnApis.rgSCHDlTARpt = rgSCHCmnDlTARpt;
2603 rgSchCmnApis.rgSCHDlRlsSubFrm = rgSCHCmnDlRlsSubFrm;
2604 rgSchCmnApis.rgSCHUeReset = rgSCHCmnUeReset;
2606 rgSchCmnApis.rgSCHHdlCrntiCE = rgSCHCmnHdlCrntiCE;
2607 rgSchCmnApis.rgSCHDlProcAck = rgSCHCmnDlProcAck;
2608 rgSchCmnApis.rgSCHDlRelPdcchFbk = rgSCHCmnDlRelPdcchFbk;
2609 rgSchCmnApis.rgSCHUlSpsRelInd = rgSCHCmnUlSpsRelInd;
2610 rgSchCmnApis.rgSCHUlSpsActInd = rgSCHCmnUlSpsActInd;
2611 rgSchCmnApis.rgSCHUlCrcFailInd = rgSCHCmnUlCrcFailInd;
2612 rgSchCmnApis.rgSCHUlCrcInd = rgSCHCmnUlCrcInd;
2614 rgSchCmnApis.rgSCHDrxStrtInActvTmrInUl = rgSCHCmnDrxStrtInActvTmrInUl;
2615 rgSchCmnApis.rgSCHUpdUeDataIndLcg = rgSCHCmnUpdUeDataIndLcg;
/* Run each registered UL/DL scheduler's init hook to populate its
 * scheduler API table entry. */
2617 for (idx = 0; idx < RGSCH_NUM_SCHEDULERS; ++idx)
2619 rgSchUlSchdInits[idx](&rgSchUlSchdTbl[idx]);
2620 rgSchDlSchdInits[idx](&rgSchDlSchdTbl[idx]);
/* Same for the eMTC scheduler tables. */
2623 for (idx = 0; idx < RGSCH_NUM_EMTC_SCHEDULERS; ++idx)
2625 rgSchEmtcUlSchdInits[idx](&rgSchEmtcUlSchdTbl[idx]);
2626 rgSchEmtcDlSchdInits[idx](&rgSchEmtcDlSchdTbl[idx]);
/* DLFS (frequency-selective) schedulers only exist in phase-2 builds. */
2629 #if (defined (RG_PHASE2_SCHED) && defined(TFU_UPGRADE))
2630 for (idx = 0; idx < RGSCH_NUM_DLFS_SCHEDULERS; ++idx)
2632 rgSchDlfsSchdInits[idx](&rgSchDlfsSchdTbl[idx]);
/* Carrier-aggregation SCell UE configuration hooks. */
2636 rgSchCmnApis.rgSCHRgrSCellUeCfg = rgSCHCmnRgrSCellUeCfg;
2637 rgSchCmnApis.rgSCHRgrSCellUeDel = rgSCHCmnRgrSCellUeDel;
2644 * @brief This function is a wrapper to call scheduler specific API.
2648 * Function: rgSCHCmnDlRlsSubFrm
2649 * Purpose: Releases scheduler Information from DL SubFrm.
2653 * @param[in] RgSchCellCb *cell
2654 * @param[out] CmLteTimingInfo frm
2658 Void rgSCHCmnDlRlsSubFrm(RgSchCellCb *cell,CmLteTimingInfo frm)
/* Releases scheduler bookkeeping held by the DL subframe identified by
 * "frm": looks up the subframe control block, returns it to the pool via
 * rgSCHUtlSubFrmPut, and re-initializes any DLFS per-subframe state
 * through the cell's DLFS API table. */
2660 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2664 /* Get the pointer to the subframe */
2665 sf = rgSCHUtlSubFrmGet(cell, frm);
2667 rgSCHUtlSubFrmPut(cell, sf);
2670 /* Re-initialize DLFS specific information for the sub-frame */
2671 cellSch->apisDlfs->rgSCHDlfsReinitSf(cell, sf);
2679 * @brief This function is the starting function for DL allocation.
2683 * Function: rgSCHCmnDlCmnChAlloc
2684 * Purpose: Scheduling for downlink. It performs allocation in the order
2685 * of priority with BCCH/PCH first, then CCCH, Random Access and TA.
2687 * Invoked by: Scheduler
2689 * @param[in] RgSchCellCb* cell
2690 * @param[out] RgSchCmnDlRbAllocInfo* allocInfo
2694 static Void rgSCHCmnDlCcchRarAlloc(RgSchCellCb *cell)
/* DL common-channel scheduling in priority order: CCCH/Msg4
 * retransmissions first, then new CCCH/Msg4, then CCCH-SDU RETX/TX, and
 * finally RA responses (Msg3 grants). Each phase is skipped when the
 * current subframe is an ABS (almost-blank) muted subframe. */
2696 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2699 rgSCHCmnDlCcchRetx(cell, &cellSch->allocInfo);
2700 /* LTE_ADV_FLAG_REMOVED_START */
/* New CCCH (Msg4) transmissions — suppressed on an ABS-muted subframe. */
2701 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2703 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2705 /*eNodeB need to blank the subframe */
2709 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2714 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2716 /* LTE_ADV_FLAG_REMOVED_END */
2720 /*Added these function calls for processing CCCH SDU arriving
2721 * after guard timer expiry. Functions differ from above two functions
2722 * in using ueCb instead of raCb.*/
2723 rgSCHCmnDlCcchSduRetx(cell, &cellSch->allocInfo);
2724 /* LTE_ADV_FLAG_REMOVED_START */
/* New CCCH SDU transmissions — same ABS muting rule as above. */
2725 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2727 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2729 /*eNodeB need to blank the subframe */
2733 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo)
2738 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2740 /* LTE_ADV_FLAG_REMOVED_END */
/* RA response (Msg3 grant) scheduling; only when an Msg3 scheduling
 * index is pending and no CFI change is in flight. */
2744 if(cellSch->ul.msg3SchdIdx != RGSCH_INVALID_INFO)
2746 /* Do not schedule msg3 if there is a CFI change ongoing */
2747 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2749 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2753 /* LTE_ADV_FLAG_REMOVED_START */
2754 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2756 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2758 /*eNodeB need to blank the subframe */
2762 /* Do not schedule msg3 if there is a CFI change ongoing */
2763 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2765 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2771 /* Do not schedule msg3 if there is a CFI change ongoing */
2772 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2774 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2777 /* LTE_ADV_FLAG_REMOVED_END */
2785 * @brief Scheduling for CCCH SDU.
2789 * Function: rgSCHCmnCcchSduAlloc
2790 * Purpose: Scheduling for CCCH SDU
2792 * Invoked by: Scheduler
2794 * @param[in] RgSchCellCb* cell
2795 * @param[in] RgSchUeCb* ueCb
2796 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2800 static S16 rgSCHCmnCcchSduAlloc(RgSchCellCb *cell,RgSchUeCb *ueCb,RgSchCmnDlRbAllocInfo *allocInfo)
/* Allocates DL resources for one UE's CCCH SDU: checks subframe BW,
 * obtains a HARQ process, performs the dedicated allocation, and queues
 * the HARQ proc on the CCCH-SDU TX list. Returns ROK on success, a
 * failure code otherwise (error paths log and release acquired state). */
2802 RgSchDlRbAlloc *rbAllocInfo;
2803 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2804 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2807 /* Return if subframe BW exhausted */
2808 if (allocInfo->ccchSduAlloc.ccchSduDlSf->bw <=
2809 allocInfo->ccchSduAlloc.ccchSduDlSf->bwAssigned)
2811 DU_LOG("\nERROR --> SCH : bw<=bwAssigned for UEID:%d",ueCb->ueId);
/* Acquire a HARQ process for this CCCH SDU before reserving RBs. */
2815 if (rgSCHDhmGetCcchSduHqProc(ueCb, cellSch->dl.time, &(ueDl->proc)) != ROK)
2817 DU_LOG("\nERROR --> SCH : rgSCHDhmGetCcchSduHqProc failed UEID:%d",ueCb->ueId);
2821 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
2822 rbAllocInfo->dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2824 if (rgSCHCmnCcchSduDedAlloc(cell, ueCb) != ROK)
2826 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
2827 rgSCHDhmRlsHqpTb(ueDl->proc, 0, FALSE);
2828 DU_LOG("\nERROR --> SCH : rgSCHCmnCcchSduDedAlloc failed UEID:%d",ueCb->ueId);
/* Success: queue the HARQ proc for TX and count this CCCH UE against
 * the per-subframe CCCH limit. */
2831 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduTxLst, &ueDl->proc->reqLnk);
2832 ueDl->proc->reqLnk.node = (PTR)ueDl->proc;
2833 allocInfo->ccchSduAlloc.ccchSduDlSf->schdCcchUe++;
2838 * @brief This function scheduler for downlink CCCH messages.
2842 * Function: rgSCHCmnDlCcchSduTx
2843 * Purpose: Scheduling for downlink CCCH
2845 * Invoked by: Scheduler
2847 * @param[in] RgSchCellCb *cell
2848 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2852 static Void rgSCHCmnDlCcchSduTx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Walks the cell's pending CCCH-SDU UE list and attempts a fresh
 * allocation for each eligible UE, stopping when the per-subframe CCCH
 * UE quota (maxCcchPerDlSf) is reached. */
2856 RgSchCmnDlUe *ueCmnDl;
2857 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2858 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2861 node = cell->ccchSduUeLst.first;
/* Stop once this subframe has scheduled its CCCH UE quota. */
2864 if(cellSch->dl.maxCcchPerDlSf &&
2865 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2871 ueCb = (RgSchUeCb *)(node->node);
2872 ueCmnDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2874 /* Fix : syed postpone scheduling for this
2875 * until msg4 is done */
2876 /* Fix : syed RLC can erroneously send CCCH SDU BO
2877 * twice. Hence an extra guard to avoid if already
2878 * scheduled for RETX */
2879 if ((!(ueCb->dl.dlInactvMask & RG_HQENT_INACTIVE)) &&
2882 if ((rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)) != ROK)
2889 DU_LOG("\nERROR --> SCH : THIS SHOULD "
2890 "NEVER HAPPEN for UEID:%d", ueCb->ueId);
2900 * @brief This function scheduler for downlink CCCH messages.
2904 * Function: rgSCHCmnDlCcchTx
2905 * Purpose: Scheduling for downlink CCCH
2907 * Invoked by: Scheduler
2909 * @param[in] RgSchCellCb *cell
2910 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2914 static Void rgSCHCmnDlCcchTx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Walks the RA control blocks awaiting Msg4 and attempts an Msg4
 * allocation for each, bounded by the per-subframe CCCH UE quota. */
2918 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2919 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
2921 node = cell->raInfo.toBeSchdLst.first;
/* Stop once this subframe has scheduled its CCCH UE quota. */
2924 if(cellSch->dl.maxCcchPerDlSf &&
2925 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2932 raCb = (RgSchRaCb *)(node->node);
2934 /* Address allocation for this UE for MSG 4 */
2935 /* Allocation for Msg4 */
2936 if ((rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)) != ROK)
2947 * @brief This function scheduler for downlink CCCH messages.
2951 * Function: rgSCHCmnDlCcchSduRetx
2952 * Purpose: Scheduling for downlink CCCH
2954 * Invoked by: Scheduler
2956 * @param[in] RgSchCellCb *cell
2957 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2961 static Void rgSCHCmnDlCcchSduRetx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Schedules CCCH-SDU HARQ retransmissions: for each pending RETX HARQ
 * process that fits in the remaining subframe BW, fills the UE's RB
 * allocation control block from the stored TB info and queues it on the
 * CCCH-SDU RETX list. Accumulated RETX RBs are committed to
 * dlSf->bwAssigned at the end. */
2963 RgSchDlRbAlloc *rbAllocInfo;
2965 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2967 RgSchDlHqProcCb *hqP;
2970 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2973 node = cellSch->dl.ccchSduRetxLst.first;
/* Respect the per-subframe CCCH UE quota. */
2976 if(cellSch->dl.maxCcchPerDlSf &&
2977 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2984 hqP = (RgSchDlHqProcCb *)(node->node);
2987 /* DwPts Scheduling Changes Start */
2989 if (rgSCHCmnRetxAvoidTdd(allocInfo->ccchSduAlloc.ccchSduDlSf,
2995 /* DwPts Scheduling Changes End */
/* Skip if the RETX grant no longer fits in the remaining BW. */
2997 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3001 ueCb = (RgSchUeCb*)(hqP->hqE->ue);
3002 ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
3004 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
3005 /* Fill RB Alloc Info */
3006 rbAllocInfo->dlSf = dlSf;
3007 rbAllocInfo->tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3008 rbAllocInfo->rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3009 /* Fix : syed iMcs setting did not correspond to RETX */
3010 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3011 rbAllocInfo->tbInfo[0].imcs);
3012 rbAllocInfo->rnti = ueCb->ueId;
3013 rbAllocInfo->tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3014 /* Fix : syed Copying info in entirety without depending on stale TX information */
3015 rbAllocInfo->tbInfo[0].tbCb = &hqP->tbInfo[0];
3016 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
3017 /* Fix : syed Assigning proc to scratchpad */
3020 retxBw += rbAllocInfo->rbsReq;
3022 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduRetxLst, \
3024 hqP->reqLnk.node = (PTR)hqP;
/* Commit all RETX RBs reserved in this pass. */
3028 dlSf->bwAssigned += retxBw;
3034 * @brief This function scheduler for downlink CCCH messages.
3038 * Function: rgSCHCmnDlCcchRetx
3039 * Purpose: Scheduling for downlink CCCH
3041 * Invoked by: Scheduler
3043 * @param[in] RgSchCellCb *cell
3044 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3048 static Void rgSCHCmnDlCcchRetx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Schedules Msg4 HARQ retransmissions: mirrors rgSCHCmnDlCcchSduRetx but
 * operates on RA control blocks (raCb, addressed by tmpCrnti) and the
 * msg4RetxLst, filling raCb->rbAllocInfo instead of the UE alloc CB. */
3051 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3053 RgSchDlHqProcCb *hqP;
3055 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
3058 node = cellSch->dl.msg4RetxLst.first;
/* Respect the per-subframe CCCH UE quota. */
3061 if(cellSch->dl.maxCcchPerDlSf &&
3062 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3068 hqP = (RgSchDlHqProcCb *)(node->node);
3072 /* DwPts Scheduling Changes Start */
3074 if (rgSCHCmnRetxAvoidTdd(allocInfo->msg4Alloc.msg4DlSf,
3080 /* DwPts Scheduling Changes End */
/* Skip if the RETX grant no longer fits in the remaining BW. */
3082 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3086 raCb = (RgSchRaCb*)(hqP->hqE->raCb);
3087 /* Fill RB Alloc Info */
3088 raCb->rbAllocInfo.dlSf = dlSf;
3089 raCb->rbAllocInfo.tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3090 raCb->rbAllocInfo.rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3091 /* Fix : syed iMcs setting did not correspond to RETX */
3092 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3093 raCb->rbAllocInfo.tbInfo[0].imcs);
3094 raCb->rbAllocInfo.rnti = raCb->tmpCrnti;
3095 raCb->rbAllocInfo.tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3096 /* Fix; syed Copying info in entirety without depending on stale TX information */
3097 raCb->rbAllocInfo.tbInfo[0].tbCb = &hqP->tbInfo[0];
3098 raCb->rbAllocInfo.tbInfo[0].schdlngForTb = TRUE;
3100 retxBw += raCb->rbAllocInfo.rbsReq;
3102 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4RetxLst, \
3104 hqP->reqLnk.node = (PTR)hqP;
/* Commit all RETX RBs reserved in this pass. */
3108 dlSf->bwAssigned += retxBw;
3114 * @brief This function implements scheduler DL allocation for
3115 * for broadcast (on PDSCH) and paging.
3119 * Function: rgSCHCmnDlBcchPcch
3120 * Purpose: This function implements scheduler for DL allocation
3121 * for broadcast (on PDSCH) and paging.
3123 * Invoked by: Scheduler
3125 * @param[in] RgSchCellCb* cell
3130 static Void rgSCHCmnDlBcchPcch(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo,RgInfSfAlloc *subfrmAlloc)
/* Schedules broadcast and paging for a future subframe: BCCH on BCH,
 * BCCH on DL-SCH (SIB1 on the fixed timing, other SIs inside their
 * scheduling windows), and finally PCCH. The legacy in-line SI handling
 * here is compiled out when RGR_SI_SCH delegates to rgSCHDlSiSched. */
3132 CmLteTimingInfo frm;
3134 RgSchClcDlLcCb *pcch;
3138 RgSchClcDlLcCb *bcch, *bch;
3139 #endif/*RGR_SI_SCH*/
3142 frm = cell->crntTime;
3144 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
3145 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
3146 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
3148 // RGSCH_SUBFRAME_INDEX(frm);
3149 //RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
3152 /* Compute the subframe for which allocation is being made */
3153 /* essentially, we need pointer to the dl frame for this subframe */
3154 sf = rgSCHUtlSubFrmGet(cell, frm);
/* ---- BCCH mapped on BCH (MIB) ---- */
3158 bch = rgSCHDbmGetBcchOnBch(cell);
3159 #if (ERRCLASS & ERRCLS_DEBUG)
3162 DU_LOG("\nERROR --> SCH : BCCH on BCH is not configured");
3166 if (bch->boLst.first != NULLP)
3168 bo = (RgSchClcBoRpt *)(bch->boLst.first->node);
3169 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3171 sf->bch.tbSize = bo->bo;
3172 cmLListDelFrm(&bch->boLst, bch->boLst.first);
3173 /* ccpu00117052 - MOD - Passing double pointer
3174 for proper NULLP assignment*/
3175 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(*bo));
3176 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, bch->lcId,TRUE);
/* ---- First BCCH on DL-SCH: fixed periodicity (sfn%4==0, subframe 0) ---- */
3181 if ((frm.sfn % 4 == 0) && (frm.subframe == 0))
3186 allocInfo->bcchAlloc.schdFirst = FALSE;
3187 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
3188 #if (ERRCLASS & ERRCLS_DEBUG)
3191 DU_LOG("\nERROR --> SCH : BCCH on DLSCH is not configured");
3195 if (bcch->boLst.first != NULLP)
3197 bo = (RgSchClcBoRpt *)(bcch->boLst.first->node);
3199 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3201 allocInfo->bcchAlloc.schdFirst = TRUE;
3202 /* Time to perform allocation for this BCCH transmission */
3203 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
/* ---- Second BCCH on DL-SCH: window-based SI scheduling ---- */
3207 if(!allocInfo->bcchAlloc.schdFirst)
3210 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
3211 #if (ERRCLASS & ERRCLS_DEBUG)
3214 DU_LOG("\nERROR --> SCH : BCCH on DLSCH is not configured");
3218 lnk = bcch->boLst.first;
3219 while (lnk != NULLP)
3221 bo = (RgSchClcBoRpt *)(lnk->node);
/* Schedule while within [timeToTx, maxTimeToTx]; purge reports whose
 * window has fully elapsed. */
3223 valid = rgSCHCmnChkInWin(frm, bo->timeToTx, bo->maxTimeToTx);
3227 bo->i = RGSCH_CALC_SF_DIFF(frm, bo->timeToTx);
3228 /* Time to perform allocation for this BCCH transmission */
3229 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3234 valid = rgSCHCmnChkPastWin(frm, bo->maxTimeToTx);
3237 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
3238 /* ccpu00117052 - MOD - Passing double pointer
3239 for proper NULLP assignment*/
3240 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo,
3241 sizeof(RgSchClcBoRpt));
3247 rgSCHDlSiSched(cell, allocInfo, subfrmAlloc);
3248 #endif/*RGR_SI_SCH*/
/* ---- PCCH (paging) ---- */
3250 pcch = rgSCHDbmGetPcch(cell);
3254 DU_LOG("\nERROR --> SCH : PCCH on DLSCH is not configured");
3258 if (pcch->boLst.first != NULLP)
3260 bo = (RgSchClcBoRpt *)(pcch->boLst.first->node);
3262 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3264 /* Time to perform allocation for this PCCH transmission */
3265 rgSCHCmnClcAlloc(cell, sf, pcch, RGSCH_P_RNTI, allocInfo);
3273 * Fun: rgSCHCmnChkInWin
3275 * Desc: This function checks if frm occurs in window
3277 * Ret: TRUE - if in window
3282 * File: rg_sch_cmn.c
3285 Bool rgSCHCmnChkInWin(CmLteTimingInfo frm,CmLteTimingInfo start,CmLteTimingInfo end)
3290 if (end.sfn > start.sfn)
3292 if (frm.sfn > start.sfn
3293 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3295 if (frm.sfn < end.sfn
3297 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3299 || (frm.sfn == end.sfn && frm.slot <= start.slot))
3306 /* Testing for wrap around, sfn wraparound check should be enough */
3307 else if (end.sfn < start.sfn)
3309 if (frm.sfn > start.sfn
3310 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3316 if (frm.sfn < end.sfn
3317 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3323 else /* start.sfn == end.sfn */
3325 if (frm.sfn == start.sfn
3326 && (frm.slot >= start.slot
3327 && frm.slot <= end.slot))
3334 } /* end of rgSCHCmnChkInWin*/
3338 * Fun: rgSCHCmnChkPastWin
3340 * Desc: This function checks if frm has gone past window edge
3342 * Ret: TRUE - if past window edge
3347 * File: rg_sch_cmn.c
3350 Bool rgSCHCmnChkPastWin(CmLteTimingInfo frm,CmLteTimingInfo end)
/* Returns TRUE when "frm" has moved past the window edge "end".
 * Implemented by reusing rgSCHCmnChkInWin on the interval
 * (end + 1 subframe, end + 1 frame]: if frm lies in that range it is
 * beyond the original window. */
3352 CmLteTimingInfo refFrm = end;
3356 RGSCH_INCR_FRAME(refFrm.sfn);
3357 RGSCH_INCR_SUB_FRAME(end, 1);
3358 pastWin = rgSCHCmnChkInWin(frm, end, refFrm);
3361 } /* end of rgSCHCmnChkPastWin*/
3364 * @brief This function implements allocation of the resources for common
3365 * channels BCCH, PCCH.
3369 * Function: rgSCHCmnClcAlloc
3370 * Purpose: This function implements selection of number of RBs based
3371 * the allowed grant for the service. It is also responsible
3372 * for selection of MCS for the transmission.
3374 * Invoked by: Scheduler
3376 * @param[in] RgSchCellCb *cell,
3377 * @param[in] RgSchDlSf *sf,
3378 * @param[in] RgSchClcDlLcCb *lch,
3379 * @param[in] uint16_t rnti,
3380 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3384 static Void rgSCHCmnClcAlloc(RgSchCellCb *cell,RgSchDlSf *sf,RgSchClcDlLcCb *lch,uint16_t rnti,RgSchCmnDlRbAllocInfo *allocInfo)
/* Computes the RB count (and MCS/TBS) for a common-channel (BCCH/PCCH)
 * transmission, reserves the RBs against the subframe, and records the
 * request in allocInfo->bcchAlloc (SI-RNTI) or ->pcchAlloc (P-RNTI). */
3386 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3393 uint8_t cfi = cellDl->currCfi;
3397 bo = (RgSchClcBoRpt *)(lch->boLst.first->node);
3401 /* rgSCHCmnClcRbAllocForFxdTb(cell, bo->bo, cellDl->ccchCqi, &rb);*/
/* Derive RB count: table scan when bitsPerRb is unset, otherwise a
 * direct ceiling division of the TB bits by bits-per-RB. */
3402 if(cellDl->bitsPerRb==0)
3404 while ((rgTbSzTbl[0][0][rb]) < (tbs*8))
3412 rb = RGSCH_CEIL((tbs*8), cellDl->bitsPerRb);
3414 /* DwPTS Scheduling Changes Start */
3416 if(sf->sfType == RG_SCH_SPL_SF_DATA)
3418 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
3420 /* Calculate the less RE's because of DwPTS */
3421 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
3423 /* Increase number of RBs in Spl SF to compensate for lost REs */
3424 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
3427 /* DwPTS Scheduling Changes End */
3428 /*ccpu00115595- end*/
3429 /* additional check to see if required RBs
3430 * exceeds the available */
3431 if (rb > sf->bw - sf->bwAssigned)
3433 DU_LOG("\nERROR --> SCH : BW allocation "
3434 "failed for CRNTI:%d",rnti);
3438 /* Update the subframe Allocated BW field */
3439 sf->bwAssigned = sf->bwAssigned + rb;
3440 /* Fill in the BCCH/PCCH transmission info to the RBAllocInfo struct */
3441 if (rnti == RGSCH_SI_RNTI)
3443 allocInfo->bcchAlloc.rnti = rnti;
3444 allocInfo->bcchAlloc.dlSf = sf;
3445 allocInfo->bcchAlloc.tbInfo[0].bytesReq = tbs;
3446 allocInfo->bcchAlloc.rbsReq = rb;
3447 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
3448 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
3449 /* Nprb indication at PHY for common Ch */
3450 allocInfo->bcchAlloc.nPrb = bo->nPrb;
/* Otherwise this is paging (P-RNTI): fill the PCCH allocation. */
3454 allocInfo->pcchAlloc.rnti = rnti;
3455 allocInfo->pcchAlloc.dlSf = sf;
3456 allocInfo->pcchAlloc.tbInfo[0].bytesReq = tbs;
3457 allocInfo->pcchAlloc.rbsReq = rb;
3458 allocInfo->pcchAlloc.tbInfo[0].imcs = mcs;
3459 allocInfo->pcchAlloc.tbInfo[0].noLyr = 1;
3460 allocInfo->pcchAlloc.nPrb = bo->nPrb;
3467 * @brief This function implements PDCCH allocation for common channels.
3471 * Function: rgSCHCmnCmnPdcchAlloc
3472 * Purpose: This function implements allocation of PDCCH for a UE.
3473 * 1. This uses index 0 of PDCCH table for efficiency.
3474 * 2. Uses he candidate PDCCH count for the aggr level.
3475 * 3. Look for availability for each candidate and choose
3476 * the first one available.
3478 * Invoked by: Scheduler
3480 * @param[in] RgSchCellCb *cell
3481 * @param[in] RgSchDlSf *sf
3482 * @return RgSchPdcch *
3483 * -# NULLP when unsuccessful
3486 RgSchPdcch *rgSCHCmnCmnPdcchAlloc(RgSchCellCb *cell,RgSchDlSf *subFrm)
/* Allocates a PDCCH from the common search space of "subFrm" at the
 * cell's configured common-channel aggregation level. Returns the
 * allocated PDCCH, or NULLP (with subFrm->isCceFailure set) when no
 * candidate is available. */
3488 CmLteAggrLvl aggrLvl;
3489 RgSchPdcchInfo *pdcchInfo;
3491 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3492 uint8_t numCce; /*store num CCEs based on
3493 aggregation level */
3495 aggrLvl = cellSch->dl.cmnChAggrLvl;
3497 pdcchInfo = &(subFrm->pdcchInfo);
3499 /* Updating the no. of nCce in pdcchInfo, in case if CFI
/* Re-init the PDCCH pool when the CCE count changed (e.g. on a CFI
 * switch); the source of nCce differs per build variant below. */
3502 if(subFrm->nCce != pdcchInfo->nCce)
3504 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3507 if(cell->nCce != pdcchInfo->nCce)
3509 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Translate the aggregation level into a CCE count. */
3515 case CM_LTE_AGGR_LVL4:
3518 case CM_LTE_AGGR_LVL8:
3521 case CM_LTE_AGGR_LVL16:
3528 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3531 pdcch->isSpsRnti = FALSE;
3533 /* Increment the CCE used counter in the current subframe */
3534 subFrm->cceCnt += numCce;
3535 pdcch->pdcchSearchSpace = RG_SCH_CMN_SEARCH_SPACE;
3540 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3541 subFrm->isCceFailure = TRUE;
3542 DU_LOG("\nDEBUG --> SCH : PDCCH ERR: NO PDDCH AVAIL IN COMMON SEARCH SPACE aggr:%u",
3549 * @brief This function implements bandwidth allocation for common channels.
3553 * Function: rgSCHCmnClcRbAlloc
3554 * Purpose: This function implements bandwith allocation logic
3555 * for common control channels.
3557 * Invoked by: Scheduler
3559 * @param[in] RgSchCellCb* cell
3560 * @param[in] uint32_t bo
3561 * @param[in] uint8_t cqi
3562 * @param[in] uint8_t *rb
3563 * @param[in] uint32_t *tbs
3564 * @param[in] uint8_t *mcs
3565 * @param[in] RgSchDlSf *sf
3570 Void rgSCHCmnClcRbAlloc
3583 static Void rgSCHCmnClcRbAlloc
3593 #endif /* LTEMAC_SPS */
/* Computes RB count, TBS and MCS for a common-channel payload of "bo"
 * bytes at the given CQI: maps CQI -> iTBS via the per-CFI tables, sizes
 * the RE need from the efficiency table, then grows the RB count until
 * the TB size covers the payload (capped at maxDlBwPerUe). Results are
 * written through the rb/tbs/mcs out-parameters. */
3596 RgSchCmnTbSzEff *effTbl;
3599 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3600 uint8_t cfi = cellSch->dl.currCfi;
3603 /* first get the CQI to MCS table and determine the number of RBs */
3604 effTbl = (RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]);
3605 iTbsVal = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[cqi];
3606 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3608 /* Efficiency is number of bits per 1024 REs */
3609 eff = (*effTbl)[iTbsVal];
3611 /* Get the number of REs needed for this bo */
3612 noRes = ((bo * 8 * 1024) / eff );
3614 /* Get the number of RBs needed for this transmission */
3615 /* Number of RBs = No of REs / No of REs per RB */
3616 tmpRb = RGSCH_CEIL(noRes, cellSch->dl.noResPerRb[cfi]);
3617 /* KWORK_FIX: added check to see if rb has crossed maxRb*/
3618 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3619 if (tmpRb > cellSch->dl.maxDlBwPerUe)
3621 tmpRb = cellSch->dl.maxDlBwPerUe;
/* Grow the RB count until the TB size (bytes) covers the payload, or
 * the per-UE DL bandwidth cap is reached. */
3623 while ((rgTbSzTbl[0][iTbsVal][tmpRb-1]/8) < bo &&
3624 (tmpRb < cellSch->dl.maxDlBwPerUe))
3627 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3629 *tbs = rgTbSzTbl[0][iTbsVal][tmpRb-1]/8;
3630 *rb = (uint8_t)tmpRb;
3631 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3639 * @brief Scheduling for MSG4.
3643 * Function: rgSCHCmnMsg4Alloc
3644 * Purpose: Scheduling for MSG4
3646 * Invoked by: Scheduler
3648 * @param[in] RgSchCellCb* cell
3649 * @param[in] RgSchRaCb* raCb
3650 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3654 static S16 rgSCHCmnMsg4Alloc(RgSchCellCb *cell,RgSchRaCb *raCb,RgSchCmnDlRbAllocInfo *allocInfo)
/* Allocates DL resources for one RA control block's Msg4: checks
 * subframe BW, obtains the Msg4 HARQ process, performs the dedicated
 * allocation, and queues the HARQ proc on the Msg4 TX list. Returns ROK
 * on success; error paths log and release the HARQ process. */
3656 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3659 /* SR_RACH_STATS : MSG4 TO BE TXED */
3661 /* Return if subframe BW exhausted */
3662 if (allocInfo->msg4Alloc.msg4DlSf->bw <=
3663 allocInfo->msg4Alloc.msg4DlSf->bwAssigned)
3665 DU_LOG("\nERROR --> SCH : bw<=bwAssigned");
/* Acquire the Msg4 HARQ process before reserving RBs. */
3669 if (rgSCHDhmGetMsg4HqProc(raCb, cellSch->dl.time) != ROK)
3671 DU_LOG("\nERROR --> SCH : rgSCHDhmGetMsg4HqProc failed");
3675 raCb->rbAllocInfo.dlSf = allocInfo->msg4Alloc.msg4DlSf;
3677 if (rgSCHCmnMsg4DedAlloc(cell, raCb) != ROK)
3679 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
3680 rgSCHDhmRlsHqpTb(raCb->dlHqE->msg4Proc, 0, FALSE);
3681 DU_LOG("\nERROR --> SCH : rgSCHCmnMsg4DedAlloc failed.");
/* Success: queue the Msg4 HARQ proc for TX and count it against the
 * per-subframe CCCH limit. */
3684 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4TxLst, &raCb->dlHqE->msg4Proc->reqLnk);
3685 raCb->dlHqE->msg4Proc->reqLnk.node = (PTR)raCb->dlHqE->msg4Proc;
3686 allocInfo->msg4Alloc.msg4DlSf->schdCcchUe++;
3693 * @brief This function implements PDCCH allocation for an UE.
3697 * Function: PdcchAlloc
3698 * Purpose: This function implements allocation of PDCCH for an UE.
3699 * 1. Get the aggregation level for the CQI of the UE.
3700 * 2. Get the candidate PDCCH count for the aggr level.
3701 * 3. Look for availability for each candidate and choose
3702 * the first one available.
3704 * Invoked by: Scheduler
3709 * @param[in] dciFrmt
3710 * @return RgSchPdcch *
3711 * -# NULLP when unsuccessful
3714 RgSchPdcch *rgSCHCmnPdcchAlloc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlSf *subFrm,uint8_t cqi,TfuDciFormat dciFrmt,Bool isDtx)
3716 CmLteAggrLvl aggrLvl;
3717 RgSchPdcchInfo *pdcchInfo;
3721 /* 3.1 consider the selected DCI format size in determining the
3722 * aggregation level */
3723 //TODO_SID Need to update. Currently using 4 aggregation level
/* Aggregation level is hard-coded here (CQI/DCI-format lookup is commented out). */
3724 aggrLvl = CM_LTE_AGGR_LVL2;//cellSch->dciAggrLvl[cqi][dciFrmt];
/* DCI format 1A for a UE flagged for common-search-space UL PDCCH is
 * allocated from the common space instead of the UE-specific space. */
3727 if((dciFrmt == TFU_DCI_FORMAT_1A) &&
3728 ((ue) && (ue->allocCmnUlPdcch)) )
3730 pdcch = rgSCHCmnCmnPdcchAlloc(cell, subFrm);
3731 /* Since CRNTI Scrambled */
3734 pdcch->dciNumOfBits = ue->dciSize.cmnSize[dciFrmt];
3735 // prc_trace_format_string(PRC_TRACE_GROUP_PS, PRC_TRACE_INFO_LOW,"Forcing alloc in CMN search spc size %d fmt %d \n",
3736 // pdcch->dciNumOfBits, dciFrmt);
3742 /* Incrementing aggrLvl by one step if it is not AGGR_LVL16 (max size)
3743 * in order to increase the redundancy bits for better decoding at the UE */
3746 if (aggrLvl != CM_LTE_AGGR_LVL16)
3750 case CM_LTE_AGGR_LVL2:
3751 aggrLvl = CM_LTE_AGGR_LVL4;
3753 case CM_LTE_AGGR_LVL4:
3754 aggrLvl = CM_LTE_AGGR_LVL8;
3756 case CM_LTE_AGGR_LVL8:
3757 aggrLvl = CM_LTE_AGGR_LVL16;
3766 pdcchInfo = &subFrm->pdcchInfo;
3768 /* Updating the no. of nCce in pdcchInfo, in case if CFI
/* NOTE(review): two nCce-refresh variants appear back to back (subFrm->nCce
 * vs cell->nCce); they are presumably alternatives under an elided #ifdef --
 * confirm against the full source. */
3771 if(subFrm->nCce != pdcchInfo->nCce)
3773 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3776 if(cell->nCce != pdcchInfo->nCce)
3778 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Not enough CCEs in the subframe for the chosen aggregation level. */
3782 if (pdcchInfo->nCce < (1 << (aggrLvl - 1)))
3784 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3785 subFrm->isCceFailure = TRUE;
3786 DU_LOG("\nDEBUG --> SCH : PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
3792 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3794 /* SR_RACH_STATS : Reset isTBMsg4 */
3795 pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4= FALSE;
3796 pdcch->dci.u.format0Info.isSrGrant = FALSE;
3798 pdcch->isSpsRnti = FALSE;
3800 /* Increment the CCE used counter in the current subframe */
3801 subFrm->cceCnt += aggrLvl;
3802 pdcch->pdcchSearchSpace = RG_SCH_UE_SPECIFIC_SEARCH_SPACE;
/* Secondary-cell UE: DCI size is fixed to the 5GTF A1/B1 maximum. */
3806 if (ue->cell != cell)
3808 /* Secondary Cell */
3809 //pdcch->dciNumOfBits = ue->dciSize.noUlCcSize[dciFrmt];
3810 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
3815 //pdcch->dciNumOfBits = ue->dciSize.dedSize[dciFrmt];
3816 //TODO_SID Need to update dci size.
3817 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
3823 pdcch->dciNumOfBits = cell->dciSize.size[dciFrmt];
3828 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3829 subFrm->isCceFailure = TRUE;
3831 DU_LOG("\nDEBUG --> SCH : PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
3838 * @brief This function implements BW allocation for CCCH SDU
3842 * Function: rgSCHCmnCcchSduDedAlloc
3843 * Purpose: Downlink bandwidth Allocation for CCCH SDU.
3845 * Invoked by: Scheduler
3847 * @param[in] RgSchCellCb* cell
3848 * @param[out] RgSchUeCb *ueCb
/*
 * Computes the RB/TBS/MCS requirement for a pending CCCH SDU (bo plus the
 * CCCH SDU header), clamps it to the remaining subframe bandwidth, books the
 * bandwidth on the subframe and fills the UE's DL RB-allocation control block.
 */
3852 static S16 rgSCHCmnCcchSduDedAlloc(RgSchCellCb *cell,RgSchUeCb *ueCb)
3854 RgSchDlHqEnt *hqE = NULLP;
3856 RgSchDlRbAlloc *rbAllocinfo = NULLP;
3857 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3861 uint8_t cfi = cellDl->currCfi;
3865 rbAllocinfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
/* Effective BO = pending CCCH BO + CCCH SDU header overhead. */
3867 effBo = ueCb->dlCcchInfo.bo + RGSCH_CCCH_SDU_HDRSIZE;
3870 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
3871 &rbAllocinfo->tbInfo[0].bytesReq,
3872 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
3873 #else /* LTEMAC_SPS */
3874 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
3875 &rbAllocinfo->tbInfo[0].bytesReq,\
3876 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
3878 #endif /* LTEMAC_SPS */
3881 /* Cannot exceed the total number of RBs in the cell */
3882 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
3883 rbAllocinfo->dlSf->bwAssigned)))
3885 /* Check if at least one allocation was possible.
3886 This may be the case where the Bw is very less and
3887 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
3888 if (rbAllocinfo->dlSf->bwAssigned == 0)
/* Nothing else scheduled yet: take the full subframe BW and raise iTbs
 * until the TB size covers effBo. */
3890 numRb = rbAllocinfo->dlSf->bw;
3891 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
3892 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
3896 rbAllocinfo->rbsReq = numRb;
3897 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
3898 /* DwPTS Scheduling Changes Start */
3900 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
3902 rbAllocinfo->tbInfo[0].bytesReq =
3903 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1,cfi);
3906 /* DwPTS Scheduling Changes End */
3907 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
3915 /* Update the subframe Allocated BW field */
3916 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
3917 rbAllocinfo->rbsReq;
3918 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
3919 rbAllocinfo->tbInfo[0].tbCb = &hqE->ccchSduProc->tbInfo[0];
3920 rbAllocinfo->rnti = ueCb->ueId;
3921 rbAllocinfo->tbInfo[0].noLyr = 1;
3928 * @brief This function implements BW allocation for MSG4
3932 * Function: rgSCHCmnMsg4DedAlloc
3933 * Purpose: Downlink bandwidth Allocation for MSG4.
3935 * Invoked by: Scheduler
3937 * @param[in] RgSchCellCb* cell
3938 * @param[out] RgSchRaCb *raCb
/*
 * MSG4 counterpart of rgSCHCmnCcchSduDedAlloc: computes RB/TBS/MCS for the
 * MSG4 payload (BO + MSG4 header + contention-resolution ID), clamps to the
 * remaining subframe bandwidth, books the bandwidth and fills the RA control
 * block's rbAllocInfo (addressed by the temporary C-RNTI).
 */
3942 static S16 rgSCHCmnMsg4DedAlloc(RgSchCellCb *cell,RgSchRaCb *raCb)
3945 RgSchDlRbAlloc *rbAllocinfo = &raCb->rbAllocInfo;
3949 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3950 uint8_t cfi = cellDl->currCfi;
/* Effective BO = CCCH BO + MSG4 header + contention resolution ID size. */
3954 effBo = raCb->dlCcchInfo.bo + RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE;
3957 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
3958 &rbAllocinfo->tbInfo[0].bytesReq,\
3959 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
3960 #else /* LTEMAC_SPS */
3961 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
3962 &rbAllocinfo->tbInfo[0].bytesReq,\
3963 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
3965 #endif /* LTEMAC_SPS */
3968 /* Cannot exceed the total number of RBs in the cell */
3969 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
3970 rbAllocinfo->dlSf->bwAssigned)))
3972 /* Check if at least one allocation was possible.
3973 This may be the case where the Bw is very less and
3974 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
3975 if (rbAllocinfo->dlSf->bwAssigned == 0)
/* Nothing else scheduled yet: take the full subframe BW and raise iTbs
 * until the TB size covers effBo. */
3977 numRb = rbAllocinfo->dlSf->bw;
3978 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
3979 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
3983 rbAllocinfo->rbsReq = numRb;
3984 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
3985 /* DwPTS Scheduling Changes Start */
3987 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
3989 rbAllocinfo->tbInfo[0].bytesReq =
3990 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1, cfi);
3993 /* DwPTS Scheduling Changes End */
3994 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4002 /* Update the subframe Allocated BW field */
4003 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4004 rbAllocinfo->rbsReq;
4005 rbAllocinfo->rnti = raCb->tmpCrnti;
4006 rbAllocinfo->tbInfo[0].tbCb = &raCb->dlHqE->msg4Proc->tbInfo[0];
4007 rbAllocinfo->tbInfo[0].schdlngForTb = TRUE;
4008 rbAllocinfo->tbInfo[0].noLyr = 1;
4015 * @brief This function implements scheduling for RA Response.
4019 * Function: rgSCHCmnDlRaRsp
4020 * Purpose: Downlink scheduling for RA responses.
4022 * Invoked by: Scheduler
4024 * @param[in] RgSchCellCb* cell
/*
 * TDD variant: walks the cell's per-subframe RACH-response list (over SFNs
 * and subframes), derives each RA-RNTI and its raReqLst index, and calls
 * rgSCHCmnRaRspAlloc() per pending RA-RNTI until resources run out.
 * NOTE(review): a second rgSCHCmnDlRaRsp definition follows below; the two
 * are presumably separated by an elided #ifdef LTE_TDD / #else -- confirm.
 */
4028 static Void rgSCHCmnDlRaRsp(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
4030 CmLteTimingInfo frm;
4031 CmLteTimingInfo schFrm;
4037 RgSchTddRachRspLst *rachRsp;
4038 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
/* Schedule RG_SCH_CMN_DL_DELTA subframes ahead of the current time. */
4043 frm = cell->crntTime;
4044 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4046 /* Compute the subframe for which allocation is being made */
4047 /* essentially, we need pointer to the dl frame for this subframe */
4048 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4050 /* Get the RACH Response scheduling related information
4051 * for the subframe with RA index */
4052 raIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][frm.subframe]-1;
4054 rachRsp = &cell->rachRspLst[raIdx];
4056 for(sfnIdx = 0; sfnIdx < rachRsp->numRadiofrms; sfnIdx++)
4058 /* For all scheduled RACH Responses in SFNs */
4060 RG_SCH_CMN_DECR_FRAME(schFrm.sfn, rachRsp->rachRsp[sfnIdx].sfnOffset);
4061 /* For all scheduled RACH Responses in subframes */
4063 subfrmIdx < rachRsp->rachRsp[sfnIdx].numSubfrms; subfrmIdx++)
4065 schFrm.subframe = rachRsp->rachRsp[sfnIdx].subframe[subfrmIdx];
4066 /* compute the last RA RNTI used in the previous subframe */
4067 raIdx = (((schFrm.sfn % cell->raInfo.maxRaSize) * \
4068 RGSCH_NUM_SUB_FRAMES * RGSCH_MAX_RA_RNTI_PER_SUBFRM) \
4071 /* For all RA RNTIs within a subframe */
4073 for(i=0; (i < RGSCH_MAX_RA_RNTI_PER_SUBFRM) && \
4074 (noRaRnti < RGSCH_MAX_TDD_RA_RSP_ALLOC); i++)
4076 rarnti = (schFrm.subframe + RGSCH_NUM_SUB_FRAMES*i + 1);
4077 rntiIdx = (raIdx + RGSCH_NUM_SUB_FRAMES*i);
/* Only allocate for RA-RNTIs that actually have pending RA requests. */
4079 if (cell->raInfo.raReqLst[rntiIdx].first != NULLP)
4081 /* compute the next RA RNTI */
4082 if (rgSCHCmnRaRspAlloc(cell, subFrm, rntiIdx,
4083 rarnti, noRaRnti, allocInfo) != ROK)
4085 /* The resources are exhausted */
4099 * @brief This function implements scheduling for RA Response.
4103 * Function: rgSCHCmnDlRaRsp
4104 * Purpose: Downlink scheduling for RA responses.
4106 * Invoked by: Scheduler
4108 * @param[in] RgSchCellCb* cell
4109 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * Non-TDD/5GTF variant: computes the RAR window start from the current time
 * (window gap = RA subframes + RAR window size + RAR wait period), then scans
 * up to raWinSize raReqLst slots, allocating RAR resources per pending
 * RA-RNTI via rgSCHCmnRaRspAlloc() until PDCCHs or bandwidth run out.
 * Only the first scanned slot gets a Backoff Indicator header estimate
 * (the "!i" term below).
 */
4113 static Void rgSCHCmnDlRaRsp(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
4115 CmLteTimingInfo frm;
4116 CmLteTimingInfo winStartFrm;
4118 uint8_t winStartIdx;
4122 RgSchCmnCell *sched;
4123 uint8_t i,noRaRnti=0;
4125 frm = cell->crntTime;
4126 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4128 /* Compute the subframe for which allocation is being made */
4129 /* essentially, we need pointer to the dl frame for this subframe */
4130 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4131 sched = RG_SCH_CMN_GET_CELL(cell);
4133 /* ccpu00132523 - Window Start calculated by considering RAR window size,
4134 * RAR Wait period, Subframes occupied for respective preamble format*/
4135 winGap = (sched->dl.numRaSubFrms-1) + (cell->rachCfg.raWinSize-1)
4136 +RGSCH_RARSP_WAIT_PERIOD;
4138 /* Window starting occasion is retrieved using the gap and tried to
4139 * fit to the size of raReqLst array*/
4140 RGSCHDECRFRMCRNTTIME(frm, winStartFrm, winGap);
4142 //5G_TODO TIMING update. Need to check
4143 winStartIdx = (winStartFrm.sfn & 1) * RGSCH_MAX_RA_RNTI+ winStartFrm.slot;
4145 for(i = 0; ((i < cell->rachCfg.raWinSize) && (noRaRnti < RG_SCH_CMN_MAX_CMN_PDCCH)); i++)
4147 raIdx = (winStartIdx + i) % RGSCH_RAREQ_ARRAY_SIZE;
4149 if (cell->raInfo.raReqLst[raIdx].first != NULLP)
4151 allocInfo->raRspAlloc[noRaRnti].biEstmt = \
4152 (!i * RGSCH_ONE_BIHDR_SIZE);
4153 rarnti = raIdx % RGSCH_MAX_RA_RNTI+ 1;
4154 if (rgSCHCmnRaRspAlloc(cell, subFrm, raIdx,
4155 rarnti, noRaRnti, allocInfo) != ROK)
4157 /* The resources are exhausted */
4160 /* ccpu00132523- If all the RAP ids are not scheduled then need not
4161 * proceed for next RA RNTIs*/
4162 if(allocInfo->raRspAlloc[noRaRnti].numRapids < cell->raInfo.raReqLst[raIdx].count)
4166 noRaRnti++; /* Max of RG_SCH_CMN_MAX_CMN_PDCCH RARNTIs
4167 for response allocation */
4176 * @brief This function allocates the resources for an RARNTI.
4180 * Function: rgSCHCmnRaRspAlloc
4181 * Purpose: Allocate resources to a RARNTI.
4182 * 0. Allocate PDCCH for sending the response.
4183 * 1. Locate the number of RA requests pending for the RARNTI.
4184 * 2. Compute the size of data to be built.
4185 * 3. Using common channel CQI, compute the number of RBs.
4187 * Invoked by: Scheduler
4189 * @param[in] RgSchCellCb *cell,
4190 * @param[in] RgSchDlSf *subFrm,
4191 * @param[in] uint16_t rarnti,
4192 * @param[in] uint8_t noRaRnti
4193 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4197 static S16 rgSCHCmnRaRspAlloc(RgSchCellCb *cell,RgSchDlSf *subFrm,uint16_t raIndex,uint16_t rarnti,uint8_t noRaRnti,RgSchCmnDlRbAllocInfo *allocInfo)
4199 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4200 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4204 /*ccpu00116700,ccpu00116708- Corrected the wrong type for mcs*/
4207 /* RACH handling related changes */
4208 Bool isAlloc = FALSE;
/* Function-local static: persists across calls, accumulating the number of
 * RAPIDs scheduled in the current subframe (reset per scheduling subframe,
 * see the ccpu00132523 note below). Makes this function non-reentrant. */
4209 static uint8_t schdNumRapid = 0;
4210 uint8_t remNumRapid = 0;
4215 uint8_t cfi = cellDl->currCfi;
4222 /* ccpu00132523: Resetting the schdRap Id count in every scheduling subframe*/
4229 if (subFrm->bw == subFrm->bwAssigned)
4231 DU_LOG("\nERROR --> SCH : bw == bwAssigned RARNTI:%d",rarnti);
4235 reqLst = &cell->raInfo.raReqLst[raIndex];
4236 if (reqLst->count == 0)
4238 DU_LOG("\nERROR --> SCH : reqLst Count=0 RARNTI:%d",rarnti);
4241 remNumRapid = reqLst->count;
4244 /* Limit number of rach rsps to maxMsg3PerUlsf */
4245 if ( schdNumRapid+remNumRapid > cellUl->maxMsg3PerUlSf )
4247 remNumRapid = cellUl->maxMsg3PerUlSf-schdNumRapid;
4253 /* Try allocating for as many RAPIDs as possible */
4254 /* BI sub-header size to the tbSize requirement */
4255 noBytes = RGSCH_GET_RAR_BYTES(remNumRapid) +\
4256 allocInfo->raRspAlloc[noRaRnti].biEstmt;
4257 if ((allwdTbSz = rgSCHUtlGetAllwdCchTbSz(noBytes*8, &nPrb, &mcs)) == -1)
4263 /* rgSCHCmnClcRbAllocForFxdTb(cell, allwdTbSz/8, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: fall back to scanning the iTbs-0 TB-size row;
 * otherwise derive the RB count directly from bits-per-RB. */
4264 if(cellDl->bitsPerRb==0)
4266 while ((rgTbSzTbl[0][0][rb]) <(uint32_t) allwdTbSz)
4274 rb = RGSCH_CEIL(allwdTbSz, cellDl->bitsPerRb);
4276 /* DwPTS Scheduling Changes Start */
4278 if (subFrm->sfType == RG_SCH_SPL_SF_DATA)
4280 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
4282 /* Calculate the less RE's because of DwPTS */
4283 lostRe = rb * (cellDl->noResPerRb[cfi] -
4284 cellDl->numReDwPts[cfi]);
4286 /* Increase number of RBs in Spl SF to compensate for lost REs */
4287 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
4290 /* DwPTS Scheduling Changes End */
4292 /*ccpu00115595- end*/
4293 if (rb > subFrm->bw - subFrm->bwAssigned)
4298 /* Allocation succeeded for 'remNumRapid' */
4301 DU_LOG("\nINFO --> SCH : RAR alloc noBytes:%u,allwdTbSz:%u,tbs:%u,rb:%u\n",
4302 noBytes,allwdTbSz,tbs,rb);
4307 DU_LOG("\nERROR --> SCH : BW alloc Failed");
/* Book the bandwidth and publish the RAR allocation for this RA-RNTI slot. */
4311 subFrm->bwAssigned = subFrm->bwAssigned + rb;
4313 /* Fill AllocInfo structure */
4314 allocInfo->raRspAlloc[noRaRnti].rnti = rarnti;
4315 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].bytesReq = tbs;
4316 allocInfo->raRspAlloc[noRaRnti].rbsReq = rb;
4317 allocInfo->raRspAlloc[noRaRnti].dlSf = subFrm;
4318 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].imcs = mcs;
4319 allocInfo->raRspAlloc[noRaRnti].raIndex = raIndex;
4320 /* RACH changes for multiple RAPID handling */
4321 allocInfo->raRspAlloc[noRaRnti].numRapids = remNumRapid;
4322 allocInfo->raRspAlloc[noRaRnti].nPrb = nPrb;
4323 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].noLyr = 1;
4324 allocInfo->raRspAlloc[noRaRnti].vrbgReq = RGSCH_CEIL(nPrb,MAX_5GTF_VRBG_SIZE);
4325 schdNumRapid += remNumRapid;
4329 /***********************************************************
4331 * Func : rgSCHCmnUlAllocFillRbInfo
4333 * Desc : Fills the start RB and the number of RBs for
4334 * uplink allocation.
*
* rbStart = allocated subband start * subband size, offset by the
* CFI-dependent UL bandwidth start RB; numRb = subbands * subband size.
*
**********************************************************/
4343 Void rgSCHCmnUlAllocFillRbInfo(RgSchCellCb *cell,RgSchUlSf *sf,RgSchUlAlloc *alloc)
4345 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4346 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4347 uint8_t cfi = cellDl->currCfi;
4350 alloc->grnt.rbStart = (alloc->sbStart * cellUl->sbSize) +
4351 cell->dynCfiCb.bwInfo[cfi].startRb;
4353 /* Num RBs = numSbAllocated * sbSize - less RBs in the last SB */
4354 alloc->grnt.numRb = (alloc->numSb * cellUl->sbSize);
4360 * @brief Grant request for Msg3.
4364 * Function : rgSCHCmnMsg3GrntReq
4366 * This is invoked by downlink scheduler to request allocation
4369 * - Attempt to allocate msg3 in the current msg3 subframe
4370 * Allocation attempt based on whether preamble is from group A
4371 * and the value of MESSAGE_SIZE_GROUP_A
4372 * - Link allocation with passed RNTI and msg3 HARQ process
4373 * - Set the HARQ process ID (*hqProcIdRef)
4375 * @param[in] RgSchCellCb *cell
4376 * @param[in] CmLteRnti rnti
4377 * @param[in] Bool preamGrpA
4378 * @param[in] RgSchUlHqProcCb *hqProc
4379 * @param[out] RgSchUlAlloc **ulAllocRef
4380 * @param[out] uint8_t *hqProcIdRef
4383 static Void rgSCHCmnMsg3GrntReq
4388 RgSchUlHqProcCb *hqProc,
4389 RgSchUlAlloc **ulAllocRef,
4390 uint8_t *hqProcIdRef
4393 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4394 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
4396 RgSchUlAlloc *alloc;
/* *ulAllocRef stays NULLP on every failure path below. */
4401 *ulAllocRef = NULLP;
4403 /* Fix: ccpu00120610 Use remAllocs from subframe during msg3 allocation */
4404 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Preamble group selects the configured subband count and MCS index. */
4408 if (preamGrpA == FALSE)
4410 numSb = cellUl->ra.prmblBNumSb;
4411 iMcs = cellUl->ra.prmblBIMcs;
4415 numSb = cellUl->ra.prmblANumSb;
4416 iMcs = cellUl->ra.prmblAIMcs;
4419 if ((hole = rgSCHUtlUlHoleFirst(sf)) != NULLP)
4421 if(*sf->allocCountRef == 0)
4423 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4424 /* Reinitialize the hole */
4425 if (sf->holeDb->count == 1 && (hole->start == 0)) /* Sanity check of holeDb */
/* NOTE(review): the same assignment appears twice below; presumably two
 * branches of an elided #ifdef -- confirm against the full source. */
4427 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4428 /* Re-Initialize available subbands because of CFI change*/
4429 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4433 DU_LOG("\nERROR --> SCH : holeDb sanity check failed RNTI:%d",rnti);
4436 if (numSb <= hole->num)
/* Carve the allocation out of the hole and fill the UL grant fields. */
4439 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
4440 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
4441 alloc->grnt.iMcs = iMcs;
4442 alloc->grnt.iMcsCrnt = iMcs;
4443 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
4444 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
4445 /* To include the length and ModOrder in DataRecp Req.*/
4446 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
4447 RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
4448 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
4449 alloc->grnt.nDmrs = 0;
4450 alloc->grnt.hop = 0;
4451 alloc->grnt.delayBit = 0;
4452 alloc->grnt.isRtx = FALSE;
4453 *ulAllocRef = alloc;
/* Bind the MSG3 HARQ process to this allocation and start a new TX with
 * (maxMsg3Tx - 1) remaining retransmissions. */
4454 *hqProcIdRef = (cellUl->msg3SchdHqProcIdx);
4455 hqProc->procId = *hqProcIdRef;
4456 hqProc->ulSfIdx = (cellUl->msg3SchdIdx);
4459 alloc->pdcch = FALSE;
4460 alloc->forMsg3 = TRUE;
4461 alloc->hqProc = hqProc;
4462 rgSCHUhmNewTx(hqProc, (uint8_t)(cell->rachCfg.maxMsg3Tx - 1), alloc);
4463 DU_LOG("\nDEBUG --> SCH : RNTI:%d MSG3 ALLOC proc(%lu)procId(%d)schdIdx(%d)\n",
4465 ((PTR)alloc->hqProc),
4466 alloc->hqProc->procId,
4467 alloc->hqProc->ulSfIdx);
4468 DU_LOG("\nDEBUG --> SCH : alloc(%p)maxMsg3Tx(%d)",
4470 cell->rachCfg.maxMsg3Tx);
4479 * @brief This function determines the allocation limits and
4480 * parameters that aid in DL scheduling.
4484 * Function: rgSCHCmnDlSetUeAllocLmt
4485 * Purpose: This function determines the Maximum RBs
4486 * a UE is eligible to get based on softbuffer
4487 * limitation and cell->maxDlBwPerUe. The Codeword
4488 * specific parameters like iTbs, eff and noLyrs
4489 * are also set in this function. This function
4490 * is called while UE configuration and UeDlCqiInd.
4492 * Invoked by: Scheduler
4494 * @param[in] RgSchCellCb *cellCb
4495 * @param[in] RgSchCmnDlUe *ueDl
4499 static Void rgSCHCmnDlSetUeAllocLmt(RgSchCellCb *cell,RgSchCmnDlUe *ueDl,Bool isEmtcUe)
4503 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4504 uint8_t cfi = cellSch->dl.currCfi;
/* eMTC UEs use the eMTC CQI->iTbs tables; the layout of the per-codeword,
 * per-layer iTbs/eff lookups below is otherwise identical to the non-eMTC
 * branch. */
4508 if(TRUE == isEmtcUe)
4510 /* ITbs for CW0 for 1 Layer Tx */
4511 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4512 [ueDl->mimoInfo.cwInfo[0].cqi];
4513 /* ITbs for CW0 for 2 Layer Tx */
4514 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4515 [ueDl->mimoInfo.cwInfo[0].cqi];
4516 /* Eff for CW0 for 1 Layer Tx */
4517 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4518 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4519 /* Eff for CW0 for 2 Layer Tx */
4520 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4521 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4523 /* ITbs for CW1 for 1 Layer Tx */
4524 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4525 [ueDl->mimoInfo.cwInfo[1].cqi];
4526 /* ITbs for CW1 for 2 Layer Tx */
4527 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4528 [ueDl->mimoInfo.cwInfo[1].cqi];
4529 /* Eff for CW1 for 1 Layer Tx */
4530 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4531 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4532 /* Eff for CW1 for 2 Layer Tx */
4533 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4534 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4539 /* ITbs for CW0 for 1 Layer Tx */
4540 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4541 [ueDl->mimoInfo.cwInfo[0].cqi];
4542 /* ITbs for CW0 for 2 Layer Tx */
4543 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4544 [ueDl->mimoInfo.cwInfo[0].cqi];
4545 /* Eff for CW0 for 1 Layer Tx */
4546 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4547 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4548 /* Eff for CW0 for 2 Layer Tx */
4549 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4550 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4552 /* ITbs for CW1 for 1 Layer Tx */
4553 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4554 [ueDl->mimoInfo.cwInfo[1].cqi];
4555 /* ITbs for CW1 for 2 Layer Tx */
4556 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4557 [ueDl->mimoInfo.cwInfo[1].cqi];
4558 /* Eff for CW1 for 1 Layer Tx */
4559 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4560 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4561 /* Eff for CW1 for 2 Layer Tx */
4562 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4563 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4567 // ueDl->laCb.cqiBasediTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0] * 100;
4569 /* Assigning noLyrs to each CW assuming optimal Spatial multiplexing
/* CW0 gets floor(ri/2) layers (min 1); CW1 gets the remainder. */
4571 (ueDl->mimoInfo.ri/2 == 0)? (ueDl->mimoInfo.cwInfo[0].noLyr = 1) : \
4572 (ueDl->mimoInfo.cwInfo[0].noLyr = ueDl->mimoInfo.ri/2);
4573 ueDl->mimoInfo.cwInfo[1].noLyr = ueDl->mimoInfo.ri - ueDl->mimoInfo.cwInfo[0].noLyr;
4574 /* rg002.101:ccpu00102106: correcting DL harq softbuffer limitation logic.
4575 * The maxTbSz is the maximum number of PHY bits a harq process can
4576 * hold. Hence we limit our allocation per harq process based on this.
4577 * Earlier implementation we misinterpreted the maxTbSz to be per UE
4578 * per TTI, but in fact it is per Harq per TTI. */
4579 /* rg002.101:ccpu00102106: cannot exceed the harq Tb Size
4580 * and harq Soft Bits limit.*/
4582 /* Considering iTbs corresponding to 2 layer transmission for
4583 * codeword0(approximation) and the maxLayers supported by
4584 * this UE at this point of time. */
4585 RG_SCH_CMN_TBS_TO_MODODR(ueDl->mimoInfo.cwInfo[0].iTbs[1], modOrder);
4587 /* Bits/modOrder gives #REs, #REs/noResPerRb gives #RBs */
4588 /* rg001.301 -MOD- [ccpu00119213] : avoiding wraparound */
4589 maxRb = ((ueDl->maxSbSz)/(cellSch->dl.noResPerRb[cfi] * modOrder *\
4590 ueDl->mimoInfo.ri));
4591 if (cellSch->dl.isDlFreqSel)
4593 /* Rounding off to left nearest multiple of RBG size */
4594 maxRb -= maxRb % cell->rbgSize;
4596 ueDl->maxRb = RGSCH_MIN(maxRb, cellSch->dl.maxDlBwPerUe);
4597 if (cellSch->dl.isDlFreqSel)
4599 /* Rounding off to right nearest multiple of RBG size */
4600 if (ueDl->maxRb % cell->rbgSize)
4602 ueDl->maxRb += (cell->rbgSize -
4603 (ueDl->maxRb % cell->rbgSize));
4607 /* Set the index of the cwInfo, which is better in terms of
4608 * efficiency. If RI<2, only 1 CW, hence btrCwIdx shall be 0 */
4609 if (ueDl->mimoInfo.ri < 2)
4611 ueDl->mimoInfo.btrCwIdx = 0;
4615 if (ueDl->mimoInfo.cwInfo[0].eff[ueDl->mimoInfo.cwInfo[0].noLyr-1] <\
4616 ueDl->mimoInfo.cwInfo[1].eff[ueDl->mimoInfo.cwInfo[1].noLyr-1])
4618 ueDl->mimoInfo.btrCwIdx = 1;
4622 ueDl->mimoInfo.btrCwIdx = 0;
4632 * @brief This function updates TX Scheme.
4636 * Function: rgSCHCheckAndSetTxScheme
4637 * Purpose: Compares the CQI-based iTbs tracked by link
4638 * adaptation (laCb[0].cqiBasediTbs) against the iTbs
4639 * currently in use for codeword 0. If the in-use iTbs
4640 * lags the CQI-based value by more than the configured
4641 * threshold, forces transmit diversity for the UE;
4642 * the force is cleared once the in-use iTbs reaches max.
4644 * Invoked by: Scheduler
4646 * @param[in] RgSchCellCb *cell
4647 * @param[in] RgSchUeCb *ue
4651 static Void rgSCHCheckAndSetTxScheme(RgSchCellCb *cell,RgSchUeCb *ue)
4653 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4654 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue ,cell);
4655 uint8_t cfi = cellSch->dl.currCfi;
4657 uint8_t cqiBasediTbs;
/* maxiTbs = iTbs mapped from the highest CQI for the current CFI. */
4661 maxiTbs = (*(RgSchCmnCqiToTbs*)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4662 [RG_SCH_CMN_MAX_CQI - 1];
4663 cqiBasediTbs = (ueDl->laCb[0].cqiBasediTbs)/100;
4664 actualiTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0];
4666 if((actualiTbs < RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR) && (cqiBasediTbs >
4667 actualiTbs) && ((cqiBasediTbs - actualiTbs) > RG_SCH_TXSCHEME_CHNG_THRSHD))
4669 RG_SCH_CMN_SET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
4672 if(actualiTbs >= maxiTbs)
4674 RG_SCH_CMN_UNSET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
4681 * @brief This function determines the allocation limits and
4682 * parameters that aid in DL scheduling.
4686 * Function: rgSCHCmnDlSetUeAllocLmtLa
4687 * Purpose: Link-adaptation update of the per-codeword iTbs.
4688 * On a fresh CQI report, blends the reported iTbs into
4689 * the smoothed cqiBasediTbs (80/20 filter), ignoring
4690 * outlier reports that differ from the in-use iTbs by
4691 * more than 5 (and resetting the LA state after 10
4692 * consecutive ignored reports). The resulting iTbs is
4693 * clamped to the cell's max DL iTbs and written back
4694 * to the UE's 5GTF MCS.
4694 * Invoked by: Scheduler
4696 * @param[in] RgSchCellCb *cell
4697 * @param[in] RgSchUeCb *ue
4701 Void rgSCHCmnDlSetUeAllocLmtLa(RgSchCellCb *cell,RgSchUeCb *ue)
4705 uint8_t reportediTbs;
4706 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4707 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
4708 uint8_t cfi = cellSch->dl.currCfi;
4713 maxiTbs = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[RG_SCH_CMN_MAX_CQI - 1];
4714 if(ueDl->cqiFlag == TRUE)
4716 for(cwIdx=0; cwIdx < RG_SCH_CMN_MAX_CW_PER_UE; cwIdx++)
4720 /* Calculating the reported iTbs for code word 0 */
4721 reportediTbs = ue->ue5gtfCb.mcs;
4723 iTbsNew = (S32) reportediTbs;
4725 if(!ueDl->laCb[cwIdx].notFirstCqi)
4727 /* This is the first CQI report from UE */
4728 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
4729 ueDl->laCb[cwIdx].notFirstCqi = TRUE;
4731 else if ((RG_ITBS_DIFF(reportediTbs, ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0])) > 5)
4733 /* Ignore this iTBS report and mark that last iTBS report was */
4734 /* ignored so that subsequently we reset the LA algorithm */
4735 ueDl->laCb[cwIdx].lastiTbsIgnored = TRUE;
4736 ueDl->laCb[cwIdx].numLastiTbsIgnored++;
4737 if( ueDl->laCb[cwIdx].numLastiTbsIgnored > 10)
4739 /* CQI reported by UE is not catching up. Reset the LA algorithm */
4740 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
4741 ueDl->laCb[cwIdx].deltaiTbs = 0;
4742 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
4743 ueDl->laCb[cwIdx].numLastiTbsIgnored = 0;
4748 if (ueDl->laCb[cwIdx].lastiTbsIgnored != TRUE)
/* Normal case: exponential smoothing, 20% new report / 80% history
 * (values are scaled by 100 to keep integer precision). */
4750 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
4751 (80 * ueDl->laCb[cwIdx].cqiBasediTbs))/100;
4755 /* Reset the LA as iTbs in use caught up with the value */
4756 /* reported by UE. */
4757 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
4758 (80 * ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] * 100))/100;
4759 ueDl->laCb[cwIdx].deltaiTbs = 0;
4760 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
4764 iTbsNew = (ueDl->laCb[cwIdx].cqiBasediTbs + ueDl->laCb[cwIdx].deltaiTbs)/100;
4766 RG_SCH_CHK_ITBS_RANGE(iTbsNew, maxiTbs);
4768 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] = RGSCH_MIN(iTbsNew, cell->thresholds.maxDlItbs);
4769 //ueDl->mimoInfo.cwInfo[cwIdx].iTbs[1] = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
4771 ue->ue5gtfCb.mcs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
4773 DU_LOG("\nINFO --> SCH : reportediTbs[%d] cqiBasediTbs[%d] deltaiTbs[%d] iTbsNew[%d] mcs[%d] cwIdx[%d]\n",
4774 reportediTbs, ueDl->laCb[cwIdx].cqiBasediTbs, ueDl->laCb[cwIdx].deltaiTbs,
4775 iTbsNew, ue->ue5gtfCb.mcs, cwIdx);
/* TX-scheme check applies only to TM3/TM4 UEs (per the inverted test). */
4779 if((ue->mimoInfo.txMode != RGR_UE_TM_3) && (ue->mimoInfo.txMode != RGR_UE_TM_4))
4784 ueDl->cqiFlag = FALSE;
4791 /***********************************************************
4793 * Func : rgSCHCmnDlHqPResetTemp
4795 * Desc : Reset whatever variables were temporarily used
4796 * during HARQ-process scheduling: detaches the proc
4797 * from the request and scheduled lists by nulling
4798 * its link nodes.
4804 **********************************************************/
4805 Void rgSCHCmnDlHqPResetTemp(RgSchDlHqProcCb *hqP)
4808 /* Fix: syed having a hqP added to Lists for RB assignment rather than
4809 * a UE, as adding UE was limiting handling some scenarios */
4810 hqP->reqLnk.node = (PTR)NULLP;
4811 hqP->schdLstLnk.node = (PTR)NULLP;
4814 } /* rgSCHCmnDlHqPResetTemp */
4816 /***********************************************************
4818 * Func : rgSCHCmnDlUeResetTemp
4820 * Desc : Reset whatever variables were temporarily used
4821 * during UE scheduling. Clears the UE's DL RB-alloc
4822 * control block (preserving laaCb and rnti) when the
4823 * given HARQ proc is the scheduled one, then resets
4824 * the HARQ proc's temporary links.
4829 **********************************************************/
4830 Void rgSCHCmnDlUeResetTemp(RgSchUeCb *ue,RgSchDlHqProcCb *hqP)
4832 RgSchDlRbAlloc *allocInfo;
4833 RgSchCmnDlUe *cmnUe = RG_SCH_CMN_GET_DL_UE(ue,hqP->hqE->cell);
4839 /* Fix : syed check for UE's existence was useless.
4840 * Instead we need to check that reset is done only for the
4841 * information of a scheduled harq proc, which is cmnUe->proc.
4842 * Reset should not be done for non-scheduled hqP */
4843 if((cmnUe->proc == hqP) || (cmnUe->proc == NULLP))
4845 cmnUe->proc = NULLP;
4846 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, hqP->hqE->cell);
/* Save laaCb across the memset so the LAA context survives the reset. */
4848 tmpCb = allocInfo->laaCb;
4850 memset(allocInfo, 0, sizeof(RgSchDlRbAlloc));
4851 allocInfo->rnti = ue->ueId;
4853 allocInfo->laaCb = tmpCb;
4855 /* Fix: syed moving this to a common function for both scheduled
4856 * and non-scheduled UEs */
4857 cmnUe->outStndAlloc = 0;
4859 rgSCHCmnDlHqPResetTemp(hqP);
4862 } /* rgSCHCmnDlUeResetTemp */
4864 /***********************************************************
4866 * Func : rgSCHCmnUlUeResetTemp
4868 * Desc : Reset whatever variables were temporarily used
4869 * during UE scheduling: zeroes the UE's common UL
4870 * allocation record for the next scheduling round.
4877 **********************************************************/
4878 Void rgSCHCmnUlUeResetTemp(RgSchCellCb *cell,RgSchUeCb *ue)
4880 RgSchCmnUlUe *cmnUlUe = RG_SCH_CMN_GET_UL_UE(ue,cell);
4882 memset(&cmnUlUe->alloc, 0, sizeof(cmnUlUe->alloc));
4885 } /* rgSCHCmnUlUeResetTemp */
4890 * @brief This function fills the PDCCH information from dlProc.
4894 * Function: rgSCHCmnFillPdcch
4895 * Purpose: This function fills in the PDCCH information
4896 * obtained from the RgSchDlRbAlloc
4897 * during common channel scheduling(P, SI, RA - RNTI's).
4899 * Invoked by: Downlink Scheduler
4901 * @param[out] RgSchPdcch* pdcch
4902 * @param[in] RgSchDlRbAlloc* rbAllocInfo
/* Fills a PDCCH for common-channel (Paging/SI/RA RNTI) grants. Only DCI
 * format 1A with RA type-2 localized allocation (and, under RG_5GTF, the
 * B1/B2 formats) are populated here; format 1 handles type-0 allocations. */
4906 Void rgSCHCmnFillPdcch(RgSchCellCb *cell,RgSchPdcch *pdcch,RgSchDlRbAlloc *rbAllocInfo)
4909 /* common channel pdcch filling,
4910 * only 1A and Local is supported */
4911 pdcch->rnti = rbAllocInfo->rnti;
4912 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
4913 switch(rbAllocInfo->dciFormat)
4915 #ifdef RG_5GTF /* ANOOP: ToDo: DCI format B1/B2 filling */
4916 case TFU_DCI_FORMAT_B1:
/* 5GTF format B1: most fields are hard-coded placeholders (TODO_SID
 * markers below); HARQ id/NDI are forced to 0 for common channels. */
4919 pdcch->dci.u.formatB1Info.formatType = 0;
4920 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].cmnGrnt.xPDSCHRange;
4921 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].cmnGrnt.rbAssign;
4922 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = 0;
4923 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
4924 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = 0;
4925 //pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].ndi;
4926 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].cmnGrnt.rv;
4927 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
4928 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
4929 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
4930 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
4931 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
4932 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
4933 //TODO_SID: Need to update
4934 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
4935 pdcch->dci.u.formatB1Info.beamSwitch = 0;
4936 pdcch->dci.u.formatB1Info.SRS_Config = 0;
4937 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
4938 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
4939 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
4940 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].cmnGrnt.SCID;
4941 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
4942 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
4943 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
4945 break; /* case TFU_DCI_FORMAT_B1: */
4948 case TFU_DCI_FORMAT_B2:
4950 //DU_LOG("\nINFO --> SCH : RG_5GTF:: Pdcch filling with DCI format B2\n");
4952 break; /* case TFU_DCI_FORMAT_B2: */
4955 case TFU_DCI_FORMAT_1A:
4956 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
4958 /*Nprb indication at PHY for common Ch
4959 *setting least significant bit of tpc field to 1 if
4960 nPrb=3 and 0 otherwise. */
4961 if (rbAllocInfo->nPrb == 3)
4963 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 1;
4967 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 0;
4969 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
4970 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
4971 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
4972 rbAllocInfo->tbInfo[0].imcs;
/* New transmission for common channels: NDI/RV fixed at 0. */
4973 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = 0;
4974 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = 0;
4976 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
/* RIV encodes the contiguous (type-2) RB start/length per 36.213. */
4978 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
4979 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
4980 rbAllocInfo->allocInfo.raType2.rbStart,
4981 rbAllocInfo->allocInfo.raType2.numRb);
4984 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = \
4987 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
4988 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
4991 break; /* case TFU_DCI_FORMAT_1A: */
4992 case TFU_DCI_FORMAT_1:
4993 pdcch->dci.u.format1Info.tpcCmd = 0;
4994 /* Avoiding this check,as we dont support Type1 RA */
4996 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Type-0 bitmap is serialized MSB-first into four bytes. */
4999 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5000 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5001 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5003 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5004 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5006 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5007 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5009 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5010 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
5014 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5015 pdcch->dci.u.format1Info.allocInfo.ndi = 0;
5016 pdcch->dci.u.format1Info.allocInfo.mcs = rbAllocInfo->tbInfo[0].imcs;
5017 pdcch->dci.u.format1Info.allocInfo.rv = 0;
5019 pdcch->dci.u.format1Info.dai = 1;
5023 DU_LOG("\nERROR --> SCH : Allocator's icorrect "
5024 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
5032 * @brief This function finds whether the subframe is special subframe or not.
5036 * Function: rgSCHCmnIsSplSubfrm
5037 * Purpose: This function finds the subframe index of the special subframe
5038 * and finds whether the current DL index matches it or not.
5040 * Invoked by: Scheduler
5042 * @param[in] uint8_t splfrmCnt
5043 * @param[in] uint8_t curSubfrmIdx
5044 * @param[in] uint8_t periodicity
5045 * @param[in] RgSchTddSubfrmInfo *subfrmInfo
/* TDD helper: derives the index of the splfrmCnt-th special subframe from
 * the DL-subframe counts of the two half-frames, then returns TRUE iff it
 * equals curSubfrmIdx. Return statements lie on lines elided from this
 * listing — TODO confirm against full source. */
5049 static Bool rgSCHCmnIsSplSubfrm(uint8_t splfrmCnt,uint8_t curSubfrmIdx,uint8_t periodicity,RgSchTddSubfrmInfo *subfrmInfo)
5051 uint8_t dlSfCnt = 0;
5052 uint8_t splfrmIdx = 0;
5056 if(periodicity == RG_SCH_CMN_5_MS_PRD)
/* 5 ms switch periodicity: two special subframes per radio frame, so the
 * DL count alternates between odd (half-frame 1 extra) and even counts. */
5060 dlSfCnt = ((splfrmCnt-1)/2) *\
5061 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5062 dlSfCnt = dlSfCnt + subfrmInfo->numFrmHf1;
5066 dlSfCnt = (splfrmCnt/2) * \
5067 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5072 dlSfCnt = splfrmCnt * subfrmInfo->numFrmHf1;
5074 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1 +\
5075 (periodicity*splfrmCnt - dlSfCnt);
5079 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1;
5082 if(splfrmIdx == curSubfrmIdx)
5091 * @brief This function updates DAI or UL index.
5095 * Function: rgSCHCmnUpdHqAndDai
5096 * Purpose: Updates the DAI based on UL-DL Configuration
5097 * index and UE. It also updates the HARQ feedback
5098 * time and 'm' index.
5102 * @param[in] RgDlHqProcCb *hqP
5103 * @param[in] RgSchDlSf *subFrm
5104 * @param[in] RgSchDlHqTbCb *tbCb
5105 * @param[in] uint8_t tbAllocIdx
/* TDD path: stamps the TB with the SFN/subframe at which the UE must send
 * HARQ feedback (from the DL subframe's fdbk info), then — for the first TB
 * of a UE allocation only — refreshes the DL/UL DAI counters via
 * rgSCHCmnUpdDai() and mirrors them into the PDCCH and HARQ proc. */
5109 static Void rgSCHCmnUpdHqAndDai(RgSchDlHqProcCb *hqP,RgSchDlSf *subFrm,RgSchDlHqTbCb *tbCb,uint8_t tbAllocIdx)
5111 RgSchUeCb *ue = hqP->hqE->ue;
5116 /* set the time at which UE shall send the feedback
5117 * for this process */
5118 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5119 subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5120 tbCb->fdbkTime.subframe = subFrm->dlFdbkInfo.subframe;
5121 tbCb->m = subFrm->dlFdbkInfo.m;
/* Alternate branch (condition elided in this listing): take feedback
 * timing from the subframe already bound to the HARQ proc. */
5125 /* set the time at which UE shall send the feedback
5126 * for this process */
5127 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5128 hqP->subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5129 tbCb->fdbkTime.subframe = hqP->subFrm->dlFdbkInfo.subframe;
5130 tbCb->m = hqP->subFrm->dlFdbkInfo.m;
5133 /* ccpu00132340-MOD- DAI need to be updated for first TB only*/
5134 if(ue && !tbAllocIdx)
5136 Bool havePdcch = (tbCb->hqP->pdcch ? TRUE : FALSE);
5139 dlDai = rgSCHCmnUpdDai(ue, &tbCb->fdbkTime, tbCb->m, havePdcch,tbCb->hqP,
5142 {/* Non SPS occasions */
5143 tbCb->hqP->pdcch->dlDai = dlDai;
5144 /* hqP->ulDai is used for N1 resource filling
5145 * when SPS occaions present in a bundle */
5146 tbCb->hqP->ulDai = tbCb->dai;
5147 tbCb->hqP->dlDai = dlDai;
5151 /* Updatijng pucchFdbkIdx for both PUCCH or PUSCH
5153 tbCb->pucchFdbkIdx = tbCb->hqP->ulDai;
5160 * @brief This function updates DAI or UL index.
5164 * Function: rgSCHCmnUpdDai
5165 * Purpose: Updates the DAI in the ack-nack info, a valid
5166 * ue should be passed
5170 * @param[in] RgDlHqProcCb *hqP
5171 * @param[in] RgSchDlSf *subFrm
5172 * @param[in] RgSchDlHqTbCb *tbCb
5173 * @return uint8_t dlDai
/* Finds (or creates) the UE's TDD ACK/NACK feedback record for the given
 * feedback time on the relevant serving cell, bumps the DL/UL DAI counters
 * in it, stores the N1 TPC index, and returns the updated dlDai. ulDai is
 * optionally reported back through the out-pointer. */
5176 uint8_t rgSCHCmnUpdDai
5179 CmLteTimingInfo *fdbkTime,
5182 RgSchDlHqProcCb *hqP,
5186 RgSchTddANInfo *anInfo;
5187 uint8_t servCellIdx;
5188 uint8_t ackNackFdbkArrSize;
5193 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5194 hqP->hqE->cell->cellId,
5197 servCellIdx = RGSCH_PCELL_INDEX;
5199 ackNackFdbkArrSize = hqP->hqE->cell->ackNackFdbkArrSize;
5201 {/* SPS on primary cell */
5202 servCellIdx = RGSCH_PCELL_INDEX;
5203 ackNackFdbkArrSize = ue->cell->ackNackFdbkArrSize;
5207 anInfo = rgSCHUtlGetUeANFdbkInfo(ue, fdbkTime,servCellIdx);
5209 /* If no ACK/NACK feedback already present, create a new one */
/* New record: claim the next free slot in the per-cell circular anInfo
 * array and initialize it for this feedback occasion. */
5212 anInfo = &ue->cellInfo[servCellIdx]->anInfo[ue->cellInfo[servCellIdx]->nextFreeANIdx];
5213 anInfo->sfn = fdbkTime->sfn;
5214 anInfo->subframe = fdbkTime->subframe;
5215 anInfo->latestMIdx = m;
5216 /* Fixing DAI value - ccpu00109162 */
5217 /* Handle TDD case as in MIMO definition of the function */
5223 anInfo->isSpsOccasion = FALSE;
5224 /* set the free Index to store Ack/Nack Information*/
5225 ue->cellInfo[servCellIdx]->nextFreeANIdx = (ue->cellInfo[servCellIdx]->nextFreeANIdx + 1) %
/* Existing record: just advance the m-index and DAI counters. */
5231 anInfo->latestMIdx = m;
5232 /* Fixing DAI value - ccpu00109162 */
5233 /* Handle TDD case as in MIMO definition of the function */
5234 anInfo->ulDai = anInfo->ulDai + 1;
5237 anInfo->dlDai = anInfo->dlDai + 1;
5241 /* ignoring the Scell check,
5242 * for primary cell this field is unused*/
5245 anInfo->n1ResTpcIdx = hqP->tpc;
5249 {/* As this not required for release pdcch */
5250 *ulDai = anInfo->ulDai;
5253 return (anInfo->dlDai);
5256 #endif /* ifdef LTE_TDD */
/* Debug counter: retransmissions per redundancy-version index (0..3) per
 * TB index (0..1); incremented in rgSCHCmnFillHqPTb on DTX retx. */
5258 uint32_t rgHqRvRetxCnt[4][2];
/* UL grant rate accumulator — not referenced in this chunk; presumably
 * updated by the UL allocator, TODO confirm. */
5259 uint32_t rgUlrate_grant;
5262 * @brief This function fills the HqP TB with rbAllocInfo.
5266 * Function: rgSCHCmnFillHqPTb
5267 * Purpose: This function fills in the HqP TB with rbAllocInfo.
5269 * Invoked by: rgSCHCmnFillHqPTb
5271 * @param[in] RgSchCellCb* cell
5272 * @param[in] RgSchDlRbAlloc *rbAllocInfo,
5273 * @param[in] uint8_t tbAllocIdx
5274 * @param[in] RgSchPdcch *pdcch
/* Two prototypes below: the non-static one is presumably compiled under
 * LTEMAC_SPS (guard lines elided), the static one otherwise. */
5279 Void rgSCHCmnFillHqPTb
5282 RgSchDlRbAlloc *rbAllocInfo,
5287 static Void rgSCHCmnFillHqPTb
5290 RgSchDlRbAlloc *rbAllocInfo,
5294 #endif /* LTEMAC_SPS */
5296 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
5297 RgSchDlTbAllocInfo *tbAllocInfo = &rbAllocInfo->tbInfo[tbAllocIdx];
5298 RgSchDlHqTbCb *tbInfo = tbAllocInfo->tbCb;
5299 RgSchDlHqProcCb *hqP = tbInfo->hqP;
5302 /*ccpu00120365-ADD-if tb is disabled, set mcs=0,rv=1.
5303 * Relevant for DCI format 2 & 2A as per 36.213-7.1.7.2
5305 if ( tbAllocInfo->isDisabled)
5308 tbInfo->dlGrnt.iMcs = 0;
5309 tbInfo->dlGrnt.rv = 1;
5311 /* Fill for TB retransmission */
5312 else if (tbInfo->txCntr > 0)
5315 tbInfo->timingInfo = cmnCellDl->time;
/* On DTX feedback, keep the same RV (adaptive retx with same MCS);
 * otherwise step the RV through the standard 0,2,3,1 cycle. */
5317 if ((tbInfo->isAckNackDtx == TFU_HQFDB_DTX))
5319 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5320 rgHqRvRetxCnt[tbInfo->dlGrnt.rv][tbInfo->tbIdx]++;
5324 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[++(tbInfo->ccchSchdInfo.rvIdx) & 0x03];
5327 /* fill the scheduler information of hqProc */
5328 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5329 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx,hqP->tbInfo,tbInfo->tbIdx );
5330 rgSCHDhmHqTbRetx(hqP->hqE, tbInfo->timingInfo, hqP, tbInfo->tbIdx);
5332 /* Fill for TB transmission */
5335 /* Fill the HqProc */
5336 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5337 tbInfo->tbSz = tbAllocInfo->bytesAlloc;
5338 tbInfo->timingInfo = cmnCellDl->time;
5340 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[0];
5341 /* fill the scheduler information of hqProc */
5342 tbInfo->ccchSchdInfo.rvIdx = 0;
5343 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5344 /* DwPts Scheduling Changes Start */
5345 /* DwPts Scheduling Changes End */
5346 cell->measurements.dlBytesCnt += tbAllocInfo->bytesAlloc;
5349 /*ccpu00120365:-ADD-only add to subFrm list if tb is not disabled */
5350 if ( tbAllocInfo->isDisabled == FALSE )
5352 /* Set the number of transmitting SM layers for this TB */
5353 tbInfo->numLyrs = tbAllocInfo->noLyr;
5354 /* Set the TB state as WAITING to indicate TB has been
5355 * considered for transmission */
5356 tbInfo->state = HQ_TB_WAITING;
5357 hqP->subFrm = rbAllocInfo->dlSf;
5358 tbInfo->hqP->pdcch = pdcch;
5359 //tbInfo->dlGrnt.numRb = rbAllocInfo->rbsAlloc;
5360 rgSCHUtlDlHqPTbAddToTx(hqP->subFrm, hqP, tbInfo->tbIdx);
5366 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
5370 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
5371 * Purpose: This function fills in the PDCCH information
5372 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5373 * for dedicated service scheduling. It also
5374 * obtains TPC to be filled in from the power module.
5375 * Assign the PDCCH to HQProc.
5377 * Invoked by: Downlink Scheduler
5379 * @param[in] RgSchCellCb* cell
5380 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5381 * @param[in] RgDlHqProc* hqP
5382 * @param[out] RgSchPdcch *pdcch
5383 * @param[in] uint8_t tpc
/* NOTE(review): the boilerplate above says "format 2" but this function
 * fills 5GTF formats B1/B2 for dedicated scheduling — header is stale. */
5387 static Void rgSCHCmnFillHqPPdcchDciFrmtB1B2
5390 RgSchDlRbAlloc *rbAllocInfo,
5391 RgSchDlHqProcCb *hqP,
5398 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5399 //Currently hardcoding values here.
5400 //DU_LOG("\nINFO --> SCH : Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
5401 switch(rbAllocInfo->dciFormat)
5403 case TFU_DCI_FORMAT_B1:
/* Unlike the common-channel path, HARQ id/NDI/RV come from the live
 * HARQ proc and TB control block. */
5405 pdcch->dci.u.formatB1Info.formatType = 0;
5406 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5407 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5408 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5409 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5410 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5411 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5412 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5413 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5414 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5415 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5416 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5417 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5418 //TODO_SID: Need to update
5419 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5420 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5421 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5422 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5423 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5424 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5425 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5426 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5427 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5428 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5431 case TFU_DCI_FORMAT_B2:
/* B2 mirrors B1 but advertises 2-layer transmission (AntPorts=4).
 * NOTE(review): B2 reuses the rbAssignB1Val324 union member — verify the
 * union layout makes this intentional. */
5433 pdcch->dci.u.formatB2Info.formatType = 1;
5434 pdcch->dci.u.formatB2Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5435 pdcch->dci.u.formatB2Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5436 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5437 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5438 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5439 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5440 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5441 pdcch->dci.u.formatB2Info.CSI_BSI_BRI_Req = 0;
5442 pdcch->dci.u.formatB2Info.CSIRS_BRRS_TxTiming = 0;
5443 pdcch->dci.u.formatB2Info.CSIRS_BRRS_SymbIdx = 0;
5444 pdcch->dci.u.formatB2Info.CSIRS_BRRS_ProcInd = 0;
5445 pdcch->dci.u.formatB2Info.xPUCCH_TxTiming = 0;
5446 //TODO_SID: Need to update
5447 pdcch->dci.u.formatB2Info.freqResIdx_xPUCCH = 0;
5448 pdcch->dci.u.formatB2Info.beamSwitch = 0;
5449 pdcch->dci.u.formatB2Info.SRS_Config = 0;
5450 pdcch->dci.u.formatB2Info.SRS_Symbol = 0;
5451 //TODO_SID: Need to check.Currently setting 4(2 layer, ports(8,9) w/o OCC).
5452 pdcch->dci.u.formatB2Info.AntPorts_numLayers = 4;
5453 pdcch->dci.u.formatB2Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5454 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5455 pdcch->dci.u.formatB2Info.tpcCmd = 1; //tpc;
5456 pdcch->dci.u.formatB2Info.DL_PCRS = 0;
5460 DU_LOG("\nERROR --> SCH : 5GTF_ERROR Allocator's incorrect "
5461 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
/* Debug throughput accumulators (bits) used by rgSCHCmnFillHqPPdcch:
 * total over PCell+SCell, and SCell-only totals split per TB/codeword. */
5468 uint32_t totPcellSCell;
5469 uint32_t addedForScell;
5470 uint32_t addedForScell1;
5471 uint32_t addedForScell2;
5473 * @brief This function fills the PDCCH information from dlProc.
5477 * Function: rgSCHCmnFillHqPPdcch
5478 * Purpose: This function fills in the PDCCH information
5479 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5480 * for dedicated service scheduling. It also
5481 * obtains TPC to be filled in from the power module.
5482 * Assign the PDCCH to HQProc.
5484 * Invoked by: Downlink Scheduler
5486 * @param[in] RgSchCellCb* cell
5487 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5488 * @param[in] RgDlHqProc* hqP
/* Dedicated-UE PDCCH fill: fetches TPC from the power module, updates PRB
 * and throughput statistics (per-UE and per-cell, guarded by elided
 * #ifdefs), then dispatches on DCI format to the per-format fill helper. */
5492 Void rgSCHCmnFillHqPPdcch(RgSchCellCb *cell,RgSchDlRbAlloc *rbAllocInfo,RgSchDlHqProcCb *hqP)
5494 RgSchCmnDlCell *cmnCell = RG_SCH_CMN_GET_DL_CELL(cell);
5495 RgSchPdcch *pdcch = rbAllocInfo->pdcch;
5502 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5509 tpc = rgSCHPwrPucchTpcForUe(cell, hqP->hqE->ue);
5511 /* Fix: syed moving this to a common function for both scheduled
5512 * and non-scheduled UEs */
5514 pdcch->ue = hqP->hqE->ue;
/* PRB accounting: non-CSG members contribute to ncsgPrbCnt as well as
 * the total. */
5515 if (hqP->hqE->ue->csgMmbrSta == FALSE)
5517 cmnCell->ncsgPrbCnt += rbAllocInfo->rbsAlloc;
5519 cmnCell->totPrbCnt += rbAllocInfo->rbsAlloc;
5522 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlPrbUsg +=
5523 rbAllocInfo->rbsAlloc;
5524 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw0iTbs +=
5525 rbAllocInfo->tbInfo[0].iTbs;
5526 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw0iTbs ++;
5527 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
5528 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5531 totPcellSCell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5532 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5534 addedForScell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5535 addedForScell1 += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5537 DU_LOG("\nINFO --> SCH : Hqp %d cell %d addedForScell %lu addedForScell1 %lu sfn:sf %d:%d \n",
5539 hqP->hqE->cell->cellId,
5543 cell->crntTime.slot);
5547 hqP->hqE->cell->tenbStats->sch.dlPrbUsage[0] +=
5548 rbAllocInfo->rbsAlloc;
5549 hqP->hqE->cell->tenbStats->sch.dlSumCw0iTbs +=
5550 rbAllocInfo->tbInfo[0].iTbs;
5551 hqP->hqE->cell->tenbStats->sch.dlNumCw0iTbs ++;
5552 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
5553 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
/* Second codeword stats only when TB1 was actually scheduled. */
5554 if (rbAllocInfo->tbInfo[1].schdlngForTb)
5556 hqP->hqE->cell->tenbStats->sch.dlSumCw1iTbs +=
5557 rbAllocInfo->tbInfo[1].iTbs;
5558 hqP->hqE->cell->tenbStats->sch.dlNumCw1iTbs ++;
5559 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw1iTbs +=
5560 rbAllocInfo->tbInfo[1].iTbs;
5561 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw1iTbs ++;
5562 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
5563 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5567 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5569 addedForScell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5570 addedForScell2 += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5572 DU_LOG("\nINFO --> SCH : Hqp %d cell %d addedForScell %lu addedForScell2 %lu \n",
5574 hqP->hqE->cell->cellId,
5579 totPcellSCell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5583 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
5584 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5587 DU_LOG("\nINFO --> SCH : add DL TPT is %lu sfn:sf %d:%d \n", hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt ,
5589 cell->crntTime.slot);
5595 pdcch->rnti = rbAllocInfo->rnti;
5596 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
5597 /* Update subframe and pdcch info in HqTb control block */
5598 switch(rbAllocInfo->dciFormat)
5601 case TFU_DCI_FORMAT_B1:
5602 case TFU_DCI_FORMAT_B2:
5604 // DU_LOG("\nINFO --> SCH : RG_5GTF:: Pdcch filling with DCI format B1/B2\n");
5605 rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, \
5611 DU_LOG("\nERROR --> SCH : Allocator's incorrect dciForamt Fill for RNTI:%d",rbAllocInfo->rnti);
5618 * @brief This function fills the PDCCH DCI format 1 information from dlProc.
5622 * Function: rgSCHCmnFillHqPPdcchDciFrmt1
5623 * Purpose: This function fills in the PDCCH information
5624 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5625 * for dedicated service scheduling. It also
5626 * obtains TPC to be filled in from the power module.
5627 * Assign the PDCCH to HQProc.
5629 * Invoked by: Downlink Scheduler
5631 * @param[in] RgSchCellCb* cell
5632 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5633 * @param[in] RgDlHqProc* hqP
5634 * @param[out] RgSchPdcch *pdcch
5635 * @param[in] uint8_t tpc
5640 static Void rgSCHCmnFillHqPPdcchDciFrmt1
5643 RgSchDlRbAlloc *rbAllocInfo,
5644 RgSchDlHqProcCb *hqP,
5651 RgSchTddANInfo *anInfo;
5655 /* For activation or reactivation,
5656 * Harq ProcId should be 0 */
5657 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
5661 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5662 pdcch->dci.u.format1Info.tpcCmd = tpc;
5663 /* Avoiding this check,as we dont support Type1 RA */
5665 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Serialize the type-0 RBG bitmap MSB-first into four bytes. */
5668 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5669 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5670 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5672 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5673 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5675 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5676 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5678 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5679 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a fresh transmission uses HARQ id 0
 * (ccpu00119023); otherwise the real proc id is signalled. */
5684 if ((!(hqP->tbInfo[0].txCntr)) &&
5685 (cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5686 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5687 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV)))
5690 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5694 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
5697 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
5700 pdcch->dci.u.format1Info.allocInfo.ndi =
5701 rbAllocInfo->tbInfo[0].tbCb->ndi;
5702 pdcch->dci.u.format1Info.allocInfo.mcs =
5703 rbAllocInfo->tbInfo[0].imcs;
5704 pdcch->dci.u.format1Info.allocInfo.rv =
5705 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
/* TDD DAI: read back the ACK/NACK record created earlier for this
 * feedback occasion and encode its dlDai into the DCI. */
5707 if(hqP->hqE->ue != NULLP)
5710 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5711 hqP->hqE->cell->cellId,
5714 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5715 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5717 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5718 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5723 pdcch->dci.u.format1Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5727 /* Fixing DAI value - ccpu00109162 */
5728 pdcch->dci.u.format1Info.dai = RG_SCH_MAX_DAI_IDX;
5734 /* always 0 for RACH */
5735 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5737 /* Fixing DAI value - ccpu00109162 */
5738 pdcch->dci.u.format1Info.dai = 1;
5747 * @brief This function fills the PDCCH DCI format 1A information from dlProc.
5751 * Function: rgSCHCmnFillHqPPdcchDciFrmt1A
5752 * Purpose: This function fills in the PDCCH information
5753 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5754 * for dedicated service scheduling. It also
5755 * obtains TPC to be filled in from the power module.
5756 * Assign the PDCCH to HQProc.
5758 * Invoked by: Downlink Scheduler
5760 * @param[in] RgSchCellCb* cell
5761 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5762 * @param[in] RgDlHqProc* hqP
5763 * @param[out] RgSchPdcch *pdcch
5764 * @param[in] uint8_t tpc
5768 static Void rgSCHCmnFillHqPPdcchDciFrmt1A
5771 RgSchDlRbAlloc *rbAllocInfo,
5772 RgSchDlHqProcCb *hqP,
5779 RgSchTddANInfo *anInfo;
5783 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
5787 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5788 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
5789 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = tpc;
5790 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
5791 rbAllocInfo->tbInfo[0].imcs;
5792 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = TRUE;
/* SPS activation/reactivation on a fresh transmission uses HARQ id 0;
 * otherwise signal the real proc id. */
5794 if ((!(hqP->tbInfo[0].txCntr)) &&
5795 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5796 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5797 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
5800 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val = 0;
5804 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val
5808 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val =
5811 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = \
5812 rbAllocInfo->tbInfo[0].tbCb->ndi;
5813 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = \
5814 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5815 /* As of now, we do not support Distributed allocations */
5816 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
5817 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
5818 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
/* RIV from contiguous type-2 start/length per 36.213. */
5820 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5821 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5822 rbAllocInfo->allocInfo.raType2.rbStart,
5823 rbAllocInfo->allocInfo.raType2.numRb);
/* TDD DAI lookup for this UE's feedback occasion. */
5825 if(hqP->hqE->ue != NULLP)
5828 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5829 hqP->hqE->cell->cellId,
5831 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5832 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5834 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5835 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5838 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5841 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val =
5842 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5846 /* Fixing DAI value - ccpu00109162 */
5847 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = RG_SCH_MAX_DAI_IDX;
5848 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
5855 /* always 0 for RACH */
5856 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres
5859 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5860 /* Fixing DAI value - ccpu00109162 */
5861 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5869 * @brief This function fills the PDCCH DCI format 1B information from dlProc.
5873 * Function: rgSCHCmnFillHqPPdcchDciFrmt1B
5874 * Purpose: This function fills in the PDCCH information
5875 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5876 * for dedicated service scheduling. It also
5877 * obtains TPC to be filled in from the power module.
5878 * Assign the PDCCH to HQProc.
5880 * Invoked by: Downlink Scheduler
5882 * @param[in] RgSchCellCb* cell
5883 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5884 * @param[in] RgDlHqProc* hqP
5885 * @param[out] RgSchPdcch *pdcch
5886 * @param[in] uint8_t tpc
5890 static Void rgSCHCmnFillHqPPdcchDciFrmt1B
5893 RgSchDlRbAlloc *rbAllocInfo,
5894 RgSchDlHqProcCb *hqP,
5901 RgSchTddANInfo *anInfo;
5905 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
5909 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5910 pdcch->dci.u.format1bInfo.tpcCmd = tpc;
5911 pdcch->dci.u.format1bInfo.allocInfo.mcs = \
5912 rbAllocInfo->tbInfo[0].imcs;
/* SPS activation/reactivation on a fresh transmission uses HARQ id 0;
 * otherwise signal the real proc id. */
5914 if ((!(hqP->tbInfo[0].txCntr)) &&
5915 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5916 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5917 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
5920 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = 0;
5924 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
5927 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
5929 pdcch->dci.u.format1bInfo.allocInfo.ndi = \
5930 rbAllocInfo->tbInfo[0].tbCb->ndi;
5931 pdcch->dci.u.format1bInfo.allocInfo.rv = \
5932 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5933 /* As of now, we do not support Distributed allocations */
5934 pdcch->dci.u.format1bInfo.allocInfo.isLocal = TRUE;
5935 pdcch->dci.u.format1bInfo.allocInfo.nGap2.pres = NOTPRSNT;
5936 pdcch->dci.u.format1bInfo.allocInfo.alloc.type =
5938 pdcch->dci.u.format1bInfo.allocInfo.alloc.u.riv =
5939 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5940 rbAllocInfo->allocInfo.raType2.rbStart,
5941 rbAllocInfo->allocInfo.raType2.numRb);
5942 /* Fill precoding Info */
/* precIdxInfo packs PMI-confirm in the high nibble and TPMI in the low
 * nibble of a single byte. */
5943 pdcch->dci.u.format1bInfo.allocInfo.pmiCfm = \
5944 rbAllocInfo->mimoAllocInfo.precIdxInfo >> 4;
5945 pdcch->dci.u.format1bInfo.allocInfo.tPmi = \
5946 rbAllocInfo->mimoAllocInfo.precIdxInfo & 0x0F;
/* TDD DAI lookup for this UE's feedback occasion. */
5948 if(hqP->hqE->ue != NULLP)
5951 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5952 hqP->hqE->cell->cellId,
5954 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5955 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5957 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5958 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5963 pdcch->dci.u.format1bInfo.dai =
5964 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5968 pdcch->dci.u.format1bInfo.dai = RG_SCH_MAX_DAI_IDX;
5969 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
5980 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
5984 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
5985 * Purpose: This function fills in the PDCCH information
5986 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5987 * for dedicated service scheduling. It also
5988 * obtains TPC to be filled in from the power module.
5989 * Assign the PDCCH to HQProc.
5991 * Invoked by: Downlink Scheduler
5993 * @param[in] RgSchCellCb* cell
5994 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5995 * @param[in] RgDlHqProc* hqP
5996 * @param[out] RgSchPdcch *pdcch
5997 * @param[in] uint8_t tpc
6001 static Void rgSCHCmnFillHqPPdcchDciFrmt2
6004 RgSchDlRbAlloc *rbAllocInfo,
6005 RgSchDlHqProcCb *hqP,
6012 RgSchTddANInfo *anInfo;
6016 /* ccpu00119023-ADD-For activation or reactivation,
6017 * Harq ProcId should be 0 */
6018 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Fill TB0 always; TB1 too when scheduled or explicitly disabled, so the
 * disabled-TB mcs=0/rv=1 encoding of 36.213 §7.1.7.2 is produced. */
6022 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6023 /*ccpu00120365:-ADD-call also if tb is disabled */
6024 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6025 rbAllocInfo->tbInfo[1].isDisabled)
6027 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6029 pdcch->dci.u.format2Info.tpcCmd = tpc;
6030 /* Avoiding this check,as we dont support Type1 RA */
6032 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Serialize the type-0 RBG bitmap MSB-first into four bytes. */
6035 pdcch->dci.u.format2Info.allocInfo.isAllocType0 = TRUE;
6036 pdcch->dci.u.format2Info.allocInfo.resAllocMap[0] =
6037 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6039 pdcch->dci.u.format2Info.allocInfo.resAllocMap[1] =
6040 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6042 pdcch->dci.u.format2Info.allocInfo.resAllocMap[2] =
6043 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6045 pdcch->dci.u.format2Info.allocInfo.resAllocMap[3] =
6046 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a fresh transmission uses HARQ id 0;
 * otherwise signal the real proc id. */
6051 if ((!(hqP->tbInfo[0].txCntr)) &&
6052 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6053 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6054 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6057 pdcch->dci.u.format2Info.allocInfo.harqProcId = 0;
6061 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6064 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6066 /* Initialize the TB info for both the TBs */
/* mcs=0,rv=1 is the "TB disabled" signal; overwritten below for TBs
 * actually scheduled. */
6067 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].mcs = 0;
6068 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].rv = 1;
6069 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].mcs = 0;
6070 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].rv = 1;
6071 /* Fill tbInfo for scheduled TBs */
6072 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6073 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6074 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6075 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6076 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6077 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6078 /* If we reach this function. It is safely assumed that
6079 * rbAllocInfo->tbInfo[0] always has non default valid values.
6080 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6081 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6083 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6084 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6085 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6086 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6087 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6088 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6090 pdcch->dci.u.format2Info.allocInfo.transSwap =
6091 rbAllocInfo->mimoAllocInfo.swpFlg;
6092 pdcch->dci.u.format2Info.allocInfo.precoding =
6093 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD DAI lookup for this UE's feedback occasion. */
6095 if(hqP->hqE->ue != NULLP)
6099 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6100 hqP->hqE->cell->cellId,
6102 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6103 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6105 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6106 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6111 pdcch->dci.u.format2Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6115 pdcch->dci.u.format2Info.dai = RG_SCH_MAX_DAI_IDX;
6116 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
6126 * @brief This function fills the PDCCH DCI format 2A information from dlProc.
6130 * Function: rgSCHCmnFillHqPPdcchDciFrmt2A
6131 * Purpose: This function fills in the PDCCH information
6132 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6133 * for dedicated service scheduling. It also
6134 * obtains TPC to be filled in from the power module.
6135 * Assign the PDCCH to HQProc.
6137 * Invoked by: Downlink Scheduler
6139 * @param[in] RgSchCellCb* cell
6140 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6141 * @param[in] RgDlHqProc* hqP
6142 * @param[out] RgSchPdcch *pdcch
6143 * @param[in] uint8_t tpc
/* Fills PDCCH DCI format 2A contents from the DL HARQ process (hqP) and the
 * RB allocation (rbAllocInfo): TPC command, RA type-0 resource bitmap, HARQ
 * process id (forced to 0 for SPS activation/reactivation first Tx),
 * per-TB NDI/MCS/RV, TB swap flag, precoding info and (TDD) the DAI. */
6147 static Void rgSCHCmnFillHqPPdcchDciFrmt2A
6150 RgSchDlRbAlloc *rbAllocInfo,
6151 RgSchDlHqProcCb *hqP,
6157 RgSchTddANInfo *anInfo;
6161 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* TB0 is always filled; TB1 is filled also when merely disabled so the
 * DCI carries its disabled state */
6165 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6166 /*ccpu00120365:-ADD-call also if tb is disabled */
6167 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6168 rbAllocInfo->tbInfo[1].isDisabled)
6171 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6174 pdcch->dci.u.format2AInfo.tpcCmd = tpc;
6175 /* Avoiding this check,as we dont support Type1 RA */
6177 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
6180 pdcch->dci.u.format2AInfo.allocInfo.isAllocType0 = TRUE;
/* Serialize the 32-bit type-0 allocation bitmask MSB-first into 4 bytes */
6181 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[0] =
6182 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6184 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[1] =
6185 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6187 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[2] =
6188 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6190 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[3] =
6191 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* First transmission under SPS activation/reactivation must signal HARQ
 * process id 0 (see ccpu00119023 note above in the format-2 variant) */
6196 if ((!(hqP->tbInfo[0].txCntr)) &&
6197 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6198 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6199 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6202 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = 0;
6206 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6209 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6211 /* Initialize the TB info for both the TBs */
/* mcs=0/rv=1 are the defaults for an unscheduled TB; overwritten below
 * for TBs that are actually scheduled */
6212 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].mcs = 0;
6213 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].rv = 1;
6214 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].mcs = 0;
6215 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].rv = 1;
6216 /* Fill tbInfo for scheduled TBs */
/* Index by tbCb->tbIdx so the DCI TB slot matches the HARQ TB index */
6217 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6218 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6219 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6220 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6221 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6222 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6223 /* If we reach this function. It is safely assumed that
6224 * rbAllocInfo->tbInfo[0] always has non default valid values.
6225 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6227 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6229 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6230 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6231 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6232 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6233 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6234 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
/* MIMO: TB-to-codeword swap flag and precoding index from the allocation */
6237 pdcch->dci.u.format2AInfo.allocInfo.transSwap =
6238 rbAllocInfo->mimoAllocInfo.swpFlg;
6239 pdcch->dci.u.format2AInfo.allocInfo.precoding =
6240 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD DAI: look up the UE's ACK/NACK feedback record for the TB0 feedback
 * time (serving-cell-aware path vs. index-0 path appear to be alternative
 * compile-time branches; #if lines not visible in this extract) */
6242 if(hqP->hqE->ue != NULLP)
6245 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6246 hqP->hqE->cell->cellId,
6248 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6249 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6251 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6252 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6257 pdcch->dci.u.format2AInfo.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
/* No feedback record found: fall back to max DAI and log the anomaly */
6261 pdcch->dci.u.format2AInfo.dai = RG_SCH_MAX_DAI_IDX;
6262 DU_LOG("\nERROR --> SCH : PDCCH is been scheduled without updating anInfo RNTI:%d",
6274 * @brief init of Sch vars.
6278 * Function: rgSCHCmnInitVars
6279 Purpose: Initialization of various UL subframe indices
6281 * @param[in] RgSchCellCb *cell
/* Resets all per-cell UL scheduler subframe/HARQ indices to
 * RGSCH_INVALID_INFO at initialization; rgSCHCmnUpdVars() recomputes them
 * every TTI from the cell's current time. */
6285 static Void rgSCHCmnInitVars(RgSchCellCb *cell)
6287 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6290 cellUl->idx = RGSCH_INVALID_INFO;
6291 cellUl->schdIdx = RGSCH_INVALID_INFO;
6292 cellUl->schdHqProcIdx = RGSCH_INVALID_INFO;
6293 cellUl->msg3SchdIdx = RGSCH_INVALID_INFO;
/* EMTC msg3 index (compiled in only for eMTC builds; guard lines not
 * visible in this extract) */
6295 cellUl->emtcMsg3SchdIdx = RGSCH_INVALID_INFO;
6297 cellUl->msg3SchdHqProcIdx = RGSCH_INVALID_INFO;
6298 cellUl->rcpReqIdx = RGSCH_INVALID_INFO;
6299 cellUl->hqFdbkIdx[0] = RGSCH_INVALID_INFO;
6300 cellUl->hqFdbkIdx[1] = RGSCH_INVALID_INFO;
6301 cellUl->reTxIdx[0] = RGSCH_INVALID_INFO;
6302 cellUl->reTxIdx[1] = RGSCH_INVALID_INFO;
6303 /* Stack Crash problem for TRACE5 Changes. Added the return below */
6310 * @brief Updation of Sch vars per TTI.
6314 * Function: rgSCHCmnUpdVars
6315 * Purpose: Updation of Sch vars per TTI.
6317 * @param[in] RgSchCellCb *cell
/* Per-TTI update of the UL cell's working indices: current UL subframe
 * index, PUSCH scheduling index (+delta), msg3 scheduling index, reception
 * request index, HARQ feedback index and retransmission index — all modulo
 * RG_SCH_CMN_UL_NUM_SF. Also refreshes common RACH parameters. */
6321 Void rgSCHCmnUpdVars(RgSchCellCb *cell)
6323 CmLteTimingInfo timeInfo;
6324 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Absolute subframe count since SFN 0, wrapped into the UL subframe ring */
6328 idx = (cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot);
6329 cellUl->idx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6331 DU_LOG("\nDEBUG --> SCH : idx %d cellUl->idx %d RGSCH_NUM_SUB_FRAMES_5G %d time(%d %d) \n",idx,cellUl->idx ,RGSCH_NUM_SUB_FRAMES_5G,cell->crntTime.sfn,cell->crntTime.slot);
6333 /* Need to scheduler for after SCHED_DELTA */
6334 /* UL allocation has been advanced by 1 subframe
6335 * so that we do not wrap around and send feedback
6336 * before the data is even received by the PHY */
6337 /* Introduced timing delta for UL control */
6338 idx = (cellUl->idx + TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA);
6339 cellUl->schdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
/* HARQ process that owns the subframe being scheduled (crntTime + deltas) */
6341 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6342 TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA)
6343 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6345 /* ccpu00127193 filling schdTime for logging and enhancement purpose*/
6346 cellUl->schdTime = timeInfo;
6348 /* msg3 scheduling two subframes after general scheduling */
6349 idx = (cellUl->idx + RG_SCH_CMN_DL_DELTA + RGSCH_RARSP_MSG3_DELTA);
6350 cellUl->msg3SchdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6352 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6353 RG_SCH_CMN_DL_DELTA+ RGSCH_RARSP_MSG3_DELTA)
6354 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* Subframe for which PUSCH reception requests go to PHY */
6356 idx = (cellUl->idx + TFU_RECPREQ_DLDELTA);
6358 cellUl->rcpReqIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6360 /* Downlink harq feedback is sometime after data reception / harq failure */
6361 /* Since feedback happens prior to scheduling being called, we add 1 to */
6362 /* take care of getting the correct subframe for feedback */
/* Subtract the CRC-indication delta; +RG_SCH_CMN_UL_NUM_SF keeps the
 * intermediate value non-negative before the modulo */
6363 idx = (cellUl->idx - TFU_CRCIND_ULDELTA + RG_SCH_CMN_UL_NUM_SF);
6365 DU_LOG("\nDEBUG --> SCH : Finally setting cellUl->hqFdbkIdx[0] = %d TFU_CRCIND_ULDELTA %d RG_SCH_CMN_UL_NUM_SF %d\n",idx,TFU_CRCIND_ULDELTA,RG_SCH_CMN_UL_NUM_SF);
6367 cellUl->hqFdbkIdx[0] = (idx % (RG_SCH_CMN_UL_NUM_SF));
/* Retransmissions are placed in the same subframe as new scheduling */
6369 idx = ((cellUl->schdIdx) % (RG_SCH_CMN_UL_NUM_SF));
6371 cellUl->reTxIdx[0] = (uint8_t) idx;
6373 DU_LOG("\nDEBUG --> SCH : cellUl->hqFdbkIdx[0] %d cellUl->reTxIdx[0] %d \n",cellUl->hqFdbkIdx[0], cellUl->reTxIdx[0] );
6375 /* RACHO: update cmn sched specific RACH variables,
6376 * mainly the prachMaskIndex */
6377 rgSCHCmnUpdRachParam(cell);
6386 * @brief To get uplink subframe index associated with current PHICH
6391 * Function: rgSCHCmnGetPhichUlSfIdx
6392 * Purpose: Gets uplink subframe index associated with current PHICH
6393 * transmission based on SFN and subframe no
6395 * @param[in] CmLteTimingInfo *timeInfo
6396 * @param[in] RgSchCellCb *cell
/* Returns the UL subframe index (into the cell's UL subframe ring) that the
 * current PHICH transmission at *timeInfo corresponds to, by walking back
 * the per-UL/DL-config PHICH offset stored on the DL subframe.
 * Returns RGSCH_INVALID_INFO when the DL subframe has no valid offset. */
6400 uint8_t rgSCHCmnGetPhichUlSfIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6402 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6404 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6411 dlsf = rgSCHUtlSubFrmGet(cell, *timeInfo);
6413 if(dlsf->phichOffInfo.sfnOffset == RGSCH_INVALID_INFO)
6415 return (RGSCH_INVALID_INFO);
6417 subframe = dlsf->phichOffInfo.subframe;
/* SFN of the original UL transmission; +RGSCH_MAX_SFN avoids a negative
 * intermediate before the modulo */
6419 sfn = (RGSCH_MAX_SFN + timeInfo->sfn -
6420 dlsf->phichOffInfo.sfnOffset) % RGSCH_MAX_SFN;
6422 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
6423 * wrap case such that idx will be proper*/
/* Count UL subframes up to (sfn, subframe) using the TDD UL-subframe
 * cumulative table, then wrap into the cell's UL subframe ring */
6424 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6425 numUlSf = ((numUlSf * sfn) + rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subframe]) - 1;
6426 idx = numUlSf % (cellUl->numUlSubfrms);
6432 * @brief To get uplink subframe index.
6437 * Function: rgSCHCmnGetUlSfIdx
6438 * Purpose: Gets uplink subframe index based on SFN and subframe number.
6440 * @param[in] CmLteTimingInfo *timeInfo
6441 * @param[in] RgSchCellCb *cell
/* Returns the UL subframe index (into the cell's UL subframe ring) for the
 * given timing, derived from the cumulative count of UL subframes per SFN
 * for the cell's TDD UL/DL configuration. */
6445 uint8_t rgSCHCmnGetUlSfIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6447 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6448 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6453 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
6454 * wrap case such that idx will be proper*/
/* UL subframes per full SFN times elapsed SFNs, plus UL subframes inside
 * the current frame; -1 because UL subframes are counted from 0 */
6455 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6456 numUlSf = ((numUlSf * timeInfo->sfn) + \
6457 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->subframe]) - 1;
6458 idx = numUlSf % (cellUl->numUlSubfrms);
6466 * @brief To get uplink hq index.
6471 * Function: rgSCHCmnGetUlHqProcIdx
6472 * Purpose: Gets the uplink HARQ process index based on SFN and subframe number.
6474 * @param[in] CmLteTimingInfo *timeInfo
6475 * @param[in] RgSchCellCb *cell
/* Returns the UL HARQ process index owning the given timing. Two variants
 * are visible: a simple modulo over RGSCH_NUM_UL_HQ_PROC (lines 6485-6486)
 * and a TDD variant that accounts for UL subframes per UL/DL config and
 * SFN wrap-around; these appear to be alternative compile-time paths
 * (#if lines not visible in this extract). */
6479 uint8_t rgSCHCmnGetUlHqProcIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6485 numUlSf = (timeInfo->sfn * RGSCH_NUM_SUB_FRAMES_5G + timeInfo->slot);
6486 procId = numUlSf % RGSCH_NUM_UL_HQ_PROC;
6488 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6489 /*ccpu00130639 - MOD - To get correct UL HARQ Proc IDs for all UL/DL Configs*/
6490 uint8_t numUlSfInSfn;
6491 S8 sfnCycle = cell->tddHqSfnCycle;
6492 uint8_t numUlHarq = rgSchTddUlNumHarqProcTbl[ulDlCfgIdx]
6494 /* TRACE 5 Changes */
6496 /* Calculate the number of UL SF in one SFN */
6497 numUlSfInSfn = RGSCH_NUM_SUB_FRAMES -
6498 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6500 /* Check for the SFN wrap around case */
/* timeInfo one SFN ahead of crntTime across the 1023->0 boundary */
6501 if(cell->crntTime.sfn == 1023 && timeInfo->sfn == 0)
/* timeInfo one SFN behind crntTime across the 0->1023 boundary */
6505 else if(cell->crntTime.sfn == 0 && timeInfo->sfn == 1023)
6507 /* sfnCycle decremented by 1 */
/* +numUlHarq-1 keeps the modulo operand non-negative */
6508 sfnCycle = (sfnCycle + numUlHarq-1) % numUlHarq;
6510 /* Calculate the total number of UL sf */
6511 /* -1 is done since uplink sf are counted from 0 */
6512 numUlSf = numUlSfInSfn * (timeInfo->sfn + (sfnCycle*1024)) +
6513 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->slot] - 1;
6515 procId = numUlSf % numUlHarq;
6521 /* UL_ALLOC_CHANGES */
6522 /***********************************************************
6524 * Func : rgSCHCmnUlFreeAlloc
6526 * Desc : Free an allocation - invokes UHM and releases
6527 * alloc for the scheduler
6528 * Doesn't need the subframe as an argument
6536 **********************************************************/
/* Frees an UL allocation without needing the subframe: releases the HARQ
 * process via UHM and returns the allocation to the scheduler. For a msg3
 * allocation that exhausted its retransmissions without a CRC pass, also
 * deletes the RA control block (releasing the RNTI). */
6537 Void rgSCHCmnUlFreeAlloc(RgSchCellCb *cell,RgSchUlAlloc *alloc)
6539 RgSchUlHqProcCb *hqProc;
6543 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
/* NOTE(review): additional condition lines of this if (e.g. the raCb
 * check) are not visible in this extract */
6544 if ((alloc->hqProc->remTx == 0) &&
6545 (alloc->hqProc->rcvdCrcInd == FALSE) &&
6548 RgSchRaCb *raCb = alloc->raCb;
6549 rgSCHUhmFreeProc(alloc->hqProc, cell);
6550 rgSCHUtlUlAllocRelease(alloc);
6551 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: cache hqProc first because the release invalidates alloc */
6556 hqProc = alloc->hqProc;
6557 rgSCHUtlUlAllocRelease(alloc);
6558 rgSCHUhmFreeProc(hqProc, cell);
6563 /***********************************************************
6565 * Func : rgSCHCmnUlFreeAllocation
6567 * Desc : Free an allocation - invokes UHM and releases
6568 * alloc for the scheduler
6576 **********************************************************/
/* Frees an UL allocation within a known UL subframe (sf): releases the HARQ
 * process via UHM and returns the allocation to the subframe's pool. Same
 * msg3 max-retx RNTI-release handling as rgSCHCmnUlFreeAlloc(). */
6577 Void rgSCHCmnUlFreeAllocation(RgSchCellCb *cell,RgSchUlSf *sf,RgSchUlAlloc *alloc)
6579 RgSchUlHqProcCb *hqProc;
6584 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
/* NOTE(review): additional condition lines of this if (e.g. the raCb
 * check) are not visible in this extract */
6585 if ((alloc->hqProc->remTx == 0) &&
6586 (alloc->hqProc->rcvdCrcInd == FALSE) &&
6589 RgSchRaCb *raCb = alloc->raCb;
6590 rgSCHUhmFreeProc(alloc->hqProc, cell);
6591 rgSCHUtlUlAllocRls(sf, alloc);
6592 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: cache hqProc, free it, then return alloc to the subframe */
6597 hqProc = alloc->hqProc;
6598 rgSCHUhmFreeProc(hqProc, cell);
6600 /* re-setting the PRB count while freeing the allocations */
6603 rgSCHUtlUlAllocRls(sf, alloc);
6609 * @brief This function implements PDCCH allocation for an UE
6610 * in the currently running subframe.
6614 * Function: rgSCHCmnPdcchAllocCrntSf
6615 * Purpose: This function determines current DL subframe
6616 * and UE DL CQI to call the actual pdcch allocator
6618 * Note that this function is called only
6619 * when PDCCH request needs to be made during
6620 * uplink scheduling.
6622 * Invoked by: Scheduler
6624 * @param[in] RgSchCellCb *cell
6625 * @param[in] RgSchUeCb *ue
6626 * @return RgSchPdcch *
6627 * -# NULLP when unsuccessful
/* Allocates a PDCCH for UL scheduling of this UE in the DL subframe
 * currently being considered (crntTime + TFU_ULCNTRL_DLDELTA). Uses the
 * common search space while ue->allocCmnUlPdcch is set, otherwise the
 * UE-specific allocator with the UE's CW0 CQI. Returns NULLP on failure. */
6629 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf(RgSchCellCb *cell,RgSchUeCb *ue)
6631 CmLteTimingInfo frm = cell->crntTime;
6632 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
6634 RgSchPdcch *pdcch = NULLP;
6636 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
6637 sf = rgSCHUtlSubFrmGet(cell, frm);
6640 if (ue->allocCmnUlPdcch)
6642 pdcch = rgSCHCmnCmnPdcchAlloc(cell, sf);
6643 /* Since CRNTI Scrambled */
6646 pdcch->dciNumOfBits = ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
/* UE-specific search space path (5GTF: DCI format A1) */
6652 //pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, y, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_0, FALSE);
6653 pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_A1, FALSE);
6658 /***********************************************************
6660 * Func : rgSCHCmnUlAllocFillNdmrs
6662 * Desc : Determines and fills N_dmrs for a UE uplink
6667 * Notes: N_dmrs determination is straightforward, so
6668 * it is configured per subband
6672 **********************************************************/
6673 Void rgSCHCmnUlAllocFillNdmrs(RgSchCmnUlCell *cellUl,RgSchUlAlloc *alloc)
6675 alloc->grnt.nDmrs = cellUl->dmrsArr[alloc->sbStart];
6679 /***********************************************************
6681 * Func : rgSCHCmnUlAllocLnkHqProc
6683 * Desc : Links a new allocation for an UE with the
6684 * appropriate HARQ process of the UE.
6692 **********************************************************/
/* Links an UL allocation to the UE's HARQ process: for a retransmission it
 * triggers adaptive-retx handling; for a new transmission it registers the
 * allocation with UHM using the UE's configured max HARQ retx count.
 * NOTE(review): the if(isRetx)/else structure is implied by the visible
 * calls but its brace lines are not visible in this extract. */
6693 Void rgSCHCmnUlAllocLnkHqProc(RgSchUeCb *ue,RgSchUlAlloc *alloc,RgSchUlHqProcCb *proc,Bool isRetx)
/* Retransmission path: adapt the retx allocation to the HARQ process */
6698 rgSCHCmnUlAdapRetx(alloc, proc);
6702 #ifdef LTE_L2_MEAS /* L2_COUNTERS */
/* New transmission path: hand the allocation to UHM */
6705 rgSCHUhmNewTx(proc, (((RgUeUlHqCb*)proc->hqEnt)->maxHqRetx), alloc);
6711 * @brief This function releases a PDCCH in the subframe that is
6712 * currently being allocated for.
6716 * Function: rgSCHCmnPdcchRlsCrntSf
6717 * Purpose: This function determines current DL subframe
6718 * which is considered for PDCCH allocation,
6719 * and then calls the actual function that
6720 * releases a PDCCH in a specific subframe.
6721 * Note that this function is called only
6722 * when PDCCH release needs to be made during
6723 * uplink scheduling.
6725 * Invoked by: Scheduler
6727 * @param[in] RgSchCellCb *cell
6728 * @param[in] RgSchPdcch *pdcch
/* Releases a PDCCH back to the pool of the DL subframe currently being
 * considered for PDCCH allocation during uplink scheduling
 * (crntTime + TFU_ULCNTRL_DLDELTA). */
6731 Void rgSCHCmnPdcchRlsCrntSf(RgSchCellCb *cell,RgSchPdcch *pdcch)
6733 CmLteTimingInfo frm = cell->crntTime;
/* Advance to the subframe the PDCCH was allocated from, then return it */
6736 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
6737 sf = rgSCHUtlSubFrmGet(cell, frm);
6738 rgSCHUtlPdcchPut(cell, &sf->pdcchInfo, pdcch);
6741 /***********************************************************
6743 * Func : rgSCHCmnUlFillPdcchWithAlloc
6745 * Desc : Fills a PDCCH with format 0 information.
6753 **********************************************************/
/* Fills a PDCCH with the UL grant carried by the allocation. The DCI format
 * comes from the grant (5GTF formats A1/A2); both branches copy the grant's
 * resource assignment, HARQ process id, MCS, NDI, UCI-on-xPUSCH, SCID, PMI
 * and TPC, with the remaining fields hard-coded to 0 for now. */
6754 Void rgSCHCmnUlFillPdcchWithAlloc(RgSchPdcch *pdcch,RgSchUlAlloc *alloc,RgSchUeCb *ue)
6758 pdcch->rnti = alloc->rnti;
6759 //pdcch->dci.dciFormat = TFU_DCI_FORMAT_A2;
6760 pdcch->dci.dciFormat = alloc->grnt.dciFrmt;
6762 //Currently hardcoding values here.
6763 //DU_LOG("\nINFO --> SCH : Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
6764 switch(pdcch->dci.dciFormat)
6766 case TFU_DCI_FORMAT_A1:
/* formatType 0 identifies DCI format A1 on the wire */
6768 pdcch->dci.u.formatA1Info.formatType = 0;
6769 pdcch->dci.u.formatA1Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
6770 pdcch->dci.u.formatA1Info.xPUSCH_TxTiming = 0;
6771 pdcch->dci.u.formatA1Info.RBAssign = alloc->grnt.rbAssign;
6772 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
6773 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
6774 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
6775 pdcch->dci.u.formatA1Info.CSI_BSI_BRI_Req = 0;
6776 pdcch->dci.u.formatA1Info.CSIRS_BRRS_TxTiming = 0;
6777 pdcch->dci.u.formatA1Info.CSIRS_BRRS_SymbIdx = 0;
6778 pdcch->dci.u.formatA1Info.CSIRS_BRRS_ProcInd = 0;
6779 pdcch->dci.u.formatA1Info.numBSI_Reports = 0;
6780 pdcch->dci.u.formatA1Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
6781 pdcch->dci.u.formatA1Info.beamSwitch = 0;
6782 pdcch->dci.u.formatA1Info.SRS_Config = 0;
6783 pdcch->dci.u.formatA1Info.SRS_Symbol = 0;
6784 pdcch->dci.u.formatA1Info.REMapIdx_DMRS_PCRS_numLayers = 0;
6785 pdcch->dci.u.formatA1Info.SCID = alloc->grnt.SCID;
6786 pdcch->dci.u.formatA1Info.PMI = alloc->grnt.PMI;
6787 pdcch->dci.u.formatA1Info.UL_PCRS = 0;
6788 pdcch->dci.u.formatA1Info.tpcCmd = alloc->grnt.tpc;
6791 case TFU_DCI_FORMAT_A2:
/* formatType 1 identifies DCI format A2; field layout mirrors A1 */
6793 pdcch->dci.u.formatA2Info.formatType = 1;
6794 pdcch->dci.u.formatA2Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
6795 pdcch->dci.u.formatA2Info.xPUSCH_TxTiming = 0;
6796 pdcch->dci.u.formatA2Info.RBAssign = alloc->grnt.rbAssign;
6797 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
6798 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
6799 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
6800 pdcch->dci.u.formatA2Info.CSI_BSI_BRI_Req = 0;
6801 pdcch->dci.u.formatA2Info.CSIRS_BRRS_TxTiming = 0;
6802 pdcch->dci.u.formatA2Info.CSIRS_BRRS_SymbIdx = 0;
6803 pdcch->dci.u.formatA2Info.CSIRS_BRRS_ProcInd = 0;
6804 pdcch->dci.u.formatA2Info.numBSI_Reports = 0;
6805 pdcch->dci.u.formatA2Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
6806 pdcch->dci.u.formatA2Info.beamSwitch = 0;
6807 pdcch->dci.u.formatA2Info.SRS_Config = 0;
6808 pdcch->dci.u.formatA2Info.SRS_Symbol = 0;
6809 pdcch->dci.u.formatA2Info.REMapIdx_DMRS_PCRS_numLayers = 0;
6810 pdcch->dci.u.formatA2Info.SCID = alloc->grnt.SCID;
6811 pdcch->dci.u.formatA2Info.PMI = alloc->grnt.PMI;
6812 pdcch->dci.u.formatA2Info.UL_PCRS = 0;
6813 pdcch->dci.u.formatA2Info.tpcCmd = alloc->grnt.tpc;
/* Any other format is a programming error upstream; log and fall through */
6817 DU_LOG("\nERROR --> SCH : 5GTF_ERROR UL Allocator's icorrect "
6818 "dciForamt Fill RNTI:%d",alloc->rnti);
6826 /***********************************************************
6828 * Func : rgSCHCmnUlAllocFillTpc
6830 * Desc : Determines and fills TPC for an UE allocation.
6838 **********************************************************/
6839 Void rgSCHCmnUlAllocFillTpc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchUlAlloc *alloc)
6841 alloc->grnt.tpc = rgSCHPwrPuschTpcForUe(cell, ue);
6846 /***********************************************************
6848 * Func : rgSCHCmnAddUeToRefreshQ
6850 * Desc : Adds a UE to refresh queue, so that the UE is
6851 * periodically triggered to refresh it's GBR and
6860 **********************************************************/
/* Enqueues the UE on the scheduler's refresh timer queue so that its GBR
 * and (per the header comment above) related quantities are periodically
 * refreshed after 'wait' units. Sets up the common timer argument with the
 * cell-level timer queue and the UE's timer block.
 * NOTE(review): the timer-arg declaration and the final enqueue call are
 * not visible in this extract. */
6861 static Void rgSCHCmnAddUeToRefreshQ(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t wait)
6863 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
6865 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
/* Zero the timer argument before filling only the fields we use */
6869 memset(&arg, 0, sizeof(arg));
6870 arg.tqCp = &sched->tmrTqCp;
6871 arg.tq = sched->tmrTq;
6872 arg.timers = &ueSchd->tmr;
6876 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
6883 * @brief Perform UE reset procedure.
6887 * Function : rgSCHCmnUlUeReset
6889 * This functions performs BSR resetting and
6890 * triggers UL specific scheduler
6891 * to Perform UE reset procedure.
6893 * @param[in] RgSchCellCb *cell
6894 * @param[in] RgSchUeCb *ue
/* UL part of the UE reset procedure: clears BSR-derived state, restores
 * effective AMBR/GBR to configured values for every LCG, drops outstanding
 * allocation records, and finally delegates to the UL-specific scheduler's
 * reset hook. */
6897 static Void rgSCHCmnUlUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
6899 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
6900 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
6902 RgSchCmnLcg *lcgCmn;
6904 RgSchCmnAllocRecord *allRcd;
/* Reset buffer-status bookkeeping to "nothing pending" */
6906 ue->ul.minReqBytes = 0;
6907 ue->ul.totalBsr = 0;
6909 ue->ul.nonGbrLcgBs = 0;
6910 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk the UE's UL allocation record list (loop lines not visible here) */
6912 node = ueUl->ulAllocLst.first;
6915 allRcd = (RgSchCmnAllocRecord *)node->node;
/* Restore each LCG's effective rates to its configured values */
6919 for(lcgCnt = 0; lcgCnt < RGSCH_MAX_LCG_PER_UE; lcgCnt++)
6921 lcgCmn = RG_SCH_CMN_GET_UL_LCG(&ue->ul.lcgArr[lcgCnt]);
6923 lcgCmn->reportedBs = 0;
6924 lcgCmn->effGbr = lcgCmn->cfgdGbr;
6925 lcgCmn->effDeltaMbr = lcgCmn->deltaMbr;
6927 rgSCHCmnUlUeDelAllocs(cell, ue);
6929 ue->isSrGrant = FALSE;
6931 cellSchd->apisUl->rgSCHUlUeReset(cell, ue);
6933 /* Stack Crash problem for TRACE5 changes. Added the return below */
6939 * @brief RESET UL CQI and DL CQI&RI to conservative values
6940 * for a reestablishing UE.
6944 * Function : rgSCHCmnResetRiCqi
6946 * RESET UL CQI and DL CQI&RI to conservative values
6947 * for a reestablishing UE
6949 * @param[in] RgSchCellCb *cell
6950 * @param[in] RgSchUeCb *ue
/* Resets UL CQI and DL CQI/RI to conservative defaults for a
 * re-establishing UE: DL CQI falls back to the cell's CCCH CQI, RI to 1,
 * transmit-diversity is forced for TM3/4/6, allocation limits are
 * recomputed, and (where configured) an early aperiodic CQI is requested. */
6953 static Void rgSCHCmnResetRiCqi(RgSchCellCb *cell,RgSchUeCb *ue)
6955 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
6956 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
6957 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
6958 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
6961 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
6962 cell->isCpUlExtend);
/* Conservative DL defaults: CCCH-level CQI on both codewords, rank 1 */
6964 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
6965 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
6966 ueDl->mimoInfo.ri = 1;
/* Without fresh PMI/RI reports, closed-loop modes must fall back to TD */
6967 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
6968 (ue->mimoInfo.txMode == RGR_UE_TM_6))
6970 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
6972 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
6974 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* Recompute DL allocation limits for the reset CQI/RI (the two calls
 * appear to be alternative compile-time branches for eMTC builds) */
6977 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
6979 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
6983 /* Request for an early Aper CQI in case of reest */
6984 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
6985 if(acqiCb && acqiCb->aCqiCfg.pres)
6987 acqiCb->aCqiTrigWt = 0;
6995 * @brief Perform UE reset procedure.
6999 * Function : rgSCHCmnDlUeReset
7001 * This functions performs BO resetting and
7002 * triggers DL specific scheduler
7003 * to Perform UE reset procedure.
7005 * @param[in] RgSchCellCb *cell
7006 * @param[in] RgSchUeCb *ue
/* DL part of the UE reset procedure: removes the UE from the PDCCH-order
 * queue and the timing-advance list (or stops a running TA timer), then
 * delegates to the DL-specific scheduler's reset hook and the SCell DL
 * reset. */
7009 static Void rgSCHCmnDlUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
7011 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7012 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
7013 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Linked-in node implies the UE sits on the PDCCH-order queue */
7016 if (ueDl->rachInfo.poLnk.node != NULLP)
7018 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
7021 /* Fix: syed Remove from TA List if this UE is there.
7022 * If TA Timer is running. Stop it */
7023 if (ue->dlTaLnk.node)
7025 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
7026 ue->dlTaLnk.node = (PTR)NULLP;
7028 else if (ue->taTmr.tmrEvnt != TMR_NONE)
7030 rgSCHTmrStopTmr(cell, ue->taTmr.tmrEvnt, ue);
7033 cellSchd->apisDl->rgSCHDlUeReset(cell, ue);
7037 rgSCHSCellDlUeReset(cell,ue);
7043 * @brief Perform UE reset procedure.
7047 * Function : rgSCHCmnUeReset
7049 * This functions triggers specific scheduler
7050 * to Perform UE reset procedure.
7052 * @param[in] RgSchCellCb *cell
7053 * @param[in] RgSchUeCb *ue
/* Full UE reset: clears RACH association and power state, runs the UL and
 * DL reset procedures, re-enables common-search-space DCI0/1A allocation,
 * resets RI/CQI on every configured serving cell (triggering an aperiodic
 * CQI on the PCell when configured), and tells MAC to reset the UE's HARQ
 * entity. */
7058 Void rgSCHCmnUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
7062 RgInfResetHqEnt hqEntRstInfo;
7064 /* RACHO: remove UE from pdcch, handover and rapId assoc Qs */
7065 rgSCHCmnDelRachInfo(cell, ue);
7067 rgSCHPwrUeReset(cell, ue);
7069 rgSCHCmnUlUeReset(cell, ue);
7070 rgSCHCmnDlUeReset(cell, ue);
7073 /* Making allocCmnUlPdcch TRUE to allocate DCI0/1A from Common search space.
7074 As because multiple cells are added hence 2 bits CqiReq is there
7075 This flag will be set to FALSE once we will get Scell READY */
7076 ue->allocCmnUlPdcch = TRUE;
7079 /* Fix : syed RESET UL CQI and DL CQI&RI to conservative values
7080 * for a reestablishing UE */
7081 /*Reset Cqi Config for all the configured cells*/
7082 for (idx = 0;idx < CM_LTE_MAX_CELLS; idx++)
7084 if (ue->cellInfo[idx] != NULLP)
7086 rgSCHCmnResetRiCqi(ue->cellInfo[idx]->cell, ue);
7089 /*After Reset Trigger APCQI for Pcell*/
7090 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7091 if(pCellInfo->acqiCb.aCqiCfg.pres)
7093 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
7096 /* sending HqEnt reset to MAC */
7097 hqEntRstInfo.cellId = cell->cellId;
7098 hqEntRstInfo.crnti = ue->ueId;
/* Route the reset request to the MAC instance serving this cell */
7100 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
7101 RgSchMacRstHqEnt(&pst,&hqEntRstInfo);
7107 * @brief UE out of MeasGap or AckNackReptn.
7111 * Function : rgSCHCmnActvtUlUe
7113 * This functions triggers specific scheduler
7114 * to start considering it for scheduling.
7116 * @param[in] RgSchCellCb *cell
7117 * @param[in] RgSchUeCb *ue
7122 Void rgSCHCmnActvtUlUe(RgSchCellCb *cell,RgSchUeCb *ue)
7124 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7126 /* : take care of this in UL retransmission */
7127 cellSchd->apisUl->rgSCHUlActvtUe(cell, ue);
7132 * @brief UE out of MeasGap or AckNackReptn.
7136 * Function : rgSCHCmnActvtDlUe
7138 * This functions triggers specific scheduler
7139 * to start considering it for scheduling.
7141 * @param[in] RgSchCellCb *cell
7142 * @param[in] RgSchUeCb *ue
7147 Void rgSCHCmnActvtDlUe(RgSchCellCb *cell,RgSchUeCb *ue)
7149 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7151 cellSchd->apisDl->rgSCHDlActvtUe(cell, ue);
7156 * @brief This API is invoked to indicate scheduler of a CRC indication.
7160 * Function : rgSCHCmnHdlUlTransInd
7161 * This API is invoked to indicate scheduler of a CRC indication.
7163 * @param[in] RgSchCellCb *cell
7164 * @param[in] RgSchUeCb *ue
7165 * @param[in] CmLteTimingInfo timingInfo
7169 Void rgSCHCmnHdlUlTransInd(RgSchCellCb *cell,RgSchUeCb *ue,CmLteTimingInfo timingInfo)
7172 /* Update the latest UL dat/sig transmission time */
7173 RGSCHCPYTIMEINFO(timingInfo, ue->ul.ulTransTime);
7174 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
7176 /* Some UL Transmission from this UE.
7177 * Activate this UE if it was inactive */
7178 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7179 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7187 * @brief Compute the minimum Rank based on Codebook subset
7188 * restriction configuration for 4 Tx Ports and Tx Mode 4.
7192 * Function : rgSCHCmnComp4TxMode4
7194 * Depending on BitMap set at CBSR during Configuration
7195 * - return the least possible Rank
7198 * @param[in] uint32_t *pmiBitMap
7199 * @return RgSchCmnRank
7201 static RgSchCmnRank rgSCHCmnComp4TxMode4(uint32_t *pmiBitMap)
7203 uint32_t bitMap0, bitMap1;
7204 bitMap0 = pmiBitMap[0];
7205 bitMap1 = pmiBitMap[1];
7206 if((bitMap1) & 0xFFFF)
7208 return (RG_SCH_CMN_RANK_1);
7210 else if((bitMap1>>16) & 0xFFFF)
7212 return (RG_SCH_CMN_RANK_2);
7214 else if((bitMap0) & 0xFFFF)
7216 return (RG_SCH_CMN_RANK_3);
7218 else if((bitMap0>>16) & 0xFFFF)
7220 return (RG_SCH_CMN_RANK_4);
7224 return (RG_SCH_CMN_RANK_1);
7230 * @brief Compute the minimum Rank based on Codebook subset
7231 * restriction configuration for 2 Tx Ports and Tx Mode 4.
7235 * Function : rgSCHCmnComp2TxMode4
7237 * Depending on BitMap set at CBSR during Configuration
7238 * - return the least possible Rank
7241 * @param[in] uint32_t *pmiBitMap
7242 * @return RgSchCmnRank
7244 static RgSchCmnRank rgSCHCmnComp2TxMode4(uint32_t *pmiBitMap)
7247 bitMap0 = pmiBitMap[0];
7248 if((bitMap0>>26)& 0x0F)
7250 return (RG_SCH_CMN_RANK_1);
7252 else if((bitMap0>>30) & 3)
7254 return (RG_SCH_CMN_RANK_2);
7258 return (RG_SCH_CMN_RANK_1);
7263 * @brief Compute the minimum Rank based on Codebook subset
7264 * restriction configuration for 4 Tx Ports and Tx Mode 3.
7268 * Function : rgSCHCmnComp4TxMode3
7270 * Depending on BitMap set at CBSR during Configuration
7271 * - return the least possible Rank
7274 * @param[in] uint32_t *pmiBitMap
7275 * @return RgSchCmnRank
7277 static RgSchCmnRank rgSCHCmnComp4TxMode3(uint32_t *pmiBitMap)
7280 bitMap0 = pmiBitMap[0];
7281 if((bitMap0>>28)& 1)
7283 return (RG_SCH_CMN_RANK_1);
7285 else if((bitMap0>>29) &1)
7287 return (RG_SCH_CMN_RANK_2);
7289 else if((bitMap0>>30) &1)
7291 return (RG_SCH_CMN_RANK_3);
7293 else if((bitMap0>>31) &1)
7295 return (RG_SCH_CMN_RANK_4);
7299 return (RG_SCH_CMN_RANK_1);
7304 * @brief Compute the minimum Rank based on Codebook subset
7305 * restriction configuration for 2 Tx Ports and Tx Mode 3.
7309 * Function : rgSCHCmnComp2TxMode3
7311 * Depending on BitMap set at CBSR during Configuration
7312 * - return the least possible Rank
7315 * @param[in] uint32_t *pmiBitMap
7316 * @return RgSchCmnRank
7318 static RgSchCmnRank rgSCHCmnComp2TxMode3(uint32_t *pmiBitMap)
7321 bitMap0 = pmiBitMap[0];
7322 if((bitMap0>>30)& 1)
7324 return (RG_SCH_CMN_RANK_1);
7326 else if((bitMap0>>31) &1)
7328 return (RG_SCH_CMN_RANK_2);
7332 return (RG_SCH_CMN_RANK_1);
7337 * @brief Compute the minimum Rank based on Codebook subset
7338 * restriction configuration.
7342 * Function : rgSCHCmnComputeRank
7344 * Depending on Num Tx Ports and Transmission mode
7345 * - return the least possible Rank
7348 * @param[in] RgrTxMode txMode
7349 * @param[in] uint32_t *pmiBitMap
7350 * @param[in] uint8_t numTxPorts
7351 * @return RgSchCmnRank
7353 static RgSchCmnRank rgSCHCmnComputeRank(RgrTxMode txMode,uint32_t *pmiBitMap,uint8_t numTxPorts)
7356 if (numTxPorts ==2 && txMode == RGR_UE_TM_3)
7358 return (rgSCHCmnComp2TxMode3(pmiBitMap));
7360 else if (numTxPorts ==4 && txMode == RGR_UE_TM_3)
7362 return (rgSCHCmnComp4TxMode3(pmiBitMap));
7364 else if (numTxPorts ==2 && txMode == RGR_UE_TM_4)
7366 return (rgSCHCmnComp2TxMode4(pmiBitMap));
7368 else if (numTxPorts ==4 && txMode == RGR_UE_TM_4)
7370 return (rgSCHCmnComp4TxMode4(pmiBitMap));
7374 return (RG_SCH_CMN_RANK_1);
7381 * @brief Harq Entity Deinitialization for CMN SCH.
7385 * Function : rgSCHCmnDlDeInitHqEnt
7387 * Harq Entity Deinitialization for CMN SCH
7389 * @param[in] RgSchCellCb *cell
7390 * @param[in] RgSchDlHqEnt *hqE
7393 /*KWORK_FIX:Changed function return type to void */
7394 Void rgSCHCmnDlDeInitHqEnt(RgSchCellCb *cell,RgSchDlHqEnt *hqE)
7396 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7397 RgSchDlHqProcCb *hqP;
/* Let the configured DL scheduler detach its own per-HARQ-entity state
 * before the common blocks are released. NOTE(review): the declarations of
 * `ret`/`cnt` and several braces are elided in this extract. */
7401 ret = cellSchd->apisDl->rgSCHDlUeHqEntDeInit(cell, hqE);
7402 /* Free only If the Harq proc are created*/
/* Release the common-scheduler control block of every DL HARQ process;
 * these were allocated in rgSCHCmnDlInitHqEnt(). */
7407 for(cnt = 0; cnt < hqE->numHqPrcs; cnt++)
7409 hqP = &hqE->procs[cnt];
7410 if ((RG_SCH_CMN_GET_DL_HQP(hqP)))
7412 rgSCHUtlFreeSBuf(cell->instIdx,
7413 (Data**)(&(hqP->sch)), (sizeof(RgSchCmnDlHqProc)));
/* LAA-specific HARQ teardown — presumably guarded by an LAA compile flag
 * in the elided lines; confirm against the full source. */
7417 rgSCHLaaDeInitDlHqProcCb (cell, hqE);
7424 * @brief Harq Entity initialization for CMN SCH.
7428 * Function : rgSCHCmnDlInitHqEnt
7430 * Harq Entity initialization for CMN SCH
7432 * @param[in] RgSchCellCb *cell
7433 * @param[in] RgSchUeCb *ue
7438 S16 rgSCHCmnDlInitHqEnt(RgSchCellCb *cell,RgSchDlHqEnt *hqEnt)
7440 RgSchDlHqProcCb *hqP;
7442 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Allocate one common-scheduler control block per DL HARQ process
 * (freed again in rgSCHCmnDlDeInitHqEnt). NOTE(review): the loop braces
 * and the RFAILED path after a failed allocation are elided here. */
7444 for(cnt = 0; cnt < hqEnt->numHqPrcs; cnt++)
7446 hqP = &hqEnt->procs[cnt];
7447 if (rgSCHUtlAllocSBuf(cell->instIdx,
7448 (Data**)&(hqP->sch), (sizeof(RgSchCmnDlHqProc))) != ROK)
/* eMTC UEs are initialised through the eMTC-specific DL scheduler API... */
7454 if((cell->emtcEnable) &&(hqEnt->ue->isEmtcUe))
7456 if(ROK != cellSchd->apisEmtcDl->rgSCHDlUeHqEntInit(cell, hqEnt))
/* ...all other UEs go through the default DL scheduler API. */
7465 if(ROK != cellSchd->apisDl->rgSCHDlUeHqEntInit(cell, hqEnt))
7472 } /* rgSCHCmnDlInitHqEnt */
7475 * @brief This function computes distribution of refresh period
7479 * Function: rgSCHCmnGetRefreshDist
7480 * Purpose: This function computes distribution of refresh period
7481 * This is required to align set of UEs refresh
7482 * around the different consecutive subframe.
7484 * Invoked by: rgSCHCmnGetRefreshPerDist
7486 * @param[in] RgSchCellCb *cell
7487 * @param[in] RgSchUeCb *ue
7491 static uint8_t rgSCHCmnGetRefreshDist(RgSchCellCb *cell,RgSchUeCb *ue)
7495 for(refOffst = 0; refOffst < RGSCH_MAX_REFRESH_OFFSET; refOffst++)
7497 if(cell->refreshUeCnt[refOffst] < RGSCH_MAX_REFRESH_GRPSZ)
7499 cell->refreshUeCnt[refOffst]++;
7500 ue->refreshOffset = refOffst;
7501 /* DU_LOG("\nINFO --> SCH : UE[%d] refresh offset[%d]. Cell refresh ue count[%d].\n", ue->ueId, refOffst, cell->refreshUeCnt[refOffst]); */
7506 DU_LOG("\nERROR --> SCH : Allocation of refresh distribution failed\n");
7507 /* We should not enter here normally, but incase of failure, allocating from last offset*/
7508 cell->refreshUeCnt[refOffst-1]++;
7509 ue->refreshOffset = refOffst-1;
7511 return (refOffst-1);
7514 * @brief This function computes initial Refresh Wait Period.
7518 * Function: rgSCHCmnGetRefreshPer
7519 * Purpose: This function computes initial Refresh Wait Period.
7520 * This is required to align multiple UEs refresh
7521 * around the same time.
7523 * Invoked by: rgSCHCmnGetRefreshPer
7525 * @param[in] RgSchCellCb *cell
7526 * @param[in] RgSchUeCb *ue
7527 * @param[in] uint32_t *waitPer
7531 static Void rgSCHCmnGetRefreshPer(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t *waitPer)
7533 uint32_t refreshPer;
7534 uint32_t crntSubFrm;
7537 refreshPer = RG_SCH_CMN_REFRESH_TIME * RG_SCH_CMN_REFRESH_TIMERES;
7538 crntSubFrm = cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot;
7539 /* Fix: syed align multiple UEs to refresh at same time */
7540 *waitPer = refreshPer - (crntSubFrm % refreshPer);
7541 *waitPer = RGSCH_CEIL(*waitPer, RG_SCH_CMN_REFRESH_TIMERES);
7542 *waitPer = *waitPer + rgSCHCmnGetRefreshDist(cell, ue);
7550 * @brief UE initialisation for scheduler.
7554 * Function : rgSCHCmnRgrSCellUeCfg
7556 * This function initialises UE-specific scheduler
7557 * information for SCELL
7558 * 0. Perform basic validations
7559 * 1. Allocate common sched UE cntrl blk
7560 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
7562 * 4. Perform DLFS cfg
7564 * @param[in] RgSchCellCb *cell
7565 * @param[in] RgSchUeCb *ue
7566 * @param[out] RgSchErrInfo *err
/* Configures a UE on a secondary cell (SCell): allocates the per-cell
 * common scheduler block, mirrors PCell MIMO/category limits, then runs
 * DL, DLFS, UL-HARQ, power and specific-scheduler configuration.
 * NOTE(review): braces, RFAILED paths and several #ifdef lines (TDD,
 * EMTC, UL CA) are elided in this extract. */
7571 S16 rgSCHCmnRgrSCellUeCfg(RgSchCellCb *sCell,RgSchUeCb *ue,RgrUeSecCellCfg *sCellInfoCfg,RgSchErrInfo *err)
7576 RgSchCmnAllocRecord *allRcd;
7577 RgSchDlRbAlloc *allocInfo;
7578 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
7580 RgSchCmnUlUe *ueUlPcell;
7581 RgSchCmnUe *pCellUeSchCmn;
7582 RgSchCmnUe *ueSchCmn;
7584 RgSchCmnDlUe *pCellUeDl;
7586 Inst inst = ue->cell->instIdx;
7588 uint32_t idx = (uint8_t)((sCell->cellId - rgSchCb[sCell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
7590 pCellUeSchCmn = RG_SCH_CMN_GET_UE(ue,ue->cell);
7591 pCellUeDl = &pCellUeSchCmn->dl;
7593 /* 1. Allocate Common sched control block */
7594 if((rgSCHUtlAllocSBuf(sCell->instIdx,
7595 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
7597 DU_LOG("\nERROR --> SCH : Memory allocation FAILED\n");
7598 err->errCause = RGSCHERR_SCH_CFG;
7601 ueSchCmn = RG_SCH_CMN_GET_UE(ue,sCell);
7603 /*2. Perform UEs downlink configuration */
7604 ueDl = &ueSchCmn->dl;
/* The SCell inherits the PCell's MIMO context and force-TD masking. */
7607 ueDl->mimoInfo = pCellUeDl->mimoInfo;
7609 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
7610 (ue->mimoInfo.txMode == RGR_UE_TM_6))
7612 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_NO_PMI);
7614 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7616 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_RI_1);
/* DL capability limits come from the UE-category table; categories 6/7
 * with 4 layers use the second (larger) max-TB entry. */
7618 RGSCH_ARRAY_BOUND_CHECK(sCell->instIdx, rgUeCatTbl, pCellUeSchCmn->cmn.ueCat);
7619 ueDl->maxTbBits = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlTbBits;
7622 ri = RGSCH_MIN(ri, sCell->numTxAntPorts);
7623 if(((CM_LTE_UE_CAT_6 == pCellUeSchCmn->cmn.ueCat )
7624 ||(CM_LTE_UE_CAT_7 == pCellUeSchCmn->cmn.ueCat))
7627 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[1];
7631 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[0];
7634 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer split per HARQ process — TDD vs FDD variants (the #if/#else
 * around these two computations is elided). */
7636 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
7637 rgSchTddDlNumHarqProcTbl[sCell->ulDlCfgIdx]);
7639 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
7640 RGSCH_NUM_DL_HQ_PROC);
7643 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, ue->isEmtcUe);
7645 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, FALSE);
7649 /* ambrCfgd config moved to ueCb.dl, as it's not needed for per cell wise*/
7651 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, sCell);
7652 allocInfo->rnti = ue->ueId;
7654 /* Initializing the lastCfi value to current cfi value */
7655 ueDl->lastCfi = cellSchd->dl.currCfi;
7657 if ((cellSchd->apisDl->rgSCHRgrSCellDlUeCfg(sCell, ue, err)) != ROK)
7659 DU_LOG("\nERROR --> SCH : Spec Sched DL UE CFG FAILED\n");
7663 /* TODO: enhance for DLFS RB Allocation for SCELLs in future dev */
7665 /* DLFS UE Config */
7666 if (cellSchd->dl.isDlFreqSel)
7668 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeCfg(sCell, ue, sCellInfoCfg, err)) != ROK)
7670 DU_LOG("\nERROR --> SCH : DLFS UE config FAILED\n");
7675 /* TODO: Do UL SCELL CFG during UL CA dev */
7677 ueUl = RG_SCH_CMN_GET_UL_UE(ue, sCell);
7679 /* TODO_ULCA: SRS for SCELL needs to be handled in the below function call */
7680 rgSCHCmnUpdUeUlCqiInfo(sCell, ue, ueUl, ueSchCmn, cellSchd,
7681 sCell->isCpUlExtend);
7683 ret = rgSCHUhmHqEntInit(sCell, ue);
7686 DU_LOG("\nERROR --> SCH : SCELL UHM HARQ Ent Init "
7687 "Failed for CRNTI:%d", ue->ueId);
/* UL HARQ entity: copy max-retx from the PCell and thread every process
 * onto the free list. */
7691 ueUlPcell = RG_SCH_CMN_GET_UL_UE(ue, ue->cell);
7692 /* Initialize uplink HARQ related information for UE */
7693 ueUl->hqEnt.maxHqRetx = ueUlPcell->hqEnt.maxHqRetx;
7694 cmLListInit(&ueUl->hqEnt.free);
7695 cmLListInit(&ueUl->hqEnt.inUse);
7696 for(i=0; i < ueUl->hqEnt.numHqPrcs; i++)
7698 ueUl->hqEnt.hqProcCb[i].hqEnt = (void*)(&ueUl->hqEnt);
7699 ueUl->hqEnt.hqProcCb[i].procId = i;
7700 ueUl->hqEnt.hqProcCb[i].ulSfIdx = RGSCH_INVALID_INFO;
7701 ueUl->hqEnt.hqProcCb[i].alloc = NULLP;
7703 /* ccpu00139513- Initializing SPS flags*/
7704 ueUl->hqEnt.hqProcCb[i].isSpsActvnHqP = FALSE;
7705 ueUl->hqEnt.hqProcCb[i].isSpsOccnHqP = FALSE;
7707 cmLListAdd2Tail(&ueUl->hqEnt.free, &ueUl->hqEnt.hqProcCb[i].lnk);
7708 ueUl->hqEnt.hqProcCb[i].lnk.node = (PTR)&ueUl->hqEnt.hqProcCb[i];
7711 /* Allocate UL BSR allocation tracking List */
7712 cmLListInit(&ueUl->ulAllocLst);
7714 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
7716 if((rgSCHUtlAllocSBuf(sCell->instIdx,
7717 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord) != ROK))
7719 DU_LOG("\nERROR --> SCH : SCELL Memory allocation FAILED"
7720 "for CRNTI:%d",ue->ueId);
7721 err->errCause = RGSCHERR_SCH_CFG;
7724 allRcd->allocTime = sCell->crntTime;
7725 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
7726 allRcd->lnk.node = (PTR)allRcd;
7729 /* After initialising UL part, do power related init */
7730 ret = rgSCHPwrUeSCellCfg(sCell, ue, sCellInfoCfg);
7733 DU_LOG("\nERROR --> SCH : Could not do "
7734 "power config for UE CRNTI:%d",ue->ueId);
/* Specific UL scheduler configuration: eMTC branch vs default branch
 * (the #ifdef EMTC_ENABLE/#else wrapper is elided). */
7739 if(TRUE == ue->isEmtcUe)
7741 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
7743 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
7744 "for CRNTI:%d",ue->ueId);
7751 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
7753 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
7754 "for CRNTI:%d",ue->ueId);
7759 ue->ul.isUlCaEnabled = TRUE;
7763 } /* rgSCHCmnRgrSCellUeCfg */
7767 * @brief UE initialisation for scheduler.
7771 * Function : rgSCHCmnRgrSCellUeDel
7773 * This function deletes UE-specific scheduler
7774 * information for SCELL
7776 * @param[in] RgSchCellCb *cell
7777 * @param[in] RgSchUeCb *ue
/* Tears down a UE's secondary-cell scheduler context: specific DL/UL
 * scheduler deletion, UL allocation cleanup, optional DLFS deletion, then
 * release of the common scheduler block. NOTE(review): braces and the
 * #ifdef EMTC_ENABLE wrapper are elided in this extract. */
7782 S16 rgSCHCmnRgrSCellUeDel(RgSchUeCellInfo *sCellInfo,RgSchUeCb *ue)
7784 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
7785 Inst inst = ue->cell->instIdx;
7788 cellSchd->apisDl->rgSCHRgrSCellDlUeDel(sCellInfo, ue);
7791 rgSCHCmnUlUeDelAllocs(sCellInfo->cell, ue);
/* eMTC UEs use the eMTC UL scheduler's free routine, others the default. */
7794 if(TRUE == ue->isEmtcUe)
7796 cellSchd->apisEmtcUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
7801 cellSchd->apisUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
7804 /* DLFS UE Config */
7805 if (cellSchd->dl.isDlFreqSel)
7807 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeDel(sCellInfo->cell, ue)) != ROK)
7809 DU_LOG("\nERROR --> SCH : DLFS Scell del FAILED\n");
/* Free the per-SCell common scheduler block allocated at SCell UE cfg. */
7814 rgSCHUtlFreeSBuf(sCellInfo->cell->instIdx,
7815 (Data**)(&(sCellInfo->sch)), (sizeof(RgSchCmnUe)));
7819 } /* rgSCHCmnRgrSCellUeDel */
7825 * @brief Handles 5gtf configuration for a UE
7829 * Function : rgSCHCmn5gtfUeCfg
7835 * @param[in] RgSchCellCb *cell
7836 * @param[in] RgSchUeCb *ue
7837 * @param[in] RgrUeCfg *cfg
/* Copies 5GTF-specific UE parameters from the config into the UE control
 * block and registers the UE's beam in its scheduling group, rejecting a
 * beam id that is already taken. NOTE(review): braces and the RFAILED/ROK
 * returns are elided in this extract. */
7842 S16 rgSCHCmn5gtfUeCfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeCfg *cfg)
7845 RgSchUeGrp *ue5gtfGrp;
7846 ue->ue5gtfCb.grpId = cfg->ue5gtfCfg.grpId;
7847 ue->ue5gtfCb.BeamId = cfg->ue5gtfCfg.BeamId;
7848 ue->ue5gtfCb.numCC = cfg->ue5gtfCfg.numCC;
7849 ue->ue5gtfCb.mcs = cfg->ue5gtfCfg.mcs;
7850 ue->ue5gtfCb.maxPrb = cfg->ue5gtfCfg.maxPrb;
/* Fixed CQI/RI reporting period; first occasion hard-coded to (10,0). */
7852 ue->ue5gtfCb.cqiRiPer = 100;
7853 /* 5gtf TODO: CQIs to start from (10,0)*/
7854 ue->ue5gtfCb.nxtCqiRiOccn.sfn = 10;
7855 ue->ue5gtfCb.nxtCqiRiOccn.slot = 0;
7856 ue->ue5gtfCb.rank = 1;
7858 DU_LOG("\nINFO --> SCH : schd cfg at mac,%u,%u,%u,%u,%u\n",ue->ue5gtfCb.grpId,ue->ue5gtfCb.BeamId,ue->ue5gtfCb.numCC,
7859 ue->ue5gtfCb.mcs,ue->ue5gtfCb.maxPrb);
7861 ue5gtfGrp = &(cell->cell5gtfCb.ueGrp5gConf[ue->ue5gtfCb.BeamId]);
7863 /* TODO_5GTF: Currently handling 1 group only. Need to update when multi group
7864 scheduling comes into picture */
/* A set bit in beamBitMask means the beam is already occupied. */
7865 if(ue5gtfGrp->beamBitMask & (1 << ue->ue5gtfCb.BeamId))
7867 DU_LOG("\nERROR --> SCH : 5GTF_ERROR Invalid beam id CRNTI:%d",cfg->crnti);
7870 ue5gtfGrp->beamBitMask |= (1 << ue->ue5gtfCb.BeamId);
7877 * @brief UE initialisation for scheduler.
7881 * Function : rgSCHCmnRgrUeCfg
7883 * This function initialises UE-specific scheduler
7885 * 0. Perform basic validations
7886 * 1. Allocate common sched UE cntrl blk
7887 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
7889 * 4. Perform DLFS cfg
7891 * @param[in] RgSchCellCb *cell
7892 * @param[in] RgSchUeCb *ue
7893 * @param[int] RgrUeCfg *ueCfg
7894 * @param[out] RgSchErrInfo *err
/* Primary-cell UE configuration for the common scheduler: allocates the
 * common UE block, sets DL capability limits from the UE category, runs
 * specific DL/UL scheduler config, UL HARQ/BSR/LCG setup, power and SPS
 * config, DLFS config, and finally schedules the QoS refresh timer.
 * NOTE(review): braces, RFAILED paths and #ifdef wrappers (TDD, EMTC,
 * LTEMAC_SPS, RG_5GTF) are elided in this extract. */
7899 S16 rgSCHCmnRgrUeCfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeCfg *ueCfg,RgSchErrInfo *err)
7901 RgSchDlRbAlloc *allocInfo;
7903 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7904 RgSchCmnUe *ueSchCmn;
7908 RgSchCmnAllocRecord *allRcd;
7910 uint32_t idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
7911 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7914 /* 1. Allocate Common sched control block */
7915 if((rgSCHUtlAllocSBuf(cell->instIdx,
7916 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
7918 DU_LOG("\nERROR --> SCH : Memory allocation FAILED for CRNTI:%d",ueCfg->crnti);
7919 err->errCause = RGSCHERR_SCH_CFG;
7922 ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
7923 ue->dl.ueDlCqiCfg = ueCfg->ueDlCqiCfg;
7924 pCellInfo->acqiCb.aCqiCfg = ueCfg->ueDlCqiCfg.aprdCqiCfg;
/* RGR category enum is 1-based; internal ueCat index is 0-based. */
7925 if(ueCfg->ueCatEnum > 0 )
7927 /*KWORK_FIX removed NULL chk for ueSchCmn*/
7928 ueSchCmn->cmn.ueCat = ueCfg->ueCatEnum - 1;
7932 ueSchCmn->cmn.ueCat = 0; /* Assuming enum values correctly set */
7934 cmInitTimers(&ueSchCmn->cmn.tmr, 1);
7936 /*2. Perform UEs downlink configuration */
7937 ueDl = &ueSchCmn->dl;
7938 /* RACHO : store the rapId assigned for HandOver UE.
7939 * Append UE to handover list of cmnCell */
7940 if (ueCfg->dedPreambleId.pres == PRSNT_NODEF)
7942 rgSCHCmnDelDedPreamble(cell, ueCfg->dedPreambleId.val);
7943 ueDl->rachInfo.hoRapId = ueCfg->dedPreambleId.val;
7944 cmLListAdd2Tail(&cellSchd->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
7945 ueDl->rachInfo.hoLnk.node = (PTR)ue;
7948 rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd);
/* Force transmit-diversity restrictions depending on the TX mode. */
7950 if (ueCfg->txMode.pres == TRUE)
7952 if ((ueCfg->txMode.txModeEnum == RGR_UE_TM_4) ||
7953 (ueCfg->txMode.txModeEnum == RGR_UE_TM_6))
7955 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
7957 if (ueCfg->txMode.txModeEnum == RGR_UE_TM_3)
7959 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* DL capability limits from the UE-category table; cat 6/7 with 4 layers
 * use the larger max-TB entry. */
7962 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
7963 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
7966 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
7967 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
7968 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
7971 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
7975 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
7978 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer split per HARQ process — TDD vs FDD variants (elided #if). */
7980 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
7981 rgSchTddDlNumHarqProcTbl[cell->ulDlCfgIdx]);
7983 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
7984 RGSCH_NUM_DL_HQ_PROC);
7987 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
7989 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
7991 /* if none of the DL and UL AMBR are configured then fail the configuration
7993 if((ueCfg->ueQosCfg.dlAmbr == 0) && (ueCfg->ueQosCfg.ueBr == 0))
7995 DU_LOG("\nERROR --> SCH : UL Ambr and DL Ambr are"
7996 "configured as 0 for CRNTI:%d",ueCfg->crnti);
7997 err->errCause = RGSCHERR_SCH_CFG;
/* AMBR is tracked per refresh period (RG_SCH_CMN_REFRESH_TIME in 100ms units). */
8001 ue->dl.ambrCfgd = (ueCfg->ueQosCfg.dlAmbr * RG_SCH_CMN_REFRESH_TIME)/100;
8003 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
8004 allocInfo->rnti = ue->ueId;
8006 /* Initializing the lastCfi value to current cfi value */
8007 ueDl->lastCfi = cellSchd->dl.currCfi;
8009 if(cell->emtcEnable && ue->isEmtcUe)
8011 if ((cellSchd->apisEmtcDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8013 DU_LOG("\nERROR --> SCH : Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8021 if ((cellSchd->apisDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8023 DU_LOG("\nERROR --> SCH : Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8030 /* 3. Initialize ul part */
8031 ueUl = &ueSchCmn->ul;
8033 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
8034 cell->isCpUlExtend);
8036 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8037 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8039 ue->ul.cfgdAmbr = (ueCfg->ueQosCfg.ueBr * RG_SCH_CMN_REFRESH_TIME)/100;
8040 ue->ul.effAmbr = ue->ul.cfgdAmbr;
8041 RGSCHCPYTIMEINFO(cell->crntTime, ue->ul.ulTransTime);
8043 /* Allocate UL BSR allocation tracking List */
8044 cmLListInit(&ueUl->ulAllocLst);
8046 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8048 if((rgSCHUtlAllocSBuf(cell->instIdx,
8049 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8051 DU_LOG("\nERROR --> SCH : Memory allocation FAILED"
8052 "for CRNTI:%d",ueCfg->crnti);
8053 err->errCause = RGSCHERR_SCH_CFG;
8056 allRcd->allocTime = cell->crntTime;
8057 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8058 allRcd->lnk.node = (PTR)allRcd;
8060 /* Allocate common sch cntrl blocks for LCGs */
8061 for (cnt=0; cnt<RGSCH_MAX_LCG_PER_UE; cnt++)
8063 ret = rgSCHUtlAllocSBuf(cell->instIdx,
8064 (Data**)&(ue->ul.lcgArr[cnt].sch), (sizeof(RgSchCmnLcg)));
8067 DU_LOG("\nERROR --> SCH : SCH struct alloc failed for CRNTI:%d",ueCfg->crnti);
8068 err->errCause = RGSCHERR_SCH_CFG;
8072 /* After initialising UL part, do power related init */
8073 ret = rgSCHPwrUeCfg(cell, ue, ueCfg);
8076 DU_LOG("\nERROR --> SCH : Could not do "
8077 "power config for UE CRNTI:%d",ueCfg->crnti);
8081 ret = rgSCHCmnSpsUeCfg(cell, ue, ueCfg, err);
8084 DU_LOG("\nERROR --> SCH : Could not do "
8085 "SPS config for CRNTI:%d",ueCfg->crnti);
8088 #endif /* LTEMAC_SPS */
8091 if(TRUE == ue->isEmtcUe)
8093 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
8095 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
8096 "for CRNTI:%d",ueCfg->crnti);
8103 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
8105 DU_LOG("\nERROR --> SCH : Spec Sched UL UE CFG FAILED"
8106 "for CRNTI:%d",ueCfg->crnti);
8111 /* DLFS UE Config */
8112 if (cellSchd->dl.isDlFreqSel)
8114 if ((cellSchd->apisDlfs->rgSCHDlfsUeCfg(cell, ue, ueCfg, err)) != ROK)
8116 DU_LOG("\nERROR --> SCH : DLFS UE config FAILED"
8117 "for CRNTI:%d",ueCfg->crnti);
8122 /* Fix: syed align multiple UEs to refresh at same time */
8123 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
8124 /* Start UE Qos Refresh Timer */
8125 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
8127 rgSCHCmn5gtfUeCfg(cell, ue, ueCfg);
8131 } /* rgSCHCmnRgrUeCfg */
8134 * @brief UE TX mode reconfiguration handler.
8138 * Function : rgSCHCmnDlHdlTxModeRecfg
8140 * This functions updates UE specific scheduler
8141 * information upon UE reconfiguration.
8143 * @param[in] RgSchUeCb *ue
8144 * @param[in] RgrUeRecfg *ueRecfg
/* Handles a TX-mode reconfiguration for a UE: starts the transition timer,
 * updates the force-transmit-diversity mask for the new mode, and resets
 * the rank. NOTE(review): the two signatures below are the TFU_UPGRADE /
 * non-TFU_UPGRADE variants; the #ifdef/#else lines between them are
 * elided in this extract, as are braces and early returns. */
8148 static Void rgSCHCmnDlHdlTxModeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg,uint8_t numTxPorts)
8150 static Void rgSCHCmnDlHdlTxModeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg)
8153 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
8155 if (ueRecfg->txMode.pres != PRSNT_NODEF)
8159 /* ccpu00140894- Starting Timer for TxMode Transition Completion*/
8160 ue->txModeTransCmplt =FALSE;
8161 rgSCHTmrStartTmr (ue->cell, ue, RG_SCH_TMR_TXMODE_TRNSTN, RG_SCH_TXMODE_TRANS_TIMER);
/* Transition COMPLETE: lift the recfg force-TD mask and re-apply the
 * per-mode restrictions with rank reset to 1. */
8162 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_CMPLT)
8164 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell,
8165 RG_SCH_CMN_TD_TXMODE_RECFG);
8166 /* MS_WORKAROUND for ccpu00123186 MIMO Fix Start: need to set FORCE TD bitmap based on TX mode */
8167 ueDl->mimoInfo.ri = 1;
8168 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8169 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
8171 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8173 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
8175 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8177 /* MIMO Fix End: need to set FORCE TD bitmap based on TX mode */
/* Transition START: force TD for the whole transition window. */
8180 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_START)
8182 /* start afresh forceTD masking */
8183 RG_SCH_CMN_INIT_FORCE_TD(ue, cell, 0);
8184 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_TXMODE_RECFG);
8185 /* Intialize MIMO related parameters of UE */
/* Rank derivation from CBSR for TM3/TM4 (TFU_UPGRADE path); the several
 * `ri = 1` lines below are the else-branches of the elided conditionals. */
8188 if(ueRecfg->txMode.pres)
8190 if((ueRecfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
8191 (ueRecfg->txMode.txModeEnum ==RGR_UE_TM_4))
8193 if(ueRecfg->ueCodeBookRstRecfg.pres)
8196 rgSCHCmnComputeRank(ueRecfg->txMode.txModeEnum,
8197 ueRecfg->ueCodeBookRstRecfg.pmiBitMap, numTxPorts)
8201 ueDl->mimoInfo.ri = 1;
8206 ueDl->mimoInfo.ri = 1;
8211 ueDl->mimoInfo.ri = 1;
8214 ueDl->mimoInfo.ri = 1;
8215 #endif /* TFU_UPGRADE */
8216 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8217 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
8219 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8221 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
8223 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8228 /***********************************************************
8230 * Func : rgSCHCmnUpdUeMimoInfo
8232 * Desc : Updates UL and DL Ue Information
8240 **********************************************************/
/* Seeds the UE's DL MIMO context at configuration time: rank from the
 * CBSR bitmap for TM3/TM4 (TFU_UPGRADE path), rank 1 otherwise, and the
 * default CCCH CQI on both codewords. NOTE(review): the repeated
 * `ri = 1` lines are else-branches of elided conditionals/#ifdefs. */
8241 static Void rgSCHCmnUpdUeMimoInfo(RgrUeCfg *ueCfg,RgSchCmnDlUe *ueDl,RgSchCellCb *cell,RgSchCmnCell *cellSchd)
8244 if(ueCfg->txMode.pres)
8246 if((ueCfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
8247 (ueCfg->txMode.txModeEnum ==RGR_UE_TM_4))
8249 if(ueCfg->ueCodeBookRstCfg.pres)
8252 rgSCHCmnComputeRank(ueCfg->txMode.txModeEnum,
8253 ueCfg->ueCodeBookRstCfg.pmiBitMap, cell->numTxAntPorts)
8257 ueDl->mimoInfo.ri = 1;
8262 ueDl->mimoInfo.ri = 1;
8267 ueDl->mimoInfo.ri = 1;
8271 ueDl->mimoInfo.ri = 1;
8272 #endif /*TFU_UPGRADE */
/* Both codewords start from the cell's default CCCH CQI. */
8273 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
8274 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
8278 /***********************************************************
8280 * Func : rgSCHCmnUpdUeUlCqiInfo
8282 * Desc : Updates UL and DL Ue Information
8290 **********************************************************/
/* Initialises the UE's UL CQI state from the cell default: per-antenna
 * CQI when SRS antenna selection is configured, and caps maxUlCqi at the
 * 16QAM limit for categories without UL 64QAM support. NOTE(review):
 * braces and the #ifdef TFU_UPGRADE/#else wrapper are elided here. */
8291 static Void rgSCHCmnUpdUeUlCqiInfo(RgSchCellCb *cell,RgSchUeCb *ue,RgSchCmnUlUe *ueUl,RgSchCmnUe *ueSchCmn,RgSchCmnCell *cellSchd,Bool isEcp)
8295 if(ue->srsCb.srsCfg.type == RGR_SCH_SRS_SETUP)
8297 if(ue->ul.ulTxAntSel.pres)
/* Antenna selection on: seed only the selected antenna's CQI. */
8299 ueUl->crntUlCqi[ue->srsCb.selectedAnt] = cellSchd->ul.dfltUlCqi;
8300 ueUl->validUlCqi = ueUl->crntUlCqi[ue->srsCb.selectedAnt];
8304 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
8305 ueUl->validUlCqi = ueUl->crntUlCqi[0];
8307 ue->validTxAnt = ue->srsCb.selectedAnt;
8311 ueUl->validUlCqi = cellSchd->ul.dfltUlCqi;
/* Seed UL link adaptation iTbs from the CQI-to-TBS table (x100 fixed point);
 * isEcp selects the extended-CP row. */
8315 ueUl->ulLaCb.cqiBasediTbs = rgSchCmnUlCqiToTbsTbl[isEcp]
8316 [ueUl->validUlCqi] * 100;
8317 ueUl->ulLaCb.deltaiTbs = 0;
8321 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
8322 #endif /*TFU_UPGRADE */
8323 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
8324 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
8326 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
8330 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
8335 /***********************************************************
8337 * Func : rgSCHCmnUpdUeCatCfg
8339 * Desc : Updates UL and DL Ue Information
8347 **********************************************************/
/* Re-derives all capability limits that depend on the UE category after
 * a category reconfiguration: DL max TB bits/size, soft-buffer split,
 * UL max CQI and UL bytes-per-TTI cap. NOTE(review): braces, the `ri`
 * initialisation and the TDD/FDD divisor alternative are elided here. */
8348 static Void rgSCHCmnUpdUeCatCfg(RgSchUeCb *ue,RgSchCellCb *cell)
8350 RgSchDlHqEnt *hqE = NULLP;
8351 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
8352 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
8353 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
8354 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8357 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
8359 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
/* Categories 6/7 transmitting 4 layers get the larger max-TB entry. */
8362 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
8363 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
8364 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8365 && (RG_SCH_MAX_TX_LYRS_4 == ri))
8367 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8371 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
8374 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8376 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
8378 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
8382 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
8384 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8385 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8390 * @brief UE reconfiguration for scheduler.
8394 * Function : rgSCHCmnRgrUeRecfg
8396 * This functions updates UE specific scheduler
8397 * information upon UE reconfiguration.
8399 * @param[in] RgSchCellCb *cell
8400 * @param[in] RgSchUeCb *ue
8401 * @param[int] RgrUeRecfg *ueRecfg
8402 * @param[out] RgSchErrInfo *err
/* Applies a UE reconfiguration: each RGR_UE_*_RECFG bit in ueRecfgTypes
 * triggers the corresponding update (TX mode, CSG, category, aperiodic/
 * periodic CQI, power, QoS/AMBR), then the specific UL/DL schedulers,
 * DLFS and SPS are re-configured. NOTE(review): braces, RFAILED paths
 * and #ifdef wrappers (TFU_UPGRADE, EMTC, LTEMAC_SPS) are elided. */
8407 S16 rgSCHCmnRgrUeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg,RgSchErrInfo *err)
8409 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
8412 /* Basic validations */
/* Two call variants: with numTxPorts under TFU_UPGRADE, without otherwise. */
8413 if (ueRecfg->ueRecfgTypes & RGR_UE_TXMODE_RECFG)
8416 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, cell->numTxAntPorts);
8418 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg);
8419 #endif /* TFU_UPGRADE */
8421 if(ueRecfg->ueRecfgTypes & RGR_UE_CSG_PARAM_RECFG)
8423 ue->csgMmbrSta = ueRecfg->csgMmbrSta;
8425 /* Changes for UE Category reconfiguration feature */
8426 if(ueRecfg->ueRecfgTypes & RGR_UE_UECAT_RECFG)
8428 rgSCHCmnUpdUeCatCfg(ue, cell);
8430 if (ueRecfg->ueRecfgTypes & RGR_UE_APRD_DLCQI_RECFG)
8432 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
8433 pCellInfo->acqiCb.aCqiCfg = ueRecfg->aprdDlCqiRecfg;
/* Only periodic CQI modes 1-0 and 2-0 are supported. */
8436 if (ueRecfg->ueRecfgTypes & RGR_UE_PRD_DLCQI_RECFG)
8438 if ((ueRecfg->prdDlCqiRecfg.pres == TRUE)
8439 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD10)
8440 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD20))
8442 DU_LOG("\nERROR --> SCH : Unsupported periodic CQI "
8443 "reporting mode %d for old CRNIT:%d",
8444 (int)ueRecfg->prdDlCqiRecfg.prdModeEnum,ueRecfg->oldCrnti);
8445 err->errCause = RGSCHERR_SCH_CFG;
8448 ue->dl.ueDlCqiCfg.prdCqiCfg = ueRecfg->prdDlCqiRecfg;
8452 if (ueRecfg->ueRecfgTypes & RGR_UE_ULPWR_RECFG)
8454 if (rgSCHPwrUeRecfg(cell, ue, ueRecfg) != ROK)
8456 DU_LOG("\nERROR --> SCH : Power Reconfiguration Failed for OLD CRNTI:%d",ueRecfg->oldCrnti);
8461 if (ueRecfg->ueRecfgTypes & RGR_UE_QOS_RECFG)
8463 /* Uplink Sched related Initialization */
8464 if ((ueRecfg->ueQosRecfg.dlAmbr == 0) && (ueRecfg->ueQosRecfg.ueBr == 0))
8466 DU_LOG("\nERROR --> SCH : Ul Ambr and DL Ambr "
8467 "configured as 0 for OLD CRNTI:%d",ueRecfg->oldCrnti);
8468 err->errCause = RGSCHERR_SCH_CFG;
8471 ue->ul.cfgdAmbr = (ueRecfg->ueQosRecfg.ueBr * \
8472 RG_SCH_CMN_REFRESH_TIME)/100;
8473 /* Downlink Sched related Initialization */
8474 ue->dl.ambrCfgd = (ueRecfg->ueQosRecfg.dlAmbr * \
8475 RG_SCH_CMN_REFRESH_TIME)/100;
8476 /* Fix: syed Update the effAmbr and effUeBR fields w.r.t the
8477 * new QOS configuration */
/* Re-slot the UE in the refresh queue so the new AMBR takes effect on a
 * clean period boundary. */
8478 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
8479 /* Fix: syed align multiple UEs to refresh at same time */
8480 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
8481 rgSCHCmnApplyUeRefresh(cell, ue);
8482 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
8485 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
8487 if ((cellSchCmn->apisEmtcUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8489 DU_LOG("\nERROR --> SCH : Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8492 if ((cellSchCmn->apisEmtcDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8494 DU_LOG("\nERROR --> SCH : Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8501 if ((cellSchCmn->apisUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8503 DU_LOG("\nERROR --> SCH : Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8506 if ((cellSchCmn->apisDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8508 DU_LOG("\nERROR --> SCH : Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8512 /* DLFS UE Config */
8513 if (cellSchCmn->dl.isDlFreqSel)
8515 if ((cellSchCmn->apisDlfs->rgSCHDlfsUeRecfg(cell, ue, \
8516 ueRecfg, err)) != ROK)
8518 DU_LOG("\nERROR --> SCH : DLFS UE re-config FAILED for CRNTI:%d",ue->ueId);
8524 /* Invoke re-configuration on SPS module */
8525 if (rgSCHCmnSpsUeRecfg(cell, ue, ueRecfg, err) != ROK)
8527 DU_LOG("\nERROR --> SCH : DL SPS ReCFG FAILED for UE CRNTI:%d", ue->ueId);
8533 } /* rgSCHCmnRgrUeRecfg*/
8535 /***********************************************************
8537 * Func : rgSCHCmnUlUeDelAllocs
8539 * Desc : Deletion of all UE allocations.
8547 **********************************************************/
/* Releases every outstanding UL allocation of the UE: for each UL HARQ
 * process, clears stale SPS references, frees the subframe allocation,
 * and unlinks the process from the adaptive-retx list. NOTE(review):
 * braces and several #ifdef wrappers (SPS, EMTC variant of the free
 * call) are elided in this extract. */
8548 static Void rgSCHCmnUlUeDelAllocs(RgSchCellCb *cell,RgSchUeCb *ue)
8550 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
8551 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
8554 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
8557 for (i = 0; i < ueUl->hqEnt.numHqPrcs; ++i)
8559 RgSchUlHqProcCb *proc = rgSCHUhmGetUlHqProc(cell, ue, i);
8562 /* proc can't be NULL here */
8570 /* Added Insure Fixes Of reading Dangling memory.NULLed crntAlloc */
/* Drop the SPS module's pointer if it references this allocation. */
8572 if(proc->alloc == ulSpsUe->ulSpsSchdInfo.crntAlloc)
8574 ulSpsUe->ulSpsSchdInfo.crntAlloc = NULLP;
8575 ulSpsUe->ulSpsSchdInfo.crntAllocSf = NULLP;
/* Two free-call variants — presumably an eMTC/#else pair in the
 * elided lines; confirm against the full source. */
8579 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
8580 proc->alloc,ue->isEmtcUe);
8582 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
8585 /* PHY probably needn't be intimated since
8586 * whatever intimation it needs happens at the last minute
8589 /* Fix: syed Adaptive Msg3 Retx crash. Remove the harqProc
8590 * from adaptive retx List. */
8591 if (proc->reTxLnk.node)
8594 //TODO_SID: Need to take care
8595 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
8596 proc->reTxLnk.node = (PTR)NULLP;
8604 /***********************************************************
8606 * Func : rgSCHCmnDelUeFrmRefreshQ
8608 * Desc : Adds a UE to refresh queue, so that the UE is
8609 * periodically triggered to refresh it's GBR and
8618 **********************************************************/
/* Removes a UE from the periodic GBR/AMBR refresh queue: decrements the
 * per-offset UE counter and deregisters the UE's refresh timer from the
 * common scheduler's timer queue. NOTE(review): braces, the `arg`
 * declaration and the final timer-deregistration call are elided here. */
8619 static Void rgSCHCmnDelUeFrmRefreshQ(RgSchCellCb *cell,RgSchUeCb *ue)
8621 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
8623 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
8626 #ifdef RGL_SPECIFIC_CHANGES
/* Release this UE's slot in the refresh distribution bookkeeping. */
8627 if(ue->refreshOffset < RGSCH_MAX_REFRESH_GRPSZ)
8629 if(cell->refreshUeCnt[ue->refreshOffset])
8631 cell->refreshUeCnt[ue->refreshOffset]--;
/* Build the timer-queue deregistration argument for the refresh event. */
8637 memset(&arg, 0, sizeof(arg));
8638 arg.tqCp = &sched->tmrTqCp;
8639 arg.tq = sched->tmrTq;
8640 arg.timers = &ueSchd->tmr;
8644 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
8650 /***********************************************************
8652 * Func : rgSCHCmnUeCcchSduDel
8654 * Desc : Clear CCCH SDU scheduling context.
8662 **********************************************************/
/* Clears any pending CCCH SDU scheduling context for a UE: removes it
 * from the CCCH Tx list, or releases its CCCH HARQ process (stale PDCCH,
 * retx-list entry, or in-flight subframe transmission). NOTE(review):
 * braces and early returns (e.g. when hqE is NULL) are elided here. */
8663 static Void rgSCHCmnUeCcchSduDel(RgSchCellCb *cell,RgSchUeCb *ueCb)
8665 RgSchDlHqEnt *hqE = NULLP;
8666 RgSchDlHqProcCb *ccchSduHqP = NULLP;
8667 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
8670 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
8675 ccchSduHqP = hqE->ccchSduProc;
8676 if(ueCb->ccchSduLnk.node != NULLP)
8678 /* Remove the ccchSduProc if it is in the Tx list */
8679 cmLListDelFrm(&(cell->ccchSduUeLst), &(ueCb->ccchSduLnk));
8680 ueCb->ccchSduLnk.node = NULLP;
8682 else if(ccchSduHqP != NULLP)
8684 /* Fix for crash due to stale pdcch. Release ccch pdcch*/
/* Return the PDCCH to the cell's free pool before releasing the proc. */
8685 if(ccchSduHqP->pdcch)
8687 cmLListDelFrm(&ccchSduHqP->subFrm->pdcchInfo.pdcchs,
8688 &ccchSduHqP->pdcch->lnk);
8689 cmLListAdd2Tail(&cell->pdcchLst, &ccchSduHqP->pdcch->lnk);
8690 ccchSduHqP->pdcch = NULLP;
8692 if(ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node != NULLP)
8694 /* Remove the ccchSduProc if it is in the retx list */
8695 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
8696 &ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk);
8697 /* ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node = NULLP; */
8698 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
/* Otherwise it may still be attached to a subframe for transmission. */
8700 else if ((ccchSduHqP->subFrm != NULLP) &&
8701 (ccchSduHqP->hqPSfLnk.node != NULLP))
8703 rgSCHUtlDlHqPTbRmvFrmTx(ccchSduHqP->subFrm,
8704 ccchSduHqP, 0, FALSE);
8705 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
8715 * @brief UE deletion for scheduler.
8719 * Function : rgSCHCmnUeDel
8721 * This functions deletes all scheduler information
8722 * pertaining to an UE.
8724 * @param[in] RgSchCellCb *cell
8725 * @param[in] RgSchUeCb *ue
/* rgSCHCmnUeDel: delete all common-scheduler state for one UE.
 * Visible sequence: bail out early if common scheduler config never
 * happened for this UE; clear CCCH SDU context (EMTC and legacy paths);
 * remove the UE from the refresh queue, UL allocations and RACH info;
 * free the UL and DL scheduler-specific UE contexts via the (EMTC-aware)
 * API tables; delete SCell contexts, power and SPS state, DL SF HARQ and
 * DLFS context; finally free the per-UE UL allocation records, per-LCG
 * scheduler blocks and the common UE block itself back to the system.
 * @param[in] cell  cell control block
 * @param[in] ue    UE control block to delete */
8728 Void rgSCHCmnUeDel(RgSchCellCb *cell,RgSchUeCb *ue)
8730 RgSchDlHqEnt *hqE = NULLP;
8731 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
8733 RgSchCmnAllocRecord *allRcd;
8735 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
8738 if (RG_SCH_CMN_GET_UE(ue,cell) == NULLP)
8740 /* Common scheduler config has not happened yet */
8743 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
8746 /* UE Free can be triggered before MSG4 done when dlHqE is not updated */
8750 rgSCHEmtcCmnUeCcchSduDel(cell, ue);
8755 rgSCHCmnUeCcchSduDel(cell, ue);
8758 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
8760 rgSCHCmnUlUeDelAllocs(cell, ue);
8762 rgSCHCmnDelRachInfo(cell, ue);
8765 if(TRUE == ue->isEmtcUe)
8767 cellSchCmn->apisEmtcUl->rgSCHFreeUlUe(cell, ue);
8772 cellSchCmn->apisUl->rgSCHFreeUlUe(cell, ue);
/* Secondary cells: index 0 is the primary cell, hence start at 1 */
8777 for(idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
8779 if(ue->cellInfo[idx] != NULLP)
8781 rgSCHSCellDelUeSCell(cell,ue,idx);
8788 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
8790 cellSchCmn->apisEmtcDl->rgSCHFreeDlUe(cell, ue);
8795 cellSchCmn->apisDl->rgSCHFreeDlUe(cell, ue);
8797 rgSCHPwrUeDel(cell, ue);
8800 rgSCHCmnSpsUeDel(cell, ue);
8801 #endif /* LTEMAC_SPS*/
8804 rgSchCmnDlSfHqDel(ue, cell);
8806 /* DLFS UE delete */
8807 if (cellSchCmn->dl.isDlFreqSel)
8809 cellSchCmn->apisDlfs->rgSCHDlfsUeDel(cell, ue);
/* Drain and free every outstanding UL allocation record for this UE */
8811 node = ueUl->ulAllocLst.first;
8813 /* ccpu00117052 - MOD - Passing double pointer in all the places of
8814 rgSCHUtlFreeSBuf function call for proper NULLP assignment*/
8817 allRcd = (RgSchCmnAllocRecord *)node->node;
8819 cmLListDelFrm(&ueUl->ulAllocLst, &allRcd->lnk);
8820 rgSCHUtlFreeSBuf(cell->instIdx,
8821 (Data**)(&allRcd), (sizeof(RgSchCmnAllocRecord)));
/* Free the scheduler block of each configured logical channel group */
8824 for(cnt = 0; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
8826 if (ue->ul.lcgArr[cnt].sch != NULLP)
8828 rgSCHUtlFreeSBuf(cell->instIdx,
8829 (Data**)(&(ue->ul.lcgArr[cnt].sch)), (sizeof(RgSchCmnLcg)));
8833 /* Fix : syed Moved hqEnt deinit to rgSCHCmnDlDeInitHqEnt */
8834 idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId) & (CM_LTE_MAX_CELLS - 1));
8835 rgSCHUtlFreeSBuf(cell->instIdx,
8836 (Data**)(&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch))), (sizeof(RgSchCmnUe)));
8838 } /* rgSCHCmnUeDel */
8842 * @brief This function handles the common code rate configurations
8843 * done as part of RgrCellCfg/RgrCellRecfg.
8847 * Function: rgSCHCmnDlCnsdrCmnRt
8848 * Purpose: This function handles the common code rate configurations
8849 * done as part of RgrCellCfg/RgrCellRecfg.
8851 * Invoked by: Scheduler
8853 * @param[in] RgSchCellCb *cell
8854 * @param[in] RgrDlCmnCodeRateCfg *dlCmnCodeRate
/* rgSCHCmnDlCnsdrCmnRt: derive common-channel scheduling parameters from
 * the DL common code-rate configuration (RgrCellCfg/RgrCellRecfg).
 * Computes:
 *  - bitsPerRb for BCCH/PCCH/RAR (code rate is per 1024 PHY bits; QPSK
 *    => per 512 REs), cached in cellDl->dl.bitsPerRb for SI-RNTI/P-RNTI/
 *    RA-RNTI RB sizing;
 *  - the ITbs indices for 2-RB and 3-RB common allocations;
 *  - the PDCCH aggregation level (4 or 8) for common DCI from the
 *    configured PDCCH code rate;
 *  - the CCCH CQI used before per-UE CQI reports exist.
 * @param[in] cell           cell control block
 * @param[in] dlCmnCodeRate  configured DL common code rates
 * @return S16 status (success path visible here; failure branches elided) */
8858 static S16 rgSCHCmnDlCnsdrCmnRt(RgSchCellCb *cell,RgrDlCmnCodeRateCfg *dlCmnCodeRate)
8860 RgSchCmnCell *cellDl = RG_SCH_CMN_GET_CELL(cell);
8862 uint32_t bitsPer2Rb;
8863 uint32_t bitsPer3Rb;
8868 /* code rate is bits per 1024 phy bits, since modl'n scheme is 2. it is
8869 * bits per 1024/2 REs */
8870 if (dlCmnCodeRate->bcchPchRaCodeRate != 0)
8872 bitsPerRb = ((dlCmnCodeRate->bcchPchRaCodeRate * 2) *
8873 cellDl->dl.noResPerRb[3])/1024;
8877 bitsPerRb = ((RG_SCH_CMN_DEF_BCCHPCCH_CODERATE * 2) *
8878 cellDl->dl.noResPerRb[3])/1024;
8880 /* Store bitsPerRb in cellDl->dl to use later to determine
8881 * Number of RBs for UEs with SI-RNTI, P-RNTI and RA-RNTI */
8882 cellDl->dl.bitsPerRb = bitsPerRb;
8883 /* ccpu00115595 end*/
8884 /* calculate the ITbs for 2 RBs. Initialize ITbs to MAX value */
8887 bitsPer2Rb = bitsPerRb * rbNum;
8888 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer2Rb))
8891 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs2Rbs = 0) :
8892 (cellDl->dl.cmnChITbs.iTbs2Rbs = i-1);
8894 /* calculate the ITbs for 3 RBs. Initialize ITbs to MAX value */
8897 bitsPer3Rb = bitsPerRb * rbNum;
8898 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer3Rb))
8901 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs3Rbs = 0) :
8902 (cellDl->dl.cmnChITbs.iTbs3Rbs = i-1);
/* Count DCI format 1A payload bits field-by-field (TDD fields elided) */
8905 pdcchBits = 1 + /* Flag for format0/format1a differentiation */
8906 1 + /* Localized/distributed VRB assignment flag */
8909 3 + /* Harq process Id */
8911 4 + /* Harq process Id */
8912 2 + /* UL Index or DAI */
8914 1 + /* New Data Indicator */
8917 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
8918 (cell->bwCfg.dlTotalBw + 1))/2);
8919 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
8920 Since VRB is local */
8921 /* For TDD consider DAI */
8923 /* Convert the pdcchBits to actual pdcchBits required for transmission */
8924 if (dlCmnCodeRate->pdcchCodeRate != 0)
8926 pdcchBits = (pdcchBits * 1024)/dlCmnCodeRate->pdcchCodeRate;
8927 if (pdcchBits <= 288) /* 288 : Num of pdcch bits for aggrLvl=4 */
8929 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
8931 else /* 576 : Num of pdcch bits for aggrLvl=8 */
8933 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL8;
8938 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
8940 if (dlCmnCodeRate->ccchCqi == 0)
8946 cellDl->dl.ccchCqi = dlCmnCodeRate->ccchCqi;
8953 * @brief This function handles the configuration of cell for the first
8954 * time by the scheduler.
8958 * Function: rgSCHCmnDlRgrCellCfg
8959 * Purpose: Configuration received is stored into the data structures
8960 * Also, update the scheduler with the number of frames of
8961 * RACH preamble transmission.
8963 * Invoked by: BO and Scheduler
8965 * @param[in] RgSchCellCb* cell
8966 * @param[in] RgrCellCfg* cfg
/* rgSCHCmnDlRgrCellCfg (TDD variant): one-time DL cell configuration.
 * Stores the RGR cell configuration into common-scheduler structures:
 * RA-response subframes, Msg4 HARQ limits and Tx delay, per-TTI UE caps,
 * special-subframe DL-data permission (36.213 sec 7.1.7) and per-subframe
 * type/nCce, RACH/PHICH/ACK-NACK feedback initialization, DwPTS/UpPTS
 * lengths per special-subframe config, CQI->TBS/efficiency tables per
 * CFI, CFI state, per-RB RE counts (CRS-adjusted), DL BW caps, CCCH cap,
 * common code rates, retx lists, DLFS and power configuration.
 * @param[in]  cell  cell control block
 * @param[in]  cfg   cell configuration from RGR
 * @param[out] err   error info filled on failure
 * @return S16 status */
8970 static S16 rgSCHCmnDlRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cfg,RgSchErrInfo *err)
8972 RgSchCmnCell *cellSch;
8975 uint8_t numPdcchSym;
8976 uint8_t noSymPerSlot;
8977 uint8_t maxDlSubfrms = cell->numDlSubfrms;
8978 uint8_t splSubfrmIdx = cfg->spclSfCfgIdx;
8979 uint8_t swPtCnt = 0;
8981 RgSchTddSubfrmInfo subfrmInfo = rgSchTddMaxUlSubfrmTbl[cell->ulDlCfgIdx];
8994 cellSch = RG_SCH_CMN_GET_CELL(cell);
8995 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->\
8996 rachCfg.preambleFormat];
8997 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
8998 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
9000 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
9001 3 TTI (MAX L1+L2 processing delay at the UE) */
9002 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
9003 rgSchCmnHarqRtt[cell->ulDlCfgIdx] + 3;
9004 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9005 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9006 if (cfg->maxUePerDlSf == 0)
9008 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9010 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
9016 if (cell->bwCfg.dlTotalBw <= 10)
9026 /* DwPTS Scheduling Changes Start */
9027 cellSch->dl.splSfCfg = splSubfrmIdx;
9029 if (cfg->isCpDlExtend == TRUE)
9031 if((0 == splSubfrmIdx) || (4 == splSubfrmIdx) ||
9032 (7 == splSubfrmIdx) || (8 == splSubfrmIdx)
9035 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
9039 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
9044 /* Refer to 36.213 Section 7.1.7 */
9045 if((0 == splSubfrmIdx) || (5 == splSubfrmIdx))
9047 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
9051 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
9054 /* DwPTS Scheduling Changes End */
9056 splSfCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
9057 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
/* Classify every DL subframe (special/normal) and compute its nCce */
9059 for (sfCount = 0; sfCount < maxDlSubfrms; sfCount++)
9061 sf = cell->subFrms[sfCount];
9062 /* Sfcount matches the first special subframe occurs at Index 0
9063 * or subsequent special subframes */
9064 if(subfrmInfo.switchPoints == 1)
9066 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
9067 RG_SCH_CMN_10_MS_PRD, &subfrmInfo);
9071 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
9072 RG_SCH_CMN_5_MS_PRD, &subfrmInfo);
9074 if(isSplfrm == TRUE)
9077 /* DwPTS Scheduling Changes Start */
9078 if (cell->splSubfrmCfg.isDlDataAllowed == TRUE)
9080 sf->sfType = RG_SCH_SPL_SF_DATA;
9084 sf->sfType = RG_SCH_SPL_SF_NO_DATA;
9086 /* DwPTS Scheduling Changes End */
9090 /* DwPTS Scheduling Changes Start */
9093 sf->sfType = RG_SCH_DL_SF;
9097 sf->sfType = RG_SCH_DL_SF_0;
9099 /* DwPTS Scheduling Changes End */
9102 /* Calculate the number of CCEs per subframe in the cell */
9103 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][sf->sfNum];
9104 if(cell->dynCfiCb.isDynCfiEnb == TRUE)
9106 /* In case if Dynamic CFI feature is enabled, default CFI
9107 * value 1 is used */
9108 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][1];
9112 if (sf->sfType == RG_SCH_SPL_SF_DATA)
9114 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
9118 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi)];
9123 /* Initialize the RACH response scheduling related information */
9124 if(rgSCHCmnDlRachInfoInit(cell) != ROK)
9129 /* Allocate PRACH preamble list */
9130 rgSCHCmnDlCreateRachPrmLst(cell);
9132 /* Initialize PHICH offset information */
9133 rgSCHCmnDlPhichOffsetInit(cell);
9135 /* Update the size of HARQ ACK/NACK feedback table */
9136 /* The array size is increased by 2 to have enough free indices, where other
9137 * indices are busy waiting for HARQ feedback */
9138 cell->ackNackFdbkArrSize = rgSchTddANFdbkMapTbl[cell->ulDlCfgIdx] + 2;
9140 /* Initialize expected HARQ ACK/NACK feedback time */
9141 rgSCHCmnDlANFdbkInit(cell);
9143 /* Initialize UL association set index */
9144 if(cell->ulDlCfgIdx != 0)
9146 rgSCHCmnDlKdashUlAscInit(cell);
/* DwPTS/UpPTS lengths depend on CP length in each direction */
9149 if (cfg->isCpDlExtend == TRUE)
9151 cp = RG_SCH_CMN_EXT_CP;
9153 cell->splSubfrmCfg.dwPts =
9154 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlDwPts;
9156 if ( cell->splSubfrmCfg.dwPts == 0 )
9158 cell->isDwPtsCnted = FALSE;
9162 cell->isDwPtsCnted = TRUE;
9165 if(cfg->isCpUlExtend == TRUE)
9167 cell->splSubfrmCfg.upPts =
9168 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlExtUpPts;
9172 cell->splSubfrmCfg.upPts =
9173 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlNorUpPts;
9178 cp = RG_SCH_CMN_NOR_CP;
9180 cell->splSubfrmCfg.dwPts =
9181 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlDwPts;
9182 cell->isDwPtsCnted = TRUE;
9184 if(cfg->isCpUlExtend == TRUE)
9186 cell->splSubfrmCfg.upPts =
9187 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlExtUpPts;
9191 cell->splSubfrmCfg.upPts =
9192 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlNorUpPts;
9196 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
9197 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++,cfiIdx++)
9199 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
9200 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
9201 [cell->numTxAntPorts]][cfiIdx];
9202 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
9203 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
9204 [cell->numTxAntPorts]][cfiIdx];
9207 /* Initializing the values of CFI parameters */
9208 if(cell->dynCfiCb.isDynCfiEnb)
9210 /* If DCFI is enabled, current CFI value will start from 1 */
9211 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
9215 /* If DCFI is disabled, current CFI value is set as default max allowed CFI value */
9216 cellSch->dl.currCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
9217 cellSch->dl.newCfi = cellSch->dl.currCfi;
9220 /* Include CRS REs while calculating Efficiency
9221 * The number of Resource Elements occupied by CRS depends on Number of
9222 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
9223 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
9224 * details of the same. Please note that PDCCH overlap symbols would not
9225 * considered in CRS REs deduction */
9226 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
9228 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
9229 - numPdcchSym) *RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
9232 /* DwPTS Scheduling Changes Start */
9233 antPortIdx = (cell->numTxAntPorts == 1)? 0:
9234 ((cell->numTxAntPorts == 2)? 1: 2);
9236 if (cp == RG_SCH_CMN_NOR_CP)
9238 splSfIdx = (splSubfrmIdx == 4)? 1: 0;
9242 splSfIdx = (splSubfrmIdx == 3)? 1: 0;
9245 numCrs = rgSchCmnDwptsCrs[splSfIdx][antPortIdx];
9247 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI-1; cfi++)
9249 /* If CFI is 2 and Ant Port is 4, don't consider the sym 1 CRS REs */
9250 if (antPortIdx == 2 && cfi == 2)
9254 cellSch->dl.numReDwPts[cfi] = ((cell->splSubfrmCfg.dwPts - cfi)*
9255 RB_SCH_CMN_NUM_SCS_PER_RB) - numCrs;
9257 /* DwPTS Scheduling Changes End */
9259 if (cfg->maxDlBwPerUe == 0)
9261 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
9265 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
9267 if (cfg->maxDlRetxBw == 0)
9269 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
9273 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
9275 /* Fix: MUE_PERTTI_DL*/
9276 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9277 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9278 if (cfg->maxUePerDlSf == 0)
9280 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9282 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
9283 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
9284 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
9286 DU_LOG("\nERROR --> SCH : Invalid configuration !: "
9287 "maxCcchPerDlSf %u > maxUePerDlSf %u",
9288 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
9292 else if (!cfg->maxCcchPerDlSf)
9294 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
9295 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
9296 * doesn't consider CCCH allocation in MaxUePerTti cap. Hence more than
9297 * 4UEs getting scheduled & SCH expects >16 Hq PDUs in a TTI which causes
9298 * FLE crash in PHY as PHY has limit of 16 max*/
9299 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
9303 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
9305 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
9310 /*ccpu00118273 - ADD - start */
9311 cmLListInit(&cellSch->dl.msg4RetxLst);
9313 cmLListInit(&cellSch->dl.ccchSduRetxLst);
9316 #ifdef RG_PHASE2_SCHED
9317 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
9319 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
9321 if (cfg->dlfsCfg.isDlFreqSel)
9323 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
9329 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
9332 /* Power related configuration */
9333 ret = rgSCHPwrCellCfg(cell, cfg);
9339 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
9340 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
9341 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
9342 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
9343 cellSch->dl.msg4pAVal = cfg->msg4pAVal;
9348 * @brief This function handles the configuration of cell for the first
9349 * time by the scheduler.
9353 * Function: rgSCHCmnDlRgrCellCfg
9354 * Purpose: Configuration received is stored into the data structures
9355 * Also, update the scheduler with the number of frames of
9356 * RACH preamble transmission.
9358 * Invoked by: BO and Scheduler
9360 * @param[in] RgSchCellCb* cell
9361 * @param[in] RgrCellCfg* cfg
9362 * @param[in] RgSchErrInfo* err
/* rgSCHCmnDlRgrCellCfg (second build variant — presumably the non-TDD
 * branch, since '#endif / * LTE_TDD * /' follows below; confirm against
 * the elided preprocessor lines): one-time DL cell configuration.
 * Stores the RGR cell config into common-scheduler structures: RA
 * subframes, Msg4 HARQ/delay, CQI->TBS/efficiency tables per CFI (with
 * EMTC variants), CFI state, per-RB RE counts, DL BW caps, per-TTI UE
 * caps with validation, CCCH cap, common code rates, retx lists, DLFS
 * and power configuration.
 * @param[in]  cell  cell control block
 * @param[in]  cfg   cell configuration from RGR
 * @param[out] err   error info filled on failure
 * @return S16 status */
9366 static S16 rgSCHCmnDlRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cfg,RgSchErrInfo *err)
9369 RgSchCmnCell *cellSch;
9371 uint8_t numPdcchSym;
9372 uint8_t noSymPerSlot;
9377 cellSch = RG_SCH_CMN_GET_CELL(cell);
9379 /* Initialize the parameters with the ones received in the */
9380 /* configuration. */
9382 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA
9383 * sub-frames from preamble format */
9384 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->rachCfg.preambleFormat];
9386 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
9387 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
9389 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
9390 3 TTI (MAX L1+L2 processing delay at the UE) */
9391 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
9392 rgSchCmnHarqRtt[7] + 3;
9394 if (cell->bwCfg.dlTotalBw <= 10)
9405 if (cell->isCpDlExtend == TRUE)
9407 cp = RG_SCH_CMN_EXT_CP;
9412 cp = RG_SCH_CMN_NOR_CP;
9416 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
9417 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, cfiIdx++)
9419 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
9421 cellSch->dl.emtcCqiToTbsTbl[0][cfi] = rgSchEmtcCmnCqiToTbs[0][cp][cfiIdx];
9423 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
9424 [cell->numTxAntPorts]][cfiIdx];
9425 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
9427 cellSch->dl.emtcCqiToTbsTbl[1][cfi] = rgSchEmtcCmnCqiToTbs[1][cp][cfiIdx];
9429 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
9430 [cell->numTxAntPorts]][cfiIdx];
9433 /* Initializing the values of CFI parameters */
9434 if(cell->dynCfiCb.isDynCfiEnb)
9436 /* If DCFI is enabled, current CFI value will start from 1 */
9437 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
9441 /* If DCFI is disabled, current CFI value is set as default CFI value */
9442 cellSch->dl.currCfi = cellSch->cfiCfg.cfi;
9443 cellSch->dl.newCfi = cellSch->dl.currCfi;
9446 /* Include CRS REs while calculating Efficiency
9447 * The number of Resource Elements occupied by CRS depends on Number of
9448 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
9449 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
9450 * details of the same. Please note that PDCCH overlap symbols would not
9451 * considered in CRS REs deduction */
9452 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
9454 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
9455 - numPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
9458 if (cfg->maxDlBwPerUe == 0)
9460 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
9464 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
9466 if (cfg->maxDlRetxBw == 0)
9468 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
9472 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
9475 /* Fix: MUE_PERTTI_DL*/
9476 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9477 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9478 if (cfg->maxUePerDlSf == 0)
9480 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9482 /* Fix: MUE_PERTTI_DL syed validating Cell Configuration */
9483 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
9485 DU_LOG("\nERROR --> SCH : FAILED MaxUePerDlSf(%u) < MaxDlUeNewTxPerTti(%u)",
9486 cellSch->dl.maxUePerDlSf,
9487 cellSch->dl.maxUeNewTxPerTti);
9490 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
9491 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
9493 DU_LOG("\nERROR --> SCH : Invalid configuration !: "
9494 "maxCcchPerDlSf %u > maxUePerDlSf %u",
9495 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
9499 else if (!cfg->maxCcchPerDlSf)
9501 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
9502 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
9503 * doesn't consider CCCH allocation in MaxUePerTti cap. Hence more than
9504 * 4UEs getting scheduled & SCH expects >16 Hq PDUs in a TTI which causes
9505 * FLE crash in PHY as PHY has limit of 16 max*/
9506 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
9510 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
9514 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
9518 cmLListInit(&cellSch->dl.msg4RetxLst);
9520 cmLListInit(&cellSch->dl.ccchSduRetxLst);
9523 #ifdef RG_PHASE2_SCHED
9524 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
9526 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
9528 if (cfg->dlfsCfg.isDlFreqSel)
9530 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
9536 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
9539 /* Power related configuration */
9540 ret = rgSCHPwrCellCfg(cell, cfg);
9546 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
9547 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
9548 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
9549 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
9550 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
9553 #endif /* LTE_TDD */
9555 /***********************************************************
9557 * Func : rgSCHCmnUlCalcReqRbCeil
9559 * Desc : Calculate RB required to satisfy 'bytes' for
9561 * Returns number of RBs such that requirement
9562 * is necessarily satisfied (does a 'ceiling'
9565 * Ret : Required RBs (uint8_t)
9571 **********************************************************/
/* rgSCHCmnUlCalcReqRbCeil: number of UL RBs needed to carry 'bytes'
 * payload at the spectral efficiency implied by 'cqi' — a ceiling in two
 * steps: bytes -> REs (eff is per-1024 scaled), then REs -> RBs using the
 * cell's REs-per-RB.
 * @param[in] bytes   payload size in bytes
 * @param[in] cqi     UL CQI index into rgSchCmnUlCqiTbl
 * @param[in] cellUl  common UL cell block (supplies REs per RB)
 * @return required RB count (rounded up) */
9572 uint8_t rgSCHCmnUlCalcReqRbCeil(uint32_t bytes,uint8_t cqi,RgSchCmnUlCell *cellUl)
9574 uint32_t numRe = RGSCH_CEIL((bytes * 8) * 1024, rgSchCmnUlCqiTbl[cqi].eff);
9575 return ((uint8_t)RGSCH_CEIL(numRe, RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl)));
9578 /***********************************************************
9580 * Func : rgSCHCmnPrecompMsg3Vars
9582 * Desc : Precomputes the following for msg3 allocation:
9583 * 1. numSb and Imcs for msg size A
9584 * 2. numSb and Imcs otherwise
9588 * Notes: The corresponding vars in cellUl struct is filled
9593 **********************************************************/
/* rgSCHCmnPrecompMsg3Vars: precompute Msg3 grant parameters used when
 * building RARs — for each preamble group (A and B) the number of UL
 * subbands and the IMcs. CQI is clamped to the 16QAM limit and the MCS
 * to RG_SCH_CMN_MAX_MSG3_IMCS (RAR MCS field is 4 bits, 36.213 sec 6.2);
 * UE category CAT_1 is assumed since no UE context exists yet. Subband
 * counts are grown until the TB size table covers the grant and then
 * rounded up to a 2^a*3^b*5^c value (DFT-friendly PUSCH sizes).
 * @param[in,out] cellUl  common UL cell block; ra.prmblA/B fields filled
 * @param[in] ccchCqi  CQI assumed for CCCH (from cell config)
 * @param[in] msgSzA   RACH msgSizeGroupA threshold (bytes)
 * @param[in] sbSize   UL subband size in RBs
 * @param[in] isEcp    TRUE for extended UL cyclic prefix
 * @return S16 status */
9594 static S16 rgSCHCmnPrecompMsg3Vars(RgSchCmnUlCell *cellUl,uint8_t ccchCqi,uint16_t msgSzA,uint8_t sbSize,Bool isEcp)
9601 uint16_t msg3GrntSz = 0;
9604 if (ccchCqi > cellUl->max16qamCqi)
9606 ccchCqi = cellUl->max16qamCqi;
9608 /* #ifndef RG_SCH_CMN_EXP_CP_SUP For ECP Pick the index 1 */
9610 ccchTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
9611 ccchMcs = rgSCHCmnUlGetIMcsFrmITbs(ccchTbs, CM_LTE_UE_CAT_1);
9613 /* MCS should fit in 4 bits in RAR */
9619 /* Limit the ccchMcs to 15 as it
9620 * can be inferred from 36.213, section 6.2 that msg3 imcs
9622 * Since, UE doesn't exist right now, we use CAT_1 for ue
9624 while((ccchMcs = (rgSCHCmnUlGetIMcsFrmITbs(
9625 rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi],CM_LTE_UE_CAT_1))
9627 RG_SCH_CMN_MAX_MSG3_IMCS)
9632 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
9634 if (msgSzA < RGSCH_MIN_MSG3_GRNT_SZ)
/* Size the grant for msgSzA, grow until TB table covers it */
9638 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(msgSzA, ccchCqi, cellUl), sbSize);
9640 numRb = numSb * sbSize;
9641 msg3GrntSz = 8 * msgSzA;
9643 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
9646 numRb = numSb * sbSize;
9648 while (rgSchCmnMult235Tbl[numSb].match != numSb)
9652 /* Reversed(Corrected) the assignment for preamble-GrpA
9653 * Refer- TG36.321- section- 5.1.2*/
9654 cellUl->ra.prmblBNumSb = numSb;
9655 cellUl->ra.prmblBIMcs = ccchMcs;
/* Repeat sizing for the minimum Msg3 grant (preamble group A) */
9656 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(RGSCH_MIN_MSG3_GRNT_SZ, \
9660 numRb = numSb * sbSize;
9661 msg3GrntSz = 8 * RGSCH_MIN_MSG3_GRNT_SZ;
9662 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
9665 numRb = numSb * sbSize;
9667 while (rgSchCmnMult235Tbl[numSb].match != numSb)
9671 /* Reversed(Corrected) the assignment for preamble-GrpA
9672 * Refer- TG36.321- section- 5.1.2*/
9673 cellUl->ra.prmblANumSb = numSb;
9674 cellUl->ra.prmblAIMcs = ccchMcs;
/* NOTE(review): appears to be a debug switch/counter for printing PUCCH
 * details (the CA_DBG DU_LOG prints below) — its reader is not visible in
 * this chunk; confirm before documenting further. */
9678 uint32_t gPrntPucchDet=0;
9681 /***********************************************************
9683 * Func : rgSCHCmnUlCalcAvailBw
9685 * Desc : Calculates bandwidth available for PUSCH scheduling.
9687 * Ret : S16 (ROK/RFAILED)
9693 **********************************************************/
/* rgSCHCmnUlCalcAvailBw (TDD variant): compute the PUSCH region for a
 * given CFI by excluding the PUCCH RBs at the band edges. Derives the
 * worst-case n1 PUCCH resource from the CCE count (using max M per
 * 36.213 Table 10.1-1 and min mi), converts n1/n2/mixed resources to an
 * RB exclusion count, and reports the PUSCH start RB and available BW.
 * May lower cell->dynCfiCb.maxCfi if the PUCCH RB cap is exceeded.
 * @param[in]  cell        cell control block
 * @param[in]  cellCfg     cell configuration (unused fields elided)
 * @param[in]  cfi         CFI value being evaluated
 * @param[out] rbStartRef  first RB usable by PUSCH
 * @param[out] bwAvailRef  number of RBs usable by PUSCH
 * @return S16 — RFAILED when no PUSCH BW remains */
9694 static S16 rgSCHCmnUlCalcAvailBw(RgSchCellCb *cell,RgrCellCfg *cellCfg,uint8_t cfi,uint8_t *rbStartRef,uint8_t *bwAvailRef)
9697 uint8_t ulBw = cell->bwCfg.ulTotalBw;
9698 uint8_t n2Rb = cell->pucchCfg.resourceSize;
9699 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
9700 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
9701 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
9707 uint8_t exclRb; /* RBs to exclude */
9709 uint8_t puschRbStart;
9710 /* To avoid PUCCH and PUSCH collision issue */
9714 /* Maximum value of M as per Table 10.1-1 */
9715 uint8_t M[RGSCH_MAX_TDD_UL_DL_CFG] = {1, 2, 4, 3, 4, 9, 1};
9718 if (cell->isCpUlExtend)
9723 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
9725 /* Considering the max no. of CCEs for PUSCH BW calculation
9726 * based on min mi value */
9727 if (cell->ulDlCfgIdx == 0 || cell->ulDlCfgIdx == 6)
9736 totalCce = cell->dynCfiCb.cfi2NCceTbl[mi][cfi];
9738 P = rgSCHCmnGetPValFrmCCE(cell, totalCce-1);
9739 n1PlusOne = cell->rgSchTddNpValTbl[P + 1];
9740 n1Max = (M[cell->ulDlCfgIdx] - 1)*n1PlusOne + (totalCce-1) + n1Pucch;
9742 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
9744 n1RbPart = (c*n1Cs)/pucchDeltaShft;
9745 n1Rb = (n1Max - n1RbPart)/ n1PerRb;
9746 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
9748 /* get the total Number of RB's to be excluded for PUSCH */
9750 if(n1Pucch < n1RbPart)
9756 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
9758 puschRbStart = exclRb/2 + 1;
9760 /* Num of PUCCH RBs = puschRbStart*2 */
9761 if (puschRbStart * 2 >= ulBw)
9763 DU_LOG("\nERROR --> SCH : No bw available for PUSCH");
9767 *rbStartRef = puschRbStart;
9768 *bwAvailRef = ulBw - puschRbStart * 2;
9770 if(cell->pucchCfg.maxPucchRb !=0 &&
9771 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
9773 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
9780 /***********************************************************
9782 * Func : rgSCHCmnUlCalcAvailBw
9784 * Desc : Calculates bandwidth available for PUSCH scheduling.
9786 * Ret : S16 (ROK/RFAILED)
9792 **********************************************************/
/* rgSCHCmnUlCalcAvailBw (second build variant): compute the PUSCH region
 * for a given CFI by excluding edge PUCCH RBs. n1Max here is simply
 * n1PucchAn + (nCce - 1); when PUCCH format 3 is supported, additional
 * RBs are reserved (one per 5 UEs of the DL per-SF cap). Reports PUSCH
 * start RB and available BW; may lower cell->dynCfiCb.maxCfi when the
 * configured PUCCH RB cap is exceeded.
 * @param[in]  cell        cell control block
 * @param[in]  cellCfg     cell configuration (unused fields elided)
 * @param[in]  cfi         CFI value being evaluated
 * @param[out] rbStartRef  first RB usable by PUSCH
 * @param[out] bwAvailRef  number of RBs usable by PUSCH
 * @return S16 — RFAILED when no PUSCH BW remains */
9793 static S16 rgSCHCmnUlCalcAvailBw(RgSchCellCb *cell,RgrCellCfg *cellCfg,uint8_t cfi,uint8_t *rbStartRef,uint8_t *bwAvailRef)
9796 uint8_t ulBw = cell->bwCfg.ulTotalBw;
9797 uint8_t n2Rb = cell->pucchCfg.resourceSize;
9798 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
9799 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
9800 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
9806 uint8_t exclRb; /* RBs to exclude */
9808 uint8_t puschRbStart;
9810 uint16_t numOfN3PucchRb;
9811 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
9815 if (cell->isCpUlExtend)
9820 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
9822 totalCce = cell->dynCfiCb.cfi2NCceTbl[0][cfi];
9824 n1Max = n1Pucch + totalCce-1;
9826 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
9828 n1RbPart = (c*n1Cs)/pucchDeltaShft;
9829 n1Rb = (uint8_t)((n1Max - n1RbPart) / n1PerRb);
9830 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
9832 /* get the total Number of RB's to be excluded for PUSCH */
9834 if(n1Pucch < n1RbPart)
9840 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
9842 /*Support for PUCCH Format 3*/
9844 if (cell->isPucchFormat3Sptd)
9846 numOfN3PucchRb = RGSCH_CEIL(cellSch->dl.maxUePerDlSf,5);
9847 exclRb = exclRb + numOfN3PucchRb;
9850 puschRbStart = exclRb/2 + 1;
9855 DU_LOG("\nDEBUG --> SCH : CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%ld:%d:%d:%d:%d:%d]\n",
9856 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
9858 DU_LOG("\nDEBUG --> SCH : CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%d:%d:%d:%d:%d:%d]\n",
9859 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
9863 if (puschRbStart*2 >= ulBw)
9865 DU_LOG("\nERROR --> SCH : No bw available for PUSCH");
9869 *rbStartRef = puschRbStart;
9870 *bwAvailRef = ulBw - puschRbStart * 2;
9872 if(cell->pucchCfg.maxPucchRb !=0 &&
9873 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
9875 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
9884 /***********************************************************
9886 * Func : rgSCHCmnUlCellInit
9888 * Desc : Uplink scheduler initialisation for cell.
9896 **********************************************************/
9897 static S16 rgSCHCmnUlCellInit(RgSchCellCb *cell,RgrCellCfg *cellCfg)
9900 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
9901 uint8_t maxUePerUlSf = cellCfg->maxUePerUlSf;
9903 /* Added configuration for maximum number of MSG3s */
9904 uint8_t maxMsg3PerUlSf = cellCfg->maxMsg3PerUlSf;
9906 uint8_t maxUlBwPerUe = cellCfg->maxUlBwPerUe;
9907 uint8_t sbSize = cellCfg->puschSubBand.size;
9915 uint16_t ulDlCfgIdx = cell->ulDlCfgIdx;
9916 /* [ccpu00127294]-MOD-Change the max Ul subfrms size in TDD */
9917 uint8_t maxSubfrms = 2 * rgSchTddNumUlSf[ulDlCfgIdx];
9918 uint8_t ulToDlMap[12] = {0}; /* maximum 6 Subframes in UL * 2 */
9919 uint8_t maxUlsubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
9920 [RGSCH_NUM_SUB_FRAMES-1];
9924 uint8_t maxSubfrms = RG_SCH_CMN_UL_NUM_SF;
9930 #if (defined(LTE_L2_MEAS) )
9931 Inst inst = cell->instIdx;
9932 #endif /* #if (defined(LTE_L2_MEAS) || defined(DEBUGP) */
9933 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
9936 cellUl->maxUeNewTxPerTti = cellCfg->maxUlUeNewTxPerTti;
9937 if (maxUePerUlSf == 0)
9939 maxUePerUlSf = RG_SCH_CMN_MAX_UE_PER_UL_SF;
9942 if (maxMsg3PerUlSf == 0)
9944 maxMsg3PerUlSf = RG_SCH_CMN_MAX_MSG3_PER_UL_SF;
9946 /* fixed the problem while sending raRsp
9947 * if maxMsg3PerUlSf is greater than
9948 * RGSCH_MAX_RNTI_PER_RARNTI
9950 if(maxMsg3PerUlSf > RGSCH_MAX_RNTI_PER_RARNTI)
9952 maxMsg3PerUlSf = RGSCH_MAX_RNTI_PER_RARNTI;
9955 if(maxMsg3PerUlSf > maxUePerUlSf)
9957 maxMsg3PerUlSf = maxUePerUlSf;
9960 /*cellUl->maxAllocPerUlSf = maxUePerUlSf + maxMsg3PerUlSf;*/
9961 /*Max MSG3 should be a subset of Max UEs*/
9962 cellUl->maxAllocPerUlSf = maxUePerUlSf;
9963 cellUl->maxMsg3PerUlSf = maxMsg3PerUlSf;
9965 cellUl->maxAllocPerUlSf = maxUePerUlSf;
9967 /* Fix: MUE_PERTTI_UL syed validating Cell Configuration */
9968 if (cellUl->maxAllocPerUlSf < cellUl->maxUeNewTxPerTti)
9970 DU_LOG("\nERROR --> SCH : FAILED: MaxUePerUlSf(%u) < MaxUlUeNewTxPerTti(%u)",
9971 cellUl->maxAllocPerUlSf,
9972 cellUl->maxUeNewTxPerTti);
9978 for(idx = 0; idx < RGSCH_SF_ALLOC_SIZE; idx++)
9980 for(idx = 0; idx < RGSCH_NUM_SUB_FRAMES; idx++)
9984 ret = rgSCHUtlAllocSBuf(inst, (Data **)&(cell->sfAllocArr[idx].
9985 ulUeInfo.ulAllocInfo), (cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc)));
9988 DU_LOG("\nERROR --> SCH : Memory allocation failed ");
9993 if (maxUlBwPerUe == 0)
9995 /* ccpu00139362- Setting to configured UL BW instead of MAX BW(100)*/
9996 maxUlBwPerUe = cell->bwCfg.ulTotalBw;
9998 cellUl->maxUlBwPerUe = maxUlBwPerUe;
10000 /* FOR RG_SCH_CMN_EXT_CP_SUP */
10001 if (!cellCfg->isCpUlExtend)
10003 cellUl->ulNumRePerRb = 12 * (14 - RGSCH_UL_SYM_DMRS_SRS);
10007 cellUl->ulNumRePerRb = 12 * (12 - RGSCH_UL_SYM_DMRS_SRS);
10010 if (sbSize != rgSchCmnMult235Tbl[sbSize].match)
10012 DU_LOG("\nERROR --> SCH : Invalid subband size %d", sbSize);
10015 //Setting the subband size to 4 which is size of VRBG in 5GTF
10017 sbSize = MAX_5GTF_VRBG_SIZE;
10020 maxSbPerUe = maxUlBwPerUe / sbSize;
10021 if (maxSbPerUe == 0)
10023 DU_LOG("\nERROR --> SCH : rgSCHCmnUlCellInit(): "
10024 "maxUlBwPerUe/sbSize is zero");
10027 cellUl->maxSbPerUe = rgSchCmnMult235Tbl[maxSbPerUe].prvMatch;
10029 /* CQI related updations */
10030 if ((!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->ulCmnCodeRate.ccchCqi))
10031 || (!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->trgUlCqi.trgCqi)))
10033 DU_LOG("\nERROR --> SCH : rgSCHCmnUlCellInit(): "
10037 cellUl->dfltUlCqi = cellCfg->ulCmnCodeRate.ccchCqi;
10039 /* Changed the logic to determine maxUlCqi.
10040 * For a 16qam UE, maxUlCqi is the CQI Index at which
10041 * efficiency is as close as possible to RG_SCH_MAX_CODE_RATE_16QAM
10042 * Refer to 36.213-8.6.1 */
10043 for (i = RG_SCH_CMN_UL_NUM_CQI - 1;i > 0; --i)
10045 DU_LOG("\nINFO --> SCH : CQI %u:iTbs %u",i,
10046 rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i]);
10047 #ifdef MAC_SCH_STATS
10048 /* ccpu00128489 ADD Update mcs in hqFailStats here instead of at CRC
10049 * since CQI to MCS mapping does not change. The only exception is for
10050 * ITBS = 19 where the MCS can be 20 or 21 based on the UE cat. We
10051 * choose 20, instead of 21, ie UE_CAT_3 */
10052 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
10053 RG_SCH_CMN_UL_TBS_TO_MCS(iTbs, hqFailStats.ulCqiStat[i - 1].mcs);
10056 for (i = RG_SCH_CMN_UL_NUM_CQI - 1; i != 0; --i)
10058 /* Fix for ccpu00123912*/
10059 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
10060 if (iTbs <= RGSCH_UL_16QAM_MAX_ITBS) /* corresponds to 16QAM */
10062 DU_LOG("\nINFO --> SCH : 16 QAM CQI %u", i);
10063 cellUl->max16qamCqi = i;
10069 /* Precompute useful values for RA msg3 */
10070 ret = rgSCHCmnPrecompEmtcMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
10071 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
10078 /* Precompute useful values for RA msg3 */
10079 ret = rgSCHCmnPrecompMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
10080 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
10086 cellUl->sbSize = sbSize;
10089 cellUl->numUlSubfrms = maxSubfrms;
10091 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->ulSfArr,
10092 cellUl->numUlSubfrms * sizeof(RgSchUlSf));
10096 cellUl->numUlSubfrms = 0;
10100 /* store the DL subframe corresponding to the PUSCH offset
10101 * in their respective UL subframe */
10102 for(i=0; i < RGSCH_NUM_SUB_FRAMES; i++)
10104 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][i] != 0)
10106 subfrm = (i + rgSchTddPuschTxKTbl[ulDlCfgIdx][i]) % \
10107 RGSCH_NUM_SUB_FRAMES;
10108 subfrm = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subfrm]-1;
10109 dlIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][i]-1;
10110 RGSCH_ARRAY_BOUND_CHECK( cell->instIdx, ulToDlMap, subfrm);
10111 ulToDlMap[subfrm] = dlIdx;
10114 /* Copy the information in the remaining UL subframes based
10115 * on number of HARQ processes */
10116 for(i=maxUlsubfrms; i < maxSubfrms; i++)
10118 subfrm = i-maxUlsubfrms;
10119 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, i);
10120 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, subfrm)
10121 ulToDlMap[i] = ulToDlMap[subfrm];
10125 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++)
10128 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
10130 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
10139 cell->ulAvailBw = bwAvail;
10142 numSb = bwAvail/sbSize;
10144 cell->dynCfiCb.bwInfo[cfi].startRb = rbStart;
10145 cell->dynCfiCb.bwInfo[cfi].numSb = numSb;
10148 if(0 == cell->dynCfiCb.maxCfi)
10150 DU_LOG("\nERROR --> SCH : Incorrect Default CFI(%u), maxCfi(%u), maxPucchRb(%d)",
10151 cellSch->cfiCfg.cfi, cell->dynCfiCb.maxCfi,
10152 cell->pucchCfg.maxPucchRb);
10158 cellUl->dmrsArrSize = cell->dynCfiCb.bwInfo[1].numSb;
10159 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->dmrsArr,
10160 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10165 for (i = 0; i < cellUl->dmrsArrSize; ++i)
10167 cellUl->dmrsArr[i] = cellCfg->puschSubBand.dmrs[i];
10170 /* Init subframes */
10171 for (i = 0; i < maxSubfrms; ++i)
10173 ret = rgSCHUtlUlSfInit(cell, &cellUl->ulSfArr[i], i,
10174 cellUl->maxAllocPerUlSf);
10177 for (; i != 0; --i)
10179 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[i-1]);
10181 /* ccpu00117052 - MOD - Passing double pointer
10182 for proper NULLP assignment*/
10183 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)(&(cellUl->dmrsArr)),
10184 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10186 /* ccpu00117052 - MOD - Passing double pointer
10187 for proper NULLP assignment*/
10188 rgSCHUtlFreeSBuf(cell->instIdx,
10189 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
10194 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cellUl);
10199 * @brief Scheduler processing on cell configuration.
10203 * Function : rgSCHCmnRgrCellCfg
10205 * This function does requisite initialisation
10206 * and setup for scheduler1 when a cell is
10209 * @param[in] RgSchCellCb *cell
10210 * @param[in] RgrCellCfg *cellCfg
10211 * @param[out] RgSchErrInfo *err
/* Common-scheduler entry for RGR cell configuration: allocates and seeds
 * the per-cell scheduler control block (cell->sc.sch), performs UL/DL
 * initialisation, then dispatches cell-config to the selected UL/DL
 * scheduler vtables (eMTC variants when cellCfg->emtcEnable is TRUE).
 * On failure err->errCause is set to RGSCHERR_SCH_CFG. */
10216 S16 rgSCHCmnRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cellCfg,RgSchErrInfo *err)
10219 RgSchCmnCell *cellSch;
10221 /* As part of RGR cell configuration, validate the CRGCellCfg
10222 * There is no trigger for crgCellCfg from SC1 */
10223 /* Removed failure check for Extended CP */
      /* Allocate the common scheduler's per-cell control block. */
10225 if (((ret = rgSCHUtlAllocSBuf(cell->instIdx,
10226 (Data**)&(cell->sc.sch), (sizeof(RgSchCmnCell)))) != ROK))
10228 DU_LOG("\nERROR --> SCH : Memory allocation FAILED");
10229 err->errCause = RGSCHERR_SCH_CFG;
10232 cellSch = (RgSchCmnCell *)(cell->sc.sch);
10233 cellSch->cfiCfg = cellCfg->cfiCfg;
10234 cellSch->trgUlCqi.trgCqi = cellCfg->trgUlCqi.trgCqi;
10235 /* Initialize the scheduler refresh timer queues */
10236 cellSch->tmrTqCp.nxtEnt = 0;
10237 cellSch->tmrTqCp.tmrLen = RG_SCH_CMN_NUM_REFRESH_Q;
10239 /* RACHO: Initialize the RACH dedicated Preamble Information */
10240 rgSCHCmnCfgRachDedPrm(cell);
10242 /* Initialize 'Np' value for each 'p' used for
10243 * HARQ ACK/NACK reception */
10244 rgSCHCmnDlNpValInit(cell);
      /* NOTE(review): the Np init appears twice in this excerpt; presumably
       * the two calls live in different conditional-compilation branches
       * (e.g. LTE_TDD vs FDD) - confirm against the full file. */
10247 /* Initialize 'Np' value for each 'p' used for
10248 * HARQ ACK/NACK reception */
10250 rgSCHCmnDlNpValInit(cell);
10253 /* Now perform uplink related initializations */
10254 ret = rgSCHCmnUlCellInit(cell, cellCfg);
10257 /* There is no downlink deinit to be performed */
10258 err->errCause = RGSCHERR_SCH_CFG;
10261 ret = rgSCHCmnDlRgrCellCfg(cell, cellCfg, err);
10264 err->errCause = RGSCHERR_SCH_CFG;
10267 /* DL scheduler has no initializations to make */
10268 /* As of now DL scheduler always returns ROK */
      /* Pre-compute DCI format sizes and CQI-to-aggregation-level mappings
       * (eMTC variants guarded by EMTC_ENABLE). */
10270 rgSCHCmnGetDciFrmtSizes(cell);
10271 rgSCHCmnGetCqiDciFrmt2AggrLvl(cell);
10273 rgSCHCmnGetEmtcDciFrmtSizes(cell);
10274 rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl(cell);
10275 #endif /* EMTC_ENABLE */
      /* Select the UL scheduler vtable: eMTC table when enabled, otherwise
       * the table entry indexed by the cell's configured UL scheduler type. */
10278 if(TRUE == cellCfg->emtcEnable)
10280 cellSch->apisEmtcUl = &rgSchEmtcUlSchdTbl[0];
10281 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
10288 cellSch->apisUl = &rgSchUlSchdTbl[RG_SCH_CMN_GET_UL_SCHED_TYPE(cell)];
10289 ret = cellSch->apisUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
      /* Same selection for the DL scheduler vtable. */
10295 if(TRUE == cellCfg->emtcEnable)
10297 cellSch->apisEmtcDl = &rgSchEmtcDlSchdTbl[0];
10298 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
10305 cellSch->apisDl = &rgSchDlSchdTbl[RG_SCH_CMN_GET_DL_SCHED_TYPE(cell)];
10307 /* Perform SPS specific initialization for the cell */
10308 ret = rgSCHCmnSpsCellCfg(cell, cellCfg, err);
10314 ret = cellSch->apisDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
10319 rgSCHCmnInitVars(cell);
10322 } /* rgSCHCmnRgrCellCfg*/
10326 * @brief This function handles the reconfiguration of cell.
10330 * Function: rgSCHCmnRgrCellRecfg
10331 * Purpose: Update the reconfiguration parameters.
10333 * Invoked by: Scheduler
10335 * @param[in] RgSchCellCb* cell
/* Common-scheduler handling for RGR cell reconfiguration. Each
 * RGR_CELL_*_RECFG bit in recfg->recfgTypes selects one independent step:
 * UL common code rate (with Msg3 precompute), DL common code rate,
 * per-scheduler UL/DL recfg (eMTC or regular), DLFS and power recfg. */
10339 S16 rgSCHCmnRgrCellRecfg(RgSchCellCb *cell,RgrCellRecfg *recfg,RgSchErrInfo *err)
10342 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10343 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
10346 if (recfg->recfgTypes & RGR_CELL_UL_CMNRATE_RECFG)
      /* Remember the old default CQI so it can be restored if Msg3
       * precomputation with the new CQI fails. */
10348 uint8_t oldCqi = cellUl->dfltUlCqi;
10349 if (!RG_SCH_CMN_UL_IS_CQI_VALID(recfg->ulCmnCodeRate.ccchCqi))
10351 err->errCause = RGSCHERR_SCH_CFG;
10352 DU_LOG("\nERROR --> SCH : rgSCHCmnRgrCellRecfg(): "
10356 cellUl->dfltUlCqi = recfg->ulCmnCodeRate.ccchCqi;
10357 ret = rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
10358 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
      /* Rollback path: dfltUlCqi is restored to oldCqi, yet the
       * recomputation below still passes the NEW ccchCqi.
       * NOTE(review): looks inconsistent - verify intended behaviour
       * against the full file (elided lines may explain it). */
10361 cellUl->dfltUlCqi = oldCqi;
10362 rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
10363 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
10368 if (recfg->recfgTypes & RGR_CELL_DL_CMNRATE_RECFG)
10370 if (rgSCHCmnDlCnsdrCmnRt(cell, &recfg->dlCmnCodeRate) != ROK)
10372 err->errCause = RGSCHERR_SCH_CFG;
      /* Dispatch cell recfg to eMTC or regular UL/DL scheduler hooks. */
10378 if(TRUE == cell->emtcEnable)
10380 /* Invoke UL sched for cell Recfg */
10381 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
10387 /* Invoke DL sched for cell Recfg */
10388 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
10397 /* Invoke UL sched for cell Recfg */
10398 ret = cellSch->apisUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
10404 /* Invoke DL sched for cell Recfg */
10405 ret = cellSch->apisDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
      /* DL frequency-selective scheduling reconfiguration. */
10412 if (recfg->recfgTypes & RGR_CELL_DLFS_RECFG)
10414 ret = cellSch->apisDlfs->rgSCHDlfsCellRecfg(cell, recfg, err);
10419 cellSch->dl.isDlFreqSel = recfg->dlfsRecfg.isDlFreqSel;
10422 if (recfg->recfgTypes & RGR_CELL_PWR_RECFG)
10424 ret = rgSCHPwrCellRecfg(cell, recfg);
10434 /***********************************************************
10436 * Func : rgSCHCmnUlCellDeinit
10438 * Desc : Uplink scheduler de-initialisation for cell.
10446 **********************************************************/
/* Uplink scheduler de-initialisation for a cell: frees the per-subframe
 * UL allocation info, any outstanding L2 measurement control blocks,
 * the DMRS array and the UL subframe array. All frees go through
 * rgSCHUtlFreeSBuf with a double pointer so the freed pointer is also
 * NULLP-ed (ccpu00117052). */
10447 static Void rgSCHCmnUlCellDeinit(RgSchCellCb *cell)
10449 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
10452 uint8_t maxSubfrms = cellUl->numUlSubfrms;
10455 CmLList *lnk = NULLP;
10456 RgSchL2MeasCb *measCb;
      /* NOTE(review): the two loop headers below are alternatives from
       * different compile-time configurations (elided #if/#else) - the
       * bound differs (RGSCH_SF_ALLOC_SIZE vs RGSCH_NUM_SUB_FRAMES). */
10460 for(ulSfIdx = 0; ulSfIdx < RGSCH_SF_ALLOC_SIZE; ulSfIdx++)
10462 for(ulSfIdx = 0; ulSfIdx < RGSCH_NUM_SUB_FRAMES; ulSfIdx++)
10465 if(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo != NULLP)
10467 /* ccpu00117052 - MOD - Passing double pointer
10468 for proper NULLP assignment*/
10469 rgSCHUtlFreeSBuf(cell->instIdx,
10470 (Data **)(&(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo)),
10471 cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc));
10473 /* ccpu00117052 - DEL - removed explicit NULLP assignment
10474 as it is done in above utility function */
10477 /* Free the memory allocated to measCb */
      /* Drain the cell's L2 measurement list, freeing each control block
       * after unlinking it. (Advance of lnk happens in elided lines.) */
10478 lnk = cell->l2mList.first;
10479 while(lnk != NULLP)
10481 measCb = (RgSchL2MeasCb *)lnk->node;
10482 cmLListDelFrm(&cell->l2mList, lnk);
10484 /* ccpu00117052 - MOD - Passing double pointer
10485 for proper NULLP assignment*/
10486 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&measCb,\
10487 sizeof(RgSchL2MeasCb));
10490 if (cellUl->dmrsArr != NULLP)
10492 /* ccpu00117052 - MOD - Passing double pointer
10493 for proper NULLP assignment*/
10494 rgSCHUtlFreeSBuf(cell->instIdx,(Data **)(&(cellUl->dmrsArr)),
10495 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10497 /* De-init subframes */
      /* Again two alternative loop bounds from elided conditional
       * compilation (maxSubfrms vs RG_SCH_CMN_UL_NUM_SF). */
10499 for (ulSfIdx = 0; ulSfIdx < maxSubfrms; ++ulSfIdx)
10501 for (ulSfIdx = 0; ulSfIdx < RG_SCH_CMN_UL_NUM_SF; ++ulSfIdx)
10504 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[ulSfIdx]);
10508 if (cellUl->ulSfArr != NULLP)
10510 /* ccpu00117052 - MOD - Passing double pointer
10511 for proper NULLP assignment*/
10512 rgSCHUtlFreeSBuf(cell->instIdx,
10513 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
10521 * @brief Scheduler processing for cell delete.
10525 * Function : rgSCHCmnCellDel
10527 * This functions de-initialises and frees memory
10528 * taken up by scheduler1 for the entire cell.
10530 * @param[in] RgSchCellCb *cell
/* Scheduler processing for cell deletion: de-initialises UL scheduler
 * state, releases UL/DL (and eMTC) scheduler-specific cell resources via
 * their vtables, tears down DLFS, power and SPS state, and finally frees
 * the per-cell scheduler control block itself. No-op if the control
 * block was never allocated. */
10533 Void rgSCHCmnCellDel(RgSchCellCb *cell)
10535 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
      /* Nothing to free if the scheduler control block does not exist. */
10540 if (cellSch == NULLP)
10544 /* Perform the deinit for the UL scheduler */
10545 rgSCHCmnUlCellDeinit(cell);
10547 if(TRUE == cell->emtcEnable)
10549 if (cellSch->apisEmtcUl)
10551 cellSch->apisEmtcUl->rgSCHFreeUlCell(cell);
10555 if (cellSch->apisUl)
10557 /* api pointer checks added (here and below in
10558 * this function). pl check. - antriksh */
10559 cellSch->apisUl->rgSCHFreeUlCell(cell);
10562 /* Perform the deinit for the DL scheduler */
10563 cmLListInit(&cellSch->dl.taLst);
10564 if (cellSch->apisDl)
10566 cellSch->apisDl->rgSCHFreeDlCell(cell);
10569 if (cellSch->apisEmtcDl)
10571 rgSCHEmtcInitTaLst(&cellSch->dl);
10573 cellSch->apisEmtcDl->rgSCHFreeDlCell(cell);
10577 /* DLFS de-initialization */
10578 if (cellSch->dl.isDlFreqSel && cellSch->apisDlfs)
10580 cellSch->apisDlfs->rgSCHDlfsCellDel(cell);
10583 rgSCHPwrCellDel(cell);
10585 rgSCHCmnSpsCellDel(cell);
10588 /* ccpu00117052 - MOD - Passing double pointer
10589 for proper NULLP assignment*/
10590 rgSCHUtlFreeSBuf(cell->instIdx,
10591 (Data**)(&(cell->sc.sch)), (sizeof(RgSchCmnCell)));
10593 } /* rgSCHCmnCellDel */
10597 * @brief This function validates QOS parameters for DL.
10601 * Function: rgSCHCmnValidateDlQos
10602 * Purpose: This function validates QOS parameters for DL.
10604 * Invoked by: Scheduler
10606 * @param[in] CrgLchQosCfg *dlQos
/* Validates DL QoS parameters for a logical channel: the QCI must lie in
 * [RG_SCH_CMN_MIN_QCI, RG_SCH_CMN_MAX_QCI], and for GBR QCIs the MBR must
 * be non-zero and not less than the GBR. Returns S16 status (failure
 * branches elided in this excerpt). */
10610 static S16 rgSCHCmnValidateDlQos(RgrLchQosCfg *dlQos)
10612 uint8_t qci = dlQos->qci;
10613 if ( qci < RG_SCH_CMN_MIN_QCI || qci > RG_SCH_CMN_MAX_QCI )
      /* GBR bearers additionally require a consistent MBR/GBR pair. */
10618 if ((qci >= RG_SCH_CMN_GBR_QCI_START) &&
10619 (qci <= RG_SCH_CMN_GBR_QCI_END))
10621 if ((dlQos->mbr == 0) || (dlQos->mbr < dlQos->gbr))
10630 * @brief Scheduler invocation on logical channel addition.
10634 * Function : rgSCHCmnRgrLchCfg
10636 * This functions does required processing when a new
10637 * (dedicated) logical channel is added. Assumes lcg
10638 * pointer in ulLc is set.
10640 * @param[in] RgSchCellCb *cell
10641 * @param[in] RgSchUeCb *ue
10642 * @param[in] RgSchDlLcCb *dlLc
10643 * @param[int] RgrLchCfg *lcCfg
10644 * @param[out] RgSchErrInfo *err
/* Configures a new dedicated logical channel in the scheduler: allocates
 * the per-LC DL service block (dlLc->sch), validates and stores DL QoS
 * (QCI, priority, GBR/MBR scaled to the scheduler refresh period), then
 * dispatches to the DL and UL scheduler LC-config hooks (eMTC variants
 * for eMTC UEs) and to SPS when enabled. On failure err->errCause is
 * set to RGSCHERR_SCH_CFG. (Parameter list elided in this excerpt.) */
10649 S16 rgSCHCmnRgrLchCfg
10660 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10663 ret = rgSCHUtlAllocSBuf(cell->instIdx,
10664 (Data**)&((dlLc)->sch), (sizeof(RgSchCmnDlSvc)));
10667 DU_LOG("\nERROR --> SCH : rgSCHCmnRgrLchCfg(): "
10668 "SCH struct alloc failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10669 err->errCause = RGSCHERR_SCH_CFG;
      /* Non-DCCH channels carry explicit QoS which must be validated. */
10672 if(lcCfg->lcType != CM_LTE_LCH_DCCH)
10674 ret = rgSCHCmnValidateDlQos(&lcCfg->dlInfo.dlQos);
10677 DU_LOG("\nERROR --> SCH : rgSchCmnCrgLcCfg(): "
10678 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10679 err->errCause = RGSCHERR_SCH_CFG;
10682 /* Perform DL service activation in the scheduler */
10683 ((RgSchCmnDlSvc *)(dlLc->sch))->qci = lcCfg->dlInfo.dlQos.qci;
10684 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = rgSchCmnDlQciPrio[lcCfg->dlInfo.dlQos.qci - 1];
      /* GBR/MBR are configured per 100ms and scaled to the scheduler's
       * refresh period (RG_SCH_CMN_REFRESH_TIME). */
10685 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcCfg->dlInfo.dlQos.gbr * \
10686 RG_SCH_CMN_REFRESH_TIME)/100;
10687 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcCfg->dlInfo.dlQos.mbr * \
10688 RG_SCH_CMN_REFRESH_TIME)/100;
10692 /*assigning highest priority to DCCH */
10693 ((RgSchCmnDlSvc *)(dlLc->sch))->prio=RG_SCH_CMN_DCCH_PRIO;
10696 dlLc->lcType=lcCfg->lcType;
      /* DL then UL scheduler LC-config hooks; eMTC vtable for eMTC UEs. */
10699 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
10701 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcCfg(cell, ue,dlLc ,lcCfg, err);
10710 ret = cellSch->apisDl->rgSCHRgrDlLcCfg(cell, ue, dlLc, lcCfg, err);
10718 if(TRUE == ue->isEmtcUe)
10720 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
10729 ret = cellSch->apisUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
10739 rgSCHSCellDlLcCfg(cell, ue, dlLc);
10745 if(lcCfg->dlInfo.dlSpsCfg.isSpsEnabled)
10747 /* Invoke SPS module if SPS is enabled for the service */
10748 ret = rgSCHCmnSpsDlLcCfg(cell, ue, dlLc, lcCfg, err);
10751 DU_LOG("\nERROR --> SCH : rgSchCmnRgrLchCfg(): "
10752 "SPS configuration failed for DL LC for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10753 err->errCause = RGSCHERR_SCH_CFG;
10763 * @brief Scheduler invocation on logical channel reconfiguration.
10767 * Function : rgSCHCmnRgrLchRecfg
10769 * This functions does required processing when an existing
10770 * (dedicated) logical channel is reconfigured. Assumes lcg
10771 * pointer in ulLc is set to the old value.
10772 * Independent of whether new LCG is meant to be configured,
10773 * the new LCG scheduler information is accessed and possibly modified.
10775 * @param[in] RgSchCellCb *cell
10776 * @param[in] RgSchUeCb *ue
10777 * @param[in] RgSchDlLcCb *dlLc
10778 * @param[int] RgrLchRecfg *lcRecfg
10779 * @param[out] RgSchErrInfo *err
/* Reconfigures an existing dedicated logical channel: re-validates DL
 * QoS, rejects a QCI (priority) change, rescales GBR/MBR to the refresh
 * period, and dispatches to the DL/UL scheduler LC-recfg hooks (eMTC
 * variants for eMTC UEs). SPS recfg on an existing DL LC is logged as
 * unsupported. (Parameter list partially elided in this excerpt.) */
10784 S16 rgSCHCmnRgrLchRecfg
10789 RgrLchRecfg *lcRecfg,
10794 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10797 if(dlLc->lcType != CM_LTE_LCH_DCCH)
10799 ret = rgSCHCmnValidateDlQos(&lcRecfg->dlRecfg.dlQos);
10803 DU_LOG("\nERROR --> SCH : DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10804 err->errCause = RGSCHERR_SCH_CFG;
      /* QCI determines the LC priority; changing it is not supported. */
10807 if (((RgSchCmnDlSvc *)(dlLc->sch))->qci != lcRecfg->dlRecfg.dlQos.qci)
10809 DU_LOG("\nERROR --> SCH : Qci, hence lc Priority change "
10810 "not supported for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10811 err->errCause = RGSCHERR_SCH_CFG;
      /* GBR/MBR configured per 100ms, scaled to the refresh period. */
10814 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcRecfg->dlRecfg.dlQos.gbr * \
10815 RG_SCH_CMN_REFRESH_TIME)/100;
10816 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcRecfg->dlRecfg.dlQos.mbr * \
10817 RG_SCH_CMN_REFRESH_TIME)/100;
10821 /*assigning highest priority to DCCH */
10822 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = RG_SCH_CMN_DCCH_PRIO;
      /* DL/UL scheduler LC-recfg dispatch; eMTC vtables for eMTC UEs. */
10826 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
10828 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10833 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
10842 ret = cellSch->apisDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10847 ret = cellSch->apisUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
10855 if (lcRecfg->recfgTypes & RGR_DL_LC_SPS_RECFG)
10857 /* Invoke SPS module if SPS is enabled for the service */
10858 if(lcRecfg->dlRecfg.dlSpsRecfg.isSpsEnabled)
10860 ret = rgSCHCmnSpsDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10863 DU_LOG("\nERROR --> SCH : SPS re-configuration not "
10864 "supported for dlLC Ignore this CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10875 * @brief Scheduler invocation on logical channel group configuration.
10879 * Function : rgSCHCmnRgrLcgCfg
10881 * This function does required processing when a new
10882 * (dedicated) logical channel group is configured. Assumes lcg
10883 * pointer in ulLc is set.
10885 * @param[in] RgSchCellCb *cell,
10886 * @param[in] RgSchUeCb *ue,
10887 * @param[in] RgSchLcgCb *lcg,
10888 * @param[in] RgrLcgCfg *lcgCfg,
10889 * @param[out] RgSchErrInfo *err
/* Configures an uplink logical channel group (LCG): stores the LCG's
 * GBR and delta-MBR (scaled from per-100ms to the scheduler refresh
 * period), dispatches to the UL scheduler's LCG-config hook (eMTC
 * variant for eMTC UEs), and registers GBR LCGs with MAC.
 * (Parameter list elided in this excerpt.) */
10894 S16 rgSCHCmnRgrLcgCfg
10904 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10905 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCfg->ulInfo.lcgId].sch));
10908 ulLcg->cfgdGbr = (lcgCfg->ulInfo.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
10909 ulLcg->effGbr = ulLcg->cfgdGbr;
      /* deltaMbr is the headroom above GBR (MBR - GBR), same scaling. */
10910 ulLcg->deltaMbr = ((lcgCfg->ulInfo.mbr - lcgCfg->ulInfo.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
10911 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
10914 if(TRUE == ue->isEmtcUe)
10916 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
10925 ret = cellSch->apisUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
10931 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
10933 /* Indicate MAC that this LCG is GBR LCG */
10934 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcgCfg->ulInfo.lcgId, TRUE);
10940 * @brief Scheduler invocation on logical channel group reconfiguration.
10944 * Function : rgSCHCmnRgrLcgRecfg
10946 * This function does required processing when an existing
10947 * (dedicated) logical channel group is reconfigured. Assumes lcg
10948 * pointer in ulLc is set.
10950 * @param[in] RgSchCellCb *cell,
10951 * @param[in] RgSchUeCb *ue,
10952 * @param[in] RgSchLcgCb *lcg,
10953 * @param[in] RgrLcgRecfg *reCfg,
10954 * @param[out] RgSchErrInfo *err
/* Reconfigures an uplink logical channel group: updates GBR/delta-MBR
 * (scaled to the refresh period), dispatches to the UL scheduler's
 * LCG-recfg hook (eMTC variant for eMTC UEs), and (re)registers the
 * LCG's GBR status with MAC - TRUE if it is now GBR, FALSE otherwise
 * (RAB modification). (Parameter list partially elided.) */
10959 S16 rgSCHCmnRgrLcgRecfg
10964 RgrLcgRecfg *reCfg,
10969 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10970 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[reCfg->ulRecfg.lcgId].sch));
10973 ulLcg->cfgdGbr = (reCfg->ulRecfg.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
10974 ulLcg->effGbr = ulLcg->cfgdGbr;
10975 ulLcg->deltaMbr = ((reCfg->ulRecfg.mbr - reCfg->ulRecfg.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
10976 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
10979 if(TRUE == ue->isEmtcUe)
10981 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
10990 ret = cellSch->apisUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
10996 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
10998 /* Indicate MAC that this LCG is GBR LCG */
10999 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, TRUE);
11003 /* In case of RAB modification */
11004 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, FALSE);
11009 /***********************************************************
11011 * Func : rgSCHCmnRgrLchDel
11013 * Desc : Scheduler handling for a (dedicated)
11014 * uplink logical channel being deleted.
11021 **********************************************************/
/* Deletes a dedicated uplink logical channel from the scheduler by
 * forwarding to the UL scheduler's LchDel hook - eMTC vtable for eMTC
 * UEs, regular vtable otherwise (else-branch lines elided here). */
11022 S16 rgSCHCmnRgrLchDel(RgSchCellCb *cell,RgSchUeCb *ue,CmLteLcId lcId,uint8_t lcgId)
11024 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11026 if(TRUE == ue->isEmtcUe)
11028 cellSch->apisEmtcUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
11033 cellSch->apisUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
11038 /***********************************************************
11040 * Func : rgSCHCmnLcgDel
11042 * Desc : Scheduler handling for a (dedicated)
11043 * uplink logical channel group being deleted.
11051 **********************************************************/
/* Scheduler handling for deletion of an uplink logical channel group:
 * deregisters a GBR LCG from MAC, informs SPS when UL SPS is enabled,
 * zeroes the LCG's rate/BS bookkeeping, and invokes the UL scheduler's
 * FreeUlLcg hook. The control block itself is freed at UE deletion. */
11052 Void rgSCHCmnLcgDel(RgSchCellCb *cell,RgSchUeCb *ue,RgSchLcgCb *lcg)
11054 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11055 RgSchCmnLcg *lcgCmn = RG_SCH_CMN_GET_UL_LCG(lcg);
11057 if (lcgCmn == NULLP)
      /* Tell MAC this LCG is no longer a GBR LCG. */
11062 if (RGSCH_IS_GBR_BEARER(lcgCmn->cfgdGbr))
11064 /* Indicate MAC that this LCG is GBR LCG */
11065 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcg->lcgId, FALSE);
11069 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
11071 rgSCHCmnSpsUlLcgDel(cell, ue, lcg);
11073 #endif /* LTEMAC_SPS */
11075 lcgCmn->effGbr = 0;
11076 lcgCmn->reportedBs = 0;
11077 lcgCmn->cfgdGbr = 0;
11078 /* set lcg bs to 0. Deletion of control block happens
11079 * at the time of UE deletion. */
11082 if(TRUE == ue->isEmtcUe)
11084 cellSch->apisEmtcUl->rgSCHFreeUlLcg(cell, ue, lcg);
11089 cellSch->apisUl->rgSCHFreeUlLcg(cell, ue, lcg);
11096 * @brief This function deletes a service from scheduler.
11100 * Function: rgSCHCmnFreeDlLc
11101 * Purpose: This function is made available through a FP for
11102 * making scheduler aware of a service being deleted from UE.
11104 * Invoked by: BO and Scheduler
11106 * @param[in] RgSchCellCb* cell
11107 * @param[in] RgSchUeCb* ue
11108 * @param[in] RgSchDlLcCb* svc
/* Deletes a DL service (logical channel) from the scheduler: invokes
 * the DL scheduler's FreeDlLc hook (eMTC variant for eMTC UEs), cleans
 * up SCell and SPS state for the LC, then frees the per-LC scheduler
 * block (svc->sch). No-op when svc->sch was never allocated. */
11112 Void rgSCHCmnFreeDlLc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlLcCb *svc)
11114 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11115 if (svc->sch == NULLP)
11120 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
11122 cellSch->apisEmtcDl->rgSCHFreeDlLc(cell, ue, svc);
11127 cellSch->apisDl->rgSCHFreeDlLc(cell, ue, svc);
11133 rgSCHSCellDlLcDel(cell, ue, svc);
11138 /* If SPS service, invoke SPS module */
11139 if (svc->dlLcSpsCfg.isSpsEnabled)
11141 rgSCHCmnSpsDlLcDel(cell, ue, svc);
11145 /* ccpu00117052 - MOD - Passing double pointer
11146 for proper NULLP assignment*/
11147 rgSCHUtlFreeSBuf(cell->instIdx,
11148 (Data**)(&(svc->sch)), (sizeof(RgSchCmnDlSvc)));
11151 rgSCHLaaDeInitDlLchCb(cell, svc);
11160 * @brief This function Processes the Final Allocations
11161 * made by the RB Allocator against the requested
11162 * CCCH SDURetx Allocations.
11166 * Function: rgSCHCmnDlCcchSduRetxFnlz
11167 * Purpose: This function Processes the Final Allocations
11168 * made by the RB Allocator against the requested
11169 * CCCH Retx Allocations.
11170 * Scans through the scheduled list of ccchSdu retrans
11171 * fills the corresponding pdcch, adds the hqProc to
11172 * the corresponding SubFrm and removes the hqP from
11175 * Invoked by: Common Scheduler
11177 * @param[in] RgSchCellCb *cell
11178 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for CCCH SDU retransmissions: for each
 * scheduled HARQ process, fills PDCCH/HARQ info, removes it from the
 * cell's ccchSduRetxLst, and resets the UE's temporary allocation
 * control block; non-scheduled entries only get the reset (avoids stale
 * dlAllocCb data causing rbNum wraparound crashes). */
11182 static Void rgSCHCmnDlCcchSduRetxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11185 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11186 RgSchDlRbAlloc *rbAllocInfo;
11187 RgSchDlHqProcCb *hqP;
11190 /* Traverse through the Scheduled Retx List */
11191 node = allocInfo->ccchSduAlloc.schdCcchSduRetxLst.first;
11194 hqP = (RgSchDlHqProcCb *)(node->node);
11196 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
11198 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11200 /* Remove the HqP from cell's ccchSduRetxLst */
11201 cmLListDelFrm(&cmnCellDl->ccchSduRetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
11202 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
11204 /* Fix: syed dlAllocCb reset should be performed.
11205 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11206 rgSCHCmnDlUeResetTemp(ue, hqP);
      /* Non-scheduled retx entries: reset temporary alloc state only. */
11208 /* Fix: syed dlAllocCb reset should be performed.
11209 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11210 node = allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst.first;
11213 hqP = (RgSchDlHqProcCb *)(node->node);
11216 /* reset the UE allocation Information */
11217 rgSCHCmnDlUeResetTemp(ue, hqP);
11223 * @brief This function Processes the Final Allocations
11224 * made by the RB Allocator against the requested
11225 * CCCH Retx Allocations.
11229 * Function: rgSCHCmnDlCcchRetxFnlz
11230 * Purpose: This function Processes the Final Allocations
11231 * made by the RB Allocator against the requested
11232 * CCCH Retx Allocations.
11233 * Scans through the scheduled list of msg4 retrans
11234 * fills the corresponding pdcch, adds the hqProc to
11235 * the corresponding SubFrm and removes the hqP from
11238 * Invoked by: Common Scheduler
11240 * @param[in] RgSchCellCb *cell
11241 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for Msg4 (CCCH) retransmissions: for
 * each scheduled HARQ process, fills PDCCH/HARQ info from the RA control
 * block's rbAllocInfo, removes the process from the cell's msg4RetxLst,
 * and zeroes the allocation control block; non-scheduled entries only
 * get the reset (prevents stale rbNum wraparound). */
11245 static Void rgSCHCmnDlCcchRetxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11248 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11249 RgSchDlRbAlloc *rbAllocInfo;
11250 RgSchDlHqProcCb *hqP;
11253 /* Traverse through the Scheduled Retx List */
11254 node = allocInfo->msg4Alloc.schdMsg4RetxLst.first;
11257 hqP = (RgSchDlHqProcCb *)(node->node);
11258 raCb = hqP->hqE->raCb;
11259 rbAllocInfo = &raCb->rbAllocInfo;
11261 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11263 /* Remove the HqP from cell's msg4RetxLst */
11264 cmLListDelFrm(&cmnCellDl->msg4RetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
11265 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
11266 /* Fix: syed dlAllocCb reset should be performed.
11267 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11268 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
11269 rgSCHCmnDlHqPResetTemp(hqP);
      /* Non-scheduled Msg4 retx: reset allocation state only. */
11271 /* Fix: syed dlAllocCb reset should be performed.
11272 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11273 node = allocInfo->msg4Alloc.nonSchdMsg4RetxLst.first;
11276 hqP = (RgSchDlHqProcCb *)(node->node);
11277 raCb = hqP->hqE->raCb;
11279 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
11280 rgSCHCmnDlHqPResetTemp(hqP);
11287 * @brief This function Processes the Final Allocations
11288 * made by the RB Allocator against the requested
11289 * CCCH SDU tx Allocations.
11293 * Function: rgSCHCmnDlCcchSduTxFnlz
11294 * Purpose: This function Processes the Final Allocations
11295 * made by the RB Allocator against the requested
11296 * CCCH tx Allocations.
11297 * Scans through the scheduled list of CCCH SDU trans
11298 * fills the corresponding pdcch, adds the hqProc to
11299 * the corresponding SubFrm and removes the hqP from
11302 * Invoked by: Common Scheduler
11304 * @param[in] RgSchCellCb *cell
11305 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for first-time CCCH SDU transmissions:
 * scheduled UEs get PDCCH/HARQ filled, are removed from the cell's
 * ccchSduUeLst, have their CCCH BO cleared and the LC data indicated to
 * DHM (no contention-resolution CE for CCCH SDU); non-scheduled UEs get
 * their HARQ process (TB0 only) released and temp alloc state reset. */
11309 static Void rgSCHCmnDlCcchSduTxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11313 RgSchDlRbAlloc *rbAllocInfo;
11314 RgSchDlHqProcCb *hqP;
11315 RgSchLchAllocInfo lchSchdData;
11317 /* Traverse through the Scheduled Retx List */
11318 node = allocInfo->ccchSduAlloc.schdCcchSduTxLst.first;
11321 hqP = (RgSchDlHqProcCb *)(node->node);
11322 ueCb = hqP->hqE->ue;
11324 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
11326 /* fill the pdcch and HqProc */
11327 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11329 /* Remove the raCb from cell's toBeSchdLst */
11330 cmLListDelFrm(&cell->ccchSduUeLst, &ueCb->ccchSduLnk);
11331 ueCb->ccchSduLnk.node = (PTR)NULLP;
11333 /* Fix : Resetting this required to avoid complication
11334 * in reestablishment case */
11335 ueCb->dlCcchInfo.bo = 0;
11337 /* Indicate DHM of the CCCH LC scheduling */
      /* CCCH SDU tx carries no contention-resolution CE; lcId 0 is CCCH
       * and the scheduled bytes exclude the MAC header. */
11338 hqP->tbInfo[0].contResCe = NOTPRSNT;
11339 lchSchdData.lcId = 0;
11340 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
11341 (RGSCH_MSG4_HDRSIZE);
11342 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
11344 /* Fix: syed dlAllocCb reset should be performed.
11345 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11346 rgSCHCmnDlUeResetTemp(ueCb, hqP);
      /* Non-scheduled CCCH SDU tx: release the HARQ process and reset. */
11348 /* Fix: syed dlAllocCb reset should be performed.
11349 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11350 node = allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst.first;
11353 hqP = (RgSchDlHqProcCb *)(node->node);
11354 ueCb = hqP->hqE->ue;
11356 /* Release HqProc */
11357 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
11358 /*Fix: Removing releasing of TB1 as it will not exist for CCCH SDU and hence caused a crash*/
11359 /*rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
11360 /* reset the UE allocation Information */
11361 rgSCHCmnDlUeResetTemp(ueCb, hqP);
11368 * @brief This function Processes the Final Allocations
11369 * made by the RB Allocator against the requested
11370 * CCCH tx Allocations.
11374 * Function: rgSCHCmnDlCcchTxFnlz
11375 * Purpose: This function Processes the Final Allocations
11376 * made by the RB Allocator against the requested
11377 * CCCH tx Allocations.
11378 * Scans through the scheduled list of msg4 trans
11379 * fills the corresponding pdcch, adds the hqProc to
11380 * the corresponding SubFrm and removes the hqP from
11383 * Invoked by: Common Scheduler
11385 * @param[in] RgSchCellCb *cell
11386 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for first-time Msg4 (CCCH) transmissions:
 * scheduled RA control blocks get PDCCH/HARQ filled, are removed from the
 * RA scheduling list, and DHM is informed of the CCCH LC data - always
 * with a contention-resolution CE, with the LC added only when there is
 * actual CCCH BO. Non-scheduled entries release the HARQ process (TB0
 * only) and reset allocation state. */
11390 static Void rgSCHCmnDlCcchTxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11394 RgSchDlRbAlloc *rbAllocInfo;
11395 RgSchDlHqProcCb *hqP;
11396 RgSchLchAllocInfo lchSchdData;
11398 /* Traverse through the Scheduled Retx List */
11399 node = allocInfo->msg4Alloc.schdMsg4TxLst.first;
11402 hqP = (RgSchDlHqProcCb *)(node->node);
11403 raCb = hqP->hqE->raCb;
11405 rbAllocInfo = &raCb->rbAllocInfo;
11407 /* fill the pdcch and HqProc */
11408 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11409 /* MSG4 Fix Start */
11411 rgSCHRamRmvFrmRaInfoSchdLst(cell, raCb);
11414 /* Indicate DHM of the CCCH LC scheduling */
      /* Scheduled bytes exclude MAC header and the contention-resolution
       * CE, both of which Msg4 always carries. */
11415 lchSchdData.lcId = 0;
11416 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
11417 (RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE);
11418 /* Transmitting presence of cont Res CE across MAC-SCH interface to
11419 * identify CCCH SDU transmissions which need to be done
11421 * contention resolution CE*/
11422 hqP->tbInfo[0].contResCe = PRSNT_NODEF;
11423 /*Dont add lc if only cont res CE is being transmitted*/
11424 if(raCb->dlCcchInfo.bo)
11426 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
11431 /* Fix: syed dlAllocCb reset should be performed.
11432 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11433 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
11434 rgSCHCmnDlHqPResetTemp(hqP);
      /* Non-scheduled Msg4 tx: release HARQ TB0 and reset alloc state. */
11436 node = allocInfo->msg4Alloc.nonSchdMsg4TxLst.first;
11439 hqP = (RgSchDlHqProcCb *)(node->node);
11440 raCb = hqP->hqE->raCb;
11442 rbAllocInfo = &raCb->rbAllocInfo;
11443 /* Release HqProc */
11444 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
11445 /*Fix: Removing releasing of TB1 as it will not exist for MSG4 and hence caused a crash*/
11446 /* rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
11447 /* reset the UE allocation Information */
11448 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
11449 rgSCHCmnDlHqPResetTemp(hqP);
11456 * @brief This function calculates the BI Index to be sent in the Bi header
11460 * Function: rgSCHCmnGetBiIndex
11461 * Purpose: This function Processes utilizes the previous BI time value
11462 * calculated and the difference last BI sent time and current time. To
11463 * calculate the latest BI Index. It also considers the how many UE's
11464 * Unserved in this subframe.
11466 * Invoked by: Common Scheduler
11468 * @param[in] RgSchCellCb *cell
11469 * @param[in] uint32_t ueCount
/* Computes the Backoff Indicator (BI) index to send in the RAR BI
 * subheader. Decays the previously signalled BI value by the subframes
 * elapsed since it was last sent, scales by the number of unserved UEs
 * (RG_SCH_CMN_GET_BI_VAL), then maps the resulting millisecond value to
 * an index in rgSchCmnBiTbl (36.321 Table 7.2-1 backoff values). */
11473 uint8_t rgSCHCmnGetBiIndex(RgSchCellCb *cell,uint32_t ueCount)
11475 S16 prevVal = 0; /* To Store Intermediate Value */
11476 uint16_t newBiVal = 0; /* To store Bi Value in millisecond */
11478 uint16_t timeDiff = 0;
      /* Decay only if a BI was previously signalled. */
11481 if (cell->biInfo.prevBiTime != 0)
11484 if(cell->emtcEnable == TRUE)
11486 timeDiff =(RGSCH_CALC_SF_DIFF_EMTC(cell->crntTime, cell->biInfo.biTime));
11491 timeDiff =(RGSCH_CALC_SF_DIFF(cell->crntTime, cell->biInfo.biTime));
11494 prevVal = cell->biInfo.prevBiTime - timeDiff;
11500 newBiVal = RG_SCH_CMN_GET_BI_VAL(prevVal,ueCount);
11501 /* To be used next time when BI is calculated */
11503 if(cell->emtcEnable == TRUE)
11505 RGSCHCPYTIMEINFO_EMTC(cell->crntTime, cell->biInfo.biTime)
11510 RGSCHCPYTIMEINFO(cell->crntTime, cell->biInfo.biTime)
11513 /* Search the actual BI Index from table Backoff Parameters Value and
11514 * return that Index */
      /* Linear scan for the first table entry exceeding newBiVal. */
11517 if (rgSchCmnBiTbl[idx] > newBiVal)
11522 }while(idx < RG_SCH_CMN_NUM_BI_VAL-1);
11523 cell->biInfo.prevBiTime = rgSchCmnBiTbl[idx];
11524 /* For 16 Entries in Table 7.2.1 36.321.880 - 3 reserved so total 13 Entries */
11525 return (idx); /* Returning reserved value from table UE treats it as 960 ms */
11526 } /* rgSCHCmnGetBiIndex */
11530 * @brief This function Processes the Final Allocations
11531 * made by the RB Allocator against the requested
11532 * RAR allocations. Assumption: The requested
11533 * allocations are always satisfied completely.
11534 * Hence no roll back.
11538 * Function: rgSCHCmnDlRaRspFnlz
11539 * Purpose: This function Processes the Final Allocations
11540 * made by the RB Allocator against the requested.
11541 * Takes care of PDCCH filling.
11543 * Invoked by: Common Scheduler
11545 * @param[in] RgSchCellCb *cell
11546 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlRaRspFnlz: finalize RAR (Random Access Response) allocations
 * granted by the RB allocator for this TTI.
 *
 * For each common-PDCCH RAR allocation that actually got a PDCCH:
 *  - walk the pending RA requests for that RA-RNTI (one per RAPID),
 *  - dedicated preambles are diverted to the handover/PDCCH-order path,
 *  - otherwise a RaCb is created, a Msg3 UL grant is requested, TA/CQI/TPC
 *    from the preamble are recorded, and the response is queued on the
 *    subframe's raRspLst,
 *  - the subframe/PDCCH fields are then filled and a Backoff Indicator is
 *    attached when overload control is active or UEs remain unserved.
 *
 * @param[in] cell       cell control block
 * @param[in] allocInfo  DL RB allocation info holding raRspAlloc[] results
 *
 * NOTE(review): several physical lines (braces, #ifdef blocks, break/continue
 * statements) are elided from this view; comments describe visible code only.
 */
11550 static Void rgSCHCmnDlRaRspFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11552 uint32_t rarCnt = 0;
11553 RgSchDlRbAlloc *raRspAlloc;
11554 RgSchDlSf *subFrm = NULLP;
11558 RgSchRaReqInfo *raReq;
11560 RgSchUlAlloc *ulAllocRef=NULLP;
11561 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11562 uint8_t allocRapidCnt = 0;
11564 uint32_t msg3SchdIdx = 0;
11565 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
11566 uint8_t msg3Subfrm;
11570 for (rarCnt=0; rarCnt<RG_SCH_CMN_MAX_CMN_PDCCH; rarCnt++)
11572 raRspAlloc = &allocInfo->raRspAlloc[rarCnt];
11573 /* Having likely condition first for optimization */
11574 if (!raRspAlloc->pdcch)
11580 subFrm = raRspAlloc->dlSf;
11581 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
11582 /* Corrected RACH handling for multiple RAPIDs per RARNTI */
11583 allocRapidCnt = raRspAlloc->numRapids;
11584 while (allocRapidCnt)
11586 raReq = (RgSchRaReqInfo *)(reqLst->first->node);
11587 /* RACHO: If dedicated preamble, then allocate UL Grant
11588 * (consequence of handover/pdcchOrder) and continue */
11589 if (RGSCH_IS_DEDPRM(cell, raReq->raReq.rapId))
11591 rgSCHCmnHdlHoPo(cell, &subFrm->raRsp[rarCnt].contFreeUeLst,
11593 cmLListDelFrm(reqLst, reqLst->first);
11595 /* ccpu00117052 - MOD - Passing double pointer
11596 for proper NULLP assignment*/
11597 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11598 sizeof(RgSchRaReqInfo));
/* RACH overload control active: drop this RA request without responding. */
11602 if(cell->overLoadBackOffEnab)
11603 {/* RACH overload control is triggered, skipping this RACH */
11604 cmLListDelFrm(reqLst, reqLst->first);
11606 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11607 sizeof(RgSchRaReqInfo));
11610 /* Attempt to include each RA request into the RSP */
11611 /* Any failure in the procedure is considered to */
11612 /* affect futher allocations in the same TTI. When */
11613 /* a failure happens, we break out and complete */
11614 /* the processing for random access */
11615 if (rgSCHRamCreateRaCb(cell, &raCb, &err) != ROK)
11619 /* Msg3 allocation request to USM */
11620 if (raReq->raReq.rapId < cell->rachCfg.sizeRaPreambleGrpA)
11624 /*ccpu00128820 - MOD - Msg3 alloc double delete issue*/
11625 rgSCHCmnMsg3GrntReq(cell, raCb->tmpCrnti, preamGrpA, \
11626 &(raCb->msg3HqProc), &ulAllocRef, &raCb->msg3HqProcId);
/* No UL grant for Msg3 => roll back the RaCb created above. */
11627 if (ulAllocRef == NULLP)
11629 rgSCHRamDelRaCb(cell, raCb, TRUE);
11632 if (raReq->raReq.cqiPres)
11634 raCb->ccchCqi = raReq->raReq.cqiIdx;
11638 raCb->ccchCqi = cellDl->ccchCqi;
11640 raCb->rapId = raReq->raReq.rapId;
11641 raCb->ta.pres = TRUE;
11642 raCb->ta.val = raReq->raReq.ta;
11643 raCb->msg3Grnt = ulAllocRef->grnt;
11644 /* Populating the tpc value received */
11645 raCb->msg3Grnt.tpc = raReq->raReq.tpc;
11646 /* PHR handling for MSG3 */
11647 ulAllocRef->raCb = raCb;
11649 /* To the crntTime, add the MIN time at which UE will
11650 * actually send MSG3 i.e DL_DELTA+6 */
11651 raCb->msg3AllocTime = cell->crntTime;
11652 RGSCH_INCR_SUB_FRAME(raCb->msg3AllocTime, RG_SCH_CMN_MIN_MSG3_RECP_INTRVL);
/* TDD: map the Msg3 reception time onto a UL subframe via the
 * UL/DL configuration tables (visible only inside this branch). */
11654 msg3SchdIdx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) %
11655 RGSCH_NUM_SUB_FRAMES;
11656 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
11657 special subframe */
11658 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][msg3SchdIdx] !=
11659 RG_SCH_TDD_UL_SUBFRAME)
11661 RGSCHCMNADDTOCRNTTIME(cell->crntTime,raCb->msg3AllocTime,
11662 RG_SCH_CMN_DL_DELTA)
11663 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][
11664 raCb->msg3AllocTime.slot];
11665 RGSCHCMNADDTOCRNTTIME(raCb->msg3AllocTime, raCb->msg3AllocTime,
/* Queue the response on this subframe's RAR list and consume the request. */
11669 cmLListAdd2Tail(&subFrm->raRsp[rarCnt].raRspLst, &raCb->rspLnk);
11670 raCb->rspLnk.node = (PTR)raCb;
11671 cmLListDelFrm(reqLst, reqLst->first);
11673 /* ccpu00117052 - MOD - Passing double pointer
11674 for proper NULLP assignment*/
11675 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11676 sizeof(RgSchRaReqInfo));
11678 /* SR_RACH_STATS : RAR scheduled */
11683 /* Fill subframe data members */
11684 subFrm->raRsp[rarCnt].raRnti = raRspAlloc->rnti;
11685 subFrm->raRsp[rarCnt].pdcch = raRspAlloc->pdcch;
11686 subFrm->raRsp[rarCnt].tbSz = raRspAlloc->tbInfo[0].bytesAlloc;
11687 /* Fill PDCCH data members */
11688 rgSCHCmnFillPdcch(cell, subFrm->raRsp[rarCnt].pdcch, raRspAlloc);
11691 if(cell->overLoadBackOffEnab)
11692 {/* RACH overload control is triggered: force the configured BI value */
11693 subFrm->raRsp[rarCnt].backOffInd.pres = PRSNT_NODEF;
11694 subFrm->raRsp[rarCnt].backOffInd.val = cell->overLoadBackOffval;
11699 subFrm->raRsp[rarCnt].backOffInd.pres = NOTPRSNT;
11702 /*[ccpu00125212] Avoiding sending of empty RAR in case of RAR window
11703 is short and UE is sending unauthorised preamble.*/
11704 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
/* Unserved requests remain: attach a computed Backoff Indicator. */
11705 if ((raRspAlloc->biEstmt) && (reqLst->count))
11707 subFrm->raRsp[0].backOffInd.pres = PRSNT_NODEF;
11708 /* Added as part of Upgrade */
11709 subFrm->raRsp[0].backOffInd.val =
11710 rgSCHCmnGetBiIndex(cell, reqLst->count);
11712 /* SR_RACH_STATS : Back Off Inds */
/* Nothing scheduled for this RAR: hand the grabbed PDCCH back. */
11716 else if ((subFrm->raRsp[rarCnt].raRspLst.first == NULLP) &&
11717 (subFrm->raRsp[rarCnt].contFreeUeLst.first == NULLP))
11719 /* Return the grabbed PDCCH */
11720 rgSCHUtlPdcchPut(cell, &subFrm->pdcchInfo, raRspAlloc->pdcch);
11721 subFrm->raRsp[rarCnt].pdcch = NULLP;
11722 DU_LOG("\nERROR --> SCH : rgSCHCmnRaRspAlloc(): "
11723 "Not even one RaReq.");
11727 DU_LOG("\nDEBUG --> SCH : RNTI:%d Scheduled RAR @ (%u,%u) ",
11729 cell->crntTime.sfn,
11730 cell->crntTime.slot);
11736 * @brief This function computes rv.
11740 * Function: rgSCHCmnDlCalcRvForBcch
11741 * Purpose: This function computes rv.
11743 * Invoked by: Common Scheduler
11745 * @param[in] RgSchCellCb *cell
11746 * @param[in] Bool si
11747 * @param[in] uint16_t i
/*
 * rgSCHCmnDlCalcRvForBcch: compute the redundancy version (RV) for a
 * BCCH-on-DLSCH transmission.
 *
 * For SIB1 the RV cycles with the SFN: k = (SFN/2) mod 4 and
 * rv = ceil(3k/2) mod 4 (the standard SIB1 RV pattern). The timing is
 * advanced by RG_SCH_CMN_DL_DELTA to reflect the actual transmit subframe.
 *
 * @param[in] cell  cell control block (current time source)
 * @param[in] si    TRUE for an SI message, FALSE for SIB1
 * @param[in] i     SI transmission index within the SI window
 * @return    redundancy version (0..3)
 *
 * NOTE(review): the declarations of k/rv, the SI-specific branch using 'i',
 * and the return statement are elided from this view — confirm in the
 * full source before relying on the exact control flow.
 */
11751 static uint8_t rgSCHCmnDlCalcRvForBcch(RgSchCellCb *cell,Bool si,uint16_t i)
11754 CmLteTimingInfo frm;
11756 frm = cell->crntTime;
11757 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
11765 k = (frm.sfn/2) % 4;
11767 rv = RGSCH_CEIL(3*k, 2) % 4;
11772 * @brief This function Processes the Final Allocations
11773 * made by the RB Allocator against the requested
11774 * BCCH/PCCH allocations. Assumption: The requested
11775 * allocations are always satisfied completely.
11776 * Hence no roll back.
11780 * Function: rgSCHCmnDlBcchPcchFnlz
11781 * Purpose: This function Processes the Final Allocations
11782 * made by the RB Allocator against the requested.
11783 * Takes care of PDCCH filling.
11785 * Invoked by: Common Scheduler
11787 * @param[in] RgSchCellCb *cell
11788 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlBcchPcchFnlz: finalize BCCH and PCCH allocations granted by
 * the RB allocator for this TTI.
 *
 * PCCH: if a PDCCH was obtained, pops the pending paging BO report, fills
 * the subframe/PDCCH fields and the MAC interface (RgInfSfAlloc) entry.
 * BCCH: fills subframe/PDCCH fields; for SIB1 (schdFirst) copies the SIB1
 * buffer to the interface and computes its RV; otherwise handles SI/warning
 * SI PDUs, decrements the retransmission context, and releases the BO
 * report once all retransmissions are done.
 *
 * @param[in] cell       cell control block
 * @param[in] allocInfo  DL RB allocation info holding pcchAlloc/bcchAlloc
 *
 * NOTE(review): the #ifdef structure (LTEMAC_HDFDD / TDD / RGR_SI_SCH) is
 * partially elided from this view — the three nextSfIdx declarations below
 * belong to mutually exclusive preprocessor branches in the full source.
 */
11792 static Void rgSCHCmnDlBcchPcchFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11794 RgSchDlRbAlloc *rbAllocInfo;
11798 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
11800 #ifdef LTEMAC_HDFDD
11801 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11803 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11807 /* Moving variables to available scope for optimization */
11808 RgSchClcDlLcCb *pcch;
11811 RgSchClcDlLcCb *bcch;
11814 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* ---- PCCH (paging) finalization ---- */
11818 rbAllocInfo = &allocInfo->pcchAlloc;
11819 if (rbAllocInfo->pdcch)
11821 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
11823 /* Added sfIdx calculation for TDD as well */
11825 #ifdef LTEMAC_HDFDD
11826 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11828 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11831 subFrm = rbAllocInfo->dlSf;
11832 pcch = rgSCHDbmGetPcch(cell);
11835 DU_LOG("\nERROR --> SCH : rgSCHCmnDlBcchPcchFnlz( ): "
11836 "No Pcch Present");
11840 /* Added Dl TB count for paging message transmission*/
11842 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Consume the pending paging BO report now that it is being served. */
11844 bo = (RgSchClcBoRpt *)pcch->boLst.first->node;
11845 cmLListDelFrm(&pcch->boLst, &bo->boLstEnt);
11846 /* ccpu00117052 - MOD - Passing double pointer
11847 for proper NULLP assignment*/
11848 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
11849 /* Fill subframe data members */
11850 subFrm->pcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
11851 subFrm->pcch.pdcch = rbAllocInfo->pdcch;
11852 /* Fill PDCCH data members */
11853 rgSCHCmnFillPdcch(cell, subFrm->pcch.pdcch, rbAllocInfo);
11854 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, pcch->lcId, TRUE);
11855 /* ccpu00132314-ADD-Update the tx power allocation info
11856 TODO-Need to add a check for max tx power per symbol */
11857 subfrmAlloc->cmnLcInfo.pcchInfo.txPwrOffset = cellDl->pcchTxPwrOffset;
/* ---- BCCH (system information) finalization ---- */
11861 rbAllocInfo = &allocInfo->bcchAlloc;
11862 if (rbAllocInfo->pdcch)
11864 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
11866 #ifdef LTEMAC_HDFDD
11867 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11869 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11872 subFrm = rbAllocInfo->dlSf;
11874 /* Fill subframe data members */
11875 subFrm->bcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
11876 subFrm->bcch.pdcch = rbAllocInfo->pdcch;
11877 /* Fill PDCCH data members */
11878 rgSCHCmnFillPdcch(cell, subFrm->bcch.pdcch, rbAllocInfo);
/* schdFirst => SIB1 transmission; otherwise an SI message. */
11880 if(rbAllocInfo->schdFirst)
11883 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
11884 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
11886 /*Copy the SIB1 msg buff into interface buffer */
11887 SCpyMsgMsg(cell->siCb.crntSiInfo.sib1Info.sib1,
11888 rgSchCb[cell->instIdx].rgSchInit.region,
11889 rgSchCb[cell->instIdx].rgSchInit.pool,
11890 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11891 #endif/*RGR_SI_SCH*/
11892 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
11893 rgSCHCmnDlCalcRvForBcch(cell, FALSE, 0);
11901 i = cell->siCb.siCtx.i;
11902 /*Decrement the retransmission count */
11903 cell->siCb.siCtx.retxCntRem--;
11905 /*Copy the SI msg buff into interface buffer */
11906 if(cell->siCb.siCtx.warningSiFlag == FALSE)
11908 SCpyMsgMsg(cell->siCb.siArray[cell->siCb.siCtx.siId-1].si,
11909 rgSchCb[cell->instIdx].rgSchInit.region,
11910 rgSchCb[cell->instIdx].rgSchInit.pool,
11911 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
/* Warning SI (ETWS/CMAS-style) path: fetch and copy the warning PDU,
 * releasing it after the final retransmission. */
11915 pdu = rgSCHUtlGetWarningSiPdu(cell);
11916 RGSCH_NULL_CHECK(cell->instIdx, pdu);
11918 rgSchCb[cell->instIdx].rgSchInit.region,
11919 rgSchCb[cell->instIdx].rgSchInit.pool,
11920 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11921 if(cell->siCb.siCtx.retxCntRem == 0)
11923 rgSCHUtlFreeWarningSiPdu(cell);
11924 cell->siCb.siCtx.warningSiFlag = FALSE;
11929 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
11930 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
11932 if(bo->retxCnt != cell->siCfg.retxCnt-1)
11937 #endif/*RGR_SI_SCH*/
11938 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
11939 rgSCHCmnDlCalcRvForBcch(cell, TRUE, i);
11942 /* Added Dl TB count for SIB1 and SI messages transmission.
11943 * This counter will be incremented only for the first transmission
11944 * (with RV 0) of these messages*/
11946 if(subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv == 0)
11948 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* All retransmissions served: release the BO report. */
11952 if(bo->retxCnt == 0)
11954 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
11955 /* ccpu00117052 - MOD - Passing double pointer
11956 for proper NULLP assignment*/
11957 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
11959 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, bcch->lcId, sendInd);
11961 /*Fill the interface info */
11962 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, NULLD, NULLD);
11964 /* ccpu00132314-ADD-Update the tx power allocation info
11965 TODO-Need to add a check for max tx power per symbol */
11966 subfrmAlloc->cmnLcInfo.bcchInfo.txPwrOffset = cellDl->bcchTxPwrOffset;
11968 /*mBuf has been already copied above */
11969 #endif/*RGR_SI_SCH*/
11982 * Function: rgSCHCmnUlSetAllUnSched
11985 * Invoked by: Common Scheduler
11987 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
/*
 * rgSCHCmnUlSetAllUnSched: move every UE queued for UL scheduling this TTI
 * (contention-resolution, retransmission, and new-transmission lists) to
 * the corresponding non-scheduled list — used when nothing can be
 * scheduled in this subframe.
 *
 * @param[out] allocInfo  UL RB allocation info whose three request lists
 *                        are drained into their nonSchd counterparts
 *
 * NOTE(review): the while-loop headers/braces around each drain are elided
 * from this view; each pair of lines below re-reads 'first' after the move,
 * i.e. each list is drained until empty.
 */
11991 static Void rgSCHCmnUlSetAllUnSched(RgSchCmnUlRbAllocInfo *allocInfo)
11996 node = allocInfo->contResLst.first;
11999 rgSCHCmnUlMov2NonSchdCntResLst(allocInfo, (RgSchUeCb *)node->node);
12000 node = allocInfo->contResLst.first;
12003 node = allocInfo->retxUeLst.first;
12006 rgSCHCmnUlMov2NonSchdRetxUeLst(allocInfo, (RgSchUeCb *)node->node);
12007 node = allocInfo->retxUeLst.first;
12010 node = allocInfo->ueLst.first;
12013 rgSCHCmnUlMov2NonSchdUeLst(allocInfo, (RgSchUeCb *)node->node);
12014 node = allocInfo->ueLst.first;
12026 * Function: rgSCHCmnUlAdd2CntResLst
12029 * Invoked by: Common Scheduler
12031 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12032 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnUlAdd2CntResLst: append a UE to the contention-resolution list
 * of the UL allocation info for this TTI, linking via the UE's per-TTI
 * UL request link (reqLnk).
 *
 * @param[out] allocInfo  UL RB allocation info (contResLst is updated)
 * @param[in]  ue         UE to enqueue
 */
12036 Void rgSCHCmnUlAdd2CntResLst(RgSchCmnUlRbAllocInfo *allocInfo,RgSchUeCb *ue)
12038 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,ue->cell))->alloc);
12039 cmLListAdd2Tail(&allocInfo->contResLst, &ulAllocInfo->reqLnk);
12040 ulAllocInfo->reqLnk.node = (PTR)ue;
12049 * Function: rgSCHCmnUlAdd2UeLst
12052 * Invoked by: Common Scheduler
12054 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12055 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnUlAdd2UeLst: append a UE to the new-transmission UE list of the
 * UL allocation info — but only if its request link is not already in use
 * (reqLnk.node == NULLP), which guards against double insertion in the
 * same TTI.
 *
 * @param[in]  cell       cell control block (used to locate the UL UE ctx)
 * @param[out] allocInfo  UL RB allocation info (ueLst is updated)
 * @param[in]  ue         UE to enqueue
 */
12059 Void rgSCHCmnUlAdd2UeLst(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUeCb *ue)
12061 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,cell))->alloc);
12062 if (ulAllocInfo->reqLnk.node == NULLP)
12064 cmLListAdd2Tail(&allocInfo->ueLst, &ulAllocInfo->reqLnk);
12065 ulAllocInfo->reqLnk.node = (PTR)ue;
12075 * Function: rgSCHCmnAllocUlRb
12076 * Purpose: To do RB allocations for uplink
12078 * Invoked by: Common Scheduler
12080 * @param[in] RgSchCellCb *cell
12081 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
/*
 * rgSCHCmnAllocUlRb: entry point for uplink RB allocation in this TTI.
 * Delegates new-transmission scheduling for the queued UE list to
 * rgSCHCmnUlRbAllocForLst, which splits it into scheduled and
 * non-scheduled lists.
 *
 * @param[in] cell       cell control block
 * @param[in] allocInfo  UL RB allocation info (sf, ueLst in; schdUeLst /
 *                       nonSchdUeLst out)
 */
12084 Void rgSCHCmnAllocUlRb(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo)
12086 RgSchUlSf *sf = allocInfo->sf;
12088 /* Schedule for new transmissions */
12089 rgSCHCmnUlRbAllocForLst(cell, sf, allocInfo->ueLst.count,
12090 &allocInfo->ueLst, &allocInfo->schdUeLst,
12091 &allocInfo->nonSchdUeLst, (Bool)TRUE);
12095 /***********************************************************
12097 * Func : rgSCHCmnUlRbAllocForLst
12099 * Desc : Allocate for a list in cmn rb alloc information passed
12108 **********************************************************/
/*
 * rgSCHCmnUlRbAllocForLst: allocate UL RBs for each UE on a request list.
 *
 * Walks 'count' entries of reqLst, attempts an allocation per UE via
 * rgSCHCmnUlRbAllocForUe, and partitions UEs into schdLst / nonSchdLst.
 * For new transmissions it also records per-UE RNTI/PRB info and the
 * PUSCH timing info in cell->sfAllocArr for the MAC interface. Once a UE
 * fails (no hole / allocation failure) the remaining UEs are moved to the
 * non-scheduled list without further attempts.
 *
 * NOTE(review): part of the parameter list, loop braces and several
 * #ifdef/#else lines are elided from this view; visible parameters are
 * the two output lists, and the full signature also carries cell, sf,
 * count, reqLst and isNewTx (used below).
 */
12109 static Void rgSCHCmnUlRbAllocForLst
12115 CmLListCp *schdLst,
12116 CmLListCp *nonSchdLst,
12125 CmLteTimingInfo timeInfo;
/* (Re)initialize output lists only when empty — presumably to preserve
 * partially filled lists across calls; TODO confirm against full source. */
12129 if(schdLst->count == 0)
12131 cmLListInit(schdLst);
12134 cmLListInit(nonSchdLst);
/* For new TX, publish UE count and PUSCH timing to the MAC interface. */
12136 if(isNewTx == TRUE)
12138 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.numUes = (uint8_t) count;
12140 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime, timeInfo, TFU_ULCNTRL_DLDELTA);
12141 k = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][timeInfo.subframe];
12142 RG_SCH_ADD_TO_CRNT_TIME(timeInfo,
12143 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo, k);
12145 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime,cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo,
12146 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
12151 for (lnk = reqLst->first; count; lnk = lnk->next, --count)
12153 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
12154 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
/* No free hole left in the UL subframe: stop trying for this TTI. */
12159 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
12164 ueUl->subbandShare = ueUl->subbandRequired;
12165 if(isNewTx == TRUE)
12167 maxRb = RGSCH_MIN((ueUl->subbandRequired * MAX_5GTF_VRBG_SIZE), ue->ue5gtfCb.maxPrb);
12169 ret = rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole);
12172 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, schdLst);
12173 rgSCHCmnUlUeFillAllocInfo(cell, ue);
12177 gUl5gtfRbAllocFail++;
12178 #if defined (TENB_STATS) && defined (RG_5GTF)
12179 cell->tenbStats->sch.ul5gtfRbAllocFail++;
12181 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
12182 ue->isMsg4PdcchWithCrnti = FALSE;
12183 ue->isSrGrant = FALSE;
/* Record per-UE allocation result for the MAC interface (new TX only). */
12186 if(isNewTx == TRUE)
12188 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
12189 ulAllocInfo[count - 1].rnti = ue->ueId;
12190 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
12191 ulAllocInfo[count - 1].numPrb = ue->ul.nPrb;
12194 ueUl->subbandShare = 0; /* This reset will take care of
12195 * all scheduler types */
/* Remaining UEs (after a failure/hole exhaustion) go unscheduled. */
12197 for (; count; lnk = lnk->next, --count)
12199 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
12200 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
12201 ue->isMsg4PdcchWithCrnti = FALSE;
12208 /***********************************************************
12210 * Func : rgSCHCmnUlMdfyGrntForCqi
12212 * Desc : Modify UL Grant to consider presence of
12213 * CQI along with PUSCH Data.
12218 * - Scale down iTbs based on betaOffset and
12219 * size of Acqi Size.
12220 * - Optionally attempt to increase numSb by 1
12221 * if input payload size does not fit in due
12222 * to reduced tbSz as a result of iTbsNew.
12226 **********************************************************/
/*
 * rgSCHCmnUlMdfyGrntForCqi: adjust an UL grant so that aperiodic CQI/RI
 * and HARQ feedback multiplexed on PUSCH still leave the data with an
 * acceptable effective coding rate.
 *
 * Estimates the REs consumed by CQI/RI and HARQ (scaled by the beta
 * offsets, expressed in parts-per-1000), derives the resulting bits-per-RE
 * efficiency for the remaining PUSCH data, and while it exceeds the target
 * alternately grows the subband count by one and steps the iTbs down by
 * 'stepDownItbs' — giving up (RFAILED) once nPrb reaches maxRb with
 * iTbs <= 10.
 *
 * @param[in]     ue            UE whose grant is being modified
 * @param[in]     maxRb         upper bound on PRBs for this grant
 * @param[in]     stepDownItbs  iTbs decrement per adjustment step
 * @param[in,out] numSb, iTbs   grant parameters (via pointers; full
 *                              parameter list is elided from this view)
 * @return ROK on success, RFAILED if the efficiency target cannot be met
 *
 * NOTE(review): the do{ header, several declarations and the parameter
 * list are elided here; comments describe visible code only.
 */
12227 static S16 rgSCHCmnUlMdfyGrntForCqi
12235 uint32_t stepDownItbs,
12239 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(ue->cell);
12244 uint32_t remREsForPusch;
12245 uint32_t bitsPerRe;
12247 uint32_t betaOffVal = ue->ul.betaOffstVal;
12248 uint32_t cqiRiRptSz = ue->ul.cqiRiSz;
12249 uint32_t betaOffHqVal = rgSchCmnBetaHqOffstTbl[ue->ul.betaHqOffst];
12250 uint32_t resNumSb = *numSb;
12251 uint32_t puschEff = 1000;
12254 Bool mdfyiTbsFlg = FALSE;
12255 uint8_t resiTbs = *iTbs;
/* Derive the modulation order from the candidate iTbs, clamped to the
 * UE category's capability (QPSK below CAT-5, up to 64QAM for CAT-5). */
12261 iMcs = rgSCHCmnUlGetIMcsFrmITbs(resiTbs, RG_SCH_CMN_GET_UE_CTGY(ue));
12262 RG_SCH_UL_MCS_TO_MODODR(iMcs, modOdr);
12263 if (RG_SCH_CMN_GET_UE_CTGY(ue) != CM_LTE_UE_CAT_5)
12265 modOdr = RGSCH_MIN(RGSCH_QM_QPSK, modOdr);
12269 modOdr = RGSCH_MIN(RGSCH_QM_64QAM, modOdr);
12271 nPrb = resNumSb * cellUl->sbSize;
12272 /* Restricting the minumum iTbs requried to modify to 10 */
12273 if ((nPrb >= maxRb) && (resiTbs <= 10))
12275 /* Could not accomodate ACQI */
/* Estimate REs taken by CQI/RI and HARQ feedback on PUSCH. */
12278 totREs = nPrb * RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
12279 tbSz = rgTbSzTbl[0][resiTbs][nPrb-1];
12280 /* totalREs/tbSz = num of bits perRE. */
12281 cqiRiREs = (totREs * betaOffVal * cqiRiRptSz)/(1000 * tbSz); /* betaOffVal is represented
12282 as parts per 1000 */
12283 hqREs = (totREs * betaOffHqVal * hqSz)/(1000 * tbSz);
12284 if ((cqiRiREs + hqREs) < totREs)
12286 remREsForPusch = totREs - cqiRiREs - hqREs;
12287 bitsPerRe = (tbSz * 1000)/remREsForPusch; /* Multiplying by 1000 for integer arithmetic */
12288 puschEff = bitsPerRe/modOdr;
12290 if (puschEff < effTgt)
12292 /* ensure resultant efficiency for PUSCH Data is within 0.93*/
12297 /* Alternate between increasing SB or decreasing iTbs until eff is met */
12298 if (mdfyiTbsFlg == FALSE)
12302 resNumSb = resNumSb + 1;
12304 mdfyiTbsFlg = TRUE;
12310 resiTbs-= stepDownItbs;
12312 mdfyiTbsFlg = FALSE;
12315 }while (1); /* Loop breaks if efficency is met
12316 or returns RFAILED if not able to meet the efficiency */
12325 /***********************************************************
12327 * Func : rgSCHCmnUlRbAllocForUe
12329 * Desc : Do uplink RB allocation for an UE.
12333 * Notes: Note that as of now, for retx, maxRb
12334 * is not considered. Alternatives, such
12335 * as dropping retx if it crosses maxRb
12336 * could be considered.
12340 **********************************************************/
/*
 * rgSCHCmnUlRbAllocForUe: perform uplink RB allocation for a single UE
 * (5GTF path): obtain a HARQ process and a PDCCH, reserve VRBGs on the
 * UE's beam, and fill the resulting grant (MCS, RIV, DCI format A1/A2 by
 * rank, HARQ proc id, allocated bytes).
 *
 * On PDCCH failure the function bails out; on subband-allocation failure
 * the PDCCH is returned to the pool. Note that, as stated in the file
 * header comments, maxRb is not applied to retransmissions.
 *
 * @return ROK on success, RFAILED otherwise (return statements elided
 *         from this view)
 *
 * NOTE(review): the parameter list (cell, sf, ue, maxRb, hole per the
 * caller), several locals and #ifdef lines are elided from this view;
 * comments below describe only the visible code.
 */
12341 static S16 rgSCHCmnUlRbAllocForUe
12350 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
12351 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
12352 RgSchUlAlloc *alloc = NULLP;
12358 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->schdHqProcIdx];
12360 RgSchUlHqProcCb *proc = NULLP;
12364 uint8_t numVrbgTemp;
12366 TfuDciFormat dciFrmt;
/* A free UL HARQ process is mandatory for a new grant. */
12371 rgSCHUhmGetAvlHqProc(cell, ue, &proc);
12374 //DU_LOG("\nINFO --> SCH : UE [%d] HQ Proc unavailable\n", ue->ueId);
/* Rank-2 UEs use DCI format A2, rank-1 format A1. */
12379 if (ue->ue5gtfCb.rank == 2)
12381 dciFrmt = TFU_DCI_FORMAT_A2;
12386 dciFrmt = TFU_DCI_FORMAT_A1;
12389 /* 5gtf TODO : To pass dci frmt to this function */
12390 pdcch = rgSCHCmnPdcchAllocCrntSf(cell, ue);
12393 DU_LOG("\nDEBUG --> SCH : rgSCHCmnUlRbAllocForUe(): Could not get PDCCH for CRNTI:%d",ue->ueId);
12396 gUl5gtfPdcchSchd++;
12397 #if defined (TENB_STATS) && defined (RG_5GTF)
12398 cell->tenbStats->sch.ul5gtfPdcchSchd++;
12401 //TODO_SID using configured prb as of now
12402 nPrb = ue->ue5gtfCb.maxPrb;
12403 reqVrbg = nPrb/MAX_5GTF_VRBG_SIZE;
12404 iMcs = ue->ue5gtfCb.mcs; //gSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Sanity: the beam's VRBG bookkeeping must stay within MAX_5GTF_VRBG. */
12408 if((sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart > MAX_5GTF_VRBG)
12409 || (sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated > MAX_5GTF_VRBG))
12411 DU_LOG("\nINFO --> SCH : 5GTF_ERROR vrbg > 25 valstart = %d valalloc %d\n", sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart
12412 , sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated);
12417 /*TODO_SID: Workaround for alloc. Currently alloc is ulsf based. To handle multiple beams, we need a different
12418 design. Now alloc are formed based on MAX_5GTF_UE_SCH macro. */
12419 numVrbgTemp = MAX_5GTF_VRBG/MAX_5GTF_UE_SCH;
12422 alloc = rgSCHCmnUlSbAlloc(sf, numVrbgTemp,\
12425 if (alloc == NULLP)
12427 DU_LOG("\nERROR --> SCH : rgSCHCmnUlRbAllocForUe(): Could not get UlAlloc %d CRNTI:%d",numVrbg,ue->ueId);
/* Allocation failed after PDCCH was grabbed: release the PDCCH. */
12428 rgSCHCmnPdcchRlsCrntSf(cell, pdcch);
12431 gUl5gtfAllocAllocated++;
12432 #if defined (TENB_STATS) && defined (RG_5GTF)
12433 cell->tenbStats->sch.ul5gtfAllocAllocated++;
/* Fill the grant and advance the beam's VRBG cursor. */
12435 alloc->grnt.vrbgStart = sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart;
12436 alloc->grnt.numVrbg = numVrbg;
12437 alloc->grnt.numLyr = numLyr;
12438 alloc->grnt.dciFrmt = dciFrmt;
12440 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart += numVrbg;
12441 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated += numVrbg;
12443 //rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
12445 sf->totPrb += alloc->grnt.numRb;
12446 ue->ul.nPrb = alloc->grnt.numRb;
12448 if (ue->csgMmbrSta != TRUE)
12450 cellUl->ncsgPrbCnt += alloc->grnt.numRb;
12452 cellUl->totPrbCnt += (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
12453 alloc->pdcch = pdcch;
12454 alloc->grnt.iMcs = iMcs;
12455 alloc->grnt.iMcsCrnt = iMcsCrnt;
12456 alloc->grnt.hop = 0;
12457 /* Initial Num RBs support for UCI on PUSCH */
12459 ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
12461 alloc->forMsg3 = FALSE;
12462 //RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTb5gtfSzTbl[0], (iTbs));
12464 //ueUl->alloc.allocdBytes = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
12465 /* TODO_SID Allocating based on configured MCS as of now.
12466 Currently for format A2. When doing multi grp per tti, need to update this. */
12467 ueUl->alloc.allocdBytes = (rgSch5gtfTbSzTbl[iMcs]/8) * ue->ue5gtfCb.rank;
12469 alloc->grnt.datSz = ueUl->alloc.allocdBytes;
12470 //TODO_SID Need to check mod order.
12471 RG_SCH_CMN_TBS_TO_MODODR(iMcs, alloc->grnt.modOdr);
12472 //alloc->grnt.modOdr = 6;
12473 alloc->grnt.isRtx = FALSE;
/* RIV encodes (vrbgStart, numVrbg) for the DCI resource assignment. */
12475 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
12476 alloc->grnt.SCID = 0;
12477 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
12478 alloc->grnt.PMI = 0;
12479 alloc->grnt.uciOnxPUSCH = 0;
12480 alloc->grnt.hqProcId = proc->procId;
12482 alloc->hqProc = proc;
12483 alloc->hqProc->ulSfIdx = cellUl->schdIdx;
12485 /*commenting to retain the rnti used for transmission SPS/c-rnti */
12486 alloc->rnti = ue->ueId;
12487 ueUl->alloc.alloc = alloc;
12488 /*rntiwari-Adding the debug for generating the graph.*/
12489 /* No grant attr recorded now */
12493 /***********************************************************
12495 * Func : rgSCHCmnUlRbAllocAddUeToLst
12497 * Desc : Add UE to list (scheduled/non-scheduled list)
12498 * for UL RB allocation information.
12506 **********************************************************/
/*
 * rgSCHCmnUlRbAllocAddUeToLst: append a UE to a UL RB-allocation result
 * list (scheduled or non-scheduled), linking via the UE's per-TTI
 * schdLstLnk, and bump the 5GTF allocation-done counters.
 *
 * @param[in]  cell  cell control block (used to locate the UL UE context)
 * @param[in]  ue    UE to enqueue
 * @param[out] lst   destination list (schdUeLst or nonSchdUeLst)
 */
12507 Void rgSCHCmnUlRbAllocAddUeToLst(RgSchCellCb *cell,RgSchUeCb *ue,CmLListCp *lst)
12509 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
12512 gUl5gtfUeRbAllocDone++;
12513 #if defined (TENB_STATS) && defined (RG_5GTF)
12514 cell->tenbStats->sch.ul5gtfUeRbAllocDone++;
12516 cmLListAdd2Tail(lst, &ueUl->alloc.schdLstLnk);
12517 ueUl->alloc.schdLstLnk.node = (PTR)ue;
12522 * @brief This function Processes the Final Allocations
12523 * made by the RB Allocator against the requested.
12527 * Function: rgSCHCmnUlAllocFnlz
12528 * Purpose: This function Processes the Final Allocations
12529 * made by the RB Allocator against the requested.
12531 * Invoked by: Common Scheduler
12533 * @param[in] RgSchCellCb *cell
12534 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
/*
 * rgSCHCmnUlAllocFnlz: finalize this TTI's UL allocations by dispatching
 * to the scheduler-specific (RR/PFS/MAXCI) finalization hook installed in
 * the cell's UL API table.
 *
 * @param[in] cell       cell control block
 * @param[in] allocInfo  UL RB allocation info produced this TTI
 */
12538 static Void rgSCHCmnUlAllocFnlz(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo)
12540 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12542 /* call scheduler specific Finalization */
12543 cellSch->apisUl->rgSCHUlAllocFnlz(cell, allocInfo);
12549 * @brief This function Processes the Final Allocations
12550 * made by the RB Allocator against the requested.
12554 * Function: rgSCHCmnDlAllocFnlz
12555 * Purpose: This function Processes the Final Allocations
12556 * made by the RB Allocator against the requested.
12558 * Invoked by: Common Scheduler
12560 * @param[in] RgSchCellCb *cell
/*
 * rgSCHCmnDlAllocFnlz: finalize all DL allocations made this TTI, in a
 * fixed order: CCCH retx/tx, CCCH-SDU retx/tx, RAR, and finally the
 * scheduler-specific (dedicated-traffic) finalization hook.
 *
 * @param[in] cell  cell control block (allocInfo taken from its common
 *                  scheduler context)
 */
12564 Void rgSCHCmnDlAllocFnlz(RgSchCellCb *cell)
12566 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12567 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
12570 rgSCHCmnDlCcchRetxFnlz(cell, allocInfo);
12571 rgSCHCmnDlCcchTxFnlz(cell, allocInfo);
12573 /* Added below functions for handling CCCH SDU transmission received
12575 * * guard timer expiry*/
12576 rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo);
12577 rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo);
12579 rgSCHCmnDlRaRspFnlz(cell, allocInfo);
12580 /* call scheduler specific Finalization */
12581 cellSch->apisDl->rgSCHDlAllocFnlz(cell, allocInfo);
12583 /* Stack Crash problem for TRACE5 Changes. Added the return below */
12590 * @brief Update an uplink subframe.
12594 * Function : rgSCHCmnUlUpdSf
12596 * For each allocation
12597 * - if no more tx needed
12598 * - Release allocation
12600 * - Perform retransmission
12602 * @param[in] RgSchUlSf *sf
/*
 * rgSCHCmnUlUpdSf: update an uplink subframe after its HARQ feedback is
 * known. Each allocation is either released (CRC received, or no
 * transmissions remain) or queued for retransmission, after which the
 * subframe is reset for fresh allocations.
 *
 * @param[in] cell       cell control block
 * @param[in] allocInfo  UL RB allocation info (receives retx candidates)
 * @param[in] sf         uplink subframe being recycled
 *
 * NOTE(review): the release-vs-retx if/else bodies are partially elided
 * from this view; the visible calls show the two outcomes.
 */
12605 static Void rgSCHCmnUlUpdSf(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUlSf *sf)
12609 while ((lnk = sf->allocs.first))
12611 RgSchUlAlloc *alloc = (RgSchUlAlloc *)lnk->node;
/* Done (CRC ok) or out of transmissions: the alloc can be released;
 * otherwise it is queued for retransmission first. */
12614 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
12619 /* If need to handle all retx together, run another loop separately */
12620 rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc);
12622 rgSCHCmnUlRlsUlAlloc(cell, sf, alloc);
12625 /* By this time, all allocs would have been cleared and
12626 * SF is reset to be made ready for new allocations. */
12627 rgSCHCmnUlSfReset(cell, sf);
12628 /* In case there are timing problems due to msg3
12629 * allocations being done in advance, (which will
12630 * probably happen with the current FDD code that
12631 * handles 8 subframes) one solution
12632 * could be to hold the (recent) msg3 allocs in a separate
12633 * list, and then possibly add that to the actual
12634 * list later. So at this time while allocations are
12635 * traversed, the recent msg3 ones are not seen. Anytime after
12636 * this (a good time is when the usual allocations
12637 * are made), msg3 allocations could be transferred to the
12638 * normal list. Not doing this now as it is assumed
12639 * that incorporation of TDD shall take care of this.
12647 * @brief Handle uplink allocation for retransmission.
12651 * Function : rgSCHCmnUlHndlAllocRetx
12653 * Processing Steps:
12654 * - Add to queue for retx.
12655 * - Do not release here, release happends as part
12656 * of the loop that calls this function.
12658 * @param[in] RgSchCellCb *cell
12659 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12660 * @param[in] RgSchUlSf *sf
12661 * @param[in] RgSchUlAlloc *alloc
/*
 * rgSCHCmnUlHndlAllocRetx: queue an uplink allocation for retransmission.
 *
 * Dedicated (UE) allocations: record the requested byte count and add the
 * UE to the retransmission list; the alloc itself is released by the
 * caller's loop. Msg3 allocations: a new alloc of the same subband size
 * is carved in the same subframe and populated from the failed one.
 *
 * @param[in] cell       cell control block
 * @param[in] allocInfo  UL RB allocation info (retx UE list updated)
 * @param[in] sf         uplink subframe holding the failed allocation
 * @param[in] alloc      the allocation needing retransmission
 *
 * NOTE(review): the declarations of 'bytes' and 'retxAlloc' and some
 * braces are elided from this view; comments describe visible code only.
 */
12664 static Void rgSCHCmnUlHndlAllocRetx(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUlSf *sf,RgSchUlAlloc *alloc)
12667 RgSchCmnUlUe *ueUl;
/* Bytes carried by the failed grant, from iTbs/numRb via the TB size table. */
12669 rgTbSzTbl[0][rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs)]\
12670 [alloc->grnt.numRb-1]/8;
12671 if (!alloc->forMsg3)
12673 ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue);
12674 ueUl->alloc.reqBytes = bytes;
12675 rgSCHUhmRetx(alloc->hqProc);
12676 rgSCHCmnUlAdd2RetxUeLst(allocInfo, alloc->ue);
12680 /* RACHO msg3 retx handling. Part of RACH procedure changes. */
12681 retxAlloc = rgSCHCmnUlGetUlAlloc(cell, sf, alloc->numSb);
12682 if (retxAlloc == NULLP)
12684 DU_LOG("\nERROR --> SCH : rgSCHCmnUlRbAllocForUe():Could not get UlAlloc for msg3Retx RNTI:%d",
/* Clone grant parameters onto the fresh Msg3 retx allocation; the MCS
 * for the retransmission comes from the RV-to-iMcs table. */
12688 retxAlloc->grnt.iMcs = alloc->grnt.iMcs;
12689 retxAlloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl\
12690 [alloc->hqProc->rvIdx];
12691 retxAlloc->grnt.nDmrs = 0;
12692 retxAlloc->grnt.hop = 0;
12693 retxAlloc->grnt.delayBit = 0;
12694 retxAlloc->rnti = alloc->rnti;
12695 retxAlloc->ue = NULLP;
12696 retxAlloc->pdcch = FALSE;
12697 retxAlloc->forMsg3 = TRUE;
12698 retxAlloc->raCb = alloc->raCb;
12699 retxAlloc->hqProc = alloc->hqProc;
12700 rgSCHUhmRetx(retxAlloc->hqProc);
12707 * @brief Uplink Scheduling Handler.
12711 * Function: rgSCHCmnUlAlloc
12712 * Purpose: This function Handles Uplink Scheduling.
12714 * Invoked by: Common Scheduler
12716 * @param[in] RgSchCellCb *cell
12719 /* ccpu00132653- The definition of this function made common for TDD and FDD*/
/*
 * rgSCHCmnUlAlloc: top-level uplink scheduling handler for one TTI
 * (common to TDD and FDD per ccpu00132653).
 *
 * Sequence: init the per-TTI UL alloc info and pick the UL subframe;
 * (SPS) do SPS scheduling; if the subframe is empty, re-seed the hole DB
 * from the current CFI's subband count and reset 5GTF beam bookkeeping;
 * perform adaptive retransmissions first, release HARQ procs whose
 * adaptive retx failed, run the scheduler-specific UE selection, allocate
 * UL RBs, apply PUSCH group power control, kick DRX inactivity handling,
 * and finalize.
 *
 * @param[in] cell  cell control block
 *
 * NOTE(review): several #ifdef lines, braces and the declarations of
 * 'hole'/'idx' are elided from this view; comments describe visible code.
 */
12720 static Void rgSCHCmnUlAlloc(RgSchCellCb *cell)
12722 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12723 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
12724 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12725 RgSchCmnUlRbAllocInfo allocInfo;
12726 RgSchCmnUlRbAllocInfo *allocInfoRef = &allocInfo;
12733 /* Initializing RgSchCmnUlRbAllocInfo structure */
12734 rgSCHCmnInitUlRbAllocInfo(allocInfoRef);
12736 /* Get Uplink Subframe */
12737 allocInfoRef->sf = &cellUl->ulSfArr[cellUl->schdIdx];
12739 /* initializing the UL PRB count */
12740 allocInfoRef->sf->totPrb = 0;
12744 rgSCHCmnSpsUlTti(cell, allocInfoRef);
/* Empty subframe: re-seed the hole DB for the current CFI's bandwidth. */
12747 if(*allocInfoRef->sf->allocCountRef == 0)
12751 if ((hole = rgSCHUtlUlHoleFirst(allocInfoRef->sf)) != NULLP)
12753 /* Sanity check of holeDb */
12754 if (allocInfoRef->sf->holeDb->count == 1 && hole->start == 0)
12756 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
12757 /* Re-Initialize available subbands because of CFI change*/
12758 allocInfoRef->sf->availSubbands = cell->dynCfiCb.\
12759 bwInfo[cellDl->currCfi].numSb;
12760 /*Currently initializing 5gtf ulsf specific initialization here.
12761 need to do at proper place */
12763 allocInfoRef->sf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
12764 allocInfoRef->sf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
12765 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
12767 allocInfoRef->sf->sfBeamInfo[idx].totVrbgAllocated = 0;
12768 allocInfoRef->sf->sfBeamInfo[idx].totVrbgRequired = 0;
12769 allocInfoRef->sf->sfBeamInfo[idx].vrbgStart = 0;
12775 DU_LOG("\nERROR --> SCH : holeDb sanity check failed");
12780 /* Fix: Adaptive re-transmissions prioritised over other transmissions */
12781 /* perform adaptive retransmissions */
12782 rgSCHCmnUlSfReTxAllocs(cell, allocInfoRef->sf);
12786 /* Fix: syed Adaptive Msg3 Retx crash. Release all
12787 Harq processes for which adap Retx failed, to avoid
12788 blocking. This step should be done before New TX
12789 scheduling to make hqProc available. Right now we
12790 dont check if proc is in adap Retx list for considering
12791 it to be available. But now with this release that
12792 functionality would be correct. */
12794 rgSCHCmnUlSfRlsRetxProcs(cell, allocInfoRef->sf);
12797 /* Specific UL scheduler to perform UE scheduling */
12798 cellSch->apisUl->rgSCHUlSched(cell, allocInfoRef);
12800 /* Call UL RB allocator module */
12801 rgSCHCmnAllocUlRb(cell, allocInfoRef);
12803 /* Do group power control for PUSCH */
12804 rgSCHCmnGrpPwrCntrlPusch(cell, allocInfoRef->sf);
12806 cell->sc.apis->rgSCHDrxStrtInActvTmrInUl(cell);
12808 rgSCHCmnUlAllocFnlz(cell, allocInfoRef);
/* Periodic reset of 5GTF debug counters every 5000 TTIs. */
12809 if(5000 == g5gtfTtiCnt)
12811 ul5gtfsidDlAlreadyMarkUl = 0;
12812 ul5gtfsidDlSchdPass = 0;
12813 ul5gtfsidUlMarkUl = 0;
12814 ul5gtfTotSchdCnt = 0;
12822 * @brief send Subframe Allocations.
12826 * Function: rgSCHCmnSndCnsldtInfo
12827 * Purpose: Send the scheduled
12828 * allocations to MAC for StaInd generation to Higher layers and
12829 * for MUXing. PST's RgInfSfAlloc to MAC instance.
12831 * Invoked by: Common Scheduler
12833 * @param[in] RgSchCellCb *cell
/* Sends the consolidated per-subframe allocation (sfAllocArr[crntSfIdx]) to the
 * MAC instance via RgSchMacSfAlloc, then advances crntSfIdx. The PST is built
 * per cell/MAC instance before posting. */
12836 Void rgSCHCmnSndCnsldtInfo(RgSchCellCb *cell)
12838 RgInfSfAlloc *subfrmAlloc;
/* NOTE(review): a `Pst pst;` declaration from the original file is not visible
 * here (pst is used below) — lines appear to have been elided; confirm. */
12840 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Pick the allocation record for the current subframe index. */
12843 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
12845 /* Send the allocations to MAC for MUXing */
12846 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
12847 subfrmAlloc->cellId = cell->cellId;
12848 /* Populate the List of UEs needing PDB-based Flow control */
12849 cellSch->apisDl->rgSCHDlFillFlwCtrlInfo(cell, subfrmAlloc);
/* Post to MAC only if there is at least one allocation of any kind (RAR,
 * common channel, DL/UL UE, flow control). NOTE(review): the two near-identical
 * condition lists below (one including ulUeInfo, one without) are almost
 * certainly #ifdef EMTC_ENABLE/#else variants whose preprocessor lines were
 * lost in extraction — confirm against the upstream file. */
12851 if((subfrmAlloc->rarInfo.numRaRntis) ||
12853 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
12854 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
12855 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
12857 (subfrmAlloc->ueInfo.numUes) ||
12858 (subfrmAlloc->cmnLcInfo.bitMask) ||
12859 (subfrmAlloc->ulUeInfo.numUes) ||
12860 (subfrmAlloc->flowCntrlInfo.numUes))
12862 if((subfrmAlloc->rarInfo.numRaRntis) ||
12864 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
12865 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
12866 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
12868 (subfrmAlloc->ueInfo.numUes) ||
12869 (subfrmAlloc->cmnLcInfo.bitMask) ||
12870 (subfrmAlloc->flowCntrlInfo.numUes))
12873 RgSchMacSfAlloc(&pst, subfrmAlloc);
/* Advance the subframe-allocation ring index. NOTE(review): the two modulo
 * bases below (RGSCH_NUM_SUB_FRAMES vs RGSCH_SF_ALLOC_SIZE) look like collapsed
 * LTE_TDD/#else alternatives — only one should be active in a real build. */
12876 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_NUM_SUB_FRAMES;
12878 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_SF_ALLOC_SIZE;
12884 * @brief Consolidate Subframe Allocations.
12888 * Function: rgSCHCmnCnsldtSfAlloc
12889 * Purpose: Consolidate Subframe Allocations.
12891 * Invoked by: Common Scheduler
12893 * @param[in] RgSchCellCb *cell
/* Consolidates the allocations of the DL subframe at (crntTime + DL delta)
 * into sfAllocArr[crntSfIdx] (RAR info, UE info), then hands the DRX
 * inactivity lists produced during the fill to the UL/DL schedulers and
 * (re)starts DL DRX inactivity timers. */
12896 Void rgSCHCmnCnsldtSfAlloc(RgSchCellCb *cell)
12898 RgInfSfAlloc *subfrmAlloc;
12899 CmLteTimingInfo frm;
/* NOTE(review): a `RgSchDlSf *dlSf;` declaration is used below but not visible
 * here — source lines appear elided; confirm. */
12901 CmLListCp dlDrxInactvTmrLst;
12902 CmLListCp dlInActvLst;
12903 CmLListCp ulInActvLst;
12904 RgSchCmnCell *cellSch = NULLP;
/* Empty the three scratch lists before the fill routine populates them. */
12907 cmLListInit(&dlDrxInactvTmrLst);
12908 cmLListInit(&dlInActvLst);
12909 cmLListInit(&ulInActvLst);
12911 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
12913 /* Get Downlink Subframe */
12914 frm = cell->crntTime;
12915 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
12916 dlSf = rgSCHUtlSubFrmGet(cell, frm);
12918 /* Fill the allocation Info */
12919 rgSCHUtlFillRgInfRarInfo(dlSf, subfrmAlloc, cell);
/* Fill per-UE info; side effect: the three inactivity lists are populated. */
12922 rgSCHUtlFillRgInfUeInfo(dlSf, cell, &dlDrxInactvTmrLst,
12923 &dlInActvLst, &ulInActvLst);
12924 #ifdef RG_PFS_STATS
/* PFS statistics: accumulate PRBs assigned in this DL subframe.
 * NOTE(review): matching #endif not visible in this extract. */
12925 cell->totalPrb += dlSf->bwAssigned;
12927 /* Mark the following Ues inactive for UL*/
12928 cellSch = RG_SCH_CMN_GET_CELL(cell);
12930 /* Calling Scheduler specific function with DRX inactive UE list*/
12931 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInActvLst);
12932 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInActvLst);
12935 /*re/start DRX inactivity timer for the UEs*/
12936 (Void)rgSCHDrxStrtInActvTmr(cell,&dlDrxInactvTmrLst,RG_SCH_DRX_DL);
12942 * @brief Initialize the DL Allocation Information Structure.
12946 * Function: rgSCHCmnInitDlRbAllocInfo
12947 * Purpose: Initialize the DL Allocation Information Structure.
12949 * Invoked by: Common Scheduler
12951 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* Resets a RgSchCmnDlRbAllocInfo to a clean per-TTI state: zeroes the common
 * channel allocations (PCCH/BCCH/RA response) and re-initializes every
 * scheduling candidate list (Msg4, CCCH SDU, dedicated HARQ, SPS, errInd). */
12954 static Void rgSCHCmnInitDlRbAllocInfo(RgSchCmnDlRbAllocInfo *allocInfo)
12956 memset(&allocInfo->pcchAlloc, 0, sizeof(RgSchDlRbAlloc));
12957 memset(&allocInfo->bcchAlloc, 0, sizeof(RgSchDlRbAlloc));
12958 memset(allocInfo->raRspAlloc, 0, RG_SCH_CMN_MAX_CMN_PDCCH*sizeof(RgSchDlRbAlloc));
/* Msg4 allocation context: subframe pointer plus tx/retx candidate lists and
 * their scheduled / non-scheduled result lists. */
12960 allocInfo->msg4Alloc.msg4DlSf = NULLP;
12961 cmLListInit(&allocInfo->msg4Alloc.msg4TxLst);
12962 cmLListInit(&allocInfo->msg4Alloc.msg4RetxLst);
12963 cmLListInit(&allocInfo->msg4Alloc.schdMsg4TxLst);
12964 cmLListInit(&allocInfo->msg4Alloc.schdMsg4RetxLst);
12965 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4TxLst);
12966 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4RetxLst);
/* CCCH SDU allocation context, same tx/retx list structure as Msg4. */
12968 allocInfo->ccchSduAlloc.ccchSduDlSf = NULLP;
12969 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduTxLst);
12970 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduRetxLst);
12971 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduTxLst);
12972 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduRetxLst);
12973 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst);
12974 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst);
/* Dedicated (per-UE) HARQ process lists. */
12977 allocInfo->dedAlloc.dedDlSf = NULLP;
12978 cmLListInit(&allocInfo->dedAlloc.txHqPLst);
12979 cmLListInit(&allocInfo->dedAlloc.retxHqPLst);
12980 cmLListInit(&allocInfo->dedAlloc.schdTxHqPLst);
12981 cmLListInit(&allocInfo->dedAlloc.schdRetxHqPLst);
12982 cmLListInit(&allocInfo->dedAlloc.nonSchdTxHqPLst);
12983 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxHqPLst);
/* Combined tx+retx (both TBs) lists. */
12985 cmLListInit(&allocInfo->dedAlloc.txRetxHqPLst);
12986 cmLListInit(&allocInfo->dedAlloc.schdTxRetxHqPLst);
12987 cmLListInit(&allocInfo->dedAlloc.nonSchdTxRetxHqPLst);
/* SPS HARQ process lists. NOTE(review): these and the errInd lists below are
 * normally compiled under feature #ifdefs (LTEMAC_SPS / EMTC) whose directive
 * lines are not visible in this extract — confirm before editing. */
12989 cmLListInit(&allocInfo->dedAlloc.txSpsHqPLst);
12990 cmLListInit(&allocInfo->dedAlloc.retxSpsHqPLst);
12991 cmLListInit(&allocInfo->dedAlloc.schdTxSpsHqPLst);
12992 cmLListInit(&allocInfo->dedAlloc.schdRetxSpsHqPLst);
12993 cmLListInit(&allocInfo->dedAlloc.nonSchdTxSpsHqPLst);
12994 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxSpsHqPLst);
/* LAA-specific initialization hook. */
12998 rgSCHLaaCmnInitDlRbAllocInfo (allocInfo);
/* Error-indication retransmission lists. */
13001 cmLListInit(&allocInfo->dedAlloc.errIndTxHqPLst);
13002 cmLListInit(&allocInfo->dedAlloc.schdErrIndTxHqPLst);
13003 cmLListInit(&allocInfo->dedAlloc.nonSchdErrIndTxHqPLst);
13008 * @brief Initialize the UL Allocation Information Structure.
13012 * Function: rgSCHCmnInitUlRbAllocInfo
13013 * Purpose: Initialize the UL Allocation Information Structure.
13015 * Invoked by: Common Scheduler
13017 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13020 Void rgSCHCmnInitUlRbAllocInfo(RgSchCmnUlRbAllocInfo *allocInfo)
13022 allocInfo->sf = NULLP;
13023 cmLListInit(&allocInfo->contResLst);
13024 cmLListInit(&allocInfo->schdContResLst);
13025 cmLListInit(&allocInfo->nonSchdContResLst);
13026 cmLListInit(&allocInfo->ueLst);
13027 cmLListInit(&allocInfo->schdUeLst);
13028 cmLListInit(&allocInfo->nonSchdUeLst);
13034 * @brief Scheduling for PUCCH group power control.
13038 * Function: rgSCHCmnGrpPwrCntrlPucch
13039 * Purpose: This function does group power control for PUCCH
13040 * corresponding to the subframe for which DL UE allocations
13043 * Invoked by: Common Scheduler
13045 * @param[in] RgSchCellCb *cell
13048 static Void rgSCHCmnGrpPwrCntrlPucch(RgSchCellCb *cell,RgSchDlSf *dlSf)
13050 rgSCHPwrGrpCntrlPucch(cell, dlSf);
13055 * @brief Scheduling for PUSCH group power control.
13059 * Function: rgSCHCmnGrpPwrCntrlPusch
13060 * Purpose: This function does group power control, for
13061 * the subframe for which UL allocation has (just) happened.
13063 * Invoked by: Common Scheduler
13065 * @param[in] RgSchCellCb *cell
13066 * @param[in] RgSchUlSf *ulSf
/* Group power control for PUSCH: derives the DL subframe paired with the UL
 * subframe just scheduled (via the DL-control delta from current time) and
 * invokes the power-control module with both. */
13069 static Void rgSCHCmnGrpPwrCntrlPusch(RgSchCellCb *cell,RgSchUlSf *ulSf)
13071 /*removed unused variable *cellSch*/
13072 CmLteTimingInfo frm;
/* NOTE(review): a `RgSchDlSf *dlSf;` declaration is used below but not visible
 * in this extract — source lines appear elided. */
13076 /* Got to pass DL SF corresponding to UL SF, so get that first.
13077 * There is no easy way of getting dlSf by having the RgSchUlSf*,
13078 * so use the UL delta from current time to get the DL SF. */
13079 frm = cell->crntTime;
/* Advance current time by the DL control delta; the eMTC branch uses the
 * eMTC-aware macro. NOTE(review): this if/else is likely wrapped in
 * #ifdef EMTC_ENABLE in the full source. */
13082 if(cell->emtcEnable == TRUE)
13084 RGSCH_INCR_SUB_FRAME_EMTC(frm, TFU_DLCNTRL_DLDELTA);
13089 RGSCH_INCR_SUB_FRAME(frm, TFU_DLCNTRL_DLDELTA);
13091 /* Del filling of dl.time */
13092 dlSf = rgSCHUtlSubFrmGet(cell, frm);
/* Delegate the actual group power control to the power module. */
13094 rgSCHPwrGrpCntrlPusch(cell, dlSf, ulSf);
13099 /* Fix: syed align multiple UEs to refresh at same time */
13100 /***********************************************************
13102 * Func : rgSCHCmnApplyUeRefresh
13104 * Desc : Apply UE refresh in CMN and Specific
13105 * schedulers. Data rates and corresponding
13106 * scratchpad variables are updated.
13114 **********************************************************/
/* Periodic (refresh-timer) recomputation of a UE's UL token-bucket state:
 * restores effective AMBR from configuration, recomputes per-LCG effective
 * GBR/MBR and buffer status, caps non-GBR BS by AMBR, rebuilds the aggregate
 * effective BSR, then notifies the UL/DL specific schedulers. */
13115 static S16 rgSCHCmnApplyUeRefresh(RgSchCellCb *cell,RgSchUeCb *ue)
13117 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13118 uint32_t effGbrBsr = 0;
13119 uint32_t effNonGbrBsr = 0;
13123 /* Reset the refresh cycle variable: effective AMBR back to configured AMBR */
13124 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* LCG 0 is skipped: it carries SRB traffic and is not AMBR/GBR governed. */
13126 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
13128 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
13130 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
13132 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* GBR LCG: refill effective GBR/delta-MBR and cap BS by the refilled rate. */
13134 cmnLcg->effGbr = cmnLcg->cfgdGbr;
13135 cmnLcg->effDeltaMbr = cmnLcg->deltaMbr;
13136 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
13137 /* Considering GBR LCG will be prioritised by UE */
13138 effGbrBsr += cmnLcg->bs;
13139 }/* Else no remaining BS so nonLcg0 will be updated when BSR will be received */
/* Non-GBR LCG branch: BS is capped by the (refreshed) effective AMBR.
 * NOTE(review): the else/brace lines of this branch are not visible in this
 * extract — structure elided by extraction. */
13142 effNonGbrBsr += cmnLcg->reportedBs;
13143 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
/* Aggregate caps and effective BSR reconstruction. */
13147 effNonGbrBsr = RGSCH_MIN(effNonGbrBsr,ue->ul.effAmbr);
13148 ue->ul.nonGbrLcgBs = effNonGbrBsr;
13150 ue->ul.nonLcg0Bs = effGbrBsr + effNonGbrBsr;
/* effBsr = non-LCG0 aggregate + LCG0's own buffer status. */
13151 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
13152 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
13155 /* call scheduler specific event handlers
13156 * for refresh timer expiry */
13157 cellSch->apisUl->rgSCHUlUeRefresh(cell, ue);
13158 cellSch->apisDl->rgSCHDlUeRefresh(cell, ue);
13163 /***********************************************************
13165 * Func : rgSCHCmnTmrExpiry
13167 * Desc : Adds an UE to refresh queue, so that the UE is
13168 * periodically triggered to refresh it's GBR and
13177 **********************************************************/
/* Refresh-timer expiry callback: validates the event (debug builds only),
 * applies the UE refresh, then re-arms the UE on the refresh queue so the
 * refresh repeats every RG_SCH_CMN_REFRESH_TIME. */
13178 static S16 rgSCHCmnTmrExpiry
13180 PTR cb, /* Pointer to timer control block */
13181 S16 tmrEvnt /* Timer Event */
13184 RgSchUeCb *ue = (RgSchUeCb *)cb;
13185 RgSchCellCb *cell = ue->cell;
13186 #if (ERRCLASS & ERRCLS_DEBUG)
13190 #if (ERRCLASS & ERRCLS_DEBUG)
/* Debug-only sanity check: the only event expected here is UE_REFRESH.
 * NOTE(review): the matching #endif lines are not visible in this extract. */
13191 if (tmrEvnt != RG_SCH_CMN_EVNT_UE_REFRESH)
13193 DU_LOG("\nERROR --> SCH : rgSCHCmnTmrExpiry(): Invalid "
13194 "timer event CRNTI:%d",ue->ueId);
/* Apply the refresh and re-queue the UE for the next cycle. */
13201 rgSCHCmnApplyUeRefresh(cell, ue);
13203 rgSCHCmnAddUeToRefreshQ(cell, ue, RG_SCH_CMN_REFRESH_TIME);
13208 /***********************************************************
13210 * Func : rgSCHCmnTmrProc
13212 * Desc : Timer entry point per cell. Timer
13213 * processing is triggered at every frame boundary
13222 **********************************************************/
/* Per-cell timer tick: at every frame boundary resets periodic counters
 * (HCSG PRB counters, overload-control throughput EWMA) and then drives the
 * common scheduler timer queue (which fires rgSCHCmnTmrExpiry). */
13223 static S16 rgSCHCmnTmrProc(RgSchCellCb *cell)
13225 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
13226 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
13227 /* Moving the assignment of scheduler pointer
13228 to available scope for optimization */
/* Frame boundary: slot 0 of each frame. */
13230 if ((cell->crntTime.slot % RGSCH_NUM_SUB_FRAMES_5G) == 0)
13232 /* Reset the counters periodically */
13233 if ((cell->crntTime.sfn % RG_SCH_CMN_CSG_REFRESH_TIME) == 0)
13235 RG_SCH_RESET_HCSG_DL_PRB_CNTR(cmnDlCell);
13236 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cmnUlCell);
13238 if ((cell->crntTime.sfn % RG_SCH_CMN_OVRLDCTRL_REFRESH_TIME) == 0)
/* Exponentially-weighted moving average (95/5) of cell throughput, used by
 * the CPU overload control below. */
13241 cell->measurements.ulTpt = ((cell->measurements.ulTpt * 95) + ( cell->measurements.ulBytesCnt * 5))/100;
13242 cell->measurements.dlTpt = ((cell->measurements.dlTpt * 95) + ( cell->measurements.dlBytesCnt * 5))/100;
13244 rgSCHUtlCpuOvrLdAdjItbsCap(cell);
13245 /* reset cell level tpt measurements for next cycle */
13246 cell->measurements.ulBytesCnt = 0;
13247 cell->measurements.dlBytesCnt = 0;
13249 /* Comparing with Zero instead of % is being done for efficiency.
13250 * If Timer resolution changes then accordingly update the
13251 * macro RG_SCH_CMN_REFRESH_TIMERES */
/* Drive the common-scheduler timer queue; expired entries invoke
 * rgSCHCmnTmrExpiry. */
13252 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
13253 cmPrcTmr(&sched->tmrTqCp, sched->tmrTq, (PFV)rgSCHCmnTmrExpiry);
13260 /***********************************************************
13262 * Func : rgSchCmnUpdCfiVal
13264 * Desc : Update the CFI value if CFI switch was done
13272 **********************************************************/
/* Applies a pending dynamic-CFI switch to the DL subframe at
 * (crntTime + delta): when the pre-computed switch subframe index is reached,
 * commits newCfi as currCfi, recomputes nCce for the new CFI and starts the
 * switch-over window during which UL transmissions are blindly ACKed. */
13273 static Void rgSchCmnUpdCfiVal(RgSchCellCb *cell,uint8_t delta)
13276 CmLteTimingInfo pdsch;
13277 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
13283 uint8_t splSfCfi = 0;
13287 pdsch = cell->crntTime;
13288 RGSCH_INCR_SUB_FRAME(pdsch, delta);
13289 dlSf = rgSCHUtlSubFrmGet(cell, pdsch);
13290 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
13291 *change happens in that SF then UL PDCCH allocation happens with old CFI
13292 *but CFI in control Req goes updated one since it was stored in the CELL
/* Keep the per-subframe CFI coherent with the cell-level current CFI. */
13294 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* 0xFF means "no CFI switch pending". */
13295 if(cell->dynCfiCb.pdcchSfIdx != 0xFF)
/* NOTE(review): the two dlIdx computations below are almost certainly
 * LTE_TDD/#else alternatives with the preprocessor lines elided. */
13298 dlIdx = rgSCHUtlGetDlSfIdx(cell, &pdsch);
13300 dlIdx = (((pdsch.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (pdsch.slot % RGSCH_NUM_SUB_FRAMES));
13301 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
13303 /* If current downlink subframe index is same as pdcch SF index,
13304 * perform the switching of CFI in this subframe */
13305 if(cell->dynCfiCb.pdcchSfIdx == dlIdx)
/* Commit the new CFI and clear the pending-switch marker. */
13307 cellCmnDl->currCfi = cellCmnDl->newCfi;
13308 cell->dynCfiCb.pdcchSfIdx = 0xFF;
13310 /* Updating the nCce value based on the new CFI */
/* TDD path: recompute nCce per DL subframe, using the special-subframe CFI
 * for RG_SCH_SPL_SF_DATA subframes. NOTE(review): TDD/FDD #ifdef lines are
 * not visible in this extract. */
13312 splSfCfi = cellCmnDl->newCfi;
13313 for(idx = 0; idx < cell->numDlSubfrms; idx++)
13315 tddSf = cell->subFrms[idx];
13317 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][tddSf->sfNum];
13319 if(tddSf->sfType == RG_SCH_SPL_SF_DATA)
13321 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
13323 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
13327 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][cellCmnDl->currCfi];
13330 /* Setting the switch over window length based on config index.
13331 * During switch over period all the UL transmissions are Acked
/* TDD: window length depends on UL/DL config index. */
13333 cell->dynCfiCb.switchOvrWinLen =
13334 rgSchCfiSwitchOvrWinLen[cell->ulDlCfgIdx];
/* FDD path: single nCce for the cell (mPhich index 0). */
13336 cell->nCce = cell->dynCfiCb.cfi2NCceTbl[0][cellCmnDl->currCfi];
13337 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
13338 *change happens in that SF then UL PDCCH allocation happens with old CFI
13339 *but CFI in control Req goes updated one since it was stored in the CELL
13341 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* FDD: fixed window length (table index 7). */
13342 cell->dynCfiCb.switchOvrWinLen = rgSchCfiSwitchOvrWinLen[7];
13350 /***********************************************************
13352 * Func : rgSchCmnUpdtPdcchSfIdx
13354 * Desc : Update the switch over window length
13362 **********************************************************/
/* Prepares a CFI switch: resets the dynamic-CFI monitoring counters and
 * failure samples, marks switch-over in progress, and computes the DL
 * subframe index at which the new CFI takes effect (pdcchSfIdx).
 * NOTE(review): the two signatures below are the LTE_TDD (with sfNum) and
 * FDD variants; the #ifdef/#else/#endif lines were lost in extraction. */
13364 static Void rgSchCmnUpdtPdcchSfIdx(RgSchCellCb *cell,uint8_t dlIdx,uint8_t sfNum)
13366 static Void rgSchCmnUpdtPdcchSfIdx(RgSchCellCb *cell,uint8_t dlIdx)
13372 /* Resetting the parameters on CFI switching */
13373 cell->dynCfiCb.cceUsed = 0;
13374 cell->dynCfiCb.lowCceCnt = 0;
13376 cell->dynCfiCb.cceFailSum = 0;
13377 cell->dynCfiCb.cceFailCnt = 0;
13378 cell->dynCfiCb.prevCceFailIdx = 0;
13380 cell->dynCfiCb.switchOvrInProgress = TRUE;
/* Clear the CCE-failure sample window. */
13382 for(idx = 0; idx < cell->dynCfiCb.numFailSamples; idx++)
13384 cell->dynCfiCb.cceFailSamples[idx] = 0;
13387 cell->dynCfiCb.ttiCnt = 0;
/* Count CFI switches (diagnostics). */
13389 cell->dynCfiCb.cfiSwitches++;
13390 cfiSwitchCnt = cell->dynCfiCb.cfiSwitches;
/* TDD: offset by the PDCCH SF increment table; FDD: fixed apply delta.
 * NOTE(review): collapsed #ifdef LTE_TDD/#else pair. */
13393 cell->dynCfiCb.pdcchSfIdx = (dlIdx +
13394 rgSchTddPdcchSfIncTbl[cell->ulDlCfgIdx][sfNum]) % cell->numDlSubfrms;
13396 cell->dynCfiCb.pdcchSfIdx = (dlIdx + RG_SCH_CFI_APPLY_DELTA) % \
13397 RGSCH_NUM_DL_slotS;
13401 /***********************************************************
13403 * Func : rgSchCmnUpdCfiDb
13405 * Desc : Update the counters related to dynamic
13406 * CFI feature in cellCb.
13414 **********************************************************/
/* Dynamic-CFI bookkeeping, run per TTI for the DL subframe at
 * (crntTime + delta). When dynamic CFI is disabled, steers currCfi toward the
 * configured CFI. When enabled, evaluates the step-up criterion (CCE
 * allocation failures over a sliding sample window) and the step-down
 * criterion (consistently low CCE usage vs. the next lower CFI) over a
 * RGSCH_CFI_TTI_MON_INTRVL monitoring interval, then schedules the switch
 * via rgSchCmnUpdtPdcchSfIdx when newCfi differs from currCfi. */
13415 Void rgSchCmnUpdCfiDb(RgSchCellCb *cell,uint8_t delta)
13417 CmLteTimingInfo frm;
13423 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13424 uint8_t nCceLowerCfi = 0;
13426 uint8_t cceFailIdx;
13432 /* Get Downlink Subframe */
13433 frm = cell->crntTime;
13434 RGSCH_INCR_SUB_FRAME(frm, delta);
/* NOTE(review): the two dlIdx/dlSf derivations below are TDD/FDD variants;
 * preprocessor lines are not visible in this extract. */
13437 dlIdx = rgSCHUtlGetDlSfIdx(cell, &frm);
13438 dlSf = cell->subFrms[dlIdx];
/* TDD: does this DL subframe carry HI/DCI0 for an UL subframe? */
13439 isHiDci0 = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][dlSf->sfNum];
13441 /* Changing the indexing
13442 so that proper subframe is selected */
13443 dlIdx = (((frm.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (frm.slot % RGSCH_NUM_SUB_FRAMES));
13444 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
13445 dlSf = cell->subFrms[dlIdx];
13448 currCfi = cellSch->dl.currCfi;
/* No monitoring while a previous switch-over window is still active. */
13450 if(!cell->dynCfiCb.switchOvrInProgress)
13453 if(!cell->dynCfiCb.isDynCfiEnb)
/* Static CFI mode: converge step-by-step toward the configured CFI. */
13455 if(currCfi != cellSch->cfiCfg.cfi)
13457 if(currCfi < cellSch->cfiCfg.cfi)
13459 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
13460 cfiIncr = cell->dynCfiCb.cfiIncr;
13464 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
13465 cfiDecr = cell->dynCfiCb.cfiDecr;
13472 /* Setting ttiMod to 0 for ttiCnt > 1000 in case if this
13473 * function was not called in UL subframe*/
13474 if(cell->dynCfiCb.ttiCnt > RGSCH_CFI_TTI_MON_INTRVL)
13481 ttiMod = cell->dynCfiCb.ttiCnt % RGSCH_CFI_TTI_MON_INTRVL;
13484 dlSf->dlUlBothCmplt++;
/* Evaluate only once both DL and UL scheduling for this SF are complete
 * (or the SF carries no HI/DCI0, so UL completion is irrelevant). */
13486 if((dlSf->dlUlBothCmplt == 2) || (!isHiDci0))
13488 if(dlSf->dlUlBothCmplt == 2)
13491 /********************STEP UP CRITERIA********************/
13492 /* Updating the CCE failure count parameter */
13493 cell->dynCfiCb.cceFailCnt += dlSf->isCceFailure;
13494 cell->dynCfiCb.cceFailSum += dlSf->isCceFailure;
13496 /* Check if cfi step up can be performed */
13497 if(currCfi < cell->dynCfiCb.maxCfi)
13499 if(cell->dynCfiCb.cceFailSum >= cell->dynCfiCb.cfiStepUpTtiCnt)
13501 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
13502 cfiIncr = cell->dynCfiCb.cfiIncr;
13507 /********************STEP DOWN CRITERIA********************/
13509 /* Updating the no. of CCE used in this dl subframe */
13510 cell->dynCfiCb.cceUsed += dlSf->cceCnt;
13512 if(currCfi > RGSCH_MIN_CFI_VAL)
13514 /* calculating the number of CCE for next lower CFI */
/* NOTE(review): TDD (mPhich-indexed) vs FDD (index 0) variants collapsed. */
13516 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][dlSf->sfNum];
13517 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[mPhich][currCfi-1];
13519 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[0][currCfi-1];
13521 if(dlSf->cceCnt < nCceLowerCfi)
13523 /* Updating the count of TTIs in which no. of CCEs
13524 * used were less than the CCEs of next lower CFI */
13525 cell->dynCfiCb.lowCceCnt++;
/* Step down only if: no CCE failures in the window, enough low-usage TTIs,
 * and total CCE usage below RGSCH_CFI_CCE_PERCNTG% of the lower-CFI budget. */
13530 totalCce = (nCceLowerCfi * cell->dynCfiCb.cfiStepDownTtiCnt *
13531 RGSCH_CFI_CCE_PERCNTG)/100;
13533 if((!cell->dynCfiCb.cceFailSum) &&
13534 (cell->dynCfiCb.lowCceCnt >=
13535 cell->dynCfiCb.cfiStepDownTtiCnt) &&
13536 (cell->dynCfiCb.cceUsed < totalCce))
13538 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
13539 cfiDecr = cell->dynCfiCb.cfiDecr;
/* Sliding-window maintenance of the CCE-failure samples. */
13545 cceFailIdx = ttiMod/cell->dynCfiCb.failSamplePrd;
13547 if(cceFailIdx != cell->dynCfiCb.prevCceFailIdx)
13549 /* New sample period has started. Subtract the old count
13550 * from the new sample period */
13551 cell->dynCfiCb.cceFailSum -= cell->dynCfiCb.cceFailSamples[cceFailIdx];
13553 /* Store the previous sample period data */
13554 cell->dynCfiCb.cceFailSamples[cell->dynCfiCb.prevCceFailIdx]
13555 = cell->dynCfiCb.cceFailCnt;
13557 cell->dynCfiCb.prevCceFailIdx = cceFailIdx;
13559 /* Resetting the CCE failure count as zero for next sample period */
13560 cell->dynCfiCb.cceFailCnt = 0;
13565 /* Resetting the parameters after the Monitoring Interval expired */
13566 cell->dynCfiCb.cceUsed = 0;
13567 cell->dynCfiCb.lowCceCnt = 0;
13568 cell->dynCfiCb.ttiCnt = 0;
13571 cell->dynCfiCb.ttiCnt++;
/* A CFI change was decided above: compute the subframe where it applies. */
13575 if(cellSch->dl.newCfi != cellSch->dl.currCfi)
/* NOTE(review): TDD (with sfNum) vs FDD call variants collapsed. */
13578 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, dlSf->sfNum);
13580 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx);
13587 * @brief Dl Scheduler for Broadcast and Common channel scheduling.
13591 * Function: rgSCHCmnDlCommonChSch
13592 * Purpose: This function schedules DL Common channels for LTE.
13593 * Invoked by TTI processing in TOM. Scheduling is done for
13594 * BCCH, PCCH, Msg4, CCCH SDU, RAR in that order
13596 * Invoked by: TOM (TTI processing)
13598 * @param[in] RgSchCellCb *cell
/* Per-TTI DL common-channel scheduling entry point: PDB tracking tick, CFI
 * update, DL inactive-UE handling, refresh-timer tick, then BCCH/PCCH and
 * CCCH/RAR allocation gated by the cell's scheduling enable flags. */
13601 Void rgSCHCmnDlCommonChSch(RgSchCellCb *cell)
13603 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13606 cellSch->apisDl->rgSCHDlTickForPdbTrkng(cell);
/* Apply any pending CFI switch for the DL-delta subframe. */
13607 rgSchCmnUpdCfiVal(cell, RG_SCH_CMN_DL_DELTA);
13609 /* handle Inactive UEs for DL */
13610 rgSCHCmnHdlDlInactUes(cell);
13612 /* Send a Tick to Refresh Timer */
13613 rgSCHCmnTmrProc(cell);
/* BCCH/PCCH only when DL data is allowed and SI scheduling is not stopped. */
13615 if (cell->isDlDataAllwd && (cell->stopSiSch == FALSE))
13617 rgSCHCmnInitRbAlloc(cell);
13618 /* Perform DL scheduling of BCCH, PCCH */
13619 rgSCHCmnDlBcchPcchAlloc(cell);
/* Else branch: still consume the SI window countdown.
 * NOTE(review): surrounding braces elided in this extract. */
13623 if(cell->siCb.inWindow != 0)
13625 cell->siCb.inWindow--;
/* CCCH/RAR only when DL data is allowed and DL scheduling is not stopped. */
13628 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
13630 rgSCHCmnDlCcchRarAlloc(cell);
13636 * @brief Scheduler invocation per TTI.
13640 * Function: rgSCHCmnUlSch
13641 * Purpose: This function implements UL scheduler alone. This is to
13642 * be able to perform scheduling with more flexibility.
13644 * Invoked by: TOM (TTI processing)
13646 * @param[in] RgSchCellCb *cell
/* Per-TTI UL scheduling entry point: skipped for LAA SCells; when a valid UL
 * schedule index exists, updates CFI, handles inactive UEs, performs UL
 * allocation and dynamic-CFI bookkeeping; also runs down the CFI switch-over
 * window and applies any pending dynamic-CFI reconfiguration at its expiry. */
13649 Void rgSCHCmnUlSch(RgSchCellCb *cell)
13651 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* LAA SCells perform no UL scheduling here.
 * NOTE(review): early-return braces elided in this extract. */
13656 if(TRUE == rgSCHLaaSCellEnabled(cell))
13662 if(cellSch->ul.schdIdx != RGSCH_INVALID_INFO)
13664 rgSchCmnUpdCfiVal(cell, TFU_ULCNTRL_DLDELTA);
13666 /* Handle Inactive UEs for UL */
13667 rgSCHCmnHdlUlInactUes(cell);
13668 /* Perform UL Scheduling EVERY TTI */
13669 rgSCHCmnUlAlloc(cell);
13671 /* Calling function to update CFI parameters*/
13672 rgSchCmnUpdCfiDb(cell, TFU_ULCNTRL_DLDELTA);
13674 if(cell->dynCfiCb.switchOvrWinLen > 0)
13676 /* Decrementing the switchover window length */
13677 cell->dynCfiCb.switchOvrWinLen--;
13679 if(!cell->dynCfiCb.switchOvrWinLen)
/* Window elapsed: apply a deferred dynamic-CFI enable/disable toggle. */
13681 if(cell->dynCfiCb.dynCfiRecfgPend)
13683 /* Toggling the Dynamic CFI enabling */
13684 cell->dynCfiCb.isDynCfiEnb ^= 1;
13685 rgSCHDynCfiReCfg(cell, cell->dynCfiCb.isDynCfiEnb);
13686 cell->dynCfiCb.dynCfiRecfgPend = FALSE;
13688 cell->dynCfiCb.switchOvrInProgress = FALSE;
/* SPS UL tick with no alloc context (no UL SF scheduled this TTI).
 * NOTE(review): likely under #ifdef LTEMAC_SPS in the full source. */
13696 rgSCHCmnSpsUlTti(cell, NULLP);
13706 * @brief This function updates the scheduler with service for an UE.
13710 * Function: rgSCHCmnDlDedBoUpd
13711 * Purpose: This function should be called whenever there is a
13712 * change BO for a service.
13714 * Invoked by: BO and Scheduler
13716 * @param[in] RgSchCellCb* cell
13717 * @param[in] RgSchUeCb* ue
13718 * @param[in] RgSchDlLcCb* svc
/* Entry point for DL dedicated buffer-occupancy change on a logical channel:
 * re-queues PDCCH-order-inactive UEs, routes SPS-enabled channels to the SPS
 * module (which fully handles them), otherwise notifies the eMTC or regular
 * DL specific scheduler, and finally the SCell BO handler. */
13722 Void rgSCHCmnDlDedBoUpd(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlLcCb *svc)
13724 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13726 /* RACHO : if UEs idle time exceeded and a BO update
13727 * is received, then add UE to the pdcch Order Q */
13728 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
13730 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue, cell);
13731 /* If PDCCH order is already triggered and we are waiting for
13732 * RACH from UE then do not add to PdcchOdrQ. */
13733 if (ueDl->rachInfo.rapIdLnk.node == NULLP)
13735 rgSCHCmnDlAdd2PdcchOdrQ(cell, ue);
13741 /* If SPS service, invoke SPS module */
13742 if (svc->dlLcSpsCfg.isSpsEnabled)
13744 rgSCHCmnSpsDlDedBoUpd(cell, ue, svc);
13745 /* Note: Return from here, no update needed in other schedulers */
/* eMTC UEs are handled by the eMTC DL scheduler, others by the regular one.
 * NOTE(review): this if/else is likely under #ifdef EMTC_ENABLE upstream. */
13750 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
13752 cellSch->apisEmtcDl->rgSCHDlDedBoUpd(cell, ue, svc);
13753 //DU_LOG("\nINFO --> SCH : rgSCHEMTCDlDedBoUpd\n");
13758 cellSch->apisDl->rgSCHDlDedBoUpd(cell, ue, svc);
/* Propagate the BO update to secondary cells (carrier aggregation). */
13763 rgSCHSCellDlDedBoUpd(cell, ue, svc);
13771 * @brief Removes an UE from Cell's TA List.
13775 * Function: rgSCHCmnRmvFrmTaLst
13776 * Purpose: Removes an UE from Cell's TA List.
13778 * Invoked by: Specific Scheduler
13780 * @param[in] RgSchCellCb* cell
13781 * @param[in] RgSchUeCb* ue
/* Removes a UE from the cell's timing-advance (TA) list; eMTC UEs are kept on
 * a separate list handled by the eMTC helper. */
13785 Void rgSCHCmnRmvFrmTaLst(RgSchCellCb *cell,RgSchUeCb *ue)
13787 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* NOTE(review): this if/else is likely under #ifdef EMTC_ENABLE upstream;
 * the else/brace lines are elided in this extract. */
13790 if(cell->emtcEnable && ue->isEmtcUe)
13792 rgSCHEmtcRmvFrmTaLst(cellCmnDl,ue);
/* Non-eMTC: unlink from the common TA list and clear the link node. */
13797 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
13798 ue->dlTaLnk.node = (PTR)NULLP;
13803 /* Fix: syed Remove the msg4Proc from cell
13804 * msg4Retx Queue. I have used CMN scheduler function
13805 * directly. Please define a new API and call this
13806 * function through that. */
13809 * @brief This function removes MSG4 HARQ process from cell RETX Queues.
13813 * Function: rgSCHCmnDlMsg4ProcRmvFrmRetx
13814 * Purpose: This function removes MSG4 HARQ process from cell RETX Queues.
13816 * Invoked by: UE/RACB deletion.
13818 * @param[in] RgSchCellCb* cell
13819 * @param[in] RgSchDlHqProc* hqP
/* Removes a Msg4 (or contention-free CCCH SDU) HARQ process from the cell's
 * corresponding retransmission queue, if queued; invoked on UE/RACB deletion.
 * Uses retxLnk.node as the "is queued" indicator and clears it after unlink. */
13823 Void rgSCHCmnDlMsg4ProcRmvFrmRetx(RgSchCellCb *cell,RgSchDlHqProcCb *hqP)
13825 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Only act when the process is actually linked into a retx queue. */
13827 if (hqP->tbInfo[0].ccchSchdInfo.retxLnk.node)
13829 if (hqP->hqE->msg4Proc == hqP)
13831 cmLListDelFrm(&cellSch->dl.msg4RetxLst, \
13832 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13833 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
13836 else if(hqP->hqE->ccchSduProc == hqP)
/* CCCH SDU (without contention-resolution CE) retx queue. */
13838 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
13839 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13840 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
13849 * @brief This function adds a HARQ process for retx.
13853 * Function: rgSCHCmnDlProcAddToRetx
13854 * Purpose: This function adds a HARQ process to retransmission
13855 * queue. This may be performed when a HARQ ack is
13858 * Invoked by: HARQ feedback processing
13860 * @param[in] RgSchCellCb* cell
13861 * @param[in] RgSchDlHqProc* hqP
/* Adds a DL HARQ process to the appropriate retransmission queue after a
 * NACK/no-feedback: Msg4 and CCCH-SDU processes go to the cell-level common
 * queues, SPS processes to the SPS module, everything else to the specific
 * (eMTC or regular) DL scheduler. */
13865 Void rgSCHCmnDlProcAddToRetx(RgSchCellCb *cell,RgSchDlHqProcCb *hqP)
13867 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13869 if (hqP->hqE->msg4Proc == hqP) /* indicating msg4 transmission */
13871 cmLListAdd2Tail(&cellSch->dl.msg4RetxLst, \
13872 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
/* Back-pointer so the queue entry can be resolved to the HARQ process. */
13873 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
13876 else if(hqP->hqE->ccchSduProc == hqP)
13878 /*If CCCH SDU being transmitted without cont res CE*/
13879 cmLListAdd2Tail(&cellSch->dl.ccchSduRetxLst,
13880 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13881 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
13887 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
13889 /* Invoke SPS module for SPS HARQ proc re-transmission handling */
13890 rgSCHCmnSpsDlProcAddToRetx(cell, hqP);
13893 #endif /* LTEMAC_SPS */
/* eMTC vs regular specific-scheduler dispatch.
 * NOTE(review): likely under #ifdef EMTC_ENABLE upstream. */
13895 if((TRUE == cell->emtcEnable)
13896 && (TRUE == hqP->hqE->ue->isEmtcUe))
13898 cellSch->apisEmtcDl->rgSCHDlProcAddToRetx(cell, hqP);
13903 cellSch->apisDl->rgSCHDlProcAddToRetx(cell, hqP);
13911 * @brief This function performs RI validation and
13912 * updates it to the ueCb.
13916 * Function: rgSCHCmnDlSetUeRi
13917 * Purpose: This function performs RI validation and
13918 * updates it to the ueCb.
13920 * Invoked by: rgSCHCmnDlCqiInd
13922 * @param[in] RgSchCellCb *cell
13923 * @param[in] RgSchUeCb *ue
13924 * @param[in] uint8_t ri
13925 * @param[in] Bool isPeriodic
/* Validates a reported rank indicator and stores it in the UE's DL MIMO info:
 * clamps RI to antenna ports and UE-category max layers, repairs PMI on a
 * 1->2 rank switch for TM4/2-port, maintains periodic-vs-aperiodic RI
 * consistency (invalidateCqi), and toggles forced transmit diversity. */
13929 static Void rgSCHCmnDlSetUeRi(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t ri,Bool isPer)
13931 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
13932 RgSchCmnUeInfo *ueSchCmn = RG_SCH_CMN_GET_CMN_UE(ue);
13935 RgSchUePCqiCb *cqiCb = RG_SCH_GET_UE_CELL_CQI_CB(ue,cell);
13940 /* FIX for RRC Reconfiguration issue */
13941 /* ccpu00140894- During Tx Mode transition RI report will not entertained for
13942 * specific during which SCH expecting UE can complete TX mode transition*/
13943 if (ue->txModeTransCmplt == FALSE)
/* Ignore RI reports until the Tx-mode transition completes.
 * NOTE(review): the return/braces are elided in this extract. */
13948 /* Restrict the Number of TX layers to cell->numTxAntPorts.
13949 * Protection from invalid RI values. */
13950 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
13952 /* Special case of converting PMI to sane value when
13953 * there is a switch in RI from 1 to 2 and PMI reported
13954 * for RI=1 is invalid for RI=2 */
13955 if ((cell->numTxAntPorts == 2) && (ue->mimoInfo.txMode == RGR_UE_TM_4))
13957 if ((ri == 2) && ( ueDl->mimoInfo.ri == 1))
13959 ueDl->mimoInfo.pmi = (ueDl->mimoInfo.pmi < 2)? 1:2;
13963 /* Restrict the Number of TX layers according to the UE Category */
13964 ueDl->mimoInfo.ri = RGSCH_MIN(ri, rgUeCatTbl[ueSchCmn->ueCat].maxTxLyrs);
/* Per-RI statistics counters. NOTE(review): the two variants below are most
 * likely alternatives under a TENB_STATS-related #ifdef, elided here. */
13966 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].riCnt[ueDl->mimoInfo.ri-1]++;
13967 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
13971 ue->tenbStats->stats.nonPersistent.sch[0].riCnt[ueDl->mimoInfo.ri-1]++;
13972 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
13978 /* If RI is from Periodic CQI report */
13979 cqiCb->perRiVal = ueDl->mimoInfo.ri;
13980 /* Reset at every Periodic RI Reception */
13981 cqiCb->invalidateCqi = FALSE;
13985 /* If RI is from Aperiodic CQI report */
13986 if (cqiCb->perRiVal != ueDl->mimoInfo.ri)
13988 /* if this aperRI is different from last reported
13989 * perRI then invalidate all CQI reports till next
13991 cqiCb->invalidateCqi = TRUE;
13995 cqiCb->invalidateCqi = FALSE;
/* RI > 1 lifts the forced-TD restriction; RI == 1 in TM3 forces TD. */
14000 if (ueDl->mimoInfo.ri > 1)
14002 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
14004 else if (ue->mimoInfo.txMode == RGR_UE_TM_3) /* ri == 1 */
14006 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
14014 * @brief This function performs PMI validation and
14015 * updates it to the ueCb.
14019 * Function: rgSCHCmnDlSetUePmi
14020 * Purpose: This function performs PMI validation and
14021 * updates it to the ueCb.
14023 * Invoked by: rgSCHCmnDlCqiInd
14025 * @param[in] RgSchCellCb *cell
14026 * @param[in] RgSchUeCb *ue
14027 * @param[in] uint8_t pmi
/* Validates a reported PMI against the antenna configuration and current RI,
 * stores it in the UE's DL MIMO info, and clears the "no PMI" forced-TD flag.
 * Reports during a Tx-mode transition are ignored. */
14031 static S16 rgSCHCmnDlSetUePmi(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t pmi)
14033 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14035 if (ue->txModeTransCmplt == FALSE)
/* Drop the report until the transition completes.
 * NOTE(review): return/braces elided in this extract. */
14040 if (cell->numTxAntPorts == 2)
14046 if (ueDl->mimoInfo.ri == 2)
14048 /*ccpu00118150 - MOD - changed pmi value validation from 0 to 2*/
14049 /* PMI 2 and 3 are invalid incase of 2 TxAnt and 2 Layered SM */
14050 if (pmi == 2 || pmi == 3)
/* Invalid for rank 2 on 2 ports: rejected (error path elided in extract). */
14054 ueDl->mimoInfo.pmi = pmi+1;
14058 ueDl->mimoInfo.pmi = pmi;
14061 else if (cell->numTxAntPorts == 4)
14067 ueDl->mimoInfo.pmi = pmi;
14069 /* Reset the No PMI Flag in forceTD */
14070 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
14075 * @brief This function Updates the DL CQI on PUCCH for the UE.
14079 * Function: rgSCHCmnDlProcCqiMode10
14081 * This function updates the DL CQI on PUCCH for the UE.
14083 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14085 * Processing Steps:
14087 * @param[in] RgSchCellCb *cell
14088 * @param[in] RgSchUeCb *ue
14089 * @param[in] TfuDlCqiRpt *dlCqiRpt
14094 #ifdef RGR_CQI_REPT
/* Handles a PUCCH periodic report in reporting mode 1-0: a wideband CQI
 * (validated to 1..15, applied to both codewords) or a rank indicator
 * (validated and forwarded to rgSCHCmnDlSetUeRi as periodic).
 * NOTE(review): the two signatures below are the RGR_CQI_REPT (with
 * isCqiAvail out-param) and plain variants; #else/#endif lines elided. */
14095 static inline Void rgSCHCmnDlProcCqiMode10
14099 TfuDlCqiPucch *pucchCqi,
14103 static inline Void rgSCHCmnDlProcCqiMode10
14107 TfuDlCqiPucch *pucchCqi
14111 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14113 if (pucchCqi->u.mode10Info.type == TFU_RPT_CQI)
14115 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14116 /* Checking whether the decoded CQI is a value between 1 and 15*/
14117 if((pucchCqi->u.mode10Info.u.cqi) && (pucchCqi->u.mode10Info.u.cqi
14118 < RG_SCH_CMN_MAX_CQI))
14120 ueDl->cqiFlag = TRUE;
/* Mode 1-0 reports a single wideband CQI: mirror it onto both codewords. */
14121 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode10Info.u.cqi;
14122 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14123 /* ccpu00117452 - MOD - Changed macro name from
14124 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14125 #ifdef RGR_CQI_REPT
14126 *isCqiAvail = TRUE;
14134 else if (pucchCqi->u.mode10Info.type == TFU_RPT_RI)
14136 if ( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode10Info.u.ri) )
/* Periodic RI: TRUE flag marks it as a periodic report. */
14138 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode10Info.u.ri,
14143 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14144 pucchCqi->u.mode10Info.u.ri,ue->ueId);
14151 * @brief This function Updates the DL CQI on PUCCH for the UE.
14155 * Function: rgSCHCmnDlProcCqiMode11
14157 * This function updates the DL CQI on PUCCH for the UE.
14159 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14161 * Processing Steps:
14162 * Process CQI MODE 11
14163 * @param[in] RgSchCellCb *cell
14164 * @param[in] RgSchUeCb *ue
14165 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Handles periodic PUCCH reporting mode 1-1 (wideband CQI + PMI).
 * Under RGR_CQI_REPT two out-flags are produced: isCqiAvail for CW0 and
 * is2ndCwCqiAvail when a wideband differential CQI for CW1 is present. */
14170 #ifdef RGR_CQI_REPT
14171 static inline Void rgSCHCmnDlProcCqiMode11
14175 TfuDlCqiPucch *pucchCqi,
14177 Bool *is2ndCwCqiAvail
14180 static inline Void rgSCHCmnDlProcCqiMode11
14184 TfuDlCqiPucch *pucchCqi
14188 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14190 if (pucchCqi->u.mode11Info.type == TFU_RPT_CQI)
/* A fresh PUCCH CQI supersedes any aperiodic PUSCH feedback. */
14192 ue->mimoInfo.puschFdbkVld = FALSE;
14193 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14194 if((pucchCqi->u.mode11Info.u.cqi.cqi) &&
14195 (pucchCqi->u.mode11Info.u.cqi.cqi < RG_SCH_CMN_MAX_CQI))
14197 ueDl->cqiFlag = TRUE;
14198 /* ccpu00117452 - MOD - Changed macro name from
14199 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14200 #ifdef RGR_CQI_REPT
14201 *isCqiAvail = TRUE;
14203 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode11Info.u.cqi.cqi;
14204 if (pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.pres)
/* CW1 CQI is derived from CW0 plus the wideband differential value. */
14206 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
14207 ueDl->mimoInfo.cwInfo[1].cqi, \
14208 pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.val);
14209 #ifdef RGR_CQI_REPT
14210 /* ccpu00117259 - ADD - Considering second codeword CQI info
14211 incase of MIMO for CQI Reporting */
14212 *is2ndCwCqiAvail = TRUE;
/* Reported PMI is validated/applied by rgSCHCmnDlSetUePmi. */
14220 rgSCHCmnDlSetUePmi(cell, ue, \
14221 pucchCqi->u.mode11Info.u.cqi.pmi);
14223 else if (pucchCqi->u.mode11Info.type == TFU_RPT_RI)
14225 if( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode11Info.u.ri))
14227 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode11Info.u.ri,
14232 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14233 pucchCqi->u.mode11Info.u.ri,ue->ueId);
14240 * @brief This function Updates the DL CQI on PUCCH for the UE.
14244 * Function: rgSCHCmnDlProcCqiMode20
14246 * This function updates the DL CQI on PUCCH for the UE.
14248 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14250 * Processing Steps:
14251 * Process CQI MODE 20
14252 * @param[in] RgSchCellCb *cell
14253 * @param[in] RgSchUeCb *ue
14254 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Handles periodic PUCCH reporting mode 2-0 (UE-selected subband, no PMI).
 * Only the wideband part of the report is consumed here; subband reports
 * (isWideband == FALSE) are not processed in the visible path. */
14259 #ifdef RGR_CQI_REPT
14260 static inline Void rgSCHCmnDlProcCqiMode20
14264 TfuDlCqiPucch *pucchCqi,
14268 static inline Void rgSCHCmnDlProcCqiMode20
14272 TfuDlCqiPucch *pucchCqi
14276 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14278 if (pucchCqi->u.mode20Info.type == TFU_RPT_CQI)
14280 if (pucchCqi->u.mode20Info.u.cqi.isWideband)
14282 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14283 if((pucchCqi->u.mode20Info.u.cqi.u.wideCqi) &&
14284 (pucchCqi->u.mode20Info.u.cqi.u.wideCqi < RG_SCH_CMN_MAX_CQI))
/* Valid wideband CQI: mirror onto both codewords (no per-CW info here). */
14286 ueDl->cqiFlag = TRUE;
14287 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode20Info.u.cqi.\
14289 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14290 /* ccpu00117452 - MOD - Changed macro name from
14291 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14292 #ifdef RGR_CQI_REPT
14293 *isCqiAvail = TRUE;
14302 else if (pucchCqi->u.mode20Info.type == TFU_RPT_RI)
14304 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode20Info.u.ri))
14306 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode20Info.u.ri,
14311 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14312 pucchCqi->u.mode20Info.u.ri,ue->ueId);
14320 * @brief This function Updates the DL CQI on PUCCH for the UE.
14324 * Function: rgSCHCmnDlProcCqiMode21
14326 * This function updates the DL CQI on PUCCH for the UE.
14328 * Invoked by: rgSCHCmnDlCqiOnPucchInd
14330 * Processing Steps:
14331 * Process CQI MODE 21
14332 * @param[in] RgSchCellCb *cell
14333 * @param[in] RgSchUeCb *ue
14334 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Handles periodic PUCCH reporting mode 2-1 (UE-selected subband + PMI).
 * Wideband CQI, optional CW1 differential CQI, and PMI are consumed;
 * subband reports are not processed in the visible path. */
14339 #ifdef RGR_CQI_REPT
14340 static inline Void rgSCHCmnDlProcCqiMode21
14344 TfuDlCqiPucch *pucchCqi,
14346 Bool *is2ndCwCqiAvail
14349 static inline Void rgSCHCmnDlProcCqiMode21
14353 TfuDlCqiPucch *pucchCqi
14357 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14359 if (pucchCqi->u.mode21Info.type == TFU_RPT_CQI)
/* A fresh PUCCH CQI supersedes any aperiodic PUSCH feedback. */
14361 ue->mimoInfo.puschFdbkVld = FALSE;
14362 if (pucchCqi->u.mode21Info.u.cqi.isWideband)
14364 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14365 if((pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi) &&
14366 (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi < RG_SCH_CMN_MAX_CQI))
14368 ueDl->cqiFlag = TRUE;
14369 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode21Info.u.cqi.\
14371 if (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.pres)
/* CW1 CQI is derived from CW0 plus the differential value. */
14373 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
14374 ueDl->mimoInfo.cwInfo[1].cqi, \
14375 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.val);
14376 #ifdef RGR_CQI_REPT
14377 /* ccpu00117259 - ADD - Considering second codeword CQI info
14378 incase of MIMO for CQI Reporting */
14379 *is2ndCwCqiAvail = TRUE;
14382 /* ccpu00117452 - MOD - Changed macro name from
14383 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14384 #ifdef RGR_CQI_REPT
14385 *isCqiAvail = TRUE;
14392 rgSCHCmnDlSetUePmi(cell, ue, \
14393 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.pmi);
14396 else if (pucchCqi->u.mode21Info.type == TFU_RPT_RI)
14398 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode21Info.u.ri))
14400 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode21Info.u.ri,
14405 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14406 pucchCqi->u.mode21Info.u.ri,ue->ueId);
14414 * @brief This function Updates the DL CQI on PUCCH for the UE.
14418 * Function: rgSCHCmnDlCqiOnPucchInd
14420 * This function updates the DL CQI on PUCCH for the UE.
14422 * Invoked by: rgSCHCmnDlCqiInd
14424 * Processing Steps:
14425 * - Depending on the reporting mode of the PUCCH, the CQI/PMI/RI values
14426 * are updated and stored for each UE
14428 * @param[in] RgSchCellCb *cell
14429 * @param[in] RgSchUeCb *ue
14430 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Dispatcher for periodic PUCCH CSI reports: switches on pucchCqi->mode
 * and delegates to the per-mode handler (1-0/1-1/2-0/2-1). Under
 * RGR_CQI_REPT it also records the mode in ueCqiRept and forwards the
 * isCqiAvail / is2ndCwCqiAvail out-flags; unknown modes are logged and
 * isCqiAvail is forced FALSE so no stale report is emitted. */
14435 #ifdef RGR_CQI_REPT
14436 static Void rgSCHCmnDlCqiOnPucchInd
14440 TfuDlCqiPucch *pucchCqi,
14441 RgrUeCqiRept *ueCqiRept,
14443 Bool *is2ndCwCqiAvail
14446 static Void rgSCHCmnDlCqiOnPucchInd
14450 TfuDlCqiPucch *pucchCqi
14454 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14456 /* ccpu00117452 - MOD - Changed
14457 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14458 #ifdef RGR_CQI_REPT
14459 /* Save CQI mode information in the report */
14460 ueCqiRept->cqiMode = pucchCqi->mode;
14463 switch(pucchCqi->mode)
14465 case TFU_PUCCH_CQI_MODE10:
14466 #ifdef RGR_CQI_REPT
14467 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail);
14469 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi);
14471 ueDl->cqiFlag = TRUE;
14473 case TFU_PUCCH_CQI_MODE11:
14474 #ifdef RGR_CQI_REPT
14475 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail,
14478 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi);
14480 ueDl->cqiFlag = TRUE;
14482 case TFU_PUCCH_CQI_MODE20:
14483 #ifdef RGR_CQI_REPT
14484 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail);
14486 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi);
14488 ueDl->cqiFlag = TRUE;
14490 case TFU_PUCCH_CQI_MODE21:
14491 #ifdef RGR_CQI_REPT
14492 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail,
14495 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi);
14497 ueDl->cqiFlag = TRUE;
/* Default arm: unknown/unsupported PUCCH CQI mode. */
14501 DU_LOG("\nERROR --> SCH : Unknown CQI Mode %d of UE %d",
14502 pucchCqi->mode,ue->ueId);
14503 /* ccpu00117452 - MOD - Changed macro name from
14504 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14505 #ifdef RGR_CQI_REPT
14506 *isCqiAvail = FALSE;
14513 } /* rgSCHCmnDlCqiOnPucchInd */
14517 * @brief This function Updates the DL CQI on PUSCH for the UE.
14521 * Function: rgSCHCmnDlCqiOnPuschInd
14523 * This function updates the DL CQI on PUSCH for the UE.
14525 * Invoked by: rgSCHCmnDlCqiInd
14527 * Processing Steps:
14528 * - Depending on the reporting mode of the PUSCH, the CQI/PMI/RI values
14529 * are updated and stored for each UE
14531 * @param[in] RgSchCellCb *cell
14532 * @param[in] RgSchUeCb *ue
14533 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Dispatcher for aperiodic PUSCH CSI reports (modes 2-0/3-0/1-2/2-2/3-1).
 * RI (if present) is applied first; for TM4 the previous RI is saved in
 * prevRiVal so it can be restored when a later PMI update fails or when
 * CQI decoding fails (rank/PMI consistency — see default arm and the
 * mode 3-1 rgSCHCmnDlSetUePmi failure path). */
14538 #ifdef RGR_CQI_REPT
14539 static Void rgSCHCmnDlCqiOnPuschInd
14543 TfuDlCqiPusch *puschCqi,
14544 RgrUeCqiRept *ueCqiRept,
14546 Bool *is2ndCwCqiAvail
14549 static Void rgSCHCmnDlCqiOnPuschInd
14553 TfuDlCqiPusch *puschCqi
14557 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14558 uint32_t prevRiVal = 0;
14559 if (puschCqi->ri.pres == PRSNT_NODEF)
14561 if (RG_SCH_CMN_IS_RI_VALID(puschCqi->ri.val))
14563 /* Saving the previous ri value to revert back
14564 in case PMI update failed */
14565 if (RGR_UE_TM_4 == ue->mimoInfo.txMode ) /* Cheking for TM4. TM8 check later */
14567 prevRiVal = ueDl->mimoInfo.ri;
14569 rgSCHCmnDlSetUeRi(cell, ue, puschCqi->ri.val, FALSE);
14573 DU_LOG("\nERROR --> SCH : Invalid RI value(%x) CRNTI:%d",
14574 puschCqi->ri.val,ue->ueId);
/* PUSCH feedback is marked invalid until a mode below re-validates it. */
14578 ue->mimoInfo.puschFdbkVld = FALSE;
14579 /* ccpu00117452 - MOD - Changed macro name from
14580 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14581 #ifdef RGR_CQI_REPT
14582 /* Save CQI mode information in the report */
14583 ueCqiRept->cqiMode = puschCqi->mode;
14584 /* ccpu00117259 - DEL - removed default setting of isCqiAvail to TRUE */
14587 switch(puschCqi->mode)
14589 case TFU_PUSCH_CQI_MODE_20:
14590 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14591 /* Checking whether the decoded CQI is a value between 1 and 15*/
14592 if((puschCqi->u.mode20Info.wideBandCqi) &&
14593 (puschCqi->u.mode20Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
14595 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode20Info.wideBandCqi;
14596 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14597 /* ccpu00117452 - MOD - Changed macro name from
14598 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14599 #ifdef RGR_CQI_REPT
14600 *isCqiAvail = TRUE;
14608 case TFU_PUSCH_CQI_MODE_30:
14609 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14610 if((puschCqi->u.mode30Info.wideBandCqi) &&
14611 (puschCqi->u.mode30Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
14613 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode30Info.wideBandCqi;
14614 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14615 /* ccpu00117452 - MOD - Changed macro name from
14616 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14617 #ifdef RGR_CQI_REPT
14618 *isCqiAvail = TRUE;
14622 uint32_t gACqiRcvdCount;
14633 case TFU_PUSCH_CQI_MODE_12:
/* Mode 1-2: per-codeword wideband CQI indices; each validated 1..14. */
14634 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14635 if((puschCqi->u.mode12Info.cqiIdx[0]) &&
14636 (puschCqi->u.mode12Info.cqiIdx[0] < RG_SCH_CMN_MAX_CQI))
14638 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode12Info.cqiIdx[0];
14639 /* ccpu00117452 - MOD - Changed macro name from
14640 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14641 #ifdef RGR_CQI_REPT
14642 *isCqiAvail = TRUE;
14649 if((puschCqi->u.mode12Info.cqiIdx[1]) &&
14650 (puschCqi->u.mode12Info.cqiIdx[1] < RG_SCH_CMN_MAX_CQI))
14652 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode12Info.cqiIdx[1];
14653 /* ccpu00117452 - MOD - Changed macro name from
14654 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14655 #ifdef RGR_CQI_REPT
14656 /* ccpu00117259 - ADD - Considering second codeword CQI info
14657 incase of MIMO for CQI Reporting */
14658 *is2ndCwCqiAvail = TRUE;
14665 ue->mimoInfo.puschFdbkVld = TRUE;
14666 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_12;
14667 ue->mimoInfo.puschPmiInfo.u.mode12Info = puschCqi->u.mode12Info;
14668 /* : resetting this is time based. Make use of CQI reporting
14669 * periodicity, DELTA's in determining the exact time at which this
14670 * need to be reset. */
14672 case TFU_PUSCH_CQI_MODE_22:
/* Mode 2-2: per-codeword wideband CQI plus a single wideband PMI. */
14673 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14674 if((puschCqi->u.mode22Info.wideBandCqi[0]) &&
14675 (puschCqi->u.mode22Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
14677 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode22Info.wideBandCqi[0];
14678 /* ccpu00117452 - MOD - Changed macro name from
14679 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14680 #ifdef RGR_CQI_REPT
14681 *isCqiAvail = TRUE;
14688 if((puschCqi->u.mode22Info.wideBandCqi[1]) &&
14689 (puschCqi->u.mode22Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
14691 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode22Info.wideBandCqi[1];
14692 /* ccpu00117452 - MOD - Changed macro name from
14693 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14694 #ifdef RGR_CQI_REPT
14695 /* ccpu00117259 - ADD - Considering second codeword CQI info
14696 incase of MIMO for CQI Reporting */
14697 *is2ndCwCqiAvail = TRUE;
14704 rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode22Info.wideBandPmi);
14705 ue->mimoInfo.puschFdbkVld = TRUE;
14706 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_22;
14707 ue->mimoInfo.puschPmiInfo.u.mode22Info = puschCqi->u.mode22Info;
14709 case TFU_PUSCH_CQI_MODE_31:
/* Mode 3-1: wideband CQI per CW; CW1 only consumed when RI > 1. */
14710 /*ccpu00109787 - ADD - Check for non-zero CQI*/
14711 if((puschCqi->u.mode31Info.wideBandCqi[0]) &&
14712 (puschCqi->u.mode31Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
14714 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode31Info.wideBandCqi[0];
14715 /* ccpu00117452 - MOD - Changed macro name from
14716 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14717 #ifdef RGR_CQI_REPT
14718 *isCqiAvail = TRUE;
14721 if (ueDl->mimoInfo.ri > 1)
14723 if((puschCqi->u.mode31Info.wideBandCqi[1]) &&
14724 (puschCqi->u.mode31Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
14726 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode31Info.wideBandCqi[1];
14727 /* ccpu00117452 - MOD - Changed macro name from
14728 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14729 #ifdef RGR_CQI_REPT
14730 /* ccpu00117259 - ADD - Considering second codeword CQI info
14731 incase of MIMO for CQI Reporting */
14732 *is2ndCwCqiAvail = TRUE;
14736 if (rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode31Info.pmi) != ROK)
14738 /* To avoid Rank and PMI inconsistency */
14739 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
14740 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
14742 ueDl->mimoInfo.ri = prevRiVal;
14745 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_31;
14746 ue->mimoInfo.puschPmiInfo.u.mode31Info = puschCqi->u.mode31Info;
/* Default arm: unknown mode — log, revert RI (TM4), clear both flags. */
14750 DU_LOG("\nERROR --> SCH : Unknown CQI Mode %d CRNTI:%d",
14751 puschCqi->mode,ue->ueId);
14752 /* CQI decoding failed revert the RI to previous value */
14753 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
14754 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
14756 ueDl->mimoInfo.ri = prevRiVal;
14758 /* ccpu00117452 - MOD - Changed macro name from
14759 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14760 #ifdef RGR_CQI_REPT
14761 *isCqiAvail = FALSE;
14762 /* ccpu00117259 - ADD - Considering second codeword CQI info
14763 incase of MIMO for CQI Reporting */
14764 *is2ndCwCqiAvail = FALSE;
14771 } /* rgSCHCmnDlCqiOnPuschInd */
14775 * @brief This function Updates the DL CQI for the UE.
14779 * Function: rgSCHCmnDlCqiInd
14780 * Purpose: Updates the DL CQI for the UE
14784 * @param[in] RgSchCellCb *cell
14785 * @param[in] RgSchUeCb *ue
14786 * @param[in] TfuDlCqiRpt *dlCqi
/* Top-level DL CSI entry point. Routes the report to the PUCCH or PUSCH
 * handler (isPucchInfo selects the path — declared outside this chunk),
 * optionally smooths CQI when CQI_CONFBITMASK_DROP is enabled, emits an
 * RRM CQI report under RGR_CQI_REPT, recomputes UE allocation limits,
 * and notifies DLFS/SPS and the specific (EMTC or legacy) scheduler. */
14790 Void rgSCHCmnDlCqiInd
14796 CmLteTimingInfo timingInfo
14799 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14800 /* ccpu00117452 - MOD - Changed macro name from
14801 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14802 #ifdef RGR_CQI_REPT
14803 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14804 RgrUeCqiRept ueCqiRept = {{0}};
14805 Bool isCqiAvail = FALSE;
14806 /* ccpu00117259 - ADD - Considering second codeword CQI info
14807 incase of MIMO for CQI Reporting */
14808 Bool is2ndCwCqiAvail = FALSE;
14812 #ifdef RGR_CQI_REPT
14815 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
14819 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
14824 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi);
14828 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi);
/* Optional CQI-drop guard: clamps sudden downward CQI jumps (max step of
 * 3, floor behavior around prevCqi) when the confidence bitmask is 0. */
14832 #ifdef CQI_CONFBITMASK_DROP
14833 if(!ue->cqiConfBitMask)
14835 if (ueDl->mimoInfo.cwInfo[0].cqi >15)
14837 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
14838 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
14840 else if ( ueDl->mimoInfo.cwInfo[0].cqi >= ue->prevCqi)
14842 ue->prevCqi = ueDl->mimoInfo.cwInfo[0].cqi;
14846 uint8_t dlCqiDeltaPrev = 0;
14847 dlCqiDeltaPrev = ue->prevCqi - ueDl->mimoInfo.cwInfo[0].cqi;
14848 if (dlCqiDeltaPrev > 3)
14849 dlCqiDeltaPrev = 3;
14850 if ((ue->prevCqi - dlCqiDeltaPrev) < 6)
14856 ue->prevCqi = ue->prevCqi - dlCqiDeltaPrev;
14858 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
14859 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
14865 /* ccpu00117452 - MOD - Changed macro name from
14866 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14867 #ifdef RGR_CQI_REPT
14868 /* ccpu00117259 - ADD - Considering second codeword CQI info
14869 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail\
14870 in 'if' condition*/
14871 if (RG_SCH_CQIR_IS_PUSHNCQI_ENBLE(ue) && (isCqiAvail || is2ndCwCqiAvail))
14873 ueCqiRept.cqi[0] = ueDl->mimoInfo.cwInfo[0].cqi;
14875 /* ccpu00117259 - ADD - Considering second codeword CQI info
14876 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail
14877 in 'if' condition*/
14878 ueCqiRept.cqi[1] = 0;
14879 if(is2ndCwCqiAvail)
14881 ueCqiRept.cqi[1] = ueDl->mimoInfo.cwInfo[1].cqi;
14883 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, &ueCqiRept);
/* Refresh link-adaptation allocation limits from the new CSI. */
14888 rgSCHCmnDlSetUeAllocLmtLa(cell, ue);
14889 rgSCHCheckAndSetTxScheme(cell, ue);
14892 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), ue->isEmtcUe);
14894 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), FALSE);
14898 if (cellSch->dl.isDlFreqSel)
14900 cellSch->apisDlfs->rgSCHDlfsDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo);
14903 /* Call SPS module to update CQI indication */
14904 rgSCHCmnSpsDlCqiIndHndlr(cell, ue, timingInfo);
14906 /* Call Specific scheduler to process on dlCqiInd */
14908 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
14910 cellSch->apisEmtcDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
14915 cellSch->apisDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
14918 #ifdef RG_PFS_STATS
14919 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].avgCqi +=
14920 ueDl->mimoInfo.cwInfo[0].cqi;
14921 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].totalCqiOcc++;
/* Statistics accumulation (SCH_STATS / TENB_STATS builds). */
14925 ueDl->avgCqi += ueDl->mimoInfo.cwInfo[0].cqi;
14926 ueDl->numCqiOccns++;
14927 if (ueDl->mimoInfo.ri == 1)
14938 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
14939 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
14940 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw0Cqi ++;
14941 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw1Cqi ++;
14942 cell->tenbStats->sch.dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
14943 cell->tenbStats->sch.dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
14944 cell->tenbStats->sch.dlNumCw0Cqi ++;
14945 cell->tenbStats->sch.dlNumCw1Cqi ++;
14952 * @brief This function calculates the wideband CQI from SNR
14953 * reported for each RB.
14957 * Function: rgSCHCmnCalcWcqiFrmSnr
14958 * Purpose: Wideband CQI calculation from SNR
14960 * Invoked by: RG SCH
14962 * @param[in] RgSchCellCb *cell
14963 * @param[in] TfuSrsRpt *srsRpt,
14964 * @return Wideband CQI
/* Maps srsRpt->snr[0] to a wideband CQI via fixed 50-unit SNR bands
 * (the per-band assignments fall on lines missing from this chunk).
 * NOTE(review): only snr[0] is consulted, not a per-RB average —
 * consistent with the "primitive" caveat below. */
14967 static uint8_t rgSCHCmnCalcWcqiFrmSnr(RgSchCellCb *cell, TfuSrsRpt *srsRpt)
14969 uint8_t wideCqi=1; /*Calculated value from SNR*/
14970 /*Need to map a certain SNR with a WideCQI value.
14971 * The CQI calculation is still primitive. Further, need to
14972 * use a improvized method for calculating WideCQI from SNR*/
14973 if (srsRpt->snr[0] <=50)
14977 else if (srsRpt->snr[0]>=51 && srsRpt->snr[0] <=100)
14981 else if (srsRpt->snr[0]>=101 && srsRpt->snr[0] <=150)
14985 else if (srsRpt->snr[0]>=151 && srsRpt->snr[0] <=200)
14989 else if (srsRpt->snr[0]>=201 && srsRpt->snr[0] <=250)
14998 }/*rgSCHCmnCalcWcqiFrmSnr*/
15002 * @brief This function Updates the SRS for the UE.
15006 * Function: rgSCHCmnSrsInd
15007 * Purpose: Updates the UL SRS for the UE
15011 * @param[in] RgSchCellCb *cell
15012 * @param[in] RgSchUeCb *ue
15013 * @param[in] TfuSrsRpt *srsRpt,
/* SRS indication handler: alternates the selected UL TX antenna based on
 * elapsed TTIs and the SRS periodicity, then derives a wideband CQI
 * (reported directly, or computed from SNR) and feeds antenna selection. */
15017 Void rgSCHCmnSrsInd(RgSchCellCb *cell,RgSchUeCb *ue,TfuSrsRpt *srsRpt,CmLteTimingInfo timingInfo)
15019 uint8_t wideCqi; /*Calculated value from SNR*/
15020 uint32_t recReqTime; /*Received Time in TTI*/
/* Absolute TTI count; antenna toggles every srsCb.peri TTIs (mod 2). */
15022 recReqTime = (timingInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) + timingInfo.slot;
15023 ue->srsCb.selectedAnt = (recReqTime/ue->srsCb.peri)%2;
15024 if(srsRpt->wideCqiPres)
15026 wideCqi = srsRpt->wideCqi;
15030 wideCqi = rgSCHCmnCalcWcqiFrmSnr(cell, srsRpt);
15032 rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi);
15034 }/*rgSCHCmnSrsInd*/
15039 * @brief This function is a handler for TA report for an UE.
15043 * Function: rgSCHCmnDlTARpt
15044 * Purpose: Determine based on UE_IDLE_TIME threshold,
15045 * whether UE needs to be Linked to the scheduler's TA list OR
15046 * if it needs a PDCCH Order.
15051 * @param[in] RgSchCellCb *cell
15052 * @param[in] RgSchUeCb *ue
/* If the UE has been idle beyond threshold (and a TA timer is configured),
 * mark it PDCCH-order-inactive in both DL and UL masks and notify the
 * specific schedulers exactly once (only when the mask was previously 0).
 * Otherwise queue the UE on the cell TA list, guarding against duplicate
 * insertion via dlTaLnk.node (fix ccpu00124009). */
15056 Void rgSCHCmnDlTARpt(RgSchCellCb *cell,RgSchUeCb *ue)
15058 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15059 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
15060 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15061 CmLListCp poInactvLst;
15064 /* RACHO: If UE idle time is more than threshold, then
15065 * set its poInactv pdcch order inactivity */
15066 /* Fix : syed Ignore if TaTmr is not configured */
15067 if ((ue->dl.taCb.cfgTaTmr) && (rgSCHCmnUeIdleExdThrsld(cell, ue) == ROK))
15069 uint32_t prevDlMsk = ue->dl.dlInactvMask;
15070 uint32_t prevUlMsk = ue->ul.ulInactvMask;
15071 ue->dl.dlInactvMask |= RG_PDCCHODR_INACTIVE;
15072 ue->ul.ulInactvMask |= RG_PDCCHODR_INACTIVE;
15073 /* Indicate Specific scheduler for this UEs inactivity */
15074 cmLListInit(&poInactvLst);
15075 cmLListAdd2Tail(&poInactvLst, &ueDl->rachInfo.inActUeLnk);
15076 ueDl->rachInfo.inActUeLnk.node = (PTR)ue;
15077 /* Send inactivate ind only if not already sent */
15078 if (prevDlMsk == 0)
15080 cellSch->apisDl->rgSCHDlInactvtUes(cell, &poInactvLst);
15082 if (prevUlMsk == 0)
15084 cellSch->apisUl->rgSCHUlInactvtUes(cell, &poInactvLst);
15089 /* Fix: ccpu00124009 Fix for loop in the linked list "cellDl->taLst" */
15090 if (!ue->dlTaLnk.node)
15093 if(cell->emtcEnable)
15097 rgSCHEmtcAddToTaLst(cellDl,ue);
15104 cmLListAdd2Tail(&cellDl->taLst, &ue->dlTaLnk);
15105 ue->dlTaLnk.node = (PTR)ue;
15110 DU_LOG("\nERROR --> SCH : <TA>TA duplicate entry attempt failed: UEID:%u",
15119 * @brief Indication of UL CQI.
15123 * Function : rgSCHCmnFindUlCqiUlTxAnt
15125 * - Finds the Best Tx Antenna amongst the CQIs received
15126 * from Two Tx Antennas.
15128 * @param[in] RgSchCellCb *cell
15129 * @param[in] RgSchUeCb *ue
15130 * @param[in] uint8_t wideCqi
/* NOTE(review): despite the header, the visible body only marks antenna 1
 * as valid — no per-antenna CQI comparison is performed here (the
 * selection logic may live on lines missing from this chunk; confirm). */
15133 static Void rgSCHCmnFindUlCqiUlTxAnt(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t wideCqi)
15135 ue->validTxAnt = 1;
15137 } /* rgSCHCmnFindUlCqiUlTxAnt */
15141 * @brief Indication of UL CQI.
15145 * Function : rgSCHCmnUlCqiInd
15147 * - Updates uplink CQI information for the UE. Computes and
15148 * stores the lowest CQI of CQIs reported in all subbands.
15150 * @param[in] RgSchCellCb *cell
15151 * @param[in] RgSchUeCb *ue
15152 * @param[in] TfuUlCqiRpt *ulCqiInfo
/* UL CQI handler: stores the wideband CQI, runs the UL link-adaptation
 * filter (80/20 IIR on cqiBasediTbs, with a +/-5 iTBS outlier guard and
 * a reset path when the previous report was ignored), then notifies the
 * power control, SPS, and specific scheduler modules. */
15155 Void rgSCHCmnUlCqiInd(RgSchCellCb *cell,RgSchUeCb *ue,TfuUlCqiRpt *ulCqiInfo)
15157 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15158 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15163 #if (defined(SCH_STATS) || defined(TENB_STATS))
15164 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
15167 /* consider inputs from SRS handlers about SRS occassions
15168 * in determining the UL TX Antenna selection */
15169 ueUl->crntUlCqi[0] = ulCqiInfo->wideCqi;
15171 ueUl->validUlCqi = ueUl->crntUlCqi[0];
15172 ue->validTxAnt = 0;
/* iTBS implied by the new CQI vs. the LA-tracked iTBS (scaled by 100). */
15174 iTbsNew = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][ueUl->validUlCqi];
15175 previTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
15177 if (RG_ITBS_DIFF(iTbsNew, previTbs) > 5)
15179 /* Ignore this iTBS report and mark that last iTBS report was */
15180 /* ignored so that subsequently we reset the LA algorithm */
15181 ueUl->ulLaCb.lastiTbsIgnored = TRUE;
15185 if (ueUl->ulLaCb.lastiTbsIgnored != TRUE)
15187 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
15188 (80 * ueUl->ulLaCb.cqiBasediTbs))/100;
15192 /* Reset the LA as iTbs in use caught up with the value */
15193 /* reported by UE. */
15194 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
15195 (80 * previTbs * 100))/100;
15196 ueUl->ulLaCb.deltaiTbs = 0;
15197 ueUl->ulLaCb.lastiTbsIgnored = FALSE;
15202 rgSCHPwrUlCqiInd(cell, ue);
15204 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
15206 rgSCHCmnSpsUlCqiInd(cell, ue);
15209 /* Applicable to only some schedulers */
15211 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
15213 cellSch->apisEmtcUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
15218 cellSch->apisUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
/* Statistics accumulation (SCH_STATS / TENB_STATS builds). */
15222 ueUl->numCqiOccns++;
15223 ueUl->avgCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15228 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15229 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulNumCqi ++;
15230 cell->tenbStats->sch.ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15231 cell->tenbStats->sch.ulNumCqi ++;
15236 } /* rgSCHCmnUlCqiInd */
15239 * @brief Returns HARQ proc for which data expected now.
15243 * Function: rgSCHCmnUlHqProcForUe
15244 * Purpose: This function returns the harq process for
15245 * which data is expected in the current subframe.
15246 * It does not validate that the HARQ process
15247 * has an allocation.
15251 * @param[in] RgSchCellCb *cell
15252 * @param[in] CmLteTimingInfo frm
15253 * @param[in] RgSchUeCb *ue
15254 * @param[out] RgSchUlHqProcCb **procRef
/* Two lookup strategies are visible (conditionally compiled, the #ifdef
 * lines fall outside this chunk): by derived process id, or by time. */
15257 Void rgSCHCmnUlHqProcForUe
15260 CmLteTimingInfo frm,
15262 RgSchUlHqProcCb **procRef
15266 uint8_t procId = rgSCHCmnGetUlHqProcIdx(&frm, cell);
15269 *procRef = rgSCHUhmGetUlHqProc(cell, ue, procId);
15271 *procRef = rgSCHUhmGetUlProcByTime(cell, ue, frm);
15278 * @brief Update harq process for allocation.
15282 * Function : rgSCHCmnUpdUlHqProc
15284 * This function is invoked when harq process
15285 * control block is now in a new memory location
15286 * thus requiring a pointer/reference update.
15288 * @param[in] RgSchCellCb *cell
15289 * @param[in] RgSchUlHqProcCb *curProc
15290 * @param[in] RgSchUlHqProcCb *oldProc
/* Re-points the allocation's back-reference at the relocated HARQ proc.
 * A debug-only NULL check on curProc->alloc guards the dereference. */
15295 S16 rgSCHCmnUpdUlHqProc
15298 RgSchUlHqProcCb *curProc,
15299 RgSchUlHqProcCb *oldProc
15305 #if (ERRCLASS & ERRCLS_DEBUG)
15306 if (curProc->alloc == NULLP)
15311 curProc->alloc->hqProc = curProc;
15313 } /* rgSCHCmnUpdUlHqProc */
15316 /*MS_WORKAROUND for CR FIXME */
15318 * @brief Hsndles BSR timer expiry
15322 * Function : rgSCHCmnBsrTmrExpry
15324 * This function is invoked when periodic BSR timer expires for a UE.
15326 * @param[in] RgSchUeCb *ue
/* On periodic-BSR timer expiry the UE is treated as if it sent a
 * scheduling request: isSrGrant is set and the (EMTC or legacy)
 * scheduler's SR-received hook is invoked. */
15331 S16 rgSCHCmnBsrTmrExpry(RgSchUeCb *ueCb)
15333 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ueCb->cell);
15336 ueCb->isSrGrant = TRUE;
15339 emtcStatsUlBsrTmrTxp++;
15343 if(ueCb->cell->emtcEnable)
15347 cellSch->apisEmtcUl->rgSCHSrRcvd(ueCb->cell, ueCb);
15354 cellSch->apisUl->rgSCHSrRcvd(ueCb->cell, ueCb);
15361 * @brief Short BSR update.
15365 * Function : rgSCHCmnUpdBsrShort
15367 * This functions does requisite updates to handle short BSR reporting.
15369 * @param[in] RgSchCellCb *cell
15370 * @param[in] RgSchUeCb *ue
15371 * @param[in] RgSchLcgCb *ulLcg
15372 * @param[in] uint8_t bsr
15373 * @param[out] RgSchErrInfo *err
/* Short BSR carries one LCG's buffer status: zero out every other LCG
 * (maintaining per-QCI active-UE counts), translate the 6-bit BSR index
 * through the (extended) BSR table, cap GBR bearers at effGbr+effDeltaMbr
 * and non-GBR at effAmbr, refresh totalBsr, stop the BSR timer on a zero
 * report, and fan the update out to SPS and the specific scheduler(s),
 * including active SCells when UL CA is enabled. */
15378 S16 rgSCHCmnUpdBsrShort
15389 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15391 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15392 RgSchCmnLcg *cmnLcg = NULLP;
15398 if (!RGSCH_LCG_ISCFGD(ulLcg))
15400 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
15403 for (lcgCnt=0; lcgCnt<4; lcgCnt++)
15406 /* Set BS of all other LCGs to Zero.
15407 If Zero BSR is reported in Short BSR include this LCG too */
15408 if ((lcgCnt != ulLcg->lcgId) ||
15409 (!bsr && !ueUl->hqEnt.numBusyHqProcs))
15411 /* If old BO is zero do nothing */
15412 if(((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs != 0)
15414 for(idx = 0; idx < ue->ul.lcgArr[lcgCnt].numLch; idx++)
15416 if((ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount) &&
15417 (ue->ulActiveLCs & (1 <<
15418 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1))))
/* UE leaves this QCI: decrement count and clear its active-LC bit. */
15421 ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount--;
15422 ue->ulActiveLCs &= ~(1 <<
15423 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1));
15429 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgCnt]))
15431 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs = 0;
15432 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->reportedBs = 0;
/* Non-zero BSR on a previously-empty non-LCG0 group: (re)activate LCs. */
15437 if(ulLcg->lcgId && bsr && (((RgSchCmnLcg *)(ulLcg->sch))->bs == 0))
15439 for(idx = 0; idx < ulLcg->numLch; idx++)
15442 if (!(ue->ulActiveLCs & (1 << (ulLcg->lcArray[idx]->qciCb->qci -1))))
15444 ulLcg->lcArray[idx]->qciCb->ulUeCount++;
15445 ue->ulActiveLCs |= (1 << (ulLcg->lcArray[idx]->qciCb->qci -1));
15450 /* Resetting the nonGbrLcgBs info here */
15451 ue->ul.nonGbrLcgBs = 0;
15452 ue->ul.nonLcg0Bs = 0;
15454 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
15456 if (TRUE == ue->ul.useExtBSRSizes)
15458 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
15462 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
15464 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15466 /* TBD check for effGbr != 0 */
15467 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15469 else if (0 == ulLcg->lcgId)
15471 /* This is added for handling LCG0 */
15472 cmnLcg->bs = cmnLcg->reportedBs;
15476 /* Update non GBR LCG's BS*/
15477 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
15478 cmnLcg->bs = ue->ul.nonGbrLcgBs;
15480 ue->ul.totalBsr = cmnLcg->bs;
15483 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (bsr == 0))
15485 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
15489 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
15491 rgSCHCmnSpsBsrRpt(cell, ue, ulLcg);
15494 rgSCHCmnUpdUlCompEffBsr(ue);
15497 if(cell->emtcEnable)
15501 cellSch->apisEmtcUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
15508 cellSch->apisUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
/* UL CA: propagate the BSR to each (active) SCell scheduler as well. */
15512 if (ue->ul.isUlCaEnabled && ue->numSCells)
15514 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
15516 #ifndef PAL_ENABLE_UL_CA
15517 if((ue->cellInfo[sCellIdx] != NULLP) &&
15518 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
15520 if(ue->cellInfo[sCellIdx] != NULLP)
15523 cellSch->apisUl->rgSCHUpdBsrShort(ue->cellInfo[sCellIdx]->cell,
15534 * @brief Truncated BSR update.
15538 * Function : rgSCHCmnUpdBsrTrunc
15540 * This functions does required updates to handle truncated BSR report.
15543 * @param[in] RgSchCellCb *cell
15544 * @param[in] RgSchUeCb *ue
15545 * @param[in] RgSchLcgCb *ulLcg
15546 * @param[in] uint8_t bsr
15547 * @param[out] RgSchErrInfo *err
/* Truncated BSR reports only the highest-priority LCG with data: clear
 * the buffer status of all higher-priority LCGs (ids below ulLcg->lcgId),
 * reactivate LCs for LCGs at/after it that went empty, apply the reported
 * value through the (extended) BSR table with GBR/AMBR capping, and sum
 * totalBsr over the remaining (possibly stale) lower-priority LCGs before
 * notifying the specific scheduler(s) and active SCells. */
15552 S16 rgSCHCmnUpdBsrTrunc
15561 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15562 RgSchCmnLcg *cmnLcg = NULLP;
15569 if (!RGSCH_LCG_ISCFGD(ulLcg))
15571 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
15574 /* set all higher prio lcgs bs to 0 and update this lcgs bs and
15575 total bsr= sumofall lcgs bs */
15578 for (cnt = ulLcg->lcgId-1; cnt >= 0; cnt--)
15581 /* If Existing BO is zero the don't do anything */
15582 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs != 0)
15584 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
15587 if((ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount) &&
15588 (ue->ulActiveLCs & (1 <<
15589 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
/* UE leaves this QCI: decrement count and clear its active-LC bit. */
15591 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount--;
15592 ue->ulActiveLCs &= ~(1 <<
15593 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
15598 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs = 0;
15599 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->reportedBs = 0;
15604 for (cnt = ulLcg->lcgId; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
15606 if (ulLcg->lcgId == 0)
/* LCG0 handled separately (visible continue/skip lines are missing). */
15610 /* If Existing BO is zero the don't do anything */
15611 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs == 0)
15613 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
15616 if (!(ue->ulActiveLCs & (1 <<
15617 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
15619 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount++;
15620 ue->ulActiveLCs |= (1 <<
15621 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
15627 ue->ul.nonGbrLcgBs = 0;
15628 ue->ul.nonLcg0Bs = 0;
15629 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
15630 if (TRUE == ue->ul.useExtBSRSizes)
15632 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
15636 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
15638 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15640 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15642 else if(ulLcg->lcgId == 0)
15644 /* This is for handeling LCG0 */
15645 cmnLcg->bs = cmnLcg->reportedBs;
15649 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
15650 cmnLcg->bs = ue->ul.nonGbrLcgBs;
15652 ue->ul.totalBsr = cmnLcg->bs;
15654 for (cnt = ulLcg->lcgId+1; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
15656 /* TODO: The bs for the other LCGs may be stale because some or all of
15657 * the part of bs may have been already scheduled/data received. Please
15658 * consider this when truncated BSR is tested/implemented */
15659 ue->ul.totalBsr += ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs;
15662 rgSCHCmnUpdUlCompEffBsr(ue);
15665 if(cell->emtcEnable)
15669 cellSch->apisEmtcUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
15676 cellSch->apisUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
/* UL CA: propagate the BSR to each (active) SCell scheduler as well. */
15680 if (ue->ul.isUlCaEnabled && ue->numSCells)
15682 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
15684 #ifndef PAL_ENABLE_UL_CA
15685 if((ue->cellInfo[sCellIdx] != NULLP) &&
15686 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
15688 if(ue->cellInfo[sCellIdx] != NULLP)
15691 cellSch->apisUl->rgSCHUpdBsrTrunc(ue->cellInfo[sCellIdx]->cell, ue, ulLcg, bsr);
15701 * @brief Long BSR update.
15705 * Function : rgSCHCmnUpdBsrLong
15707 * - Update BSRs for all configured LCGs.
15708 * - Update priority of LCGs if needed.
15709 * - Update UE's position within/across uplink scheduling queues.
15712 * @param[in] RgSchCellCb *cell
15713 * @param[in] RgSchUeCb *ue
15714 * @param[in] uint8_t bsArr[]
15715 * @param[out] RgSchErrInfo *err
/* Handle a Long BSR MAC CE: update buffer status for all four LCGs from
 * bsArr[] (byte values via rgSchCmn(Ext)BsrTbl), cap GBR LCGs by
 * effGbr+effDeltaMbr and non-GBR aggregate by effAmbr, recompute totalBsr,
 * stop the BSR timer when total is zero, inform UL SPS when applicable,
 * recompute the effective BSR, and forward to the specific UL scheduler
 * (plus SCell schedulers for UL-CA UEs).
 * NOTE(review): parameter list and several braces are elided in this extract. */
15720 S16 rgSCHCmnUpdBsrLong
15728 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15729 uint32_t tmpBsArr[4] = {0, 0, 0, 0};
15730 uint32_t nonGbrBs = 0;
/* Newly reporting LCGs (previous bs == 0): mark their LCs active and
 * bump the per-QCI UL UE counts. */
15738 for(idx1 = 1; idx1 < RGSCH_MAX_LCG_PER_UE; idx1++)
15740 /* If Old BO is non zero then do nothing */
15741 if ((((RgSchCmnLcg *)(ue->ul.lcgArr[idx1].sch))->bs == 0)
15744 for(idx2 = 0; idx2 < ue->ul.lcgArr[idx1].numLch; idx2++)
15747 if (!(ue->ulActiveLCs & (1 <<
15748 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1))))
15750 ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->ulUeCount++;
15751 ue->ulActiveLCs |= (1 <<
15752 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1));
15758 ue->ul.nonGbrLcgBs = 0;
15759 ue->ul.nonLcg0Bs = 0;
/* LCG0 (SRB traffic) is taken at face value — no AMBR/GBR capping. */
15761 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[0]))
15763 if (TRUE == ue->ul.useExtBSRSizes)
15765 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnExtBsrTbl[bsArr[0]];
15766 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnExtBsrTbl[bsArr[0]];
15767 tmpBsArr[0] = rgSchCmnExtBsrTbl[bsArr[0]];
15771 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnBsrTbl[bsArr[0]];
15772 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnBsrTbl[bsArr[0]];
15773 tmpBsArr[0] = rgSchCmnBsrTbl[bsArr[0]];
/* LCG1..3: cap GBR LCGs individually, accumulate non-GBR for AMBR cap. */
15776 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
15778 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
15780 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
15782 if (TRUE == ue->ul.useExtBSRSizes)
15784 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsArr[lcgId]];
15788 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsArr[lcgId]];
15790 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15792 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15793 tmpBsArr[lcgId] = cmnLcg->bs;
15797 nonGbrBs += cmnLcg->reportedBs;
15798 tmpBsArr[lcgId] = cmnLcg->reportedBs;
15799 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
15803 ue->ul.nonGbrLcgBs = RGSCH_MIN(nonGbrBs,ue->ul.effAmbr);
15805 ue->ul.totalBsr = tmpBsArr[0] + tmpBsArr[1] + tmpBsArr[2] + tmpBsArr[3];
/* Nothing pending: a running BSR timer is pointless — stop it. */
15807 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (ue->ul.totalBsr == 0))
15809 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
15814 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE) /* SPS_FIX */
15816 if(ue->ul.totalBsr - tmpBsArr[1] == 0)
15817 {/* Updating the BSR to SPS only if LCG1 BS is present in sps active state */
15818 rgSCHCmnSpsBsrRpt(cell, ue, &ue->ul.lcgArr[1]);
15822 rgSCHCmnUpdUlCompEffBsr(ue);
/* Dispatch to eMTC or legacy UL scheduler hook. */
15825 if(cell->emtcEnable)
15829 cellSch->apisEmtcUl->rgSCHUpdBsrLong(cell, ue, bsArr);
15836 cellSch->apisUl->rgSCHUpdBsrLong(cell, ue, bsArr);
/* UL CA: propagate to SCell schedulers (guard differs with PAL_ENABLE_UL_CA). */
15840 if (ue->ul.isUlCaEnabled && ue->numSCells)
15842 for(uint8_t idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
15844 #ifndef PAL_ENABLE_UL_CA
15845 if((ue->cellInfo[idx] != NULLP) &&
15846 (ue->cellInfo[idx]->sCellState == RG_SCH_SCELL_ACTIVE))
15848 if(ue->cellInfo[idx] != NULLP)
15851 cellSch->apisUl->rgSCHUpdBsrLong(ue->cellInfo[idx]->cell, ue, bsArr);
15861 * @brief PHR update.
15865 * Function : rgSCHCmnUpdExtPhr
15867 * Updates extended power headroom information for an UE.
15869 * @param[in] RgSchCellCb *cell
15870 * @param[in] RgSchUeCb *ue
15871 * @param[in] uint8_t phr
15872 * @param[out] RgSchErrInfo *err
/* Handle an Extended PHR MAC CE: locate the allocation record whose timing
 * matches the CE report time (ue->macCeRptTime) in the UE's recent UL
 * allocation list and apply the PHR via rgSCHPwrUpdExtPhr; also notify
 * UL SPS when it is active for this UE.
 * NOTE(review): the list-walk loop header and some braces are elided here. */
15877 S16 rgSCHCmnUpdExtPhr
15881 RgInfExtPhrCEInfo *extPhr,
15885 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15886 RgSchCmnAllocRecord *allRcd;
/* Walk backwards from the most recent allocation record. */
15887 CmLList *node = ueUl->ulAllocLst.last;
15890 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
15897 allRcd = (RgSchCmnAllocRecord *)node->node;
/* PHR is meaningful only against the allocation it was measured on. */
15899 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
15901 rgSCHPwrUpdExtPhr(cell, ue, extPhr, allRcd);
15906 if(ulSpsUe->isUlSpsActv)
15908 rgSCHCmnSpsPhrInd(cell,ue);
15913 } /* rgSCHCmnUpdExtPhr */
15919 * @brief PHR update.
15923 * Function : rgSCHCmnUpdPhr
15925 * Updates power headroom information for an UE.
15927 * @param[in] RgSchCellCb *cell
15928 * @param[in] RgSchUeCb *ue
15929 * @param[in] uint8_t phr
15930 * @param[out] RgSchErrInfo *err
/* Body of rgSCHCmnUpdPhr (signature elided in this extract): handle a
 * (legacy) PHR MAC CE — find the allocation record matching the CE report
 * time and apply the PHR with the configured-max-power policy; notify
 * UL SPS when active. Mirrors rgSCHCmnUpdExtPhr above. */
15943 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15944 RgSchCmnAllocRecord *allRcd;
/* Walk backwards from the most recent allocation record. */
15945 CmLList *node = ueUl->ulAllocLst.last;
15948 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
15955 allRcd = (RgSchCmnAllocRecord *)node->node;
15957 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
15959 rgSCHPwrUpdPhr(cell, ue, phr, allRcd, RG_SCH_CMN_PWR_USE_CFG_MAX_PWR);
15964 if(ulSpsUe->isUlSpsActv)
15966 rgSCHCmnSpsPhrInd(cell,ue);
15971 } /* rgSCHCmnUpdPhr */
15974 * @brief UL grant for contention resolution.
15978 * Function : rgSCHCmnContResUlGrant
15980 * Add UE to another queue specifically for CRNTI based contention
15984 * @param[in] RgSchUeCb *ue
15985 * @param[out] RgSchErrInfo *err
/* Issue an UL grant for CRNTI-based contention resolution by delegating to
 * the specific UL scheduler (eMTC hook when the cell is eMTC-enabled and
 * the UE is an eMTC UE, legacy hook otherwise).
 * NOTE(review): parameter list and the #ifdef/brace structure are elided. */
15990 S16 rgSCHCmnContResUlGrant
15997 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16000 if(cell->emtcEnable)
16004 cellSch->apisEmtcUl->rgSCHContResUlGrant(cell, ue);
16011 cellSch->apisUl->rgSCHContResUlGrant(cell, ue);
16017 * @brief SR reception handling.
16021 * Function : rgSCHCmnSrRcvd
16023 * - Update UE's position within/across uplink scheduling queues
16024 * - Update priority of LCGs if needed.
16026 * @param[in] RgSchCellCb *cell
16027 * @param[in] RgSchUeCb *ue
16028 * @param[in] CmLteTimingInfo frm
16029 * @param[out] RgSchErrInfo *err
/* Tail of the rgSCHCmnSrRcvd parameter list plus its body (signature line
 * elided in this extract): handle a Scheduling Request — advance the timing
 * by one TTI, compare against the latest allocation record, flag the UE as
 * SR-granted, and delegate to the specific (eMTC or legacy) UL scheduler. */
16038 CmLteTimingInfo frm,
16042 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16043 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16044 CmLList *node = ueUl->ulAllocLst.last;
/* Stats counter (eMTC build): number of SR indications seen. */
16048 emtcStatsUlTomSrInd++;
16051 RGSCH_INCR_SUB_FRAME(frm, 1); /* 1 TTI after the time SR was sent */
16054 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)node->node;
16055 if (RGSCH_TIMEINFO_SAME(frm, allRcd->allocTime))
16061 //TODO_SID Need to check when it is getting triggered
16062 ue->isSrGrant = TRUE;
16064 if(cell->emtcEnable)
16068 cellSch->apisEmtcUl->rgSCHSrRcvd(cell, ue);
16075 cellSch->apisUl->rgSCHSrRcvd(cell, ue);
16081 * @brief Returns first uplink allocation to send reception
16086 * Function: rgSCHCmnFirstRcptnReq(cell)
16087 * Purpose: This function returns the first uplink allocation
16088 * (or NULLP if there is none) in the subframe
16089 * for which the scheduler is expected to prepare and send reception
16094 * @param[in] RgSchCellCb *cell
16095 * @return RgSchUlAlloc*
/* Return the first UL allocation (or NULLP) in the subframe indexed by
 * cellUl->rcpReqIdx for which a reception request must be prepared;
 * allocations without a HARQ process are skipped via rgSCHUtlUlAllocNxt. */
16097 RgSchUlAlloc *rgSCHCmnFirstRcptnReq(RgSchCellCb *cell)
16099 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16101 RgSchUlAlloc* alloc = NULLP;
/* RGSCH_INVALID_INFO means no UL subframe is pending reception. */
16104 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
16106 RgSchUlSf* sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
16107 alloc = rgSCHUtlUlAllocFirst(sf);
16109 if (alloc && alloc->hqProc == NULLP)
16111 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16119 * @brief Returns first uplink allocation to send reception
16124 * Function: rgSCHCmnNextRcptnReq(cell)
16125 * Purpose: This function returns the next uplink allocation
16126 * (or NULLP if there is none) in the subframe
16127 * in which is expected to prepare and send reception
16132 * @param[in] RgSchCellCb *cell
16133 * @return RgSchUlAlloc*
/* Return the UL allocation after 'alloc' (or NULLP) in the reception-request
 * subframe (cellUl->rcpReqIdx), skipping entries without a HARQ process. */
16135 RgSchUlAlloc *rgSCHCmnNextRcptnReq(RgSchCellCb *cell,RgSchUlAlloc *alloc)
16137 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16139 //RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
16142 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
16144 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
16146 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16147 if (alloc && alloc->hqProc == NULLP)
16149 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16160 * @brief Collates DRX enabled UE's scheduled in this SF
16164 * Function: rgSCHCmnDrxStrtInActvTmrInUl(cell)
16165 * Purpose: This function collates the link
16166 * of UE's scheduled in this SF who
16167 * have drx enabled. It then calls
16168 * DRX specific function to start/restart
16169 * inactivity timer in Ul
16173 * @param[in] RgSchCellCb *cell
/* Collect DRX-enabled UEs newly scheduled (new TX, not SR grant, not UL SPS
 * occasion) in the current UL scheduling subframe and hand the list to
 * rgSCHDrxStrtInActvTmr to start/restart their UL DRX inactivity timers.
 * NOTE(review): ueCb extraction from 'alloc' and the loop braces are elided. */
16176 Void rgSCHCmnDrxStrtInActvTmrInUl(RgSchCellCb *cell)
16178 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16179 RgSchUlSf *sf = &(cellUl->ulSfArr[cellUl->schdIdx]);
16180 RgSchUlAlloc *alloc = rgSCHUtlUlAllocFirst(sf);
16185 cmLListInit(&ulUeLst);
/* Only fresh transmissions count toward DRX inactivity. */
16193 if (!(alloc->grnt.isRtx) && ueCb->isDrxEnabled && !(ueCb->isSrGrant)
16195 /* ccpu00139513- DRX inactivity timer should not be started for
16196 * UL SPS occasions */
16197 && (alloc->hqProc->isSpsOccnHqP == FALSE)
16201 cmLListAdd2Tail(&ulUeLst,&(ueCb->ulDrxInactvTmrLnk));
16202 ueCb->ulDrxInactvTmrLnk.node = (PTR)ueCb;
16206 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16209 (Void)rgSCHDrxStrtInActvTmr(cell,&ulUeLst,RG_SCH_DRX_UL);
16216 * @brief Returns first uplink allocation to send HARQ feedback
16221 * Function: rgSCHCmnFirstHqFdbkAlloc
16222 * Purpose: This function returns the first uplink allocation
16223 * (or NULLP if there is none) in the subframe
16224 * for which it is expected to prepare and send HARQ
16229 * @param[in] RgSchCellCb *cell
16230 * @param[in] uint8_t idx
16231 * @return RgSchUlAlloc*
/* Return the first UL allocation (or NULLP) in the subframe indexed by
 * cellUl->hqFdbkIdx[idx] for which HARQ feedback must be sent; entries
 * without a HARQ process are skipped. */
16233 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc(RgSchCellCb *cell,uint8_t idx)
16235 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16237 RgSchUlAlloc *alloc = NULLP;
16240 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
16242 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
16243 alloc = rgSCHUtlUlAllocFirst(sf);
16245 while (alloc && (alloc->hqProc == NULLP))
16247 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16255 * @brief Returns next allocation to send HARQ feedback for.
16259 * Function: rgSCHCmnNextHqFdbkAlloc(cell)
16260 * Purpose: This function returns the next uplink allocation
16261 * (or NULLP if there is none) in the subframe
16262 * for which HARQ feedback needs to be sent.
16266 * @param[in] RgSchCellCb *cell
16267 * @return RgSchUlAlloc*
/* Return the UL allocation after 'alloc' (or NULLP) in the HARQ-feedback
 * subframe (cellUl->hqFdbkIdx[idx]), skipping entries without a HARQ process. */
16269 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc(RgSchCellCb *cell,RgSchUlAlloc *alloc,uint8_t idx)
16271 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16273 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
16275 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
16277 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16278 while (alloc && (alloc->hqProc == NULLP))
16280 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16290 /***********************************************************
16292 * Func : rgSCHCmnUlGetITbsFrmIMcs
16294 * Desc : Returns the Itbs that is mapped to an Imcs
16295 * for the case of uplink.
16303 **********************************************************/
16304 uint8_t rgSCHCmnUlGetITbsFrmIMcs(uint8_t iMcs)
16306 return (rgUlIMcsTbl[iMcs].iTbs);
16309 /***********************************************************
16311 * Func : rgSCHCmnUlGetIMcsFrmITbs
16313 * Desc : Returns the Imcs that is mapped to an Itbs
16314 * for the case of uplink.
16318 * Notes: For iTbs 19, iMcs is dependant on modulation order.
16319 * Refer to 36.213, Table 8.6.1-1 and 36.306 Table 4.1-2
16320 * for UE capability information
16324 **********************************************************/
/* Map an uplink iTbs to an iMcs (36.213 Table 8.6.1-1); for iTbs 19 the
 * answer depends on the UE's supported modulation, approximated here by the
 * UE category ('ueCtg'). NOTE(review): the leading branch(es) of the
 * if/else chain and the returns are elided in this extract. */
16325 uint8_t rgSCHCmnUlGetIMcsFrmITbs(uint8_t iTbs,CmLteUeCategory ueCtg)
16333 /*a higher layer can force a 64QAM UE to transmit at 16QAM.
16334 * We currently do not support this. Once the support for such
16335 * is added, ueCtg should be replaced by current transmit
16336 * modulation configuration.Refer to 36.213 -8.6.1
16338 else if ( iTbs < 19 )
/* iTbs 19 maps to different iMcs for non-CAT5 (16QAM-limited) UEs. */
16342 else if ((iTbs == 19) && (ueCtg != CM_LTE_UE_CAT_5))
16352 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
16353 was seen when IMCS exceeds 20 on T2k TDD*/
16363 /***********************************************************
16365 * Func : rgSCHCmnUlMinTbBitsForITbs
16367 * Desc : Returns the minimum number of bits that can
16368 * be given as grant for a specific CQI.
16376 **********************************************************/
16377 uint32_t rgSCHCmnUlMinTbBitsForITbs(RgSchCmnUlCell *cellUl,uint8_t iTbs)
16380 RGSCH_ARRAY_BOUND_CHECK(0, rgTbSzTbl[0], iTbs);
16382 return (rgTbSzTbl[0][iTbs][cellUl->sbSize-1]);
16385 /***********************************************************
16387 * Func : rgSCHCmnUlSbAlloc
16389 * Desc : Given a required 'number of subbands' and a hole,
16390 * returns a suitable alloc such that the subband
16391 * allocation size is valid
16395 * Notes: Does not assume either passed numSb or hole size
16396 * to be valid for allocation, and hence arrives at
16397 * an acceptable value.
16400 **********************************************************/
/* Carve a UL allocation out of 'hole': snap both the requested subband count
 * and the hole size to valid 2^a*3^b*5^c allocation sizes (rgSchCmnMult235Tbl),
 * then take the complete hole or a partial hole accordingly.
 * NOTE(review): parameter list, some braces and returns are elided here. */
16401 RgSchUlAlloc *rgSCHCmnUlSbAlloc
16408 uint8_t holeSz; /* valid hole size */
16409 RgSchUlAlloc *alloc;
/* Hole size is itself a valid 2.3.5 multiple: round numSb up and compare. */
16411 if ((holeSz = rgSchCmnMult235Tbl[hole->num].prvMatch) == hole->num)
16413 numSb = rgSchCmnMult235Tbl[numSb].match;
16414 if (numSb >= holeSz)
16416 alloc = rgSCHUtlUlAllocGetCompHole(sf, hole);
16420 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
/* Hole size is not a valid multiple: round numSb up when it fits,
 * otherwise round it down to the previous valid size. */
16425 if (numSb < holeSz)
16427 numSb = rgSchCmnMult235Tbl[numSb].match;
16431 numSb = rgSchCmnMult235Tbl[numSb].prvMatch;
16434 if ( numSb >= holeSz )
16438 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
16444 * @brief To fill the RgSchCmnUeUlAlloc structure of UeCb.
16448 * Function: rgSCHCmnUlUeFillAllocInfo
16449 * Purpose: Specific scheduler to call this API to fill the alloc
16452 * Invoked by: Scheduler
16454 * @param[in] RgSchCellCb *cell
16455 * @param[out] RgSchUeCb *ue
/* Finalize a UE's UL allocation (ueUl->alloc): fill TPC and n-DMRS, link the
 * HARQ process, populate the PDCCH with the grant, record the allocation for
 * BSR/PHR correlation, and (for new TX only) deduct it from the UE's
 * outstanding buffer status. Called by the specific scheduler. */
16458 Void rgSCHCmnUlUeFillAllocInfo(RgSchCellCb *cell,RgSchUeCb *ue)
16460 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16461 RgSchCmnUeUlAlloc *ulAllocInfo;
16462 RgSchCmnUlUe *ueUl;
16465 ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16466 ulAllocInfo = &ueUl->alloc;
16468 /* Fill alloc structure */
16469 rgSCHCmnUlAllocFillTpc(cell, ue, ulAllocInfo->alloc);
16470 rgSCHCmnUlAllocFillNdmrs(cellUl, ulAllocInfo->alloc);
16471 rgSCHCmnUlAllocLnkHqProc(ue, ulAllocInfo->alloc, ulAllocInfo->alloc->hqProc,
16472 ulAllocInfo->alloc->hqProc->isRetx);
16474 rgSCHCmnUlFillPdcchWithAlloc(ulAllocInfo->alloc->pdcch,
16475 ulAllocInfo->alloc, ue);
16476 /* Recording information about this allocation */
16477 rgSCHCmnUlRecordUeAlloc(cell, ue);
16479 /* Update the UE's outstanding allocation */
/* Retransmissions do not consume new buffer status. */
16480 if (!ulAllocInfo->alloc->hqProc->isRetx)
16482 rgSCHCmnUlUpdOutStndAlloc(cell, ue, ulAllocInfo->allocdBytes);
16489 * @brief Update the UEs outstanding alloc based on the BSR report's timing.
16494 * Function: rgSCHCmnUpdUlCompEffBsr
16495 * Purpose: Clear off all the allocations from outstanding allocation that
16496 * are later than or equal to BSR timing information (stored in UEs datIndTime).
16498 * Invoked by: Scheduler
16500 * @param[in] RgSchUeCb *ue
/* Recompute the UE's effective BSR after a MAC CE report: sum the bytes of
 * all allocation records made at or after the CE report time
 * (ue->macCeRptTime), deduct that outstanding amount first from LCG0 and
 * then from the non-LCG0 buffer status, and store the capped result in
 * ue->ul.effBsr.
 * NOTE(review): the two list-walk loop headers and several braces are
 * elided in this extract. */
16503 static Void rgSCHCmnUpdUlCompEffBsr(RgSchUeCb *ue)
16505 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,ue->cell);
16506 CmLList *node = ueUl->ulAllocLst.last;
16507 RgSchCmnAllocRecord *allRcd;
16508 uint32_t outStndAlloc=0;
16509 uint32_t nonLcg0OutStndAllocBs=0;
16510 uint32_t nonLcg0Bsr=0;
16512 RgSchCmnLcg *cmnLcg = NULLP;
/* First loop: find the record matching the BSR report time. */
16516 allRcd = (RgSchCmnAllocRecord *)node->node;
16517 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
/* Second loop: accumulate bytes granted since (and including) that record. */
16526 allRcd = (RgSchCmnAllocRecord *)node->node;
16528 outStndAlloc += allRcd->alloc;
16531 cmnLcg = (RgSchCmnLcg *)(ue->ul.lcgArr[0].sch);
16532 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
16533 if (cmnLcg->bs > outStndAlloc)
16535 cmnLcg->bs -= outStndAlloc;
16536 ue->ul.minReqBytes = cmnLcg->bs;
/* Outstanding allocation exceeds LCG0's bs: spill the excess to LCG1..3. */
16541 nonLcg0OutStndAllocBs = outStndAlloc - cmnLcg->bs;
16545 for(lcgId = 1;lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
16547 if(RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
16549 cmnLcg = ((RgSchCmnLcg *) (ue->ul.lcgArr[lcgId].sch));
16550 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
16552 nonLcg0Bsr += cmnLcg->bs;
16556 nonLcg0Bsr += ue->ul.nonGbrLcgBs;
16557 if (nonLcg0OutStndAllocBs > nonLcg0Bsr)
16563 nonLcg0Bsr -= nonLcg0OutStndAllocBs;
16565 ue->ul.nonLcg0Bs = nonLcg0Bsr;
16566 /* Cap effBsr with nonLcg0Bsr and append lcg0 bs.
16567 * nonLcg0Bsr limit applies only to lcg1,2,3 */
16568 /* better be handled in individual scheduler */
16569 ue->ul.effBsr = nonLcg0Bsr +\
16570 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
16575 * @brief Records information about the current allocation.
16579 * Function: rgSCHCmnUlRecordUeAlloc
16580 * Purpose: Records information about the current allocation.
16581 * This includes the allocated bytes, as well
16582 * as some power information.
16584 * Invoked by: Scheduler
16586 * @param[in] RgSchCellCb *cell
16587 * @param[in] RgSchUeCb *ue
/* Record the UE's current UL allocation for later BSR/PHR correlation:
 * recycle the oldest record from the UE's fixed-size allocation list, stamp
 * it with the subframe in which the UE will actually transmit (crntTime
 * advanced by TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA, eMTC variant
 * when applicable), store the allocated bytes/RBs/CQI/TPC, then feed the RB
 * count to power control and the byte count to cell measurements. */
16590 Void rgSCHCmnUlRecordUeAlloc(RgSchCellCb *cell,RgSchUeCb *ue)
16593 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16595 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16596 CmLListCp *lst = &ueUl->ulAllocLst;
/* Oldest record is at the head; it gets recycled to the tail below. */
16597 CmLList *node = ueUl->ulAllocLst.first;
16598 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
16599 RgSchCmnUeUlAlloc *ulAllocInfo = &ueUl->alloc;
16600 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
16602 cmLListDelFrm(lst, &allRcd->lnk);
16604 /* To the crntTime, add the MIN time at which UE will
16605 * actually send the BSR i.e DELTA+4 */
16606 allRcd->allocTime = cell->crntTime;
16607 /*ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
16609 if(ue->isEmtcUe == TRUE)
16611 RGSCH_INCR_SUB_FRAME_EMTC(allRcd->allocTime,
16612 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
16617 RGSCH_INCR_SUB_FRAME(allRcd->allocTime,
16618 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
/* NOTE(review): this assignment appears to override the time computed
 * above in some build configuration (guard lines elided) — confirm. */
16621 allRcd->allocTime = cellUl->schdTime;
16623 cmLListAdd2Tail(lst, &allRcd->lnk);
16625 /* Filling in the parameters to be recorded */
16626 allRcd->alloc = ulAllocInfo->allocdBytes;
16627 //allRcd->numRb = ulAllocInfo->alloc->grnt.numRb;
16628 allRcd->numRb = (ulAllocInfo->alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
16629 /*Recording the UL CQI derived from the maxUlCqi */
16630 allRcd->cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
16631 allRcd->tpc = ulAllocInfo->alloc->grnt.tpc;
16633 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
16635 cell->measurements.ulBytesCnt += ulAllocInfo->allocdBytes;
16640 /** PHR handling for MSG3
16641 * @brief Records allocation information of msg3 in the the UE.
16645 * Function: rgSCHCmnUlRecMsg3Alloc
16646 * Purpose: Records information about msg3 allocation.
16647 * This includes the allocated bytes, as well
16648 * as some power information.
16650 * Invoked by: Scheduler
16652 * @param[in] RgSchCellCb *cell
16653 * @param[in] RgSchUeCb *ue
16654 * @param[in] RgSchRaCb *raCb
/* Record the Msg3 grant in the UE's allocation list (PHR handling for MSG3):
 * recycle the oldest record, stamp it with the Msg3 allocation time from the
 * RA control block, store the grant's size/RBs/CCCH-CQI/TPC, and feed the RB
 * count to power control. */
16657 Void rgSCHCmnUlRecMsg3Alloc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchRaCb *raCb)
16659 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16660 CmLListCp *lst = &ueUl->ulAllocLst;
/* Oldest record is at the head; recycled to the tail below. */
16661 CmLList *node = ueUl->ulAllocLst.first;
16662 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
16664 /* Stack Crash problem for TRACE5 changes */
16666 cmLListDelFrm(lst, node);
16667 allRcd->allocTime = raCb->msg3AllocTime;
16668 cmLListAdd2Tail(lst, node);
16670 /* Filling in the parameters to be recorded */
16671 allRcd->alloc = raCb->msg3Grnt.datSz;
16672 allRcd->numRb = raCb->msg3Grnt.numRb;
16673 allRcd->cqi = raCb->ccchCqi;
16674 allRcd->tpc = raCb->msg3Grnt.tpc;
16676 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
16681 * @brief Keeps track of the most recent RG_SCH_CMN_MAX_ALLOC_TRACK
16682 * allocations to track. Adds this allocation to the ueUl's ulAllocLst.
16687 * Function: rgSCHCmnUlUpdOutStndAlloc
16688 * Purpose: Recent Allocation shall be at First Pos'n.
16689 * Remove the last node, update the fields
16690 * with the new allocation and add at front.
16692 * Invoked by: Scheduler
16694 * @param[in] RgSchCellCb *cell
16695 * @param[in] RgSchUeCb *ue
16696 * @param[in] uint32_t alloc
/* Deduct a fresh UL grant of 'alloc' bytes from the UE's pending buffer
 * status: LCG0 is consumed first, any remainder comes out of nonLcg0Bs;
 * ue->ul.effBsr is then recomputed. When nothing remains pending, stop the
 * BSR timer and (unless this was an SR grant) restart the periodic BSR
 * timer if configured; the lower byte cap (minReqBytes) is reset. */
16699 Void rgSCHCmnUlUpdOutStndAlloc(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t alloc)
16701 uint32_t nonLcg0Alloc=0;
16703 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
16704 if (((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs > alloc)
16706 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs -= alloc;
/* Grant exceeds LCG0's pending bytes: spill the excess to LCG1..3. */
16710 nonLcg0Alloc = alloc - ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
16711 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = 0;
16714 if (nonLcg0Alloc >= ue->ul.nonLcg0Bs)
16716 ue->ul.nonLcg0Bs = 0;
16720 ue->ul.nonLcg0Bs -= nonLcg0Alloc;
16722 /* Cap effBsr with effAmbr and append lcg0 bs.
16723 * effAmbr limit applies only to lcg1,2,3 non GBR LCG's*/
16724 /* better be handled in individual scheduler */
16725 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
16726 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
16728 if (ue->ul.effBsr == 0)
16730 if (ue->bsrTmr.tmrEvnt != TMR_NONE)
16732 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
/* SR grants should not arm the periodic BSR timer. */
16735 if (FALSE == ue->isSrGrant)
16737 if (ue->ul.bsrTmrCfg.isPrdBsrTmrPres)
16740 rgSCHTmrStartTmr(cell, ue, RG_SCH_TMR_BSR,
16741 ue->ul.bsrTmrCfg.prdBsrTmr);
16747 /* Resetting UEs lower Cap */
16748 ue->ul.minReqBytes = 0;
16755 * @brief Returns the "Itbs" for a given UE.
16759 * Function: rgSCHCmnUlGetITbs
16760 * Purpose: This function returns the "Itbs" for a given UE.
16762 * Invoked by: Scheduler
16764 * @param[in] RgSchUeCb *ue
/* Return the UL iTbs for a UE: cap the UE's reported CQI at maxUlCqi for
 * non-CAT5 (16QAM-limited) UEs, then map CQI to iTbs via
 * rgSchCmnUlCqiToTbsTbl (index 0/1 selects normal/extended CP). One build
 * path applies link-adaptation (cqiBasediTbs + deltaiTbs, clamped to
 * maxiTbs and the cell's maxUlItbs); the other maps the capped CQI directly.
 * NOTE(review): parameter list, #ifdef guards and some returns are elided. */
16767 uint8_t rgSCHCmnUlGetITbs
16774 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16775 /* CQI will be capped to maxUlCqi for 16qam UEs */
16776 CmLteUeCategory ueCtgy = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
16780 uint8_t maxiTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ueUl->maxUlCqi];
16784 /* #ifdef RG_SCH_CMN_EXT_CP_SUP For ECP pick index 1 */
16786 if ( (ueCtgy != CM_LTE_UE_CAT_5) &&
16787 (ueUl->validUlCqi > ueUl->maxUlCqi)
16790 cqi = ueUl->maxUlCqi;
16794 cqi = ueUl->validUlCqi;
/* Link adaptation: iTbs from scaled base + delta (both in units of 1/100). */
16798 iTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
16800 RG_SCH_CHK_ITBS_RANGE(iTbs, maxiTbs);
16802 iTbs = RGSCH_MIN(iTbs, ue->cell->thresholds.maxUlItbs);
16805 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
16806 was seen when IMCS exceeds 20 on T2k TDD */
/* Non-link-adaptation path: cap the current wideband CQI and map directly. */
16815 if ( (ueCtgy != CM_LTE_UE_CAT_5) && (ueUl->crntUlCqi[0] > ueUl->maxUlCqi ))
16817 cqi = ueUl->maxUlCqi;
16821 cqi = ueUl->crntUlCqi[0];
16824 return (rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][cqi]);
16828 * @brief This function adds the UE to DLRbAllocInfo TX lst.
16832 * Function: rgSCHCmnDlRbInfoAddUeTx
16833 * Purpose: This function adds the UE to DLRbAllocInfo TX lst.
16835 * Invoked by: Common Scheduler
16837 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16838 * @param[in] RgSchUeCb *ue
16839 * @param[in] RgSchDlHqProcCb *hqP
/* Add a HARQ process to the DL allocation-info new-TX list (once only,
 * guarded by reqLnk.node == NULLP); delegate to the DLFS scheduler when
 * frequency-selective scheduling is enabled.
 * NOTE(review): some parameter lines and braces are elided in this extract. */
16843 static Void rgSCHCmnDlRbInfoAddUeTx
16846 RgSchCmnDlRbAllocInfo *allocInfo,
16848 RgSchDlHqProcCb *hqP
16851 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* reqLnk.node set means the process is already queued — avoid duplicates. */
16854 if (hqP->reqLnk.node == NULLP)
16856 if (cellSch->dl.isDlFreqSel)
16858 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
16859 &allocInfo->dedAlloc.txHqPLst, hqP);
16864 cmLListAdd2Tail(&allocInfo->dedAlloc.txHqPLst, &hqP->reqLnk);
16866 hqP->reqLnk.node = (PTR)hqP;
16873 * @brief This function adds the UE to DLRbAllocInfo RETX lst.
16877 * Function: rgSCHCmnDlRbInfoAddUeRetx
16878 * Purpose: This function adds the UE to DLRbAllocInfo RETX lst.
16880 * Invoked by: Common Scheduler
16882 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16883 * @param[in] RgSchUeCb *ue
16884 * @param[in] RgSchDlHqProcCb *hqP
/* Add a HARQ process to the DL allocation-info RETX list; delegate to the
 * DLFS scheduler when frequency-selective scheduling is enabled. Unlike the
 * TX variant, no duplicate check is needed here (see inline comment). */
16888 static Void rgSCHCmnDlRbInfoAddUeRetx
16891 RgSchCmnDlRbAllocInfo *allocInfo,
16893 RgSchDlHqProcCb *hqP
16896 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
16899 if (cellSch->dl.isDlFreqSel)
16901 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
16902 &allocInfo->dedAlloc.retxHqPLst, hqP);
16906 /* checking UE's presence in this lst is unnecessary */
16907 cmLListAdd2Tail(&allocInfo->dedAlloc.retxHqPLst, &hqP->reqLnk);
16908 hqP->reqLnk.node = (PTR)hqP;
16914 * @brief This function adds the UE to DLRbAllocInfo TX-RETX lst.
16918 * Function: rgSCHCmnDlRbInfoAddUeRetxTx
16919 * Purpose: This adds the UE to DLRbAllocInfo TX-RETX lst.
16921 * Invoked by: Common Scheduler
16923 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16924 * @param[in] RgSchUeCb *ue
16925 * @param[in] RgSchDlHqProcCb *hqP
/* Add a HARQ process to the DL allocation-info TX-RETX list (processes with
 * both a retransmission and a new transmission pending); delegate to the
 * DLFS scheduler when frequency-selective scheduling is enabled. */
16929 static Void rgSCHCmnDlRbInfoAddUeRetxTx
16932 RgSchCmnDlRbAllocInfo *allocInfo,
16934 RgSchDlHqProcCb *hqP
16937 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
16940 if (cellSch->dl.isDlFreqSel)
16942 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
16943 &allocInfo->dedAlloc.txRetxHqPLst, hqP);
16947 cmLListAdd2Tail(&allocInfo->dedAlloc.txRetxHqPLst, &hqP->reqLnk);
16948 hqP->reqLnk.node = (PTR)hqP;
16954 * @brief This function adds the UE to DLRbAllocInfo NonSchdRetxLst.
16958 * Function: rgSCHCmnDlAdd2NonSchdRetxLst
16959 * Purpose: During RB estimation for RETX, if allocation fails
16960 * then appending it to NonSchdRetxLst, the further
16961 * action is taken as part of Finalization in
16962 * respective schedulers.
16964 * Invoked by: Common Scheduler
16966 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16967 * @param[in] RgSchUeCb *ue
16968 * @param[in] RgSchDlHqProcCb *hqP
/* Append a HARQ process whose RETX RB estimation failed to the
 * non-scheduled-RETX list; finalization in the specific scheduler acts on
 * it later. SPS HARQ processes are excluded (early return, braces elided). */
16972 static Void rgSCHCmnDlAdd2NonSchdRetxLst
16974 RgSchCmnDlRbAllocInfo *allocInfo,
16976 RgSchDlHqProcCb *hqP
16979 CmLList *schdLnkNode;
/* SPS-owned HARQ processes are handled by the SPS module, not queued here. */
16983 if ( (hqP->sch != (RgSchCmnDlHqProc *)NULLP) &&
16984 (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP)))
16990 schdLnkNode = &hqP->schdLstLnk;
16991 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
16992 cmLListAdd2Tail(&allocInfo->dedAlloc.nonSchdRetxHqPLst, schdLnkNode);
17000 * @brief This function adds the UE to DLRbAllocInfo NonSchdTxRetxLst.
17004 * Function: rgSCHCmnDlAdd2NonSchdTxRetxLst
17005 * Purpose: During RB estimation for TXRETX, if allocation fails
17006 * then appending it to NonSchdTxRetxLst, the further
17007 * action is taken as part of Finalization in
17008 * respective schedulers.
17010 * Invoked by: Common Scheduler
17012 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
17013 * @param[in] RgSchUeCb *ue
17014 * @param[in] RgSchDlHqProcCb *hqP
17020 * @brief This function handles the initialisation of DL HARQ/ACK feedback
17021 * timing information for eaach DL subframe.
17025 * Function: rgSCHCmnDlANFdbkInit
17026 * Purpose: Each DL subframe stores the sfn and subframe
17027 * information of UL subframe in which it expects
17028 * HARQ ACK/NACK feedback for this subframe.It
17029 * generates the information based on Downlink
17030 * Association Set Index table.
17032 * Invoked by: Scheduler
17034 * @param[in] RgSchCellCb* cell
/* TDD: for every DL subframe, precompute where its HARQ ACK/NACK feedback
 * arrives — the UL subframe number, the SFN offset, and the 'm' index —
 * using the Downlink Association Set Index (K) table for the cell's UL/DL
 * configuration. The first radio frame is computed explicitly; subsequent
 * frames copy from the corresponding subframe of the previous frame. */
17038 static S16 rgSCHCmnDlANFdbkInit(RgSchCellCb *cell)
17041 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17042 uint8_t maxDlSubfrms = cell->numDlSubfrms;
17046 uint8_t calcSfnOffset;
17048 uint8_t ulSfCnt =0;
17049 RgSchTddSubfrmInfo ulSubfrmInfo;
17050 uint8_t maxUlSubfrms;
17053 ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
17054 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17056 /* Generate HARQ ACK/NACK feedback information for each DL sf in a radio frame
17057 * Calculate this information based on DL Association set Index table */
17058 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance sfNum to the next UL subframe of the configuration. */
17060 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
17061 RG_SCH_TDD_UL_SUBFRAME)
17063 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Each UL subframe carries feedback for numFdbkSubfrms DL subframes,
 * each K[idx] subframes earlier. */
17067 for(idx=0; idx < rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
17068 numFdbkSubfrms; idx++)
17070 calcSfNum = sfNum - rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
17074 calcSfnOffset = RGSCH_CEIL(-calcSfNum, RGSCH_NUM_SUB_FRAMES);
17081 calcSfNum = ((RGSCH_NUM_SUB_FRAMES * calcSfnOffset) + calcSfNum)\
17082 % RGSCH_NUM_SUB_FRAMES;
/* Map the DL subframe number to its index in cell->subFrms, which
 * stores only DL subframes (layout depends on switch points). */
17084 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
17088 else if((ulSubfrmInfo.switchPoints == 2) && (calcSfNum <= \
17089 RG_SCH_CMN_SPL_SUBFRM_6))
17091 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
17095 dlIdx = calcSfNum - maxUlSubfrms;
17098 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = sfNum;
17099 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = calcSfnOffset;
17100 cell->subFrms[dlIdx]->dlFdbkInfo.m = idx;
17102 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17105 /* DL subframes in the subsequent radio frames are initialized
17106 * with the previous radio frames */
17107 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;\
17110 sfNum = dlIdx - rgSchTddNumDlSubfrmTbl[ulDlCfgIdx]\
17111 [RGSCH_NUM_SUB_FRAMES-1];
17112 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = \
17113 cell->subFrms[sfNum]->dlFdbkInfo.subframe;
17114 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = \
17115 cell->subFrms[sfNum]->dlFdbkInfo.sfnOffset;
17116 cell->subFrms[dlIdx]->dlFdbkInfo.m = cell->subFrms[sfNum]->dlFdbkInfo.m;
17122 * @brief This function handles the initialization of uplink association
17123 * set information for each DL subframe.
17128 * Function: rgSCHCmnDlKdashUlAscInit
17129 * Purpose: Each DL sf stores the sfn and sf information of UL sf
17130 * in which it expects HQ ACK/NACK trans. It generates the information
17131 * based on k` in UL association set index table.
17133 * Invoked by: Scheduler
17135 * @param[in] RgSchCellCb* cell
/* TDD: for every DL subframe, precompute the UL subframe (number and SFN
 * offset) whose HARQ ACK/NACK it acknowledges, using k' from the UL
 * Association Set Index table for the cell's UL/DL configuration. DL
 * indices never touched get RGSCH_INVALID_INFO; subsequent radio frames
 * copy from the first frame. */
17139 static S16 rgSCHCmnDlKdashUlAscInit(RgSchCellCb *cell)
17142 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17143 uint8_t maxDlSubfrms = cell->numDlSubfrms;
17148 uint8_t ulSfCnt =0;
17149 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
17150 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
/* Bitmask of DL indices for which ulAscInfo has been filled. */
17151 uint8_t dlPres = 0;
17154 /* Generate ACK/NACK offset information for each DL subframe in a radio frame
17155 * Calculate this information based on K` in UL Association Set table */
17156 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance sfNum to the next UL subframe of the configuration. */
17158 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
17159 RG_SCH_TDD_UL_SUBFRAME)
17161 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* k' table is indexed by ulDlCfgIdx-1 (no entries for config 0). */
17165 calcSfNum = (sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum] + \
17166 RGSCH_NUM_SUB_FRAMES) % RGSCH_NUM_SUB_FRAMES;
17167 calcSfnOffset = sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum];
17168 if(calcSfnOffset < 0)
17170 calcSfnOffset = RGSCH_CEIL(-calcSfnOffset, RGSCH_NUM_SUB_FRAMES);
/* Map the DL subframe number to its cell->subFrms index (layout
 * depends on the number of switch points). */
17177 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
17181 else if((ulSubfrmInfo.switchPoints == 2) &&
17182 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
17184 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
17188 dlIdx = calcSfNum - maxUlSubfrms;
17191 cell->subFrms[dlIdx]->ulAscInfo.subframe = sfNum;
17192 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset = calcSfnOffset;
17194 /* set dlIdx for which ulAscInfo is updated */
17195 dlPres = dlPres | (1 << dlIdx);
17196 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17199 /* Set Invalid information for which ulAscInfo is not present */
17201 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17204 /* If dlPres is 0, ulAscInfo is not present in that DL index */
17205 if(! ((dlPres >> sfCount)&0x01))
17207 cell->subFrms[sfCount]->ulAscInfo.sfnOffset =
17208 RGSCH_INVALID_INFO;
17209 cell->subFrms[sfCount]->ulAscInfo.subframe =
17210 RGSCH_INVALID_INFO;
17214 /* DL subframes in the subsequent radio frames are initialized
17215 * with the previous radio frames */
17216 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;
17220 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17221 cell->subFrms[dlIdx]->ulAscInfo.subframe =
17222 cell->subFrms[sfNum]->ulAscInfo.subframe;
17223 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset =
17224 cell->subFrms[sfNum]->ulAscInfo.sfnOffset;
17231 * @brief This function initialises the 'Np' value for 'p'
17235 * Function: rgSCHCmnDlNpValInit
17236 * Purpose: To initialise the 'Np' value for each 'p'. It is used
17237 * to find the mapping between nCCE and 'p' and used in
17238 * HARQ ACK/NACK reception.
17240 * Invoked by: Scheduler
17242 * @param[in] RgSchCellCb* cell
17246 static S16 rgSCHCmnDlNpValInit(RgSchCellCb *cell)
/* Precomputes the 'Np' table used to map nCCE to 'p' for TDD HARQ
 * ACK/NACK reception. Np[idx] = dlTotalBw*(idx*NUM_SUBCAR - 4)/36,
 * truncated to uint8_t — matches the Np formula of 36.213 §10.1
 * (TODO confirm against the exact spec clause). */
17251 /* Always Np is 0 for p=0 */
17252 cell->rgSchTddNpValTbl[0] = 0;
17254 for(idx=1; idx < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; idx++)
17256 np = cell->bwCfg.dlTotalBw * (idx * RG_SCH_CMN_NUM_SUBCAR - 4);
17257 cell->rgSchTddNpValTbl[idx] = (uint8_t) (np/36);
17264 * @brief This function handles the creation of RACH preamble
17265 * list to queue the preambles and process at the scheduled
17270 * Function: rgSCHCmnDlCreateRachPrmLst
17271 * Purpose: To create RACH preamble list based on RA window size.
17272 * It is used to queue the preambles and process it at the
17275 * Invoked by: Scheduler
17277 * @param[in] RgSchCellCb* cell
17281 static S16 rgSCHCmnDlCreateRachPrmLst(RgSchCellCb *cell)
/* Allocates the RACH-preamble request list: one CmLListCp per
 * (RAR window slot, RA-RNTI, subframe) combination. The allocation
 * size is lstSize entries of CmLListCp; raArrSz comes from the
 * RA-response window calculation macro. */
17287 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
17289 lstSize = raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES;
17291 cell->raInfo.maxRaSize = raArrSz;
/* NOTE(review): ret from rgSCHUtlAllocSBuf is presumably checked in the
 * lines not shown here before lstSize is recorded. */
17292 ret = rgSCHUtlAllocSBuf(cell->instIdx,
17293 (Data **)(&cell->raInfo.raReqLst), (Size)(lstSize * sizeof(CmLListCp)));
17299 cell->raInfo.lstSize = lstSize;
17306 * @brief This function handles the initialization of RACH Response
17307 * information at each DL subframe.
17311 * Function: rgSCHCmnDlRachInfoInit
17312 * Purpose: Each DL subframe stores the sfn and subframe information of
17313 * possible RACH response allowed for UL subframes. It generates
17314 * the information based on PRACH configuration.
17316 * Invoked by: Scheduler
17318 * @param[in] RgSchCellCb* cell
17322 static S16 rgSCHCmnDlRachInfoInit(RgSchCellCb *cell)
/* For every UL (and special) subframe, computes the window of DL
 * subframes in which a RACH response may be transmitted, and records
 * both the candidate response subframes and the point at which stale
 * RA requests must be deleted (end of the RA window). */
17325 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17327 uint8_t ulSfCnt =0;
17328 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
17329 [RGSCH_NUM_SUB_FRAMES-1];
17331 RgSchTddRachRspLst rachRspLst[3][RGSCH_NUM_SUB_FRAMES];
17336 uint8_t endSubfrmIdx;
17337 uint8_t startSubfrmIdx;
17339 RgSchTddRachDelInfo *delInfo;
17341 uint8_t numSubfrms;
17344 memset(rachRspLst, 0, sizeof(rachRspLst));
17346 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
17348 /* Include Special subframes */
17349 maxUlSubfrms = maxUlSubfrms + \
17350 rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx].switchPoints;
17351 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Skip pure-DL subframes; PRACH can occur only in UL/special subframes. */
17353 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] ==
17354 RG_SCH_TDD_DL_SUBFRAME)
17356 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* RA window start = preamble subframe + wait period + RAR processing
 * subframes; end = start + configured raWinSize - 1 (in subframes). */
17360 startWin = (sfNum + RG_SCH_CMN_RARSP_WAIT_PRD + \
17361 ((RgSchCmnCell *)cell->sc.sch)->dl.numRaSubFrms);
17362 endWin = (startWin + cell->rachCfg.raWinSize - 1);
17364 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][startWin%RGSCH_NUM_SUB_FRAMES];
17365 /* Find the next DL subframe starting from Subframe 0 */
17366 if((startSubfrmIdx % RGSCH_NUM_SUB_FRAMES) == 0)
17368 startWin = RGSCH_CEIL(startWin, RGSCH_NUM_SUB_FRAMES);
17369 startWin = startWin * RGSCH_NUM_SUB_FRAMES;
/* Clamp the window end back to the last DL subframe within it. */
17373 rgSchTddLowDlSubfrmIdxTbl[ulDlCfgIdx][endWin%RGSCH_NUM_SUB_FRAMES];
17374 endWin = (endWin/RGSCH_NUM_SUB_FRAMES) * RGSCH_NUM_SUB_FRAMES \
17376 if(startWin > endWin)
17380 /* Find all the possible RACH Response transmission
17381 * time within the RA window size */
17382 startSubfrmIdx = startWin%RGSCH_NUM_SUB_FRAMES;
17383 for(sfnIdx = startWin/RGSCH_NUM_SUB_FRAMES;
17384 sfnIdx <= endWin/RGSCH_NUM_SUB_FRAMES; sfnIdx++)
17386 if(sfnIdx == endWin/RGSCH_NUM_SUB_FRAMES)
17388 endSubfrmIdx = endWin%RGSCH_NUM_SUB_FRAMES;
17392 endSubfrmIdx = RGSCH_NUM_SUB_FRAMES-1;
17395 /* Find all the possible RACH Response transmission
17396 * time within radio frame */
17397 for(subfrmIdx = startSubfrmIdx;
17398 subfrmIdx <= endSubfrmIdx; subfrmIdx++)
17400 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][subfrmIdx] ==
17401 RG_SCH_TDD_UL_SUBFRAME)
17405 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
17406 /* Find the next DL subframe starting from Subframe 0 */
17407 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
17411 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx], subfrmIdx);
/* Append this preamble subframe (sfNum) to the response slot's list. */
17413 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
17414 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset = sfnIdx;
17415 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].subframe[numSubfrms]
17417 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms++;
17419 startSubfrmIdx = RG_SCH_CMN_SUBFRM_0;
17421 /* Update the subframes to be deleted at this subframe */
17422 /* Get the subframe after the end of RA window size */
17425 sfnOffset = endWin/RGSCH_NUM_SUB_FRAMES;
17428 sfnOffset += raArrSz;
17430 sfnIdx = (endWin/RGSCH_NUM_SUB_FRAMES) % raArrSz;
17432 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx],endSubfrmIdx-1);
17433 if((endSubfrmIdx == RGSCH_NUM_SUB_FRAMES) ||
17434 (rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx] ==
17435 RGSCH_NUM_SUB_FRAMES))
/* Window ended past the last DL subframe of the frame: deletion rolls
 * over to the first DL subframe of the next frame. */
17438 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][RG_SCH_CMN_SUBFRM_0];
17442 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx];
17445 delInfo = &rachRspLst[sfnIdx][subfrmIdx].delInfo;
17446 delInfo->sfnOffset = sfnOffset;
17447 delInfo->subframe[delInfo->numSubfrms] = sfNum;
17448 delInfo->numSubfrms++;
17450 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Copy the locally built table into the cell control block. */
17453 ret = rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz);
17463 * @brief This function handles the initialization of PHICH information
17464 * for each DL subframe based on PHICH table.
17468 * Function: rgSCHCmnDlPhichOffsetInit
17469 * Purpose: Each DL subf stores the sfn and subf information of UL subframe
17470 * for which it transmits PHICH in this subframe. It generates the information
17471 * based on PHICH table.
17473 * Invoked by: Scheduler
17475 * @param[in] RgSchCellCb* cell
17479 static S16 rgSCHCmnDlPhichOffsetInit(RgSchCellCb *cell)
/* Builds, per DL subframe, the UL subframe (subframe + SFN offset) whose
 * PHICH (UL HARQ feedback) is carried in that DL subframe, using the K
 * values from the TDD PHICH table. Structure mirrors
 * rgSCHCmnDlKdashUlAscInit. */
17482 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17483 uint8_t maxDlSubfrms = cell->numDlSubfrms;
17486 uint8_t dlPres = 0;
17487 uint8_t calcSfnOffset;
17489 uint8_t ulSfCnt =0;
17490 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
17491 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
17492 [RGSCH_NUM_SUB_FRAMES-1];
17495 /* Generate PHICH offset information for each DL subframe in a radio frame
17496 * Calculate this information based on K in PHICH table */
17497 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Skip non-UL subframes to find the next UL subframe. */
17499 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
17500 RG_SCH_TDD_UL_SUBFRAME)
17502 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* DL subframe carrying PHICH = (UL subframe + K) mod 10; the quotient
 * gives how many radio frames ahead it falls. */
17506 calcSfNum = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) % \
17507 RGSCH_NUM_SUB_FRAMES;
17508 calcSfnOffset = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) / \
17509 RGSCH_NUM_SUB_FRAMES;
/* Map the subframe number into the DL-only subFrms[] index space. */
17511 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
17515 else if((ulSubfrmInfo.switchPoints == 2) &&
17516 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
17518 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
17522 dlIdx = calcSfNum - maxUlSubfrms;
17525 cell->subFrms[dlIdx]->phichOffInfo.subframe = sfNum;
17526 cell->subFrms[dlIdx]->phichOffInfo.numSubfrms = 1;
17528 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset = calcSfnOffset;
17530 /* set dlIdx for which phich offset is updated */
17531 dlPres = dlPres | (1 << dlIdx);
17532 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17535 /* Set Invalid information for which phich offset is not present */
17537 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17540 /* If dlPres is 0, phich offset is not present in that DL index */
17541 if(! ((dlPres >> sfCount)&0x01))
17543 cell->subFrms[sfCount]->phichOffInfo.sfnOffset =
17544 RGSCH_INVALID_INFO;
17545 cell->subFrms[sfCount]->phichOffInfo.subframe =
17546 RGSCH_INVALID_INFO;
17547 cell->subFrms[sfCount]->phichOffInfo.numSubfrms = 0;
17551 /* DL subframes in the subsequent radio frames are
17552 * initialized with the previous radio frames */
17553 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms;
17554 dlIdx < maxDlSubfrms; dlIdx++)
17557 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17559 cell->subFrms[dlIdx]->phichOffInfo.subframe =
17560 cell->subFrms[sfNum]->phichOffInfo.subframe;
17562 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset =
17563 cell->subFrms[sfNum]->phichOffInfo.sfnOffset;
17570 * @brief Update of scheduler variables per TTI.
17574 * Function: rgSCHCmnUpdVars
17575 * Purpose: Update of scheduler variables per TTI.
17577 * @param[in] RgSchCellCb *cell
17581 Void rgSCHCmnUpdVars(RgSchCellCb *cell)
/* Per-TTI (TDD) recomputation of the UL scheduling indices kept in the
 * common UL cell block: PUSCH scheduling index/HARQ id, PHICH feedback
 * indices, reception-request index, msg3 index, SPS reservation index,
 * and dedicated-RACH bookkeeping. */
17583 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
17584 CmLteTimingInfo timeInfo;
17586 uint8_t ulSubframe;
17587 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17588 uint8_t msg3Subfrm;
17591 /* ccpu00132654-ADD- Initializing all the indices in every subframe*/
17592 rgSCHCmnInitVars(cell);
/* idx = subframe in which DL control (DCI-0) for UL would be sent. */
17594 idx = (cell->crntTime.slot + TFU_ULCNTRL_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
17595 /* Calculate the UL scheduling subframe idx based on the
17597 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][idx] != 0)
17599 /* PUSCH transmission is based on offset from DL
17600 * PDCCH scheduling */
17601 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
17602 ulSubframe = rgSchTddPuschTxKTbl[ulDlCfgIdx][timeInfo.subframe];
17603 /* Add the DCI-0 to PUSCH time to get the time of UL subframe */
17604 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, ulSubframe);
17606 cellUl->schdTti = timeInfo.sfn * 10 + timeInfo.subframe;
17608 /* Fetch the corresponding UL subframe Idx in UL sf array */
17609 cellUl->schdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17610 /* Fetch the corresponding UL Harq Proc ID */
17611 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
17612 cellUl->schdTime = timeInfo;
/* Mval != 0 means PHICH is carried for some UL subframe(s) at idx. */
17614 Mval = rgSchTddPhichMValTbl[ulDlCfgIdx][idx];
17617 /* Fetch the tx time for DL HIDCI-0 */
17618 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
17619 /* Fetch the corresponding n-k tx time of PUSCH */
17620 cellUl->hqFdbkIdx[0] = rgSCHCmnGetPhichUlSfIdx(&timeInfo, cell);
17621 /* Retx will happen according to the Pusch k table */
17622 cellUl->reTxIdx[0] = cellUl->schdIdx;
17624 if(ulDlCfgIdx == 0)
17626 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[0] */
17627 cellUl->reTxIdx[0] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
17628 cellUl->hqFdbkIdx[0]);
17631 /* At Idx 1 store the UL SF adjacent(left) to the UL SF
17633 cellUl->hqFdbkIdx[1] = (cellUl->hqFdbkIdx[0]-1 +
17634 cellUl->numUlSubfrms) % cellUl->numUlSubfrms;
17635 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[1] */
17636 cellUl->reTxIdx[1] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
17637 cellUl->hqFdbkIdx[1]);
/* Reception-request index: valid only if the target is an UL subframe. */
17642 idx = (cell->crntTime.slot + TFU_RECPREQ_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
17643 if (rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] == RG_SCH_TDD_UL_SUBFRAME)
17645 RGSCHCMNADDTOCRNTTIME(cell->crntTime, timeInfo, TFU_RECPREQ_DLDELTA)
17646 cellUl->rcpReqIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17648 idx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) % RGSCH_NUM_SUB_FRAMES;
17650 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
17651 special subframe */
17652 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] != RG_SCH_TDD_UL_SUBFRAME)
17654 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
17655 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
17656 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, msg3Subfrm);
17657 cellUl->msg3SchdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17658 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* SPS UL reservation: table value 0 means no reservation this subframe. */
17661 if(!rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][idx])
17663 cellUl->spsUlRsrvIdx = RGSCH_INVALID_INFO;
17667 /* introduce some reuse with above code? */
17669 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
17670 //offst = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
17671 offst = rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][timeInfo.subframe];
17672 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, offst);
17673 cellUl->spsUlRsrvIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17674 /* The harq proc continues to be accessed and used the same delta before
17675 * actual data occurance, and hence use the same idx */
17676 cellUl->spsUlRsrvHqProcIdx = cellUl->schdHqProcIdx;
17680 /* RACHO: update cmn sched specific RACH variables,
17681 * mainly the prachMaskIndex */
17682 rgSCHCmnUpdRachParam(cell);
17688 * @brief To get 'p' value from nCCE.
17692 * Function: rgSCHCmnGetPValFrmCCE
17693 * Purpose: Gets 'p' value for HARQ ACK/NACK reception from CCE.
17695 * @param[in] RgSchCellCb *cell
17696 * @param[in] uint8_t cce
17700 uint8_t rgSCHCmnGetPValFrmCCE(RgSchCellCb *cell,uint8_t cce)
/* Returns the 'p' bucket the given nCCE falls in, by scanning the
 * precomputed Np table (see rgSCHCmnDlNpValInit): the first i with
 * cce < Np[i] identifies the bucket. */
17704 for(i=1; i < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; i++)
17706 if(cce < cell->rgSchTddNpValTbl[i])
17715 /***********************************************************
17717 * Func : rgSCHCmnUlAdapRetx
17719 * Desc : Adaptive retransmission for an allocation.
17727 **********************************************************/
17728 static Void rgSCHCmnUlAdapRetx(RgSchUlAlloc *alloc,RgSchUlHqProcCb *proc)
/* Prepares an adaptive UL retransmission: updates the HARQ process for
 * retx, then selects the current MCS — an RV-specific iMcs when rvIdx
 * is non-zero, otherwise the originally granted iMcs. */
17731 rgSCHUhmRetx(proc, alloc);
17733 if (proc->rvIdx != 0)
17735 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[proc->rvIdx];
17740 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
17746 * @brief Scheduler invocation per TTI.
17750 * Function: rgSCHCmnHdlUlInactUes
17753 * Invoked by: Common Scheduler
17755 * @param[in] RgSchCellCb *cell
17758 static Void rgSCHCmnHdlUlInactUes(RgSchCellCb *cell)
/* Collects UEs that are UL-inactive due to measurement gaps / ACK-NACK
 * repetition and hands the list to the specific UL scheduler so it can
 * deactivate them. */
17760 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17761 CmLListCp ulInactvLst;
17762 /* Get a List of Inactv UEs for UL*/
17763 cmLListInit(&ulInactvLst);
17765 /* Trigger Spfc Schedulers with Inactive UEs */
17766 rgSCHMeasGapANRepGetUlInactvUe (cell, &ulInactvLst);
17767 /* take care of this in UL retransmission */
17768 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInactvLst);
17774 * @brief Scheduler invocation per TTI.
17778 * Function: rgSCHCmnHdlDlInactUes
17781 * Invoked by: Common Scheduler
17783 * @param[in] RgSchCellCb *cell
17786 static Void rgSCHCmnHdlDlInactUes(RgSchCellCb *cell)
/* DL counterpart of rgSCHCmnHdlUlInactUes: gathers DL-inactive UEs
 * (measurement gap / AN repetition) and notifies the specific DL
 * scheduler. */
17788 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17789 CmLListCp dlInactvLst;
17790 /* Get a List of Inactv UEs for DL */
17791 cmLListInit(&dlInactvLst);
17793 /* Trigger Spfc Schedulers with Inactive UEs */
17794 rgSCHMeasGapANRepGetDlInactvUe (cell, &dlInactvLst);
17796 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInactvLst);
17800 /* RACHO: Rach handover functions start here */
17801 /***********************************************************
17803 * Func : rgSCHCmnUeIdleExdThrsld
17805 * Desc : RETURN ROK if UE has been idle more
17814 **********************************************************/
17815 static S16 rgSCHCmnUeIdleExdThrsld(RgSchCellCb *cell,RgSchUeCb *ue)
/* Checks whether the UE has had no UL transmission for longer than its
 * idle threshold (per the doxygen above: returns ROK when exceeded). */
17817 /* Time difference in subframes */
17818 uint32_t sfDiff = RGSCH_CALC_SF_DIFF(cell->crntTime, ue->ul.ulTransTime);
17820 if (sfDiff > (uint32_t)RG_SCH_CMN_UE_IDLE_THRSLD(ue))
17832 * @brief Scheduler processing for Ded Preambles on cell configuration.
17836 * Function : rgSCHCmnCfgRachDedPrm
17838 * This function does requisite initialisation
17839 * for RACH Ded Preambles.
17842 * @param[in] RgSchCellCb *cell
17845 static Void rgSCHCmnCfgRachDedPrm(RgSchCellCb *cell)
/* Cell-config-time initialisation of dedicated RACH preambles: copies
 * the configured preamble set, initialises the HO / PDCCH-order queues
 * and the rapId->UE map, and advances applFrm/prachMskIndx to the first
 * PRACH opportunity at least 'gap' subframes ahead of current time. */
17847 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
17848 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
/* Nothing to do if no dedicated preamble set is configured. */
17852 if (cell->macPreambleSet.pres == NOTPRSNT)
17856 cellSch->rachCfg.numDedPrm = cell->macPreambleSet.size;
17857 cellSch->rachCfg.dedPrmStart = cell->macPreambleSet.start;
17858 /* Initialize handover List */
17859 cmLListInit(&cellSch->rachCfg.hoUeLst);
17860 /* Initialize pdcch Order List */
17861 cmLListInit(&cellSch->rachCfg.pdcchOdrLst);
17863 /* Intialize the rapId to UE mapping structure */
17864 for (cnt = 0; cnt<cellSch->rachCfg.numDedPrm; cnt++)
17866 cellSch->rachCfg.rapIdMap[cnt].rapId = cellSch->rachCfg.dedPrmStart + \
17868 cmLListInit(&cellSch->rachCfg.rapIdMap[cnt].assgndUes);
17870 /* Perform Prach Mask Idx, remDedPrm, applFrm initializations */
17871 /* Set remDedPrm as numDedPrm */
17872 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
17873 /* Initialize applFrm */
17874 cellSch->rachCfg.prachMskIndx = 0;
/* Pick the first SFN matching the configured even/odd/any constraint. */
17875 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_EVEN)
17877 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + \
17878 (cell->crntTime.sfn % 2)) % RGSCH_MAX_SFN;
17881 else if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ODD)
17883 if((cell->crntTime.sfn%2) == 0)
17885 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + 1)\
17892 cellSch->rachCfg.applFrm.sfn = cell->crntTime.sfn;
17894 /* Initialize cellSch->rachCfg.applFrm as >= crntTime.
17895 * This is because of RGSCH_CALC_SF_DIFF logic */
17896 if (cellSch->rachCfg.applFrm.sfn == cell->crntTime.sfn)
/* Find the first configured PRACH subframe later than 'now' in this
 * frame; if none, roll over to the next eligible SFN. */
17898 while (cellSch->rachCfg.prachMskIndx < cell->rachCfg.raOccasion.size)
17900 if (cell->crntTime.slot <\
17901 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx])
17905 cellSch->rachCfg.prachMskIndx++;
17907 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size)
17909 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
17911 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) %\
17916 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) %\
17919 cellSch->rachCfg.prachMskIndx = 0;
17921 cellSch->rachCfg.applFrm.slot = \
17922 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
17926 cellSch->rachCfg.applFrm.slot = \
17927 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
17930 /* Note first param to this macro should always be the latest in time */
17931 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
/* Keep advancing to the next opportunity until it is at least 'gap'
 * subframes in the future. */
17932 while (sfDiff <= gap)
17934 rgSCHCmnUpdNxtPrchMskIdx(cell);
17935 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
17942 * @brief Updates the PRACH MASK INDEX.
17946 * Function: rgSCHCmnUpdNxtPrchMskIdx
17947 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
17948 * CFG is always >= "n"+"DELTA", where "n" is the crntTime
17949 * of the cell. If not, applFrm is updated to the next avl
17950 * PRACH opportunity as per the PRACH Cfg Index configuration.
17953 * Invoked by: Common Scheduler
17955 * @param[in] RgSchCellCb *cell
17958 static Void rgSCHCmnUpdNxtPrchMskIdx(RgSchCellCb *cell)
/* Advances applFrm / prachMskIndx to the next configured PRACH
 * opportunity: either the next subframe within the current SFN, or the
 * first opportunity of the next eligible SFN (+1 for ANY, +2 to keep
 * even/odd parity). */
17960 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
17962 /* Determine the next prach mask Index */
17963 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size - 1)
17965 /* PRACH within applFrm.sfn are done, go to next AVL sfn */
17966 cellSch->rachCfg.prachMskIndx = 0;
17967 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
17969 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) % \
17972 else/* RGR_SFN_EVEN or RGR_SFN_ODD */
17974 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) % \
17977 cellSch->rachCfg.applFrm.slot = cell->rachCfg.raOccasion.\
17980 else /* applFrm.sfn is still valid */
17982 cellSch->rachCfg.prachMskIndx += 1;
17983 if ( cellSch->rachCfg.prachMskIndx < RGR_MAX_SUBFRAME_NUM )
17985 cellSch->rachCfg.applFrm.slot = \
17986 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
17993 * @brief Updates the Ded preamble RACH parameters
17998 * Function: rgSCHCmnUpdRachParam
17999 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
18000 * CFG is always >= "n"+"6"+"DELTA", where "n" is the crntTime
18001 * of the cell. If not, applFrm is updated to the next avl
18002 * PRACH oppurtunity as per the PRACH Cfg Index configuration,
18003 * accordingly the "remDedPrm" is reset to "numDedPrm" and
18004 * "prachMskIdx" field is updated as per "applFrm".
18007 * Invoked by: Common Scheduler
18009 * @param[in] RgSchCellCb *cell
18012 static Void rgSCHCmnUpdRachParam(RgSchCellCb *cell)
/* Per-TTI maintenance of the dedicated-preamble window: if applFrm has
 * come within the minimum PRACH-opportunity gap of current time, move
 * to the next opportunity and replenish remDedPrm. No-op when no
 * dedicated preamble set is configured. */
18015 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18016 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
18019 if (cell->macPreambleSet.pres == NOTPRSNT)
18023 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, \
18027 /* applFrm is still a valid next Prach Oppurtunity */
18030 rgSCHCmnUpdNxtPrchMskIdx(cell);
18031 /* Reset remDedPrm as numDedPrm */
18032 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
18038 * @brief Dedicated Preamble allocation function.
18042 * Function: rgSCHCmnAllocPOParam
18043 * Purpose: Allocate pdcch, rapId and PrachMskIdx.
18044 * Set mapping of UE with the allocated rapId.
18046 * Invoked by: Common Scheduler
18048 * @param[in] RgSchCellCb *cell
18049 * @param[in] RgSchDlSf *dlSf
18050 * @param[in] RgSchUeCb *ue
18051 * @param[out] RgSchPdcch **pdcch
18052 * @param[out] uint8_t *rapId
18053 * @param[out] uint8_t *prachMskIdx
18056 static S16 rgSCHCmnAllocPOParam
18061 RgSchPdcch **pdcch,
18063 uint8_t *prachMskIdx
/* Allocates the resources needed for one PDCCH order: a DCI-1A PDCCH
 * and, when dedicated preambles are configured, a rapId and the PRACH
 * mask index; the UE is then linked to the assigned rapId. Without
 * dedicated preambles only the PDCCH is allocated (rapId/prachMskIdx
 * left for the caller to treat as non-dedicated). */
18067 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18068 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18071 if (cell->macPreambleSet.pres == PRSNT_NODEF)
/* All dedicated preambles for this opportunity already handed out. */
18073 if (cellSch->rachCfg.remDedPrm == 0)
18077 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
18078 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
18082 /* The stored prachMskIdx is the index of PRACH Oppurtunities in
18083 * raOccasions.subframes[].
18084 * Converting the same to the actual PRACHMskIdx to be transmitted. */
18085 *prachMskIdx = cellSch->rachCfg.prachMskIndx + 1;
18086 /* Distribution starts from dedPrmStart till dedPrmStart + numDedPrm */
18087 *rapId = cellSch->rachCfg.dedPrmStart +
18088 cellSch->rachCfg.numDedPrm - cellSch->rachCfg.remDedPrm;
18089 cellSch->rachCfg.remDedPrm--;
18090 /* Map UE with the allocated RapId */
18091 ueDl->rachInfo.asgnOppr = cellSch->rachCfg.applFrm;
18092 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, cellSch->rachCfg.rapIdMap, (*rapId - cellSch->rachCfg.dedPrmStart));
18093 cmLListAdd2Tail(&cellSch->rachCfg.rapIdMap[*rapId - cellSch->rachCfg.dedPrmStart].assgndUes,
18094 &ueDl->rachInfo.rapIdLnk);
18095 ueDl->rachInfo.rapIdLnk.node = (PTR)ue;
18096 ueDl->rachInfo.poRapId = *rapId;
18098 else /* if dedicated preambles not configured */
18100 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
18101 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
18113 * @brief Downlink Scheduling Handler.
18117 * Function: rgSCHCmnGenPdcchOrder
18118 * Purpose: For each UE in PO Q, grab a PDCCH,
18119 * get an available ded RapId and fill PDCCH
18120 * with PO information.
18122 * Invoked by: Common Scheduler
18124 * @param[in] RgSchCellCb *cell
18125 * @param[in] RgSchDlSf *dlSf
18128 static Void rgSCHCmnGenPdcchOrder(RgSchCellCb *cell,RgSchDlSf *dlSf)
/* Walks the PDCCH-order queue: for each eligible UE (not in a
 * measurement gap / DRX-inactive), allocates a PDCCH + rapId +
 * prachMskIdx, fills the PDCCH-order DCI, removes the UE from the
 * queue and resets its power state. */
18130 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18131 CmLList *node = cellSch->rachCfg.pdcchOdrLst.first;
18134 uint8_t prachMskIdx;
18135 RgSchPdcch *pdcch = NULLP;
18139 ue = (RgSchUeCb *)node->node;
18141 /* Skip sending for this subframe is Measuring or inActive in UL due
18142 * to MeasGap or inactie due to DRX
18144 if ((ue->measGapCb.isMeasuring == TRUE) ||
18145 (ue->ul.ulInactvMask & RG_MEASGAP_INACTIVE) ||
18146 (ue->isDrxEnabled &&
18147 ue->dl.dlInactvMask & RG_DRX_INACTIVE)
18152 if (rgSCHCmnAllocPOParam(cell, dlSf, ue, &pdcch, &rapId,\
18153 &prachMskIdx) != ROK)
18155 /* No More rapIds left for the valid next avl Oppurtunity.
18156 * Unsatisfied UEs here would be given a chance, when the
18157 * prach Mask Index changes as per rachUpd every TTI */
18159 /* PDDCH can also be ordered with rapId=0, prachMskIdx=0
18160 * so that UE triggers a RACH procedure with non-dedicated preamble.
18161 * But the implementation here does not do this. Instead, the "break"
18162 * here implies, that PDCCH Odr always given with valid rapId!=0,
18163 * prachMskIdx!=0 if dedicated preambles are configured.
18164 * If not configured, then trigger a PO with rapId=0,prchMskIdx=0*/
18167 /* Fill pdcch with pdcch odr information */
18168 rgSCHCmnFillPdcchOdr2Sf(cell, ue, pdcch, rapId, prachMskIdx);
18169 /* Remove this UE from the PDCCH ORDER QUEUE */
18170 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
18171 /* Reset UE's power state */
18172 rgSCHPwrUeReset(cell, ue);
18179 * @brief This function add UE to PdcchOdr Q if not already present.
18183 * Function: rgSCHCmnDlAdd2PdcchOdrQ
18186 * Invoked by: CMN Scheduler
18188 * @param[in] RgSchCellCb* cell
18189 * @param[in] RgSchUeCb* ue
18193 static Void rgSCHCmnDlAdd2PdcchOdrQ(RgSchCellCb *cell,RgSchUeCb *ue)
/* Enqueues the UE on the PDCCH-order list; a NULLP poLnk.node guards
 * against double insertion. */
18195 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18196 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18199 if (ueDl->rachInfo.poLnk.node == NULLP)
18201 cmLListAdd2Tail(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
18202 ueDl->rachInfo.poLnk.node = (PTR)ue;
18209 * @brief This function rmvs UE to PdcchOdr Q if not already present.
18213 * Function: rgSCHCmnDlRmvFrmPdcchOdrQ
18216 * Invoked by: CMN Scheduler
18218 * @param[in] RgSchCellCb* cell
18219 * @param[in] RgSchUeCb* ue
18223 static Void rgSCHCmnDlRmvFrmPdcchOdrQ(RgSchCellCb *cell,RgSchUeCb *ue)
/* Dequeues the UE from the PDCCH-order list and clears the link node
 * so rgSCHCmnDlAdd2PdcchOdrQ can re-insert it later. */
18225 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18226 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18228 cmLListDelFrm(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
18229 ueDl->rachInfo.poLnk.node = NULLP;
18234 * @brief Fill pdcch with PDCCH order information.
18238 * Function: rgSCHCmnFillPdcchOdr2Sf
18239 * Purpose: Fill PDCCH with PDCCH order information,
18241 * Invoked by: Common Scheduler
18243 * @param[in] RgSchUeCb *ue
18244 * @param[in] RgSchPdcch *pdcch
18245 * @param[in] uint8_t rapId
18246 * @param[in] uint8_t prachMskIdx
18249 static Void rgSCHCmnFillPdcchOdr2Sf
18255 uint8_t prachMskIdx
/* Populates the allocated PDCCH with DCI format 1A "PDCCH order"
 * contents (preamble index + PRACH mask index) and, if aperiodic CQI
 * is configured, requests an aperiodic CQI report right after the
 * order. */
18258 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
18261 pdcch->rnti = ue->ueId;
18262 pdcch->dci.dciFormat = TFU_DCI_FORMAT_1A;
18263 pdcch->dci.u.format1aInfo.isPdcchOrder = TRUE;
18264 pdcch->dci.u.format1aInfo.t.pdcchOrder.preambleIdx = rapId;
18265 pdcch->dci.u.format1aInfo.t.pdcchOrder.prachMaskIdx = prachMskIdx;
18267 /* Request for APer CQI immediately after PDCCH Order */
18268 /* CR ccpu00144525 */
18270 if(ue->dl.ueDlCqiCfg.aprdCqiCfg.pres)
18272 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
18273 acqiCb->aCqiTrigWt = 0;
18282 * @brief UE deletion for scheduler.
18286 * Function : rgSCHCmnDelRachInfo
18288 * This functions deletes all scheduler information
18289 * pertaining to an UE.
18291 * @param[in] RgSchCellCb *cell
18292 * @param[in] RgSchUeCb *ue
18295 static Void rgSCHCmnDelRachInfo(RgSchCellCb *cell,RgSchUeCb *ue)
/* On UE deletion, unlinks the UE from all dedicated-RACH bookkeeping:
 * the PDCCH-order queue, the handover list, and the rapId->UE map. */
18297 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18298 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18302 if (ueDl->rachInfo.poLnk.node)
18304 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
18306 if (ueDl->rachInfo.hoLnk.node)
18308 cmLListDelFrm(&cellSch->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
18309 ueDl->rachInfo.hoLnk.node = NULLP;
18311 if (ueDl->rachInfo.rapIdLnk.node)
18313 rapIdIdx = ueDl->rachInfo.poRapId - cellSch->rachCfg.dedPrmStart;
18314 cmLListDelFrm(&cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes,
18315 &ueDl->rachInfo.rapIdLnk);
18316 ueDl->rachInfo.rapIdLnk.node = NULLP;
18322 * @brief This function retrieves the ue which has sent this raReq
18323 * and it allocates grant for UEs undergoing (for which RAR
18324 * is being generated) HandOver/PdcchOrder.
18329 * Function: rgSCHCmnHdlHoPo
18330 * Purpose: This function retrieves the ue which has sent this raReq
18331 * and it allocates grant for UEs undergoing (for which RAR
18332 * is being generated) HandOver/PdcchOrder.
18334 * Invoked by: Common Scheduler
18336 * @param[in] RgSchCellCb *cell
18337 * @param[out] CmLListCp *raRspLst
18338 * @param[in] RgSchRaReqInfo *raReq
18342 static Void rgSCHCmnHdlHoPo
18345 CmLListCp *raRspLst,
18346 RgSchRaReqInfo *raReq
/* For a RA request from a HO / PDCCH-order UE: notifies DRX of the
 * dedicated RA (if DRX is enabled) and allocates the RAR grant. */
18349 RgSchUeCb *ue = raReq->ue;
18351 if ( ue->isDrxEnabled )
18353 rgSCHDrxDedRa(cell,ue);
18355 rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq);
18360 * @brief This function retrieves the UE which has sent this raReq
18361 * for handover case.
18366 * Function: rgSCHCmnGetHoUe
18367 * Purpose: This function retrieves the UE which has sent this raReq
18368 * for handover case.
18370 * Invoked by: Common Scheduler
18372 * @param[in] RgSchCellCb *cell
18373 * @param[in] RgSchRaReqInfo *raReq
18374 * @return RgSchUeCb*
18377 RgSchUeCb* rgSCHCmnGetHoUe(RgSchCellCb *cell,uint16_t rapId)
/* Linear search of the handover UE list for the UE whose assigned
 * handover rapId matches the received preamble id. */
18379 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18383 RgSchCmnDlUe *ueDl;
18385 ueLst = &cellSch->rachCfg.hoUeLst;
18386 node = ueLst->first;
18389 ue = (RgSchUeCb *)node->node;
18391 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18392 if (ueDl->rachInfo.hoRapId == rapId)
18400 static Void rgSCHCmnDelDedPreamble(RgSchCellCb *cell,uint8_t preambleId)
/* Removes from the handover list the UE holding the given dedicated
 * preamble id, clearing its hoLnk so the association is dropped. */
18402 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18406 RgSchCmnDlUe *ueDl;
18408 ueLst = &cellSch->rachCfg.hoUeLst;
18409 node = ueLst->first;
18412 ue = (RgSchUeCb *)node->node;
18414 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18415 if (ueDl->rachInfo.hoRapId == preambleId)
18417 cmLListDelFrm(ueLst, &ueDl->rachInfo.hoLnk);
18418 ueDl->rachInfo.hoLnk.node = (PTR)NULLP;
18424 * @brief This function retrieves the UE which has sent this raReq
18425 * for PDCCh Order case.
18430 * Function: rgSCHCmnGetPoUe
18431 * Purpose: This function retrieves the UE which has sent this raReq
18432 * for PDCCH Order case.
18434 * Invoked by: Common Scheduler
18436 * @param[in] RgSchCellCb *cell
18437 * @param[in] RgSchRaReqInfo *raReq
18438 * @return RgSchUeCb*
18441 RgSchUeCb* rgSCHCmnGetPoUe
18445 CmLteTimingInfo timingInfo
/* Finds the UE that was issued a PDCCH order with the given rapId.
 * Every UE on the rapId's list is unlinked while scanning (stale
 * associations are dropped); the match is the one whose assigned
 * opportunity equals the received timing. */
18448 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18452 RgSchCmnDlUe *ueDl;
18455 rapIdIdx = rapId -cellSch->rachCfg.dedPrmStart;
18456 ueLst = &cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes;
18457 node = ueLst->first;
18460 ue = (RgSchUeCb *)node->node;
18462 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18463 /* Remove UEs irrespective.
18464 * Old UE associations are removed.*/
18465 cmLListDelFrm(ueLst, &ueDl->rachInfo.rapIdLnk);
18466 ueDl->rachInfo.rapIdLnk.node = (PTR)NULLP;
18467 if (RGSCH_TIMEINFO_SAME(ueDl->rachInfo.asgnOppr, timingInfo))
18478 * @brief This function returns the valid UL cqi for a given UE.
18482 * Function: rgSCHCmnUlGetCqi
18483 * Purpose: This function returns the "valid UL cqi" for a given UE
18484 * based on UE category
18486 * Invoked by: Scheduler
18488 * @param[in] RgSchUeCb *ue
18489 * @param[in] uint8_t ueCtgy
18492 uint8_t rgSCHCmnUlGetCqi
18496 CmLteUeCategory ueCtgy
/* Returns the UL CQI to use for the UE: starts from maxUlCqi and,
 * unless a non-CAT-5 UE reports a CQI above its category cap, uses the
 * reported value (validUlCqi or crntUlCqi[0] depending on the build
 * path — the selecting preprocessor lines are outside this view). */
18499 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18503 cqi = ueUl->maxUlCqi;
18505 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
18506 (ueUl->validUlCqi > ueUl->maxUlCqi)))
18508 cqi = ueUl->validUlCqi;
18511 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
18512 (ueUl->crntUlCqi[0] > ueUl->maxUlCqi )))
18514 cqi = ueUl->crntUlCqi[0];
18518 }/* End of rgSCHCmnUlGetCqi */
18520 /***********************************************************
18522 * Func : rgSCHCmnUlRbAllocForPoHoUe
18524 * Desc : Do uplink RB allocation for a HO/PO UE.
18528 * Notes: Note that as of now, for retx, maxRb
18529 * is not considered. Alternatives, such
18530 * as dropping retx if it crosses maxRb
18531 * could be considered.
18535 **********************************************************/
/* Uplink RB allocation for a HandOver/PdcchOrder UE (Msg3 grant).
 * Derives iTbs/iMcs from the UE's UL CQI, caps iMcs at the Msg3 maximum,
 * computes the subband count needed for the requested bytes, grabs an
 * UL allocation, fills the grant and starts a new HARQ transmission.
 * Note: for retx, maxRb is not considered (see block comment above).
 * @return S16 - ROK on success (return statements not visible here)
 */
18536 static S16 rgSCHCmnUlRbAllocForPoHoUe
18544 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18545 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18546 uint8_t sbSize = cellUl->sbSize;
18547 uint32_t maxBits = ue->ul.maxBytesPerUePerTti*8;
18549 RgSchUlAlloc *alloc;
/* Msg3 uses the dedicated HARQ process slot for the Msg3 subframe */
18559 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->msg3SchdHqProcIdx];
18560 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* No free hole in the UL subframe => nothing to allocate */
18562 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
18566 /*MS_WORKAROUND for HO ccpu00121116*/
18567 cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
18568 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend], cqi);
18569 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
18570 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Step CQI down until iMcs fits the Msg3 MCS limit
 * (the decrement statement is not visible in this view) */
18571 while(iMcs > RG_SCH_CMN_MAX_MSG3_IMCS)
18574 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
18575 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg);
18577 /* Filling the modorder in the grant structure*/
18578 RG_SCH_UL_MCS_TO_MODODR(iMcs,modOdr);
/* Spectral efficiency table depends on normal vs extended UL CP */
18579 if (!cell->isCpUlExtend)
18581 eff = rgSchCmnNorUlEff[0][iTbs];
18585 eff = rgSchCmnExtUlEff[0][iTbs];
18588 bits = ueUl->alloc.reqBytes * 8;
18590 #if (ERRCLASS & ERRCLS_DEBUG)
/* Below the minimum TB size for this iTbs: fall back to subband-granular
 * sizing (intervening statements not visible in this view) */
18597 if (bits < rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs))
18600 nPrb = numSb * sbSize;
18604 if (bits > maxBits)
/* Cap at the per-UE per-TTI byte budget: size PRBs from the budget */
18607 nPrb = bits * 1024 / eff / RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
18612 numSb = nPrb / sbSize;
18616 /*ccpu00128775:MOD-Change to get upper threshold nPrb*/
18617 nPrb = RGSCH_CEIL((RGSCH_CEIL(bits * 1024, eff)),
18618 RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl));
18623 numSb = RGSCH_DIV_ROUND(nPrb, sbSize);
/* Request the subbands, bounded by the per-UE subband limit */
18628 alloc = rgSCHCmnUlSbAlloc(sf, (uint8_t)RGSCH_MIN(numSb, cellUl->maxSbPerUe),\
18630 if (alloc == NULLP)
18632 DU_LOG("\nERROR --> SCH : rgSCHCmnUlRbAllocForPoHoUe(): Could not get UlAlloc");
18635 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
18637 /* Filling the modorder in the grant structure start*/
18638 alloc->grnt.modOdr = (TfuModScheme) modOdr;
18639 alloc->grnt.iMcs = iMcs;
18640 alloc->grnt.iMcsCrnt = iMcsCrnt;
18641 alloc->grnt.hop = 0;
18642 /* Fix for ccpu00123915*/
18643 alloc->forMsg3 = TRUE;
18644 alloc->hqProc = proc;
18645 alloc->hqProc->ulSfIdx = cellUl->msg3SchdIdx;
18647 alloc->rnti = ue->ueId;
18648 /* updating initNumRbs in case of HO */
18650 ue->initNumRbs = alloc->grnt.numRb;
18652 ueUl->alloc.alloc = alloc;
/* Data size derives from the TB size table for the final iTbs/numRb */
18653 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
18654 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
18655 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
18656 /* MS_WORKAROUND for HO ccpu00121124*/
18657 /*[Adi temp change] Need to fill modOdr */
18658 RG_SCH_UL_MCS_TO_MODODR(alloc->grnt.iMcsCrnt,alloc->grnt.modOdr);
/* Register a fresh transmission on the HARQ process */
18659 rgSCHUhmNewTx(proc, ueUl->hqEnt.maxHqRetx, alloc);
18660 /* No grant attr recorded now */
18665 * @brief This function allocates grant for UEs undergoing (for which RAR
18666 * is being generated) HandOver/PdcchOrder.
18671 * Function: rgSCHCmnAllocPoHoGrnt
18672 * Purpose: This function allocates grant for UEs undergoing (for which RAR
18673 * is being generated) HandOver/PdcchOrder.
18675 * Invoked by: Common Scheduler
18677 * @param[in] RgSchCellCb *cell
18678 * @param[out] CmLListCp *raRspLst,
18679 * @param[in] RgSchUeCb *ue
18680 * @param[in] RgSchRaReqInfo *raReq
/* Allocate a RAR grant for a UE undergoing HandOver/PdcchOrder.
 * Clears any previous UL allocations for the UE, performs the Msg3 RB
 * allocation, copies the resulting grant into ue->ul.rarGrnt (including
 * timing advance from the RA request), optionally sets the aperiodic
 * CQI request bit, and queues the UE on the RAR response list.
 * @param[in]  cell      cell control block
 * @param[out] raRspLst  RAR response contention-free list to append to
 * @param[in]  ue        UE control block
 * @param[in]  raReq     received RA request info (rapId, ta)
 */
18684 static Void rgSCHCmnAllocPoHoGrnt
18687 CmLListCp *raRspLst,
18689 RgSchRaReqInfo *raReq
18692 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18693 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18695 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
18698 /* Clearing previous allocs if any*/
18699 rgSCHCmnUlUeDelAllocs(cell, ue);
18700 /* Fix : syed allocs are limited */
18701 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Minimum grant size for HO/PO Msg3 */
18705 ueUl->alloc.reqBytes = RG_SCH_MIN_GRNT_HOPO;
18706 if (rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, RGSCH_MAX_UL_RB) != ROK)
18711 /* Fill grant information */
18712 grnt = &ueUl->alloc.alloc->grnt;
18717 DU_LOG("\nERROR --> SCH : Failed to get"
18718 "the grant for HO/PDCCH Order. CRNTI:%d",ue->ueId);
/* Copy grant fields into the UE's RAR grant record */
18721 ue->ul.rarGrnt.rapId = raReq->raReq.rapId;
18722 ue->ul.rarGrnt.hop = grnt->hop;
18723 ue->ul.rarGrnt.rbStart = grnt->rbStart;
18724 ue->ul.rarGrnt.numRb = grnt->numRb;
18725 ue->ul.rarGrnt.tpc = grnt->tpc;
18726 ue->ul.rarGrnt.iMcsCrnt = grnt->iMcsCrnt;
18727 ue->ul.rarGrnt.ta.pres = TRUE;
18728 ue->ul.rarGrnt.ta.val = raReq->raReq.ta;
18729 ue->ul.rarGrnt.datSz = grnt->datSz;
/* Aperiodic CQI request: only if the per-subframe ACQI budget allows
 * and the UE has a pending CQI request */
18730 if((sf->numACqiCount < RG_SCH_MAX_ACQI_PER_ULSF) && (RG_SCH_APCQI_NO != ue->dl.reqForCqi))
18734 /* Send two bits cqireq field if more than one cells are configured else one*/
18735 for (idx = 1;idx < CM_LTE_MAX_CELLS;idx++)
18737 if (ue->cellInfo[idx] != NULLP)
18739 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
/* idx reached the end => only the primary cell is configured */
18743 if (idx == CM_LTE_MAX_CELLS)
18746 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
18748 ue->dl.reqForCqi = RG_SCH_APCQI_NO;
18749 sf->numACqiCount++;
18753 ue->ul.rarGrnt.cqiReqBit = 0;
18755 /* Attach Ho/Po allocation to RAR Rsp cont free Lst */
18756 cmLListAdd2Tail(raRspLst, &ue->ul.rarGrnt.raRspLnk);
18757 ue->ul.rarGrnt.raRspLnk.node = (PTR)ue;
18763 * @brief This is a utility function to set the fields in
18764 * an UL harq proc which is identified for non-adaptive retx
18768 * Function: rgSCHCmnUlNonadapRetx
18769 * Purpose: Sets the fields in UL Harq proc for non-adaptive retx
18771 * @param[in] RgSchCmnUlCell *cellUl
18772 * @param[out] RgSchUlAlloc *alloc
18773 * @param[in] uint8_t idx
/* Mark a UL HARQ process for non-adaptive retransmission.
 * Re-registers the retransmission with the HARQ module, points the
 * process at the retx subframe index, and picks the current MCS:
 * an RV-derived MCS value when rvIdx is non-zero, else the original iMcs.
 * @param[in]  cellUl  common UL cell block (supplies reTxIdx)
 * @param[out] alloc   allocation being converted to a retx
 * @param[in]  idx     index into cellUl->reTxIdx for the retx subframe
 */
18778 static Void rgSCHCmnUlNonadapRetx
18780 RgSchCmnUlCell *cellUl,
18781 RgSchUlAlloc *alloc,
18785 rgSCHUhmRetx(alloc->hqProc, alloc);
18787 /* Update alloc to retx */
18788 alloc->hqProc->isRetx = TRUE;
18789 alloc->hqProc->ulSfIdx = cellUl->reTxIdx[idx];
/* Non-zero redundancy version => signal MCS via the RV-to-iMcs table */
18791 if (alloc->hqProc->rvIdx != 0)
18793 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[alloc->hqProc->rvIdx];
18797 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
18799 alloc->grnt.isRtx = TRUE;
/* Non-adaptive retx needs no PDCCH */
18800 alloc->pdcch = NULLP;
18804 * @brief Check if 2 allocs overlap
18808 * Function : rgSCHCmnUlAllocsOvrLap
18810 * - Return TRUE if alloc1 and alloc2 overlap.
18812 * @param[in] RgSchUlAlloc *alloc1
18813 * @param[in] RgSchUlAlloc *alloc2
/* Return TRUE if the subband ranges of two UL allocations overlap.
 * Checks whether either allocation's start subband falls inside the
 * other's [sbStart, sbStart + numSb - 1] range.
 * @param[in] alloc1, alloc2  allocations to compare
 * @return Bool - TRUE on overlap (returns not visible in this view)
 */
18816 static Bool rgSCHCmnUlAllocsOvrLap(RgSchUlAlloc *alloc1,RgSchUlAlloc *alloc2)
18819 if (((alloc1->sbStart >= alloc2->sbStart) &&
18820 (alloc1->sbStart <= alloc2->sbStart + alloc2->numSb-1)) ||
18821 ((alloc2->sbStart >= alloc1->sbStart) &&
18822 (alloc2->sbStart <= alloc1->sbStart + alloc1->numSb-1)))
18829 * @brief Copy allocation Info from src to dst.
18833 * Function : rgSCHCmnUlCpyAllocInfo
18835 * - Copy allocation Info from src to dst.
18837 * @param[in] RgSchUlAlloc *srcAlloc
18838 * @param[in] RgSchUlAlloc *dstAlloc
/* Copy UL allocation state from srcAlloc to dstAlloc.
 * Transfers grant, HARQ process, UE/RNTI/Msg3/RA-CB/PDCCH references,
 * and re-points back-references (hqProc->alloc, ueUl->alloc.alloc, and
 * the UL SPS current-alloc pointer when applicable) at dstAlloc so no
 * stale pointer to srcAlloc survives the copy.
 * @param[in]  cell      cell control block
 * @param[in]  srcAlloc  source allocation
 * @param[out] dstAlloc  destination allocation
 */
18841 static Void rgSCHCmnUlCpyAllocInfo(RgSchCellCb *cell,RgSchUlAlloc *srcAlloc,RgSchUlAlloc *dstAlloc)
18843 RgSchCmnUlUe *ueUl;
18845 dstAlloc->grnt = srcAlloc->grnt;
18846 dstAlloc->hqProc = srcAlloc->hqProc;
18847 /* Fix : syed During UE context release, hqProc->alloc
18848 * was pointing to srcAlloc instead of dstAlloc and
18849 * freeing from incorrect sf->allocDb was
18850 * corrupting the list. */
18851 /* In case of SPS Occasion Allocation is done in advance and
18852 at a later time Hq Proc is linked. Hence HqProc
18853 pointer in alloc shall be NULL */
18855 if (dstAlloc->hqProc)
18858 dstAlloc->hqProc->alloc = dstAlloc;
18860 dstAlloc->ue = srcAlloc->ue;
18861 dstAlloc->rnti = srcAlloc->rnti;
18862 dstAlloc->forMsg3 = srcAlloc->forMsg3;
18863 dstAlloc->raCb = srcAlloc->raCb;
18864 dstAlloc->pdcch = srcAlloc->pdcch;
18865 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
18868 ueUl = RG_SCH_CMN_GET_UL_UE(dstAlloc->ue,cell);
18869 ueUl->alloc.alloc = dstAlloc;
/* If UL SPS tracked srcAlloc as its current allocation, retarget it */
18871 if (dstAlloc->ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
18873 if((dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc != NULLP)
18874 && (dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc == srcAlloc))
18876 dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc = dstAlloc;
18885 * @brief Update TX and RETX subframe's allocation
18890 * Function : rgSCHCmnUlInsAllocFrmNewSf2OldSf
18892 * - Release all preassigned allocations of newSf and merge
18894 * - If alloc of newSf collide with one or more allocs of oldSf
18895 * - mark all such allocs of oldSf for Adaptive Retx.
18896 * - Swap the alloc and hole DB references of oldSf and newSf.
18898 * @param[in] RgSchCellCb *cell
18899 * @param[in] RgSchUlSf *newSf
18900 * @param[in] RgSchUlSf *oldSf
18901 * @param[in] RgSchUlAlloc *srcAlloc
/* Insert one newSf allocation (srcAlloc, a pre-assigned Msg3) into oldSf.
 * Any oldSf allocation that collides with srcAlloc's subbands is marked
 * for adaptive retx and released; if oldSf is still full, non-merged
 * (retx) allocations are freed to make room, since new Msg3 allocations
 * take priority over retransmissions. Finally the exact subband span is
 * re-acquired in oldSf and srcAlloc's state is copied into it.
 * @param[in] cell      cell control block
 * @param[in] newSf     subframe srcAlloc came from
 * @param[in] oldSf     subframe receiving the merged allocation
 * @param[in] srcAlloc  allocation to merge
 */
18904 static Void rgSCHCmnUlInsAllocFrmNewSf2OldSf
18909 RgSchUlAlloc *srcAlloc
18912 RgSchUlAlloc *alloc, *dstAlloc, *nxtAlloc;
18914 /* MS_WORKAROUND ccpu00120827 */
18915 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
/* Pass 1: release every oldSf alloc colliding with srcAlloc */
18918 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
18922 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
18923 /* If there is an overlap between alloc and srcAlloc
18924 * then alloc is marked for Adaptive retx and it is released
18926 if (rgSCHCmnUlAllocsOvrLap(alloc, srcAlloc) == TRUE)
18928 rgSCHCmnUlUpdAllocRetx(cell, alloc);
18929 rgSCHUtlUlAllocRls(oldSf, alloc);
18931 /* No further allocs spanning the srcAlloc subbands */
18932 if (srcAlloc->sbStart + srcAlloc->numSb - 1 <= alloc->sbStart)
18936 } while ((alloc = nxtAlloc) != NULLP);
18939 /* After freeing all the colliding allocs, request for an allocation
18940 * specifying the start and numSb with in txSf. This function should
18941 * always return positively with a nonNULL dstAlloc */
18942 /* MS_WORKAROUND ccpu00120827 */
18943 remAllocs = schCmnCell->ul.maxAllocPerUlSf - *oldSf->allocCountRef;
18946 /* Fix : If oldSf already has max Allocs then release the
18947 * old RETX alloc to make space for new alloc of newSf.
18948 * newSf allocs(i.e new Msg3s) are given higher priority
18949 * over retx allocs. */
18950 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
18954 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
18955 if (!alloc->mrgdNewTxAlloc)
18957 /* If alloc is for RETX */
18958 /* TODO: In case of this, and also in case of choosing
18959 * an alloc for ADAP RETX, we need to send ACK for
18960 * the corresponding alloc in PHICH */
18961 #ifndef EMTC_ENABLE
18962 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc);
18964 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc,FALSE);
18968 }while((alloc = nxtAlloc) != NULLP);
/* Re-acquire srcAlloc's exact subband span within oldSf */
18971 dstAlloc = rgSCHUtlUlGetSpfcAlloc(oldSf, srcAlloc->sbStart, srcAlloc->numSb);
18973 /* This should never happen */
18974 if (dstAlloc == NULLP)
18976 DU_LOG("\nERROR --> SCH : CRNTI:%d "
18977 "rgSCHUtlUlGetSpfcAlloc failed in rgSCHCmnUlInsAllocFrmNewSf2OldSf",
18982 /* Copy the srcAlloc's state information in to dstAlloc */
18983 rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc);
18984 /* Set new Tx merged Alloc Flag to TRUE, indicating that this
18985 * alloc shall not be processed for non-adaptive retransmission */
18986 dstAlloc->mrgdNewTxAlloc = TRUE;
18990 * @brief Merge all allocations of newSf to oldSf.
18994 * Function : rgSCHCmnUlMergeSfAllocs
18996 * - Merge all allocations of newSf to oldSf.
18997 * - If newSf's alloc collides with oldSf's alloc
18998 * then oldSf's alloc is marked for adaptive Retx
18999 * and is released from oldSf to create space for
19002 * @param[in] RgSchCellCb *cell
19003 * @param[in] RgSchUlSf *oldSf
19004 * @param[in] RgSchUlSf *newSf
/* Merge all allocations of newSf into oldSf.
 * Iterates every allocation in newSf, inserts it into oldSf (colliding
 * oldSf allocs get marked for adaptive retx inside the insert helper),
 * then releases it from newSf.
 * @param[in] cell   cell control block
 * @param[in] oldSf  destination UL subframe
 * @param[in] newSf  source UL subframe being drained
 */
19007 static Void rgSCHCmnUlMergeSfAllocs(RgSchCellCb *cell,RgSchUlSf *oldSf,RgSchUlSf *newSf)
19009 RgSchUlAlloc *alloc, *nxtAlloc;
19012 /* Merge each alloc of newSf in to oldSf
19013 * and release it from newSf */
19014 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
/* Fetch next before the current alloc is released from newSf */
19018 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
19019 rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, alloc);
19020 rgSCHUtlUlAllocRls(newSf, alloc);
19021 } while((alloc = nxtAlloc) != NULLP);
19026 * @brief Swap Hole/Alloc DB context of newSf and oldSf.
19030 * Function : rgSCHCmnUlSwapSfAllocs
19032 * - Swap Hole/Alloc DB context of newSf and oldSf.
19034 * @param[in] RgSchCellCb *cell
19035 * @param[in] RgSchUlSf *oldSf
19036 * @param[in] RgSchUlSf *newSf
/* Swap the allocation/hole DB context of newSf and oldSf.
 * Exchanges allocDb, holeDb and availSubbands between the two subframes,
 * then re-points each subframe's allocCountRef at its (new) own
 * allocDb->count so the count reference stays consistent (ccpu00120610).
 * @param[in] cell   cell control block (unused in visible lines)
 * @param[in] oldSf  first UL subframe
 * @param[in] newSf  second UL subframe
 */
19039 static Void rgSCHCmnUlSwapSfAllocs(RgSchCellCb *cell,RgSchUlSf *oldSf,RgSchUlSf *newSf)
19041 RgSchUlAllocDb *tempAllocDb = newSf->allocDb;
19042 RgSchUlHoleDb *tempHoleDb = newSf->holeDb;
19043 uint8_t tempAvailSbs = newSf->availSubbands;
19047 newSf->allocDb = oldSf->allocDb;
19048 newSf->holeDb = oldSf->holeDb;
19049 newSf->availSubbands = oldSf->availSubbands;
19051 oldSf->allocDb = tempAllocDb;
19052 oldSf->holeDb = tempHoleDb;
19053 oldSf->availSubbands = tempAvailSbs;
19055 /* Fix ccpu00120610*/
19056 newSf->allocCountRef = &newSf->allocDb->count;
19057 oldSf->allocCountRef = &oldSf->allocDb->count;
19061 * @brief Perform non-adaptive RETX for non-colliding allocs.
19065 * Function : rgSCHCmnUlPrcNonAdptRetx
19067 * - Perform non-adaptive RETX for non-colliding allocs.
19069 * @param[in] RgSchCellCb *cell
19070 * @param[in] RgSchUlSf *newSf
19071 * @param[in] uint8_t idx
/* Perform non-adaptive RETX for all non-colliding allocations in newSf.
 * Allocations flagged mrgdNewTxAlloc are freshly merged new TXs: their
 * flag is reset and they are skipped; every other allocation is handed
 * to rgSCHCmnUlNonadapRetx for non-adaptive retransmission.
 * @param[in] cell   cell control block
 * @param[in] newSf  subframe holding the merged allocation context
 * @param[in] idx    retx subframe index (into cellUl->reTxIdx)
 */
19074 static Void rgSCHCmnUlPrcNonAdptRetx(RgSchCellCb *cell,RgSchUlSf *newSf,uint8_t idx)
19076 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19077 RgSchUlAlloc *alloc, *nxtAlloc;
19079 /* perform non-adaptive retx allocation(adjustment) */
19080 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
19084 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
19085 /* A merged new TX alloc, reset the state and skip */
19086 if (alloc->mrgdNewTxAlloc)
19088 alloc->mrgdNewTxAlloc = FALSE;
19093 rgSCHCmnUlNonadapRetx(cellUl, alloc, idx);
19095 } while((alloc = nxtAlloc) != NULLP);
19101 * @brief Update TX and RETX subframe's allocation
19106 * Function : rgSCHCmnUlPrfmSfMerge
19108 * - Release all preassigned allocations of newSf and merge
19110 * - If alloc of newSf collide with one or more allocs of oldSf
19111 * - mark all such allocs of oldSf for Adaptive Retx.
19112 * - Swap the alloc and hole DB references of oldSf and newSf.
19113 * - The allocs which did not collide with pre-assigned msg3
19114 * allocs are marked for non-adaptive RETX.
19116 * @param[in] RgSchCellCb *cell
19117 * @param[in] RgSchUlSf *oldSf
19118 * @param[in] RgSchUlSf *newSf
19119 * @param[in] uint8_t idx
/* Merge oldSf into newSf and resolve retransmissions.
 * Sequence: (1) merge all newSf (pre-assigned Msg3) allocations into
 * oldSf, marking colliding oldSf allocs for adaptive retx; (2) swap the
 * alloc/hole DBs so newSf carries the merged context; (3) run
 * non-adaptive RETX for the surviving, non-colliding allocations.
 * @param[in] cell   cell control block
 * @param[in] oldSf  subframe with NACKed transmissions
 * @param[in] newSf  subframe with pre-assigned Msg3 resources
 * @param[in] idx    retx subframe index
 */
19122 static Void rgSCHCmnUlPrfmSfMerge
19130 /* Preassigned resources for msg3 in newSf.
19131 * Hence do adaptive retx for all NACKED TXs */
19132 rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf);
19133 /* swap alloc and hole DBs of oldSf and newSf. */
19134 rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf);
19135 /* Here newSf has the resultant merged allocs context */
19136 /* Perform non-adaptive RETX for non-colliding allocs */
19137 rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx);
19143 * @brief Update TX and RETX subframe's allocation
19148 * Function : rgSCHCmnUlRmvCmpltdAllocs
19150 * - Free all Transmission which are ACKED
19151 * OR for which MAX retransmission have
19155 * @param[in] RgSchCellCb *cell,
19156 * @param[in] RgSchUlSf *sf
/* Free completed UL transmissions from a subframe.
 * An allocation is "complete" when its CRC was received or its HARQ
 * retransmission budget is exhausted: it is freed (with Msg3-max-retx
 * statistics bumped). Otherwise, an allocation belonging to a UE (and
 * not Msg3) is marked for adaptive retransmission and released.
 * @param[in] cell  cell control block
 * @param[in] sf    UL subframe to scrub
 */
19159 static Void rgSCHCmnUlRmvCmpltdAllocs(RgSchCellCb *cell,RgSchUlSf *sf)
19161 RgSchUlAlloc *alloc, *nxtAlloc;
19163 if ((alloc = rgSCHUtlUlAllocFirst(sf)) == NULLP)
19169 nxtAlloc = rgSCHUtlUlAllocNxt(sf, alloc);
19171 DU_LOG("\nDEBUG --> SCH : rgSCHCmnUlRmvCmpltdAllocs:time(%d %d) alloc->hqProc->remTx %d hqProcId(%d) \n",cell->crntTime.sfn,cell->crntTime.slot,alloc->hqProc->remTx, alloc->grnt.hqProcId);
/* NOTE(review): rcvdCrcInd is forced TRUE here, which makes the OR test
 * below always take the "completed" path - presumably guarded by a
 * build flag not visible in this view; confirm in full source. */
19173 alloc->hqProc->rcvdCrcInd = TRUE;
19174 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
19177 /* SR_RACH_STATS : MSG 3 MAX RETX FAIL*/
19178 if ((alloc->forMsg3 == TRUE) && (alloc->hqProc->remTx == 0))
19180 rgNumMsg3FailMaxRetx++;
19182 cell->tenbStats->sch.msg3Fail++;
19186 #ifdef MAC_SCH_STATS
19187 if(alloc->ue != NULLP)
19189 /* access from ulHarqProc*/
19190 RgSchUeCb *ueCb = alloc->ue;
19191 RgSchCmnUe *cmnUe = (RgSchCmnUe*)ueCb->sch;
19192 RgSchCmnUlUe *ulUe = &(cmnUe->ul);
19193 uint8_t cqi = ulUe->crntUlCqi[0];
19194 uint16_t numUlRetx = ueCb->ul.hqEnt.maxHqRetx - alloc->hqProc->remTx;
19196 hqRetxStats.ulCqiStat[(cqi - 1)].mcs = alloc->grnt.iMcs;
/* Bucket the HARQ retx count (1..4) per CQI for statistics */
19201 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1++;
19204 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2++;
19207 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3++;
19210 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4++;
19213 hqRetxStats.ulCqiStat[(cqi - 1)].totalTx = \
19214 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1 + \
19215 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2 * 2) + \
19216 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3 * 3) + \
19217 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4 * 4);
19220 #endif /*MAC_SCH_STATS*/
19221 rgSCHCmnUlFreeAllocation(cell, sf, alloc);
19223 /*ccpu00106104 MOD added check for AckNackRep */
19224 /*added check for acknack so that adaptive retx considers ue
19225 inactivity due to ack nack repetition*/
19226 else if((alloc->ue != NULLP) && (TRUE != alloc->forMsg3))
19228 rgSCHCmnUlUpdAllocRetx(cell, alloc);
19229 rgSCHUtlUlAllocRls(sf, alloc);
19231 } while ((alloc = nxtAlloc) != NULLP);
19237 * @brief Update an uplink subframe.
19241 * Function : rgSCHCmnRlsUlSf
19243 * For each allocation
19244 * - if no more tx needed
19245 * - Release allocation
19247 * - Perform retransmission
19249 * @param[in] RgSchUlSf *sf
19250 * @param[in] uint8_t idx
/* Release an uplink subframe after HARQ feedback processing.
 * Looks up the subframe addressed by this TTI's HARQ feedback index;
 * if it holds allocations, removes all completed transmissions (others
 * are queued for retx inside the helper) and resets the subframe's
 * aperiodic CQI counter.
 * @param[in] cell  cell control block
 * @param[in] idx   index into cellUl->hqFdbkIdx for the current TTI
 */
19253 Void rgSCHCmnRlsUlSf(RgSchCellCb *cell,uint8_t idx)
19256 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19257 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
19259 RgSchUlSf *oldSf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
19261 /* Initialize the reTxLst of UL HqProcs for RETX subframe */
19262 if (rgSCHUtlUlAllocFirst(oldSf) == NULLP)
19266 /* Release all completed TX allocs from sf */
19267 rgSCHCmnUlRmvCmpltdAllocs(cell, oldSf);
19269 oldSf->numACqiCount = 0;
19275 * @brief Handle uplink allocation for retransmission.
19279 * Function : rgSCHCmnUlUpdAllocRetx
19281 * - Perform adaptive retransmission
19283 * @param[in] RgSchUlSf *sf
19284 * @param[in] RgSchUlAlloc *alloc
/* Queue a UL allocation for adaptive retransmission.
 * Snapshots the grant parameters (RNTI, subbands, MCS, DCI format,
 * layers, VRBG span, modulation order, TB size, UE/Msg3/RA-CB refs)
 * into the HARQ process's reTxAlloc record, detaches the process from
 * the current subframe, and appends it to the cell's reTxLst.
 * @param[in] cell   cell control block
 * @param[in] alloc  allocation whose transmission was NACKed
 */
19287 static Void rgSCHCmnUlUpdAllocRetx(RgSchCellCb *cell,RgSchUlAlloc *alloc)
19289 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
19291 alloc->hqProc->reTxAlloc.rnti = alloc->rnti;
19292 alloc->hqProc->reTxAlloc.numSb = alloc->numSb;
19293 alloc->hqProc->reTxAlloc.iMcs = alloc->grnt.iMcs;
19295 alloc->hqProc->reTxAlloc.dciFrmt = alloc->grnt.dciFrmt;
19296 alloc->hqProc->reTxAlloc.numLyr = alloc->grnt.numLyr;
19297 alloc->hqProc->reTxAlloc.vrbgStart = alloc->grnt.vrbgStart;
19298 alloc->hqProc->reTxAlloc.numVrbg = alloc->grnt.numVrbg;
19299 alloc->hqProc->reTxAlloc.modOdr = alloc->grnt.modOdr;
19301 //iTbs = rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs);
19302 //iTbs = alloc->grnt.iMcs;
19303 //RGSCH_ARRAY_BOUND_CHECK( 0, rgTbSzTbl[0], iTbs);
/* TB size is carried over directly from the grant's data size */
19304 alloc->hqProc->reTxAlloc.tbSz = alloc->grnt.datSz;
19305 //rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1]/8;
19306 alloc->hqProc->reTxAlloc.ue = alloc->ue;
19307 alloc->hqProc->reTxAlloc.forMsg3 = alloc->forMsg3;
19308 alloc->hqProc->reTxAlloc.raCb = alloc->raCb;
19310 /* Set as retransmission is pending */
19311 alloc->hqProc->isRetx = TRUE;
/* Detach the process from its current subframe/allocation */
19312 alloc->hqProc->alloc = NULLP;
19313 alloc->hqProc->ulSfIdx = RGSCH_INVALID_INFO;
19315 DU_LOG("\nDEBUG --> SCH : Adding Harq Proc Id in the retx list hqProcId %d \n",alloc->grnt.hqProcId);
19317 cmLListAdd2Tail(&cmnUlCell->reTxLst, &alloc->hqProc->reTxLnk);
19318 alloc->hqProc->reTxLnk.node = (PTR)alloc->hqProc;
19323 * @brief Attempts allocation for msg3s for which ADAP retransmissions
19328 * Function : rgSCHCmnUlAdapRetxAlloc
19330 * Attempts allocation for msg3s for which ADAP retransmissions
19333 * @param[in] RgSchCellCb *cell
19334 * @param[in] RgSchUlSf *sf
19335 * @param[in] RgSchUlHqProcCb *proc;
19336 * @param[in] RgSchUlHole *hole;
/* Attempt an adaptive retransmission allocation for one UL HARQ process.
 * Fetches a PDCCH in the DL subframe at the UL-control delta, carves the
 * required subbands out of the given hole, rebuilds the grant from the
 * process's saved reTxAlloc snapshot, fills the DCI format 0 payload,
 * and (for Msg3) restamps the RA control block's grant and timing.
 * Returns FALSE (with the PDCCH returned to the pool) if the hole is too
 * small or the subband allocation fails.
 * @param[in] cell  cell control block
 * @param[in] sf    UL subframe to allocate in
 * @param[in] proc  HARQ process queued for adaptive retx
 * @param[in] hole  free-subband hole to allocate from
 * @return Bool - TRUE on success (returns not visible in this view)
 */
19339 static Bool rgSCHCmnUlAdapRetxAlloc
19343 RgSchUlHqProcCb *proc,
19347 uint8_t numSb = proc->reTxAlloc.numSb;
19348 uint8_t iMcs = proc->reTxAlloc.iMcs;
19349 CmLteTimingInfo frm = cell->crntTime;
19350 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19353 RgSchUlAlloc *alloc;
19355 /* Fetch PDCCH for msg3 */
19356 /* ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
19357 /* Introduced timing delta for UL control */
19358 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
19359 dlSf = rgSCHUtlSubFrmGet(cell, frm);
19360 pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
/* No PDCCH available => cannot signal an adaptive retx this TTI */
19361 if (pdcch == NULLP)
19366 /* Fetch UL Alloc for msg3 */
19367 if (numSb <= hole->num)
19369 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
/* Allocation failed: give the PDCCH back before bailing out */
19374 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
19375 DU_LOG("\nERROR --> SCH : UL Alloc fail for msg3 retx for rnti: %d\n",
19376 proc->reTxAlloc.rnti);
19380 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
19381 alloc->grnt.iMcs = iMcs;
19382 alloc->grnt.datSz = proc->reTxAlloc.tbSz;
19385 //RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
19387 /* Fill UL Alloc for msg3 */
19388 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
19389 alloc->grnt.nDmrs = 0;
19390 alloc->grnt.hop = 0;
19391 alloc->grnt.delayBit = 0;
19392 alloc->grnt.isRtx = TRUE;
19393 proc->ulSfIdx = cellUl->schdIdx;
19395 proc->schdTime = cellUl->schdTime;
/* Restore grant fields from the saved retx snapshot */
19396 alloc->grnt.hqProcId = proc->procId;
19397 alloc->grnt.dciFrmt = proc->reTxAlloc.dciFrmt;
19398 alloc->grnt.numLyr = proc->reTxAlloc.numLyr;
19399 alloc->grnt.vrbgStart = proc->reTxAlloc.vrbgStart;
19400 alloc->grnt.numVrbg = proc->reTxAlloc.numVrbg;
19401 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
19402 alloc->grnt.modOdr = proc->reTxAlloc.modOdr;
19404 /* TODO : Hardcoding these as of now */
19405 alloc->grnt.hop = 0;
19406 alloc->grnt.SCID = 0;
19407 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
19408 alloc->grnt.PMI = 0;
19409 alloc->grnt.uciOnxPUSCH = 0;
19411 alloc->rnti = proc->reTxAlloc.rnti;
19412 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
19413 alloc->ue = proc->reTxAlloc.ue;
19414 alloc->pdcch = pdcch;
19415 alloc->forMsg3 = proc->reTxAlloc.forMsg3;
19416 alloc->raCb = proc->reTxAlloc.raCb;
19417 alloc->hqProc = proc;
19418 alloc->isAdaptive = TRUE;
19420 sf->totPrb += alloc->grnt.numRb;
19422 /* FIX : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* Msg3 retx: restamp the RA control block's grant and allocation time */
19425 alloc->raCb->msg3Grnt= alloc->grnt;
19427 /* To the crntTime, add the time at which UE will
19428 * actually send MSG3 */
19429 alloc->raCb->msg3AllocTime = cell->crntTime;
19430 RGSCH_INCR_SUB_FRAME(alloc->raCb->msg3AllocTime, RG_SCH_CMN_MIN_RETXMSG3_RECP_INTRVL);
19432 alloc->raCb->msg3AllocTime = cellUl->schdTime;
19434 rgSCHCmnUlAdapRetx(alloc, proc);
19435 /* Fill PDCCH with alloc info */
19436 pdcch->rnti = alloc->rnti;
19437 pdcch->dci.dciFormat = TFU_DCI_FORMAT_0;
19438 pdcch->dci.u.format0Info.hoppingEnbld = alloc->grnt.hop;
19439 pdcch->dci.u.format0Info.rbStart = alloc->grnt.rbStart;
19440 pdcch->dci.u.format0Info.numRb = alloc->grnt.numRb;
19441 pdcch->dci.u.format0Info.mcs = alloc->grnt.iMcsCrnt;
19442 pdcch->dci.u.format0Info.ndi = alloc->hqProc->ndi;
19443 pdcch->dci.u.format0Info.nDmrs = alloc->grnt.nDmrs;
19444 pdcch->dci.u.format0Info.tpcCmd = alloc->grnt.tpc;
19448 /* ulIdx setting for cfg 0 shall be appropriately fixed thru ccpu00109015 */
19449 pdcch->dci.u.format0Info.ulIdx = RG_SCH_ULIDX_MSB;
19450 pdcch->dci.u.format0Info.dai = RG_SCH_MAX_DAI_IDX;
19453 pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_0];
/* Non-Msg3 (UE) path: refresh UE-side allocation bookkeeping */
19457 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue,cell);
19459 alloc->ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
19462 ue->ul.nPrb = alloc->grnt.numRb;
19464 ueUl->alloc.alloc = alloc;
19465 /* FIx: Removed the call to rgSCHCmnUlAdapRetx */
19466 rgSCHCmnUlUeFillAllocInfo(cell, alloc->ue);
19467 /* Setting csireq as false for Adaptive Retx*/
19468 ueUl->alloc.alloc->pdcch->dci.u.format0Info.cqiReq = RG_SCH_APCQI_NO;
19469 pdcch->dciNumOfBits = alloc->ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
19471 /* Reset as retransmission is done */
19472 proc->isRetx = FALSE;
19474 else /* Intg fix */
/* Hole too small for the required subbands: return the PDCCH and fail */
19476 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
19477 DU_LOG("\nERROR --> SCH : Num SB not suffiecient for adap retx for rnti: %d",
19478 proc->reTxAlloc.rnti);
19484 /* Fix: syed Adaptive Msg3 Retx crash. */
19486 * @brief Releases all Adaptive Retx HqProcs which failed for
19487 * allocations in this scheduling occasion.
19491 * Function : rgSCHCmnUlSfRlsRetxProcs
19494 * @param[in] RgSchCellCb *cell
19495 * @param[in] RgSchUlSf *sf
/* Release adaptive-retx HARQ processes that could not be allocated
 * in this scheduling occasion: drain the cell's reTxLst, unlinking each
 * process and clearing its link node pointer (fix for adaptive Msg3
 * retx crash, ccpu00137834).
 * @param[in] cell  cell control block
 * @param[in] sf    UL subframe (unused in visible lines)
 */
19499 static Void rgSCHCmnUlSfRlsRetxProcs(RgSchCellCb *cell,RgSchUlSf *sf)
19503 RgSchUlHqProcCb *proc;
19504 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19506 cp = &(cellUl->reTxLst);
19510 proc = (RgSchUlHqProcCb *)node->node;
19512 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
19513 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
19514 proc->reTxLnk.node = (PTR)NULLP;
19521 * @brief Attempts allocation for UEs for which retransmissions
19526 * Function : rgSCHCmnUlSfReTxAllocs
19528 * Attempts allocation for UEs for which retransmissions
19531 * @param[in] RgSchCellCb *cell
19532 * @param[in] RgSchUlSf *sf
/* Attempt adaptive retransmission allocations for queued HARQ processes.
 * Iterates the cell's reTxLst; skips UEs that are measuring or in
 * ACK/NACK repetition, stops when the subframe has no holes or has hit
 * its per-subframe allocation limit, and on successful allocation
 * removes the process from the retx list.
 * @param[in] cell  cell control block
 * @param[in] sf    UL subframe to place retransmissions in
 */
19535 static Void rgSCHCmnUlSfReTxAllocs(RgSchCellCb *cell,RgSchUlSf *sf)
19539 RgSchUlHqProcCb *proc;
19542 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
19543 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19545 cp = &(cellUl->reTxLst);
19549 proc = (RgSchUlHqProcCb *)node->node;
19550 ue = proc->reTxAlloc.ue;
19552 /*ccpu00106104 MOD added check for AckNackRep */
19553 /*added check for acknack so that adaptive retx considers ue
19554 inactivity due to ack nack repetition*/
/* Skip UEs that are temporarily unreachable on UL */
19555 if((ue != NULLP) &&
19556 ((ue->measGapCb.isMeasuring == TRUE)||
19557 (ue->ackNakRepCb.isAckNakRep == TRUE)))
19561 /* Fix for ccpu00123917: Check if maximum allocs per UL sf have been exhausted */
19562 if (((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
19563 || (sf->allocDb->count == schCmnCell->ul.maxAllocPerUlSf))
19565 /* No more UL BW then return */
19568 /* perform adaptive retx for UE's */
19569 if (rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole) == FALSE)
/* Allocation succeeded: take the process off the retx list */
19573 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
19574 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
19575 /* Fix: syed Adaptive Msg3 Retx crash. */
19576 proc->reTxLnk.node = (PTR)NULLP;
19582 * @brief Handles RB allocation for downlink.
19586 * Function : rgSCHCmnDlRbAlloc
19588 * Invoking Module Processing:
19589 * - This function is invoked for DL RB allocation
19591 * Processing Steps:
19592 * - If cell is frequency selecive,
19593 * - Call rgSCHDlfsAllocRb().
19595 * - Call rgSCHCmnNonDlfsRbAlloc().
19597 * @param[in] RgSchCellCb *cell
19598 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* DL RB allocation dispatcher.
 * Routes to the DLFS (frequency-selective) allocator when the cell is
 * frequency selective, otherwise to the non-DLFS allocator.
 * @param[in] cell       cell control block
 * @param[in] allocInfo  DL RB allocation info to fill
 */
19602 static Void rgSCHCmnDlRbAlloc(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
19604 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
19606 if (cellSch->dl.isDlFreqSel)
19608 DU_LOG("\nINFO --> SCH : 5GTF_ERROR DLFS SCH Enabled\n");
19609 cellSch->apisDlfs->rgSCHDlfsAllocRb(cell, allocInfo);
19613 rgSCHCmnNonDlfsRbAlloc(cell, allocInfo);
19621 * @brief Determines number of RBGs and RBG subset sizes for the given DL
19622 * bandwidth and rbgSize
19625 * Function : rgSCHCmnDlGetRbgInfo
19628 * Processing Steps:
19629 * - Fill-up rbgInfo data structure for given DL bandwidth and rbgSize
19631 * @param[in] uint8_t dlTotalBw
19632 * @param[in] uint8_t dlSubsetBw
19633 * @param[in] uint8_t maxRaType1SubsetBw
19634 * @param[in] uint8_t rbgSize
19635 * @param[out] RgSchBwRbgInfo *rbgInfo
/* Compute RBG counts and sizes for a DL bandwidth and RBG size.
 * Fills rbgInfo with the number of RBGs covering dlSubsetBw, the size
 * of the (possibly short) last RBG, the covered RB count, and the RBG
 * size. RA type 1 subset sizing is compiled out (RGSCH_SPS_UNUSED).
 * @param[in]  dlTotalBw           total DL bandwidth in RBs
 * @param[in]  dlSubsetBw          subset bandwidth to cover in RBs
 * @param[in]  maxRaType1SubsetBw  max RA type 1 subset BW (unused here)
 * @param[in]  rbgSize             RBs per RBG
 * @param[out] rbgInfo             result structure
 */
19638 Void rgSCHCmnDlGetRbgInfo
19641 uint8_t dlSubsetBw,
19642 uint8_t maxRaType1SubsetBw,
19644 RgSchBwRbgInfo *rbgInfo
19647 #ifdef RGSCH_SPS_UNUSED
19649 uint8_t lastRbgIdx = ((dlTotalBw + rbgSize - 1)/rbgSize) - 1;
19650 uint8_t currRbgSize = rbgSize;
19651 uint8_t subsetSizeIdx = 0;
19652 uint8_t subsetSize[RG_SCH_NUM_RATYPE1_SUBSETS] = {0};
19653 uint8_t lastRbgSize = rbgSize - (dlTotalBw - ((dlTotalBw/rbgSize) * rbgSize));
19654 uint8_t numRaType1Rbgs = (maxRaType1SubsetBw + rbgSize - 1)/rbgSize;
19657 /* Compute maximum number of SPS RBGs for the cell */
19658 rbgInfo->numRbgs = ((dlSubsetBw + rbgSize - 1)/rbgSize);
19660 #ifdef RGSCH_SPS_UNUSED
19661 /* Distribute RBGs across subsets except last RBG */
19662 for (;idx < numRaType1Rbgs - 1; ++idx)
19664 subsetSize[subsetSizeIdx] += currRbgSize;
19665 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
19668 /* Computation for last RBG */
19669 if (idx == lastRbgIdx)
19671 currRbgSize = lastRbgSize;
19673 subsetSize[subsetSizeIdx] += currRbgSize;
19674 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
19677 /* Update the computed sizes */
19678 #ifdef RGSCH_SPS_UNUSED
19679 rbgInfo->lastRbgSize = currRbgSize;
/* Last RBG shrinks when dlSubsetBw is not a multiple of rbgSize
 * NOTE(review): when it IS a multiple this yields rbgSize - confirm
 * that callers expect a full-size last RBG in that case. */
19681 rbgInfo->lastRbgSize = rbgSize -
19682 (dlSubsetBw - ((dlSubsetBw/rbgSize) * rbgSize));
19683 #ifdef RGSCH_SPS_UNUSED
19684 memcpy(rbgInfo->rbgSubsetSize, subsetSize, 4 * sizeof(uint8_t));
/* Clamp covered RBs at the total DL bandwidth */
19686 rbgInfo->numRbs = (rbgInfo->numRbgs * rbgSize > dlTotalBw) ?
19687 dlTotalBw:(rbgInfo->numRbgs * rbgSize);
19688 rbgInfo->rbgSize = rbgSize;
19692 * @brief Handles RB allocation for Resource allocation type 0
19696 * Function : rgSCHCmnDlRaType0Alloc
19698 * Invoking Module Processing:
19699 * - This function is invoked for DL RB allocation for resource allocation
19702 * Processing Steps:
19703 * - Determine the available positions in the rbgMask.
19704 * - Allocate RBGs in the available positions.
19705 * - Update RA Type 0, RA Type 1 and RA type 2 masks.
19707 * @param[in] RgSchDlSfAllocInfo *allocedInfo
19708 * @param[in] uint8_t rbsReq
19709 * @param[in] RgSchBwRbgInfo *rbgInfo
19710 * @param[out] uint8_t *numAllocRbs
19711 * @param[out] RgSchDlSfAllocInfo *resAllocInfo
19712 * @param[in] Bool isPartialAlloc
/* DL RB allocation using resource allocation type 0 (RBG bitmap).
 * Counts already-used RBGs in allocedInfo, computes the remaining RBs
 * (the short last RBG handled separately), decides how many RBs to
 * grant (all-or-nothing unless isPartialAlloc), then walks the free
 * RBG positions setting the RA type 0 mask and the corresponding
 * RA type 2 mask bits for every RB covered.
 * @param[in]  allocedInfo     already-allocated masks for the subframe
 * @param[in]  rbsReq          RBs requested
 * @param[in]  rbgInfo         RBG layout for the bandwidth
 * @param[out] numAllocRbs     RBs actually allocated
 * @param[out] resAllocInfo    masks updated with this allocation
 * @param[in]  isPartialAlloc  allow partial allocation when TRUE
 * @return uint8_t - number of RBGs allocated (numAllocRbgs)
 */
19717 uint8_t rgSCHCmnDlRaType0Alloc
19719 RgSchDlSfAllocInfo *allocedInfo,
19721 RgSchBwRbgInfo *rbgInfo,
19722 uint8_t *numAllocRbs,
19723 RgSchDlSfAllocInfo *resAllocInfo,
19724 Bool isPartialAlloc
19727 /* Note: This function atttempts allocation only full allocation */
19728 uint32_t remNumRbs, rbgPosInRbgMask, ueRaType2Mask;
19729 uint8_t type2MaskIdx, cnt, rbIdx;
19730 uint8_t maskSize, rbg;
19731 uint8_t bestNumAvailRbs = 0;
19732 uint8_t usedRbs = 0;
19733 uint8_t numAllocRbgs = 0;
19734 uint8_t rbgSize = rbgInfo->rbgSize;
19735 uint32_t *rbgMask = &(resAllocInfo->raType0Mask);
19736 #ifdef RGSCH_SPS_UNUSED
19738 uint32_t ueRaType1Mask;
19739 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
19740 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
19742 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
19744 uint32_t allocedMask = allocedInfo->raType0Mask;
19746 maskSize = rbgInfo->numRbgs;
/* Count RBGs already taken in the existing mask */
19749 RG_SCH_CMN_DL_COUNT_ONES(allocedMask, maskSize, &usedRbs);
19750 if (maskSize == usedRbs)
19752 /* All RBGs are allocated, including the last one */
/* Remaining RBs excluding the last RBG (it may be short, added below) */
19757 remNumRbs = (maskSize - usedRbs - 1) * rbgSize; /* vamsee: removed minus 1 */
19759 /* If last RBG is available, add last RBG size */
19760 if (!(allocedMask & (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(maskSize - 1))))
19762 remNumRbs += rbgInfo->lastRbgSize;
19766 /* If complete allocation is needed, check if total requested RBs are available else
19767 * check the best available RBs */
19768 if (!isPartialAlloc)
19770 if (remNumRbs >= rbsReq)
19772 bestNumAvailRbs = rbsReq;
19777 bestNumAvailRbs = remNumRbs > rbsReq ? rbsReq : remNumRbs;
19780 /* Allocate for bestNumAvailRbs */
19781 if (bestNumAvailRbs)
/* Walk all RBGs except the last (full-size RBGs only) */
19783 for (rbg = 0; rbg < maskSize - 1; ++rbg)
19785 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
19786 if (!(allocedMask & rbgPosInRbgMask))
19788 /* Update RBG mask */
19789 *rbgMask |= rbgPosInRbgMask;
19791 /* Compute RB index of the first RB of the RBG allocated */
19792 rbIdx = rbg * rbgSize;
/* Mirror each RB of this RBG into the RA type 2 mask */
19794 for (cnt = 0; cnt < rbgSize; ++cnt)
19796 #ifdef RGSCH_SPS_UNUSED
19797 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
19799 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
19800 #ifdef RGSCH_SPS_UNUSED
19801 /* Update RBG mask for RA type 1 */
19802 raType1Mask[rbgSubset] |= ueRaType1Mask;
19803 raType1UsedRbs[rbgSubset]++;
19805 /* Update RA type 2 mask */
19806 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
19809 *numAllocRbs += rbgSize;
19810 remNumRbs -= rbgSize;
19812 if (*numAllocRbs >= bestNumAvailRbs)
19818 /* If last RBG available and allocation is not completed, allocate
19820 if (*numAllocRbs < bestNumAvailRbs)
19822 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
19823 *rbgMask |= rbgPosInRbgMask;
19824 *numAllocRbs += rbgInfo->lastRbgSize;
19826 /* Compute RB index of the first RB of the last RBG */
19827 rbIdx = ((rbgInfo->numRbgs - 1 ) * rbgSize ); /* removed minus 1 vamsee */
19829 for (cnt = 0; cnt < rbgInfo->lastRbgSize; ++cnt)
19831 #ifdef RGSCH_SPS_UNUSED
19832 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
19834 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
19835 #ifdef RGSCH_SPS_UNUSED
19836 /* Update RBG mask for RA type 1 */
19837 raType1Mask[rbgSubset] |= ueRaType1Mask;
19838 raType1UsedRbs[rbgSubset]++;
19840 /* Update RA type 2 mask */
19841 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
19844 remNumRbs -= rbgInfo->lastRbgSize;
19847 /* Note: this should complete allocation, not checking for the
19851 return (numAllocRbgs);
19854 #ifdef RGSCH_SPS_UNUSED
19856 * @brief Handles RB allocation for Resource allocation type 1
19860 * Function : rgSCHCmnDlRaType1Alloc
19862 * Invoking Module Processing:
19863 * - This function is invoked for DL RB allocation for resource allocation
19866 * Processing Steps:
19867 * - Determine the available positions in the subsets.
19868 * - Allocate RB in the available subset.
19869 * - Update RA Type1, RA type 0 and RA type 2 masks.
19871 * @param[in] RgSchDlSfAllocInfo *allocedInfo
19872 * @param[in] uint8_t rbsReq
19873 * @param[in] RgSchBwRbgInfo *rbgInfo
19874 * @param[in] uint8_t startRbgSubset
19875 * @param[in] uint8_t *allocRbgSubset
19876 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
19877 * @param[in] Bool isPartialAlloc
19880 * Number of allocated RBs
/*
 * Allocates downlink RBs using resource allocation type 1 (RBG-subset
 * granularity).  Pass 1 walks the rbgSize subsets starting at
 * startRbgSubset and picks the first subset that can satisfy rbsReq
 * (or, for partial allocation, the subset with the most free RBs).
 * Pass 2 claims individual free RBs in the chosen subset and mirrors each
 * one into the RA type 0 and RA type 2 masks of resAllocInfo.
 * On success *allocRbgSubset reports the subset used; returns the number
 * of RBs allocated (0 when nothing fits).
 */
19883 uint8_t rgSCHCmnDlRaType1Alloc
19885 RgSchDlSfAllocInfo *allocedInfo,
19887 RgSchBwRbgInfo *rbgInfo,
19888 uint8_t startRbgSubset,
19889 uint8_t *allocRbgSubset,
19890 RgSchDlSfAllocInfo *resAllocInfo,
19891 Bool isPartialAlloc
19894 /* Note: This function atttempts only full allocation */
19895 uint8_t *rbgSubsetSzArr;
19896 uint8_t type2MaskIdx, subsetIdx, rbIdx, rbInSubset, rbgInSubset;
19897 uint8_t offset, rbg, maskSize, bestSubsetIdx;
19898 uint8_t startPos = 0;
19899 uint8_t bestNumAvailRbs = 0;
19900 uint8_t numAllocRbs = 0;
19901 uint32_t ueRaType2Mask, ueRaType0Mask, rbPosInSubset;
19902 uint32_t remNumRbs, allocedMask;
19903 uint8_t usedRbs = 0;
19904 uint8_t rbgSize = rbgInfo->rbgSize;
19905 uint8_t rbgSubset = startRbgSubset;
19906 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
19907 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
19908 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
19909 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
19910 uint32_t *allocMask = allocedInfo->raType1Mask;
19912 /* Initialize the subset size Array */
19913 rbgSubsetSzArr = rbgInfo->rbgSubsetSize;
19915 /* Perform allocation for RA type 1 */
19916 for (subsetIdx = 0;subsetIdx < rbgSize; ++subsetIdx)
19918 allocedMask = allocMask[rbgSubset];
19919 maskSize = rbgSubsetSzArr[rbgSubset];
19921 /* Determine number of available RBs in the subset */
/* NOTE(review): mask/size above are indexed by rbgSubset but usedRbs by
 * subsetIdx (the loop counter) - the two differ once rbgSubset wraps.
 * Looks like this should be raType1UsedRbs[rbgSubset]; confirm. */
19922 usedRbs = allocedInfo->raType1UsedRbs[subsetIdx];
19923 remNumRbs = maskSize - usedRbs;
19925 if (remNumRbs >= rbsReq)
19927 bestNumAvailRbs = rbsReq;
19928 bestSubsetIdx = rbgSubset;
19931 else if (isPartialAlloc && (remNumRbs > bestNumAvailRbs))
19933 bestNumAvailRbs = remNumRbs;
19934 bestSubsetIdx = rbgSubset;
/* Round-robin over the rbgSize subsets starting at startRbgSubset */
19937 rbgSubset = (rbgSubset + 1) % rbgSize;
19938 } /* End of for (each rbgsubset) */
19940 if (bestNumAvailRbs)
19942 /* Initialize alloced mask and subsetSize depending on the RBG
19943 * subset of allocation */
19944 uint8_t startIdx = 0;
19945 maskSize = rbgSubsetSzArr[bestSubsetIdx];
19946 allocedMask = allocMask[bestSubsetIdx];
19947 RG_SCH_CMN_DL_GET_START_POS(allocedMask, maskSize,
19949 for (; startIdx < rbgSize; ++startIdx, ++startPos)
/* Step through the subset rbgSize positions apart from startPos */
19951 for (rbInSubset = startPos; rbInSubset < maskSize;
19952 rbInSubset = rbInSubset + rbgSize)
19954 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
19955 if (!(allocedMask & rbPosInSubset))
19957 raType1Mask[bestSubsetIdx] |= rbPosInSubset;
19958 raType1UsedRbs[bestSubsetIdx]++;
19960 /* Compute RB index value for the RB being allocated */
19961 rbgInSubset = rbInSubset /rbgSize;
19962 offset = rbInSubset % rbgSize;
19963 rbg = (rbgInSubset * rbgSize) + bestSubsetIdx;
19964 rbIdx = (rbg * rbgSize) + offset;
19966 /* Update RBG mask for RA type 0 allocation */
19967 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
19968 *rbgMask |= ueRaType0Mask;
19970 /* Update RA type 2 mask */
19971 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
19972 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
19974 /* Update the counters */
19977 if (numAllocRbs == bestNumAvailRbs)
19982 } /* End of for (each position in the subset mask) */
19983 if (numAllocRbs == bestNumAvailRbs)
19987 } /* End of for startIdx = 0 to rbgSize */
19989 *allocRbgSubset = bestSubsetIdx;
19990 } /* End of if (bestNumAvailRbs) */
19992 return (numAllocRbs);
19996 * @brief Handles RB allocation for Resource allocation type 2
20000 * Function : rgSCHCmnDlRaType2Alloc
20002 * Invoking Module Processing:
20003 * - This function is invoked for DL RB allocation for resource allocation
20006 * Processing Steps:
20007 * - Determine the available positions in the mask
20008 * - Allocate best fit cosecutive RBs.
20009 * - Update RA Type2, RA type 1 and RA type 0 masks.
20011 * @param[in] RgSchDlSfAllocInfo *allocedInfo
20012 * @param[in] uint8_t rbsReq
20013 * @param[in] RgSchBwRbgInfo *rbgInfo
20014 * @param[out] uint8_t *rbStart
20015 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
20016 * @param[in] Bool isPartialAlloc
20019 * Number of allocated RBs
/*
 * Allocates downlink RBs using resource allocation type 2 (contiguous
 * RBs).  Delegates the search to rgSCHCmnDlGetBestFitHole, which finds
 * the best-fit free hole in allocedInfo->raType2Mask and fills in
 * *rbStart / numAllocRbs.  Each allocated RB is then mirrored into the
 * RA type 0 mask (and, under RGSCH_SPS_UNUSED, RA type 1 masks) of
 * resAllocInfo.  Returns the number of RBs allocated (0 when no hole
 * fits and partial allocation was not requested).
 */
20022 uint8_t rgSCHCmnDlRaType2Alloc
20024 RgSchDlSfAllocInfo *allocedInfo,
20026 RgSchBwRbgInfo *rbgInfo,
20028 RgSchDlSfAllocInfo *resAllocInfo,
20029 Bool isPartialAlloc
20032 uint8_t numAllocRbs = 0;
20034 uint8_t rbgSize = rbgInfo->rbgSize;
20035 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
20036 #ifdef RGSCH_SPS_UNUSED
20037 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
20039 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
20040 #ifdef RGSCH_SPS_UNUSED
20041 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
20043 uint32_t *allocedMask = allocedInfo->raType2Mask;
20045 /* Note: This function atttempts only full allocation */
20046 rgSCHCmnDlGetBestFitHole(allocedMask, rbgInfo->numRbs,
20047 raType2Mask, rbsReq, rbStart, &numAllocRbs, isPartialAlloc);
20050 /* Update the allocation in RA type 0 and RA type 1 masks */
20051 uint8_t rbCnt = numAllocRbs;
20052 #ifdef RGSCH_SPS_UNUSED
20054 uint32_t ueRaType1Mask;
20056 uint32_t ueRaType0Mask;
/* Walk the allocated contiguous range, RB by RB (loop header elided) */
20061 /* Update RBG mask for RA type 0 allocation */
20062 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
20063 *rbgMask |= ueRaType0Mask;
20065 #ifdef RGSCH_SPS_UNUSED
20066 /* Update RBG mask for RA type 1 */
20067 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
20068 raType1Mask[rbgSubset] |= ueRaType1Mask;
20069 raType1UsedRbs[rbgSubset]++;
20071 /* Update the counters */
20077 return (numAllocRbs);
20081 * @brief Determines RA type 0 mask from given RB index.
20085 * Function : rgSCHCmnGetRaType0Mask
20088 * Processing Steps:
20089 * - Determine RA Type 0 mask for given rbIdex and rbg size.
20091 * @param[in] uint8_t rbIdx
20092 * @param[in] uint8_t rbgSize
20093 * @return uint32_t RA type 0 mask
/*
 * Maps an RB index to its RA type 0 bitmask bit: the owning RBG is
 * rbIdx / rbgSize and the returned word has exactly that RBG's bit set
 * (bit position taken from the MSB-anchored RG_SCH_CMN_DL_GET_POS_FRM_LSB
 * convention used throughout this file).
 */
20095 static uint32_t rgSCHCmnGetRaType0Mask(uint8_t rbIdx,uint8_t rbgSize)
20098 uint32_t rbgPosInRbgMask = 0;
20100 rbg = rbIdx/rbgSize;
20101 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
20103 return (rbgPosInRbgMask);
20106 #ifdef RGSCH_SPS_UNUSED
20108 * @brief Determines RA type 1 mask from given RB index.
20112 * Function : rgSCHCmnGetRaType1Mask
20115 * Processing Steps:
20116 * - Determine RA Type 1 mask for given rbIdex and rbg size.
20118 * @param[in] uint8_t rbIdx
20119 * @param[in] uint8_t rbgSize
20120 * @param[out] uint8_t *type1Subset
20121 * @return uint32_t RA type 1 mask
/*
 * Maps an RB index to its RA type 1 position: the RB's RBG (rbIdx/rbgSize)
 * selects the subset (rbg % rbgSize, returned via *type1Subset), and the
 * RB's slot within that subset is rbgInSubset * rbgSize + offset.
 * Returns a 32-bit mask with only that subset-relative bit set.
 * Compiled only under RGSCH_SPS_UNUSED.
 */
20123 static uint32_t rgSCHCmnGetRaType1Mask(uint8_t rbIdx,uint8_t rbgSize,uint8_t *type1Subset)
20125 uint8_t rbg, rbgSubset, rbgInSubset, offset, rbInSubset;
20126 uint32_t rbPosInSubset;
20128 rbg = rbIdx/rbgSize;
20129 rbgSubset = rbg % rbgSize;
20130 rbgInSubset = rbg/rbgSize;
20131 offset = rbIdx % rbgSize;
20132 rbInSubset = rbgInSubset * rbgSize + offset;
20133 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
20135 *type1Subset = rbgSubset;
20136 return (rbPosInSubset);
20138 #endif /* RGSCH_SPS_UNUSED */
20140 * @brief Determines RA type 2 mask from given RB index.
20144 * Function : rgSCHCmnGetRaType2Mask
20147 * Processing Steps:
20148 * - Determine RA Type 2 mask for given rbIdx and rbg size.
20150 * @param[in] uint8_t rbIdx
20151 * @param[out] uint8_t *maskIdx
20152 * @return uint32_t RA type 2 mask
/*
 * Maps an RB index to its RA type 2 position in the array of 32-bit mask
 * words: *maskIdx receives the word index (rbIdx / 32) and the return
 * value is a word with only the bit for rbIdx % 32 set.
 */
20154 static uint32_t rgSCHCmnGetRaType2Mask(uint8_t rbIdx,uint8_t *maskIdx)
20156 uint32_t rbPosInType2;
20158 *maskIdx = rbIdx / 32;
20159 rbPosInType2 = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbIdx % 32);
20161 return (rbPosInType2);
20165 * @brief Performs resource allocation for a non-SPS UE in SPS bandwidth
20169 * Function : rgSCHCmnAllocUeInSpsBw
20172 * Processing Steps:
20173 * - Determine allocation for the UE.
20174 * - Use resource allocation type 0, 1 and 2 for allocation
20175 * within maximum SPS bandwidth.
20177 * @param[in] RgSchDlSf *dlSf
20178 * @param[in] RgSchCellCb *cell
20179 * @param[in] RgSchUeCb *ue
20180 * @param[in] RgSchDlRbAlloc *rbAllocInfo
20181 * @param[in] Bool isPartialAlloc
/*
 * Tries to place a non-SPS UE's DL allocation inside the reserved SPS
 * bandwidth region.  Dispatches to the RA type 0 / (type 1) / type 2
 * allocators against cell->spsBwRbgInfo, allocates a PDCCH, converts the
 * granted RBs to TB sizes via rgTbSzTbl, and folds the per-allocation
 * masks back into the subframe-wide dlSfAllocInfo.
 * Returns TRUE when the caller may keep trying (including PDCCH-miss for
 * this UE) and FALSE when SPS bandwidth is exhausted for this subframe.
 * NOTE(review): many brace/return lines are elided in this view; the
 * return-value convention above is inferred from the visible comments
 * and should be confirmed against the full source.
 */
20186 Bool rgSCHCmnAllocUeInSpsBw
20191 RgSchDlRbAlloc *rbAllocInfo,
20192 Bool isPartialAlloc
20195 uint8_t rbgSize = cell->rbgSize;
20196 uint8_t numAllocRbs = 0;
20197 uint8_t numAllocRbgs = 0;
20198 uint8_t rbStart = 0;
20199 uint8_t idx, noLyr, iTbs;
20200 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
20201 RgSchDlSfAllocInfo *dlSfAlloc = &rbAllocInfo->dlSf->dlSfAllocInfo;
20202 RgSchBwRbgInfo *spsRbgInfo = &cell->spsBwRbgInfo;
20204 /* SPS_FIX : Check if this Hq proc is scheduled */
20205 if ((0 == rbAllocInfo->tbInfo[0].schdlngForTb) &&
20206 (0 == rbAllocInfo->tbInfo[1].schdlngForTb))
20211 /* Check if the requirement can be accomodated in SPS BW */
20212 if (dlSf->spsAllocdBw == spsRbgInfo->numRbs)
20214 /* SPS Bandwidth has been exhausted: no further allocations possible */
/* Full allocation: bail out early when the request cannot fit entirely */
20217 if (!isPartialAlloc)
20219 if((dlSf->spsAllocdBw + rbAllocInfo->rbsReq) > spsRbgInfo->numRbs)
20225 /* Perform allocation for RA type 0 if rbsReq is multiple of RBG size (also
20226 * if RBG size = 1) */
20227 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Round the request up to a whole number of RBGs before allocating */
20229 rbAllocInfo->rbsReq += (rbgSize - rbAllocInfo->rbsReq % rbgSize);
20230 numAllocRbgs = rgSCHCmnDlRaType0Alloc(dlSfAlloc,
20231 rbAllocInfo->rbsReq, spsRbgInfo, &numAllocRbs,
20232 &rbAllocInfo->resAllocInfo, isPartialAlloc);
20234 #ifdef RGSCH_SPS_UNUSED
20235 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
20237 /* If no RBS could be allocated, attempt RA TYPE 1 */
20239 numAllocRbs = rgSCHCmnDlRaType1Alloc(dlSfAlloc,
20240 rbAllocInfo->rbsReq, spsRbgInfo, (uint8_t)dlSfAlloc->nxtRbgSubset,
20241 &rbAllocInfo->allocInfo.raType1.rbgSubset,
20242 &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* Advance the round-robin start subset for the next type 1 allocation */
20246 dlSfAlloc->nxtRbgSubset =
20247 (rbAllocInfo->allocInfo.raType1.rbgSubset + 1 ) % rbgSize;
20251 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20253 numAllocRbs = rgSCHCmnDlRaType2Alloc(dlSfAlloc,
20254 rbAllocInfo->rbsReq, spsRbgInfo,
20255 &rbStart, &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* RBs granted: now try to get a PDCCH for this UE */
20262 if (!(rbAllocInfo->pdcch =
20263 rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi,\
20264 rbAllocInfo->dciFormat, FALSE)))
20266 /* Note: Returning TRUE since PDCCH might be available for another UE */
20270 /* Update Tb info for each scheduled TB */
20271 iTbs = rbAllocInfo->tbInfo[0].iTbs;
20272 noLyr = rbAllocInfo->tbInfo[0].noLyr;
/* rgTbSzTbl is in bits; /8 converts to bytes.  Indexing assumes
 * numAllocRbs >= 1 and noLyr >= 1 (guaranteed by the grant above). */
20273 rbAllocInfo->tbInfo[0].bytesAlloc =
20274 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
20276 if (rbAllocInfo->tbInfo[1].schdlngForTb)
20278 iTbs = rbAllocInfo->tbInfo[1].iTbs;
20279 noLyr = rbAllocInfo->tbInfo[1].noLyr;
20280 rbAllocInfo->tbInfo[1].bytesAlloc =
20281 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
20284 /* Update rbAllocInfo with the allocation information */
20285 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
20287 rbAllocInfo->allocInfo.raType0.dlAllocBitMask =
20288 rbAllocInfo->resAllocInfo.raType0Mask;
20289 rbAllocInfo->allocInfo.raType0.numDlAlloc = numAllocRbgs;
20291 #ifdef RGSCH_SPS_UNUSED
20292 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
20294 rbAllocInfo->allocInfo.raType1.dlAllocBitMask =
20295 rbAllocInfo->resAllocInfo.raType1Mask[rbAllocInfo->allocInfo.raType1.rbgSubset];
20296 rbAllocInfo->allocInfo.raType1.numDlAlloc = numAllocRbs;
20297 rbAllocInfo->allocInfo.raType1.shift = 0;
20300 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20302 rbAllocInfo->allocInfo.raType2.isLocal = TRUE;
20303 rbAllocInfo->allocInfo.raType2.rbStart = rbStart;
20304 rbAllocInfo->allocInfo.raType2.numRb = numAllocRbs;
20307 rbAllocInfo->rbsAlloc = numAllocRbs;
20308 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
20310 /* Update allocation masks for RA types 0, 1 and 2 in DL SF */
20312 /* Update type 0 allocation mask */
20313 dlSfAlloc->raType0Mask |= rbAllocInfo->resAllocInfo.raType0Mask;
20314 #ifdef RGSCH_SPS_UNUSED
20315 /* Update type 1 allocation masks */
20316 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
20318 dlSfAlloc->raType1Mask[idx] |= rbAllocInfo->resAllocInfo.raType1Mask[idx];
20319 dlSfAlloc->raType1UsedRbs[idx] +=
20320 rbAllocInfo->resAllocInfo.raType1UsedRbs[idx];
20323 /* Update type 2 allocation masks */
20324 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
20326 dlSfAlloc->raType2Mask[idx] |= rbAllocInfo->resAllocInfo.raType2Mask[idx];
/* Account the grant against the subframe's SPS bandwidth budget */
20329 dlSf->spsAllocdBw += numAllocRbs;
20333 /***********************************************************
20335 * Func : rgSCHCmnDlGetBestFitHole
20338 * Desc : Converts the best fit hole into allocation and returns the
20339 * allocation information.
20349 **********************************************************/
/*
 * Scans allocMask (MSB-first within each 32-bit word) for a run of free
 * RBs.  An exact run of rbsReq RBs is converted into an allocation
 * immediately; otherwise, when isPartialAlloc is TRUE, the largest run
 * seen (bestMask/bestStartPos) is used instead.  On success the chosen
 * bits are OR-merged into crntAllocMask and *allocStart/*allocNumRbs are
 * set; on failure *allocNumRbs stays 0.
 * NOTE(review): the memcpy/memset calls use a hard-coded "4" word count
 * rather than RG_SCH_NUM_RATYPE2_32BIT_MASK - confirm the two agree.
 */
20350 static Void rgSCHCmnDlGetBestFitHole
20352 uint32_t *allocMask,
20353 uint8_t numMaskRbs,
20354 uint32_t *crntAllocMask,
20356 uint8_t *allocStart,
20357 uint8_t *allocNumRbs,
20358 Bool isPartialAlloc
20361 uint8_t maskSz = (numMaskRbs + 31)/32;
20362 uint8_t maxMaskPos = (numMaskRbs % 32);
20363 uint8_t maskIdx, maskPos;
20364 uint8_t numAvailRbs = 0;
20365 uint8_t bestAvailNumRbs = 0;
20366 S8 bestStartPos = -1;
20368 uint32_t tmpMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
20369 uint32_t bestMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
20371 *allocNumRbs = numAvailRbs;
20374 for (maskIdx = 0; maskIdx < maskSz; ++maskIdx)
/* Only the final word may be partially populated; clamp its bit range */
20377 if (maskIdx == (maskSz - 1))
20379 if (numMaskRbs % 32)
20381 maxMaskPos = numMaskRbs % 32;
20384 for (maskPos = 0; maskPos < maxMaskPos; ++maskPos)
/* Bits are MSB-anchored: RB k of this word is bit (31 - k) */
20386 if (!(allocMask[maskIdx] & (1 << (31 - maskPos))))
20388 tmpMask[maskIdx] |= (1 << (31 - maskPos));
20389 if (startPos == -1)
20391 startPos = maskIdx * 32 + maskPos;
20394 if (numAvailRbs == rbsReq)
20396 *allocStart = (uint8_t)startPos;
20397 *allocNumRbs = rbsReq;
/* Hole ended short of rbsReq: remember it if it is the best so far */
20403 if (numAvailRbs > bestAvailNumRbs)
20405 bestAvailNumRbs = numAvailRbs;
20406 bestStartPos = startPos;
20407 memcpy(bestMask, tmpMask, 4 * sizeof(uint32_t));
20411 memset(tmpMask, 0, 4 * sizeof(uint32_t));
20414 if (*allocNumRbs == rbsReq)
20420 if (*allocNumRbs == rbsReq)
20422 /* Convert the hole into allocation */
20423 memcpy(crntAllocMask, tmpMask, 4 * sizeof(uint32_t));
20428 if (bestAvailNumRbs && isPartialAlloc)
20430 /* Partial allocation could have been done */
20431 *allocStart = (uint8_t)bestStartPos;
20432 *allocNumRbs = bestAvailNumRbs;
20433 /* Convert the hole into allocation */
20434 memcpy(crntAllocMask, bestMask, 4 * sizeof(uint32_t));
20440 #endif /* LTEMAC_SPS */
20442 /***************************************************************************
20444 * NON-DLFS Allocation functions
20446 * *************************************************************************/
20450 * @brief Function to find out code rate
20454 * Function : rgSCHCmnFindCodeRate
20456 * Processing Steps:
20458 * @param[in] RgSchCellCb *cell
20459 * @param[in] RgSchDlSf *dlSf
20460 * @param[in,out] RgSchDlRbAlloc *allocInfo
/*
 * Recomputes the effective code rate / IMCS after the RB allocation has
 * been adjusted.
 * NOTE(review): the executable body of this function is not visible in
 * this view (only its explanatory comment survives); do not rely on the
 * description below without checking the full source.
 */
20464 static Void rgSCHCmnFindCodeRate
20468 RgSchDlRbAlloc *allocInfo,
20477 /* Adjust the Imcs and bytes allocated also with respect to the adjusted
20478 RBs - Here we will find out the Imcs by identifying first Highest
20479 number of bits compared to the original bytes allocated. */
20481 * @brief Adjust IMCS according to tbSize and ITBS
20485 * Function : rgSCHCmnNonDlfsPbchTbImcsAdj
20487 * Processing Steps:
20488 * - Adjust Imcs according to tbSize and ITBS.
20490 * @param[in,out] RgSchDlRbAlloc *allocInfo
20491 * @param[in] uint8_t *idx
/*
 * After extra RBs were added to compensate for PBCH/PSS/SSS overlap,
 * lowers iTbs/IMCS for TB "idx" so that the TB size over the enlarged RB
 * count does not exceed the bytes originally requested: starting from the
 * current iTbs it steps down through rgTbSzTbl until the table value fits,
 * then writes back bytesReq, iTbs and the matching IMCS.
 */
20494 static Void rgSCHCmnNonDlfsPbchTbImcsAdj
20497 RgSchDlRbAlloc *allocInfo,
20502 uint8_t noLyrs = 0;
20504 uint32_t origBytesReq;
20505 uint8_t noRbgs = 0;
20507 RgSchDlSf *dlSf = allocInfo->dlSf;
20509 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
20510 noLyrs = allocInfo->tbInfo[idx].noLyr;
/* RA type 0 grants whole RBGs; derive the real RB count incl. the
 * last-RBG deficit.  Other RA types use rbsReq directly. */
20512 if((allocInfo->raType == RG_SCH_CMN_RA_TYPE0))
20514 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + dlSf->lstRbgDfct), cell->rbgSize);
20515 noRbs = (noRbgs * cell->rbgSize) - dlSf->lstRbgDfct;
20519 noRbs = allocInfo->rbsReq;
20522 /* This line will help in case if tbs is zero and reduction in MCS is not possible */
20523 if (allocInfo->rbsReq == 0 )
/* NOTE(review): baseline bytes use index [rbsReq - 1] (a parameter per the
 * doc header) while the comparisons below use [noRbs - 1] - confirm the
 * elided parameter list makes this intentional. */
20527 origBytesReq = rgTbSzTbl[noLyrs - 1][tbs][rbsReq - 1]/8;
20529 /* Find out the ITbs & Imcs by identifying first Highest
20530 number of bits compared to the original bytes allocated.*/
/* Only step down if even iTbs 0 would not already fit */
20533 if(((rgTbSzTbl[noLyrs - 1][0][noRbs - 1])/8) < origBytesReq)
20535 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyrs - 1], tbs);
20536 while(((rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1])/8) > origBytesReq)
20545 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1]/8;
20546 allocInfo->tbInfo[idx].iTbs = tbs;
20547 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
20552 /* Added funcion to adjust TBSize*/
20554 * @brief Function to adjust the tbsize in case of subframe 0 & 5 when
20555 * we were not able to do RB alloc adjustment by adding extra required Rbs
20559 * Function : rgSCHCmnNonDlfsPbchTbSizeAdj
20561 * Processing Steps:
20563 * @param[in,out] RgSchDlRbAlloc *allocInfo
20564 * @param[in] uint8_t numOvrlapgPbchRb
20565 * @param[in] uint8_t idx
20566 * @param[in] uint8_t pbchSsRsSym
/*
 * Fallback for subframes 0/5 when no extra RBs could be added: shrinks
 * TB "idx" instead.  Subtracts from bytesReq the capacity lost to the
 * numOvrlapgPbchRb RBs whose pbchSsRsSym symbols are reserved for
 * PBCH/PSS/SSS (6 bits per RE here), then steps iTbs down through
 * rgTbSzTbl to the largest entry not exceeding that reduced size and
 * writes back bytesReq, iTbs and the matching IMCS.
 */
20569 static Void rgSCHCmnNonDlfsPbchTbSizeAdj
20571 RgSchDlRbAlloc *allocInfo,
20572 uint8_t numOvrlapgPbchRb,
20573 uint8_t pbchSsRsSym,
20578 uint32_t reducedTbs = 0;
20579 uint8_t noLyrs = 0;
20582 noLyrs = allocInfo->tbInfo[idx].noLyr;
20584 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
/* Bits lost = overlapping RBs * reserved REs * 6 bits; /8 -> bytes */
20586 reducedTbs = bytesReq - (((uint32_t)numOvrlapgPbchRb * (uint32_t)pbchSsRsSym * 6)/8);
20588 /* find out the ITbs & Imcs by identifying first Highest
20589 number of bits compared with reduced bits considering the bits that are
20590 reserved for PBCH/PSS/SSS */
/* Only step down if even iTbs 0 would not already fit the reduced size */
20591 if(((rgTbSzTbl[noLyrs - 1][0][allocInfo->rbsReq - 1])/8) < reducedTbs)
20593 while(((rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1])/8) > reducedTbs)
20602 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1]/8;
20603 allocInfo->tbInfo[idx].iTbs = tbs;
20604 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
20609 /* Added this function to find num of ovrlapping PBCH rb*/
20611 * @brief Function to find out how many additional rbs are available
20612 * in the entire bw which can be allocated to a UE
20615 * Function : rgSCHCmnFindNumAddtlRbsAvl
20617 * Processing Steps:
20618 * - Calculates number of additinal rbs available
20620 * @param[in] RgSchCellCb *cell
20621 * @param[in] RgSchDlSf *dlSf
20622 * @param[in,out] RgSchDlRbAlloc *allocInfo
20623 * @param[out] uint8_t addtlRbsAvl
/*
 * Returns how many RBs beyond allocInfo->rbsReq are still free in this
 * subframe for the allocation's RA type: for type 0 the span between
 * type2End and type0End in RBG units (minus the last-RBG deficit), for
 * type 2 the unallocated remainder of the bandwidth.  Other RA types
 * yield 0.
 */
20626 static uint8_t rgSCHCmnFindNumAddtlRbsAvl(RgSchCellCb *cell,RgSchDlSf *dlSf,RgSchDlRbAlloc *allocInfo)
20628 uint8_t addtlRbsAvl = 0;
20629 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
20631 addtlRbsAvl = (((dlSf->type0End - dlSf->type2End + 1)*\
20632 cell->rbgSize) - dlSf->lstRbgDfct) - allocInfo->rbsReq;
20634 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20636 addtlRbsAvl = (dlSf->bw - dlSf->bwAlloced) - allocInfo->rbsReq;
20639 return (addtlRbsAvl);
20642 /* Added this function to find num of ovrlapping PBCH rb*/
20644 * @brief Function to find out how many of the requested RBs are
20645 * falling in the center 6 RBs of the downlink bandwidth.
20648 * Function : rgSCHCmnFindNumPbchOvrlapRbs
20650 * Processing Steps:
20651 * - Calculates number of overlapping rbs
20653 * @param[in] RgSchCellCb *cell
20654 * @param[in] RgSchDlSf *dlSf
20655 * @param[in,out] RgSchDlRbAlloc *allocInfo
20656 * @param[out] uint8_t* numOvrlapgPbchRb
/*
 * Computes how many of the RBs about to be allocated (contiguously from
 * dlSf->bwAlloced) fall inside the centre PBCH region
 * [cell->pbchRbStart, cell->pbchRbEnd].  Result (0..6) is written to
 * *numOvrlapgPbchRb; it is 0 when the allocation lies entirely outside
 * the region.
 */
20659 static Void rgSCHCmnFindNumPbchOvrlapRbs
20663 RgSchDlRbAlloc *allocInfo,
20664 uint8_t *numOvrlapgPbchRb
20667 *numOvrlapgPbchRb = 0;
20668 /*Find if we have already crossed the start boundary for PBCH 6 RBs,
20669 * if yes then lets find the number of RBs which are getting overlapped
20670 * with this allocation.*/
/* Case 1: allocation begins at or before the PBCH region start */
20671 if(dlSf->bwAlloced <= (cell->pbchRbStart))
20673 /*We have not crossed the start boundary of PBCH RBs. Now we need
20674 * to know that if take this allocation then how much PBCH RBs
20675 * are overlapping with this allocation.*/
20676 /* Find out the overlapping RBs in the centre 6 RBs */
20677 if((dlSf->bwAlloced + allocInfo->rbsReq) > cell->pbchRbStart)
20679 *numOvrlapgPbchRb = (dlSf->bwAlloced + allocInfo->rbsReq) - (cell->pbchRbStart);
/* The PBCH region is at most 6 RBs wide; clamp the overlap */
20680 if(*numOvrlapgPbchRb > 6)
20681 *numOvrlapgPbchRb = 6;
/* Case 2: allocation begins inside the PBCH region */
20684 else if ((dlSf->bwAlloced > (cell->pbchRbStart)) &&
20685 (dlSf->bwAlloced < (cell->pbchRbEnd)))
20687 /*We have already crossed the start boundary of PBCH RBs.We need to
20688 * find that if we take this allocation then how much of the RBs for
20689 * this allocation will overlap with PBCH RBs.*/
20690 /* Find out the overlapping RBs in the centre 6 RBs */
20691 if(dlSf->bwAlloced + allocInfo->rbsReq < (cell->pbchRbEnd))
20693 /*If we take this allocation then also we are not crossing the
20694 * end boundary of PBCH 6 RBs.*/
20695 *numOvrlapgPbchRb = allocInfo->rbsReq;
20699 /*If we take this allocation then we are crossing the
20700 * end boundary of PBCH 6 RBs.*/
20701 *numOvrlapgPbchRb = (cell->pbchRbEnd) - dlSf->bwAlloced;
20708 * @brief Performs RB allocation adjustment if the requested RBs are
20709 * falling in the center 6 RBs of the downlink bandwidth.
20712 * Function : rgSCHCmnNonDlfsPbchRbAllocAdj
20714 * Processing Steps:
20715 * - Allocate consecutively available RBs.
20717 * @param[in] RgSchCellCb *cell
20718 * @param[in,out] RgSchDlRbAlloc *allocInfo
20719 * @param[in] uint8_t pbchSsRsSym
/*
 * Compensates a non-DLFS allocation that overlaps the centre PBCH/PSS/SSS
 * RBs in subframes 0/5.  Strategy, in order:
 *   1. Compute the REs lost in the overlapping RBs and request that many
 *      additional RBs (addtlRbsReq, rounded up).
 *   2. If those extra RBs themselves re-enter the PBCH region, request a
 *      second round (moreAddtlRbsReq).
 *   3. If no extra RBs could be added at all, keep the RB count and shrink
 *      the TB size instead (rgSCHCmnNonDlfsPbchTbSizeAdj).
 *   4. If only some could be added, first re-fit IMCS to the original RB
 *      request (rgSCHCmnNonDlfsPbchTbImcsAdj) and then shrink the TB for
 *      the remaining uncompensated overlap.
 * isBcchPcch (last param, TRUE for BCCH/PCCH) suppresses the adjustment
 * for the fixed IMCS-6/NPrb=1 case.
 */
20722 static Void rgSCHCmnNonDlfsPbchRbAllocAdj
20725 RgSchDlRbAlloc *allocInfo,
20726 uint8_t pbchSsRsSym,
20730 RgSchDlSf *dlSf = allocInfo->dlSf;
20731 uint8_t numOvrlapgPbchRb = 0;
20732 uint8_t numOvrlapgAdtlPbchRb = 0;
20734 uint8_t addtlRbsReq = 0;
20735 uint8_t moreAddtlRbsReq = 0;
20736 uint8_t addtlRbsAdd = 0;
20737 uint8_t moreAddtlRbsAdd = 0;
20739 uint8_t origRbsReq = 0;
20747 origRbsReq = allocInfo->rbsReq;
20748 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
20750 totSym = (cell->isCpDlExtend) ? RGSCH_TOT_NUM_SYM_EXTCP : RGSCH_TOT_NUM_SYM_NORCP;
20752 /* Additional RBs are allocated by considering the loss due to
20753 the reserved symbols for CFICH, PBCH, PSS, SSS and cell specific RS */
/* addtlRbsReq = ceil(overlapRBs * reservedSym / totalSym) */
20755 divResult = (numOvrlapgPbchRb * pbchSsRsSym)/totSym;
20756 if((numOvrlapgPbchRb * pbchSsRsSym) % totSym)
20760 addtlRbsReq = divResult;
20762 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, addtlRbsReq, addtlRbsAdd)
20764 /*Now RBs requires is original requested RBs + these additional RBs to make
20765 * up for PSS/SSS/BCCH.*/
20766 allocInfo->rbsReq = allocInfo->rbsReq + addtlRbsAdd
20768 /*Check if with these additional RBs we have taken up, these are also falling
20769 * under PBCH RBs range, if yes then we would need to account for
20770 * PSS/BSS/BCCH for these additional RBs too.*/
20771 if(addtlRbsAdd && ((dlSf->bwAlloced + allocInfo->rbsReq - addtlRbsAdd) < (cell->pbchRbEnd)))
20773 if((dlSf->bwAlloced + allocInfo->rbsReq) <= (cell->pbchRbEnd))
20775 /*With additional RBs taken into account, we are not crossing the
20776 * PBCH RB end boundary.Thus here we need to account just for
20777 * overlapping PBCH RBs for these additonal RBs.*/
20778 divResult = (addtlRbsAdd * pbchSsRsSym)/totSym;
20779 if((addtlRbsAdd * pbchSsRsSym) % totSym)
20784 moreAddtlRbsReq = divResult;
20786 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
20788 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
20793 /*Here we have crossed the PBCH RB end boundary, thus we need to take
20794 * into account the overlapping RBs for additional RBs which will be
20795 * subset of addtlRbs.*/
20796 numOvrlapgAdtlPbchRb = (cell->pbchRbEnd) - ((dlSf->bwAlloced + allocInfo->rbsReq) - addtlRbsAdd);
20798 divResult = (numOvrlapgAdtlPbchRb * pbchSsRsSym)/totSym;
20799 if((numOvrlapgAdtlPbchRb * pbchSsRsSym) % totSym)
20804 moreAddtlRbsReq = divResult;
20806 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
20808 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
20811 if (isBcchPcch == TRUE)
20816 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
20819 /* This case might be for Imcs value 6 and NPrb = 1 case - Not
20820 Adjusting either RBs or Imcs or Bytes Allocated */
/* Undo the compensation entirely: restore the original RB request */
20821 allocInfo->rbsReq = allocInfo->rbsReq - addtlRbsAdd - moreAddtlRbsAdd;
20823 else if(tbs && ((0 == addtlRbsAdd) && (moreAddtlRbsAdd == 0)))
20825 /*In case of a situation where we the entire bandwidth is already occupied
20826 * and we dont have room to add additional Rbs then in order to decrease the
20827 * code rate we reduce the tbsize such that we reduce the present calculated
20828 * tbsize by number of bytes that would be occupied by PBCH/PSS/SSS in overlapping
20829 * rbs and find the nearest tbsize which would be less than this deduced value*/
20831 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
20833 noLyr = allocInfo->tbInfo[0].noLyr;
20834 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyr - 1], tbs);
20835 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
20837 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,bytesReq);
20839 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20841 noLyr = allocInfo->tbInfo[1].noLyr;
20842 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
20843 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,bytesReq);
20847 else if(tbs && ((addtlRbsAdd != addtlRbsReq) ||
20848 (addtlRbsAdd && (moreAddtlRbsReq != moreAddtlRbsAdd))))
20850 /*In case of a situation where we were not able to add required number of
20851 * additional RBs then we adjust the Imcs based on original RBs requested.
20852 * Doing this would comensate for the few extra Rbs we have added but inorder
20853 * to comensate for number of RBS we couldnt add we again do the TBSize adjustment*/
20855 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
20857 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20859 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
/* Only the still-uncompensated overlap feeds the TB-size shrink below */
20862 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
20863 numOvrlapgPbchRb = numOvrlapgPbchRb - (addtlRbsAdd + moreAddtlRbsAdd);
20865 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,allocInfo->tbInfo[0].bytesReq);
20867 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20869 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,allocInfo->tbInfo[1].bytesReq);
20875 /*We hit this code when we were able to add the required additional RBS
20876 * hence we should adjust the IMcs based on orignals RBs requested*/
20878 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
20880 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
20882 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
20887 } /* end of rgSCHCmnNonDlfsPbchRbAllocAdj */
20891 * @brief Performs RB allocation for frequency non-selective cell.
20895 * Function : rgSCHCmnNonDlfsCmnRbAlloc
20897 * Processing Steps:
20898 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
20900 * @param[in] RgSchCellCb *cell
20901 * @param[in, out] RgSchDlRbAlloc *allocInfo
20906 static S16 rgSCHCmnNonDlfsCmnRbAlloc(RgSchCellCb *cell,RgSchDlRbAlloc *allocInfo)
20911 uint8_t pbchSsRsSym = 0;
20912 uint8_t pbchFrame = 0;
20914 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
20916 RgSchDlSf *dlSf = allocInfo->dlSf;
20918 uint8_t rbStart = 0;
20919 uint8_t spsRbsAlloc = 0;
20920 RgSchDlSfAllocInfo *dlSfAlloc = &allocInfo->dlSf->dlSfAllocInfo;
20923 allocInfo->tbInfo[0].noLyr = 1;
20926 /* Note: Initialize the masks to 0, this might not be needed since alloInfo
20927 * is initialized to 0 at the beginning of allcoation */
20928 allocInfo->resAllocInfo.raType0Mask = 0;
20929 memset(allocInfo->resAllocInfo.raType1Mask, 0,
20930 RG_SCH_NUM_RATYPE1_32BIT_MASK * sizeof (uint32_t));
20931 memset(allocInfo->resAllocInfo.raType2Mask, 0,
20932 RG_SCH_NUM_RATYPE2_32BIT_MASK * sizeof (uint32_t));
20934 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
20935 (dlSf->bwAlloced == dlSf->bw))
20937 if(dlSf->bwAlloced == dlSf->bw)
20943 if (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced))
20946 if ((allocInfo->tbInfo[0].imcs < 29) && (dlSf->bwAlloced < dlSf->bw))
20948 if(allocInfo->tbInfo[0].imcs < 29)
20951 /* set the remaining RBs for the requested UE */
20952 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
20953 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
20954 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[0][tbs][allocInfo->rbsReq - 1]/8;
20959 /* Attempt RA Type 2 allocation in SPS Bandwidth */
20960 if (dlSf->spsAllocdBw < cell->spsBwRbgInfo.numRbs)
20963 rgSCHCmnDlRaType2Alloc(dlSfAlloc,
20964 allocInfo->rbsReq, &cell->spsBwRbgInfo, &rbStart,
20965 &allocInfo->resAllocInfo, FALSE);
20966 /* rbsAlloc assignment moved from line 16671 to here to avoid
20967 * compilation error. Recheck */
20968 dlSf->spsAllocdBw += spsRbsAlloc;
20971 #endif /* LTEMAC_SPS */
20979 /* Update allocation information */
20980 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
20981 if (allocInfo->pdcch == NULLP)
20985 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
20986 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
20987 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
20988 allocInfo->allocInfo.raType2.isLocal = TRUE;
20992 allocInfo->allocInfo.raType2.rbStart = rbStart;
20993 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
20994 allocInfo->rbsAlloc = allocInfo->rbsReq;
21005 if(!(dlSf->sfNum == 5))
21007 /* case for subframes 1 to 9 except 5 */
21009 allocInfo->allocInfo.raType2.rbStart = rbStart;
21011 /*Fix for ccpu00123918*/
21012 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21017 pbchFrame = 1; /* case for subframe 5 */
21018 /* In subframe 5, symbols are reserved for PSS and SSS and CFICH
21019 and Cell Specific Reference Signals */
21020 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PSS_SSS_SYM) *
21021 RGSCH_NUM_SC_IN_RB + cell->numCellRSPerSf);
21027 /* In subframe 0, symbols are reserved for PSS, SSS, PBCH, CFICH and
21028 and Cell Specific Reference signals */
21029 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PBCH_SYM +
21030 RGSCH_NUM_PSS_SSS_SYM) * RGSCH_NUM_SC_IN_RB +
21031 cell->numCellRSPerSf);
21032 } /* end of outer else */
21035 (((dlSf->bwAlloced + allocInfo->rbsReq) - cell->pbchRbStart) > 0)&&
21036 (dlSf->bwAlloced < cell->pbchRbEnd))
21038 if(allocInfo->tbInfo[0].imcs < 29)
21040 rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo, pbchSsRsSym, TRUE);
21052 /*Fix for ccpu00123918*/
21053 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21054 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21055 allocInfo->rbsAlloc = allocInfo->rbsReq;
21057 /* LTE_ADV_FLAG_REMOVED_START */
21059 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
21061 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
21062 allocInfo->allocInfo.raType2.rbStart, \
21063 allocInfo->allocInfo.raType2.numRb);
21068 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
21069 allocInfo->allocInfo.raType2.rbStart, \
21070 allocInfo->allocInfo.raType2.numRb);
21076 /* LTE_ADV_FLAG_REMOVED_END */
21077 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21084 /* Update type 0, 1 and 2 masks */
21085 dlSfAlloc->raType0Mask |= allocInfo->resAllocInfo.raType0Mask;
21086 #ifdef RGSCH_SPS_UNUSED
21087 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
21089 dlSfAlloc->raType1Mask[idx] |=
21090 allocInfo->resAllocInfo.raType1Mask[idx];
21091 dlSfAlloc->raType1UsedRbs[idx] +=
21092 allocInfo->resAllocInfo.raType1UsedRbs[idx];
21095 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
21097 dlSfAlloc->raType2Mask[idx] |=
21098 allocInfo->resAllocInfo.raType2Mask[idx];
21108 * @brief Performs RB allocation for frequency non-selective cell.
21112 * Function : rgSCHCmnNonDlfsCmnRbAllocRar
21114 * Processing Steps:
21115 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
21117 * @param[in] RgSchCellCb *cell
21118 * @param[in, out] RgSchDlRbAlloc *allocInfo
21123 static S16 rgSCHCmnNonDlfsCmnRbAllocRar(RgSchCellCb *cell,RgSchDlRbAlloc *allocInfo)
21125 RgSchDlSf *dlSf = allocInfo->dlSf;
21127 if(dlSf->bwAlloced == dlSf->bw)
21132 allocInfo->tbInfo[0].noLyr = 1;
21134 /* Update allocation information */
21135 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
21136 if (allocInfo->pdcch == NULLP)
21140 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
21141 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
21142 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
21143 allocInfo->allocInfo.raType2.isLocal = TRUE;
21145 /*Fix for ccpu00123918*/
21146 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21147 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21148 allocInfo->rbsAlloc = allocInfo->rbsReq;
21150 /* LTE_ADV_FLAG_REMOVED_END */
21151 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21154 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, NULLP, dlSf, 13, TFU_DCI_FORMAT_B1, FALSE);
21155 if (allocInfo->pdcch == NULLP)
21159 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
21160 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
21162 DU_LOG("\nINFO --> SCH : 5GTF_ERROR vrbg allocated > 25\n");
21166 allocInfo->tbInfo[0].cmnGrnt.vrbgStart = beamInfo->vrbgStart;
21167 allocInfo->tbInfo[0].cmnGrnt.numVrbg = allocInfo->vrbgReq;
21169 /* Update allocation information */
21170 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
21172 allocInfo->tbInfo[0].cmnGrnt.xPDSCHRange = 1;
21173 allocInfo->tbInfo[0].cmnGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
21174 allocInfo->tbInfo[0].cmnGrnt.vrbgStart, allocInfo->tbInfo[0].cmnGrnt.numVrbg);
21176 allocInfo->tbInfo[0].cmnGrnt.rbStrt = (allocInfo->tbInfo[0].cmnGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
21177 allocInfo->tbInfo[0].cmnGrnt.numRb = (allocInfo->tbInfo[0].cmnGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
21179 beamInfo->vrbgStart += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
21180 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
21181 allocInfo->tbInfo[0].cmnGrnt.rv = 0;
21182 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21185 DU_LOG("\nINFO --> SCH : [%s],allocInfo->tbInfo[0].bytesAlloc:%u,vrbgReq:%u\n",
21186 __func__,allocInfo->tbInfo[0].bytesAlloc,allocInfo->vrbgReq);
21192 /* LTE_ADV_FLAG_REMOVED_START */
21195 * @brief To check if DL BW available for non-DLFS allocation.
21199 * Function : rgSCHCmnNonDlfsSFRBwAvlbl
21201 * Processing Steps:
21202 * - Determine availability based on RA Type.
21204 * @param[in] RgSchCellCb *cell
21205 * @param[in] RgSchDlSf *dlSf
21206 * @param[in] RgSchDlRbAlloc *allocInfo
21213 static Bool rgSCHCmnNonDlfsSFRBwAvlbl
21216 RgSchSFRPoolInfo **sfrpoolInfo,
21218 RgSchDlRbAlloc *allocInfo,
21226 RgSchSFRPoolInfo *sfrPool;
21227 RgSchSFRPoolInfo *sfrCEPool;
21231 RgSchSFRPoolInfo *poolWithMaxAvlblBw = NULLP;
21232 uint32_t bwAvlbl = 0;
21233 uint32_t addtnlPRBs = 0;
21235 if (dlSf->bw <= dlSf->bwAlloced)
21237 DU_LOG("\nERROR --> SCH : BW is fully allocated for subframe (%d) CRNTI:%d", dlSf->sfNum,allocInfo->rnti);
21241 if (dlSf->sfrTotalPoolInfo.ccBwFull == TRUE)
21243 DU_LOG("\nERROR --> SCH : BW is fully allocated for CC Pool CRNTI:%d",allocInfo->rnti);
21247 if ((dlSf->sfrTotalPoolInfo.ceBwFull == TRUE) && (isUeCellEdge))
21249 DU_LOG("\nERROR --> SCH : BW is fully allocated for CE Pool CRNTI:%d",allocInfo->rnti);
21253 /* We first check if the ue scheduled is a cell edge or cell centre and accordingly check the avaialble
21254 memory in their pool. If the cell centre UE doesnt have Bw available in its pool, then it will check
21255 Bw availability in cell edge pool but the other way around is NOT possible. */
21258 l = &dlSf->sfrTotalPoolInfo.cePool;
21262 l = &dlSf->sfrTotalPoolInfo.ccPool;
21265 n = cmLListFirst(l);
21269 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
21271 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21273 /* MS_FIX for ccpu00123919 : Number of RBs in case of RETX should be same as that of initial transmission. */
21274 if(allocInfo->tbInfo[0].tbCb->txCntr)
21276 /* If RB assignment is being done for RETX. Then if reqRbs are a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
21277 * not a multiple of rbgSize then check if lsgRbgDfct exists */
21278 if (allocInfo->rbsReq % cell->rbgSize == 0)
21280 if ((sfrPool->type2End == dlSf->type2End) && dlSf->lstRbgDfct)
21282 /* In this scenario we are wasting the last RBG for this dlSf */
21283 sfrPool->type0End--;
21284 sfrPool->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21286 dlSf->lstRbgDfct = 0;
21288 /*ABHINAV To check if these variables need to be taken care of*/
21290 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21295 if (dlSf->lstRbgDfct)
21297 /* Check if type0 allocation can cater to this RETX requirement */
21298 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
21304 if (sfrPool->type2End != dlSf->type2End) /*Search again for some pool which has the END RBG of the BandWidth*/
21312 /* cannot allocate same number of required RBs */
21318 /*rg002.301 ccpu00120391 MOD condition is modified approprialtely to find if rbsReq is less than available RBS*/
21319 if(allocInfo->rbsReq <= (((sfrPool->type0End - sfrPool->type2End + 1)*\
21320 cell->rbgSize) - dlSf->lstRbgDfct))
21322 *sfrpoolInfo = sfrPool;
21327 if (sfrPool->bw <= sfrPool->bwAlloced + cell->rbgSize)
21329 n = cmLListNext(l);
21330 /* If the ue is cell centre then it will simply check the memory available in next pool.
21331 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
21333 if((!isUeCellEdge) && (!n->node))
21335 l = &dlSf->sfrTotalPoolInfo.cePool;
21336 n = cmLListFirst(l);
21342 /* MS_FIX: Number of RBs in case of RETX should be same as that of initial transmission */
21343 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21345 /*rg002.301 ccpu00120391 MOD setting the remaining RBs for the requested UE*/
21346 allocInfo->rbsReq = (((sfrPool->type0End - sfrPool->type2End + 1)*\
21347 cell->rbgSize) - dlSf->lstRbgDfct);
21348 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21349 noLyrs = allocInfo->tbInfo[0].noLyr;
21350 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21351 *sfrpoolInfo = sfrPool;
21356 n = cmLListNext(l);
21358 /* If the ue is cell centre then it will simply check the memory available in next pool.
21359 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
21360 if((!isUeCellEdge) && (!n->node))
21362 l = &dlSf->sfrTotalPoolInfo.cePool;
21363 n = cmLListFirst(l);
21372 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
21374 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21375 /* This is a Case where a UE was CC and had more RBs allocated than present in CE pool.
21376 In case this UE whn become CE with retx going on, then BW is not sufficient for Retx */
21377 if ((isUeCellEdge) &&
21378 (allocInfo->tbInfo[0].tbCb->txCntr != 0))
21380 if(allocInfo->rbsReq > (sfrPool->bw - sfrPool->bwAlloced))
21382 /* Adjust CE BW such that Retx alloc is successful */
21383 /* Check if merging CE with adjacent CC pool will be sufficient to process Retx */
21385 /* If no Type 0 allocations are made from this pool */
21386 if (sfrPool->type0End == (((sfrPool->poolendRB + 1) / cell->rbgSize) - 1))
21388 if (sfrPool->adjCCPool &&
21389 (sfrPool->adjCCPool->type2Start == sfrPool->poolendRB + 1) &&
21390 (allocInfo->rbsReq <= ((sfrPool->bw - sfrPool->bwAlloced) +
21391 ((sfrPool->adjCCPool->bw - sfrPool->adjCCPool->bwAlloced)))))
21393 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
21395 /* Adjusting CE Pool Info */
21396 sfrPool->bw += addtnlPRBs;
21397 sfrPool->type0End = ((sfrPool->poolendRB + addtnlPRBs + 1) /
21398 cell->rbgSize) - 1;
21400 /* Adjusting CC Pool Info */
21401 sfrPool->adjCCPool->type2Start += addtnlPRBs;
21402 sfrPool->adjCCPool->type2End = RGSCH_CEIL(sfrPool->adjCCPool->type2Start,
21404 sfrPool->adjCCPool->bw -= addtnlPRBs;
21405 *sfrpoolInfo = sfrPool;
21412 /* Check if CC pool is one of the following:
21413 * 1. |CE| + |CC "CCPool2Exists" = TRUE|
21414 * 2. |CC "CCPool2Exists" = FALSE| + |CE| + |CC "CCPool2Exists" = TRUE|
21416 if(TRUE == sfrPool->CCPool2Exists)
21418 l1 = &dlSf->sfrTotalPoolInfo.cePool;
21419 n1 = cmLListFirst(l1);
21420 sfrCEPool = (RgSchSFRPoolInfo*)(n1->node);
21421 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced))
21423 *sfrpoolInfo = sfrCEPool;
21426 else if(allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
21428 *sfrpoolInfo = sfrPool;
21431 /* Check if CE and CC boundary has unallocated prbs */
21432 else if ((sfrPool->poolstartRB == sfrPool->type2Start) &&
21433 (sfrCEPool->type0End == ((sfrCEPool->poolendRB + 1) / cell->rbgSize) - 1))
21435 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced) +
21436 (sfrPool->bw - sfrPool->bwAlloced))
21438 /* Checking if BW can be allocated partly from CE pool and partly
21441 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
21442 /* Updating CE and CC type2 parametrs based on the RBs allocated
21443 * from these pools*/
21444 sfrPool->type2Start -= addtnlPRBs;
21445 sfrPool->type2End = RGSCH_CEIL(sfrPool->type2Start, cell->rbgSize);
21446 sfrPool->bw += addtnlPRBs;
21447 if (addtnlPRBs == (sfrCEPool->bw - sfrCEPool->bwAlloced))
21449 sfrCEPool->bwAlloced = sfrCEPool->bw;
21450 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21454 sfrCEPool->bw -= addtnlPRBs;
21455 sfrCEPool->type0End = ((sfrCEPool->poolendRB + 1 - addtnlPRBs) / cell->rbgSize) - 1;
21457 *sfrpoolInfo = sfrPool;
21460 else if ( bwAvlbl <
21461 ((sfrCEPool->bw - sfrCEPool->bwAlloced) +
21462 (sfrPool->bw - sfrPool->bwAlloced)))
21464 /* All the Prbs from CE BW shall be allocated */
21465 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21467 sfrPool->type2Start = sfrCEPool->type2Start;
21468 sfrPool->bw += sfrCEPool->bw - sfrCEPool->bwAlloced;
21469 sfrCEPool->type2Start = sfrCEPool->poolendRB + 1;
21470 sfrCEPool->bwAlloced = sfrCEPool->bw;
21471 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21473 /* set the remaining RBs for the requested UE */
21474 allocInfo->rbsReq = (sfrPool->bw - sfrPool->bwAlloced);
21475 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21476 noLyrs = allocInfo->tbInfo[0].noLyr;
21477 allocInfo->tbInfo[0].bytesReq =
21478 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21479 *sfrpoolInfo = sfrPool;
21490 /* Checking if no. of RBs required can be allocated from
21492 * 1. If available return the SFR pool.
21493 * 2. Else update the RBs required parameter based on the
21494 * BW available in the pool
21495 * 3. Return FALSE if no B/W is available.
21497 if (allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
21499 *sfrpoolInfo = sfrPool;
21504 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21506 if (bwAvlbl < sfrPool->bw - sfrPool->bwAlloced)
21510 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21512 bwAvlbl = sfrPool->bw - sfrPool->bwAlloced;
21513 poolWithMaxAvlblBw = sfrPool;
21515 n = cmLListNext(l);
21517 if ((isUeCellEdge == FALSE) && (n == NULLP))
21519 if(l != &dlSf->sfrTotalPoolInfo.cePool)
21521 l = &dlSf->sfrTotalPoolInfo.cePool;
21522 n = cmLListFirst(l);
21532 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21536 dlSf->sfrTotalPoolInfo.ccBwFull = TRUE;
21542 /* set the remaining RBs for the requested UE */
21543 allocInfo->rbsReq = poolWithMaxAvlblBw->bw -
21544 poolWithMaxAvlblBw->bwAlloced;
21545 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21546 noLyrs = allocInfo->tbInfo[0].noLyr;
21547 allocInfo->tbInfo[0].bytesReq =
21548 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21549 *sfrpoolInfo = poolWithMaxAvlblBw;
21556 n = cmLListNext(l);
21558 if ((isUeCellEdge == FALSE) && (n == NULLP))
21560 if(l != &dlSf->sfrTotalPoolInfo.cePool)
21562 l = &dlSf->sfrTotalPoolInfo.cePool;
21563 n = cmLListFirst(l);
21579 #endif /* end of ifndef LTE_TDD*/
21580 /* LTE_ADV_FLAG_REMOVED_END */
21583 * @brief To check if DL BW available for non-DLFS allocation.
21587 * Function : rgSCHCmnNonDlfsBwAvlbl
21589 * Processing Steps:
21590 * - Determine availability based on RA Type.
21592 * @param[in] RgSchCellCb *cell
21593 * @param[in] RgSchDlSf *dlSf
21594 * @param[in] RgSchDlRbAlloc *allocInfo
21601 static Bool rgSCHCmnNonDlfsBwAvlbl
21605 RgSchDlRbAlloc *allocInfo
21610 uint8_t ignoredDfctRbg = FALSE;
21612 if (dlSf->bw <= dlSf->bwAlloced)
21614 DU_LOG("\nERROR --> SCH : (%d:%d)FAILED CRNTI:%d",
21615 dlSf->bw, dlSf->bwAlloced,allocInfo->rnti);
21618 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
21620 /* Fix for ccpu00123919 : Number of RBs in case of RETX should be same as
21621 * that of initial transmission. */
21622 if(allocInfo->tbInfo[0].tbCb->txCntr)
21624 /* If RB assignment is being done for RETX. Then if reqRbs are
21625 * a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
21626 * not a multiple of rbgSize then check if lsgRbgDfct exists */
21627 if (allocInfo->rbsReq % cell->rbgSize == 0)
21629 if (dlSf->lstRbgDfct)
21631 /* In this scenario we are wasting the last RBG for this dlSf */
21634 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21635 /* Fix: MUE_PERTTI_DL */
21636 dlSf->lstRbgDfct = 0;
21637 ignoredDfctRbg = TRUE;
21643 if (dlSf->lstRbgDfct)
21645 /* Check if type0 allocation can cater to this RETX requirement */
21646 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
21653 /* cannot allocate same number of required RBs */
21659 /* Condition is modified approprialtely to find
21660 * if rbsReq is less than available RBS*/
21661 if(allocInfo->rbsReq <= (((dlSf->type0End - dlSf->type2End + 1)*\
21662 cell->rbgSize) - dlSf->lstRbgDfct))
21666 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
21667 * allocation in TDD when requested RBs are more than available RBs*/
21670 /* MS_WORKAROUND for ccpu00122022 */
21671 if (dlSf->bw < dlSf->bwAlloced + cell->rbgSize)
21673 /* ccpu00132358- Re-assigning the values which were updated above
21674 * if it is RETX and Last RBG available*/
21675 if(ignoredDfctRbg == TRUE)
21678 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
21679 dlSf->lstRbgDfct = 1;
21685 /* Fix: Number of RBs in case of RETX should be same as
21686 * that of initial transmission. */
21687 if(allocInfo->tbInfo[0].tbCb->txCntr == 0
21689 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
21693 /* Setting the remaining RBs for the requested UE*/
21694 allocInfo->rbsReq = (((dlSf->type0End - dlSf->type2End + 1)*\
21695 cell->rbgSize) - dlSf->lstRbgDfct);
21696 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21697 noLyrs = allocInfo->tbInfo[0].noLyr;
21698 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21699 /* DwPts Scheduling Changes Start */
21701 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
21703 allocInfo->tbInfo[0].bytesReq =
21704 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
21707 /* DwPts Scheduling Changes End */
21711 /* ccpu00132358- Re-assigning the values which were updated above
21712 * if it is RETX and Last RBG available*/
21713 if(ignoredDfctRbg == TRUE)
21716 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
21717 dlSf->lstRbgDfct = 1;
21720 DU_LOG("\nERROR --> SCH : FAILED for CRNTI:%d",
21722 DU_LOG("\nERROR --> SCH : RB Alloc failed for LAA TB type 0\n");
21728 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
21730 if (allocInfo->rbsReq <= (dlSf->bw - dlSf->bwAlloced))
21734 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
21735 * allocation in TDD when requested RBs are more than available RBs*/
21738 /* Fix: Number of RBs in case of RETX should be same as
21739 * that of initial transmission. */
21740 if((allocInfo->tbInfo[0].tbCb->txCntr == 0)
21742 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
21746 /* set the remaining RBs for the requested UE */
21747 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
21748 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21749 noLyrs = allocInfo->tbInfo[0].noLyr;
21750 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21751 /* DwPts Scheduling Changes Start */
21753 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
21755 allocInfo->tbInfo[0].bytesReq =
21756 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
21759 /* DwPts Scheduling Changes End */
21763 DU_LOG("\nERROR --> SCH : RB Alloc failed for LAA TB type 2\n");
21764 DU_LOG("\nERROR --> SCH : FAILED for CRNTI:%d",allocInfo->rnti);
21767 /* Fix: Number of RBs in case of RETX should be same as
21768 * that of initial transmission. */
21772 DU_LOG("\nERROR --> SCH : FAILED for CRNTI:%d",allocInfo->rnti);
21776 /* LTE_ADV_FLAG_REMOVED_START */
21779 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21783 * Function : rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
21785 * Processing Steps:
21787 * @param[in] RgSchCellCb *cell
21788 * @param[in] RgSchDlSf *dlSf
21789 * @param[in] uint8_t rbStrt
21790 * @param[in] uint8_t numRb
21794 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
21804 RgSchSFRPoolInfo *sfrPool;
21806 l = &dlSf->sfrTotalPoolInfo.ccPool;
21808 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21809 dlSf->bwAlloced += numRb;
21810 dlSf->type2Start += numRb;
21811 n = cmLListFirst(l);
21815 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21816 n = cmLListNext(l);
21818 /* If the pool contains some RBs allocated in this allocation, e.g: Pool is [30.50]. Pool->type2Start is 40 , dlSf->type2Start is 45. then update the variables in pool */
21819 if((sfrPool->poolendRB >= dlSf->type2Start) && (sfrPool->type2Start < dlSf->type2Start))
21821 sfrPool->type2End = dlSf->type2End;
21822 sfrPool->bwAlloced = dlSf->type2Start - sfrPool->poolstartRB;
21823 sfrPool->type2Start = dlSf->type2Start;
21827 /* If the pool contains all RBs allocated in this allocation*/
21828 if(dlSf->type2Start > sfrPool->poolendRB)
21830 sfrPool->type2End = sfrPool->type0End + 1;
21831 sfrPool->bwAlloced = sfrPool->bw;
21832 sfrPool->type2Start = sfrPool->poolendRB + 1;
21837 if (l != &dlSf->sfrTotalPoolInfo.cePool)
21839 l = &dlSf->sfrTotalPoolInfo.cePool;
21840 n = cmLListFirst(l);
21850 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21854 * Function : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
21856 * Processing Steps:
21858 * @param[in] RgSchCellCb *cell
21859 * @param[in] RgSchDlSf *dlSf
21860 * @param[in] uint8_t rbStrt
21861 * @param[in] uint8_t numRb
21866 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
21877 RgSchSFRPoolInfo *sfrCCPool1 = NULL;
21878 RgSchSFRPoolInfo *sfrCCPool2 = NULL;
21881 /* Move the type2End pivot forward */
21884 l = &dlSf->sfrTotalPoolInfo.ccPool;
21885 n = cmLListFirst(l);
21888 sfrCCPool1 = (RgSchSFRPoolInfo*)(n->node);
21890 if (sfrCCPool1 == NULLP)
21892 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21893 "sfrCCPool1 is NULL for CRNTI:%d",ue->ueId);
21896 n = cmLListNext(l);
21899 sfrCCPool2 = (RgSchSFRPoolInfo*)(n->node);
21900 n = cmLListNext(l);
21902 if((sfrCCPool1) && (sfrCCPool2))
21904 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
21905 if(((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
21906 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb)) ||
21907 ((dlSf->type2Start >= sfrCCPool2->pwrHiCCRange.startRb) &&
21908 (dlSf->type2Start + numRb < sfrCCPool2->pwrHiCCRange.endRb)))
21910 ue->lteAdvUeCb.isCCUePHigh = TRUE;
21912 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
21913 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
21916 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21917 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
21924 if((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
21925 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb))
21927 ue->lteAdvUeCb.isCCUePHigh = TRUE;
21929 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
21930 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
21933 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21934 "rgSCHCmnBuildRntpInfo() function returned RFAILED CRNTI:%d",ue->ueId);
21940 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21942 dlSf->bwAlloced += numRb;
21943 /*MS_FIX for ccpu00123918*/
21944 dlSf->type2Start += numRb;
21950 #endif /* end of ifndef LTE_TDD*/
21951 /* LTE_ADV_FLAG_REMOVED_END */
21953 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21957 * Function : rgSCHCmnNonDlfsUpdTyp2Alloc
21959 * Processing Steps:
21961 * @param[in] RgSchCellCb *cell
21962 * @param[in] RgSchDlSf *dlSf
21963 * @param[in] uint8_t rbStrt
21964 * @param[in] uint8_t numRb
21968 static Void rgSCHCmnNonDlfsUpdTyp2Alloc
21976 /* Move the type2End pivot forward */
21977 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21978 //#ifndef LTEMAC_SPS
21979 dlSf->bwAlloced += numRb;
21980 /*Fix for ccpu00123918*/
21981 dlSf->type2Start += numRb;
21987 * @brief To do DL allocation using TYPE0 RA.
21991 * Function : rgSCHCmnNonDlfsType0Alloc
21993 * Processing Steps:
21994 * - Perform TYPE0 allocation using the RBGs between
21995 * type0End and type2End.
21996 * - Build the allocation mask as per RBG positioning.
21997 * - Update the allocation parameters.
21999 * @param[in] RgSchCellCb *cell
22000 * @param[in] RgSchDlSf *dlSf
22001 * @param[in] RgSchDlRbAlloc *allocInfo
22006 static Void rgSCHCmnNonDlfsType0Alloc
22010 RgSchDlRbAlloc *allocInfo,
22014 uint32_t dlAllocMsk = 0;
22015 uint8_t rbgFiller = dlSf->lstRbgDfct;
22016 uint8_t noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
22017 //uint8_t noRbgs = (allocInfo->rbsReq + rbgFiller)/ cell->rbgSize;
22021 uint32_t tb1BytesAlloc = 0;
22022 uint32_t tb2BytesAlloc = 0;
22023 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22025 //if(noRbgs == 0) noRbgs = 1; /* Not required as ceilling is used above*/
22027 /* Fix for ccpu00123919*/
22028 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22029 if (dlSf->bwAlloced + noRbs > dlSf->bw)
22035 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22038 /* Fix for ccpu00138701: Ceilling is using to derive num of RBGs, Therefore,
22039 * after this operation,checking Max TB size and Max RBs are not crossed
22040 * if it is crossed then decrement num of RBGs. */
22041 //if((noRbs + rbgFiller) % cell->rbgSize)
22042 if((noRbs > allocInfo->rbsReq) &&
22043 (allocInfo->rbsReq + rbgFiller) % cell->rbgSize)
22044 {/* considering ue category limitation
22045 * due to ceiling */
22048 if (rgSCHLaaIsLaaTB(allocInfo)== FALSE)
22051 if ((allocInfo->tbInfo[0].schdlngForTb) && (!allocInfo->tbInfo[0].tbCb->txCntr))
22053 iTbs = allocInfo->tbInfo[0].iTbs;
22054 noLyr = allocInfo->tbInfo[0].noLyr;
22055 tb1BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22058 if ((allocInfo->tbInfo[1].schdlngForTb) && (!allocInfo->tbInfo[1].tbCb->txCntr))
22060 iTbs = allocInfo->tbInfo[1].iTbs;
22061 noLyr = allocInfo->tbInfo[1].noLyr;
22062 tb2BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22066 /* Only Check for New Tx No need for Retx */
22067 if (tb1BytesAlloc || tb2BytesAlloc)
22069 if (( ue->dl.aggTbBits >= dlUe->maxTbBits) ||
22070 (tb1BytesAlloc >= dlUe->maxTbSz/8) ||
22071 (tb2BytesAlloc >= dlUe->maxTbSz/8) ||
22072 (noRbs >= dlUe->maxRb))
22078 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22082 /* type0End would have been initially (during subfrm Init) at the bit position
22083 * (cell->noOfRbgs - 1), 0 being the most significant.
22084 * Getting DlAllocMsk for noRbgs and at the appropriate position */
22085 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - dlSf->type0End));
22086 /* Move backwards the type0End pivot */
22087 dlSf->type0End -= noRbgs;
22088 /*Fix for ccpu00123919*/
22089 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
22090 /* Update the bwAlloced field accordingly */
22091 //#ifndef LTEMAC_SPS /* ccpu00129474*/
22092 dlSf->bwAlloced += noRbs;
22094 /* Update Type0 Alloc Info */
22095 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
22096 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
22097 allocInfo->rbsAlloc = noRbs;
22099 /* Update Tb info for each scheduled TB */
22100 iTbs = allocInfo->tbInfo[0].iTbs;
22101 noLyr = allocInfo->tbInfo[0].noLyr;
22102 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
22103 * RETX TB Size is same as Init TX TB Size */
22104 if (allocInfo->tbInfo[0].tbCb->txCntr)
22106 allocInfo->tbInfo[0].bytesAlloc =
22107 allocInfo->tbInfo[0].bytesReq;
22111 allocInfo->tbInfo[0].bytesAlloc =
22112 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22113 /* DwPts Scheduling Changes Start */
22115 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
22117 allocInfo->tbInfo[0].bytesAlloc =
22118 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
22121 /* DwPts Scheduling Changes End */
22124 if (allocInfo->tbInfo[1].schdlngForTb)
22126 iTbs = allocInfo->tbInfo[1].iTbs;
22127 noLyr = allocInfo->tbInfo[1].noLyr;
22128 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
22129 * RETX TB Size is same as Init TX TB Size */
22130 if (allocInfo->tbInfo[1].tbCb->txCntr)
22132 allocInfo->tbInfo[1].bytesAlloc =
22133 allocInfo->tbInfo[1].bytesReq;
22137 allocInfo->tbInfo[1].bytesAlloc =
22138 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22139 /* DwPts Scheduling Changes Start */
22141 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
22143 allocInfo->tbInfo[1].bytesAlloc =
22144 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
22147 /* DwPts Scheduling Changes End */
22151 /* The last RBG which can be smaller than the RBG size is consedered
22152 * only for the first time allocation of TYPE0 UE */
22153 dlSf->lstRbgDfct = 0;
22160 * @brief To prepare RNTP value from the PRB allocation (P-High -> 1 and P-Low -> 0)
22164 * Function : rgSCHCmnBuildRntpInfo
22166 * Processing Steps:
22168 * @param[in] uint8_t *rntpPtr
22169 * @param[in] uint8_t startRb
22170 * @param[in] uint8_t numRb
22175 static S16 rgSCHCmnBuildRntpInfo
22184 uint16_t rbPtrStartIdx; /* Start Index of Octete Buffer to be filled */
22185 uint16_t rbPtrEndIdx; /* End Index of Octete Buffer to be filled */
22186 uint16_t rbBitLoc; /* Bit Location to be set as 1 in the current Byte */
22187 uint16_t nmbRbPerByte; /* PRB's to be set in the current Byte (in case of multiple Bytes) */
22190 rbPtrStartIdx = (startRb)/8;
22191 rbPtrEndIdx = (startRb + nmbRb)/8;
22193 if (rntpPtr == NULLP)
22195 DU_LOG("\nERROR --> SCH : rgSCHCmnBuildRntpInfo():"
22196 "rntpPtr can't be NULLP (Memory Allocation Failed)");
22200 while(rbPtrStartIdx <= rbPtrEndIdx)
22202 rbBitLoc = (startRb)%8;
22204 /* case 1: startRb and endRb lies in same Byte */
22205 if (rbPtrStartIdx == rbPtrEndIdx)
22207 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
22208 | (((1<<nmbRb)-1)<<rbBitLoc);
22211 /* case 2: startRb and endRb lies in different Byte */
22212 if (rbPtrStartIdx != rbPtrEndIdx)
22214 nmbRbPerByte = 8 - rbBitLoc;
22215 nmbRb = nmbRb - nmbRbPerByte;
22216 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
22217 | (((1<<nmbRbPerByte)-1)<<rbBitLoc);
22218 startRb = startRb + nmbRbPerByte;
22224 /* dsfr_pal_fixes ** 21-March-2013 ** SKS ** Adding Debug logs */
22226 /* dsfr_pal_fixes ** 25-March-2013 ** SKS ** Adding Debug logs to print RNTP */
22232 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
22236 * Function : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
22238 * Processing Steps:
22240 * @param[in] RgSchCellCb *cell
22241 * @param[in] RgSchDlSf *dlSf
22242 * @param[in] uint8_t rbStrt
22243 * @param[in] uint8_t numRb
22247 static S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
22252 RgSchSFRPoolInfo *sfrPool,
22261 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22262 sfrPool->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22265 dlSf->type2Start += numRb;
22266 dlSf->bwAlloced += numRb;
22268 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
22270 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
22271 if(FALSE == ue->lteAdvUeCb.rgrLteAdvUeCfg.isUeCellEdge)
22273 if((sfrPool->type2Start >= sfrPool->pwrHiCCRange.startRb) &&
22274 (sfrPool->type2Start + numRb < sfrPool->pwrHiCCRange.endRb))
22276 ue->lteAdvUeCb.isCCUePHigh = TRUE;
22278 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
22279 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
22282 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
22283 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
22290 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
22291 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
22294 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
22295 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
22300 sfrPool->type2Start += numRb;
22301 sfrPool->bwAlloced += numRb;
22308 * @brief To do DL allocation using TYPE0 RA.
22312 * Function : rgSCHCmnNonDlfsSFRPoolType0Alloc
22314 * Processing Steps:
22315 * - Perform TYPE0 allocation using the RBGs between type0End and type2End.
22316 * - Build the allocation mask as per RBG positioning.
22317 * - Update the allocation parameters.
22319 * @param[in] RgSchCellCb *cell
22320 * @param[in] RgSchDlSf *dlSf
22321 * @param[in] RgSchDlRbAlloc *allocInfo
/* NOTE(review): this listing elides some lines (braces, declarations of
 * noRbs/iTbs/noLyr, the branch bodies); comments below annotate only the
 * visible statements. */
22325 static Void rgSCHCmnNonDlfsSFRPoolType0Alloc
22329 RgSchSFRPoolInfo *poolInfo,
22330 RgSchDlRbAlloc *allocInfo
22333 uint32_t dlAllocMsk = 0;
22334 uint8_t rbgFiller = 0;
22335 uint8_t noRbgs = 0;
/* If this pool ends at the cell-bandwidth edge, the last (possibly short)
 * RBG belongs to it: fold the last-RBG deficit into the request once. */
22341 if (poolInfo->poolstartRB + poolInfo->bw == dlSf->bw)
22343 if (poolInfo->type0End == dlSf->bw/4)
22345 rbgFiller = dlSf->lstRbgDfct;
22346 /* The last RBG which can be smaller than the RBG size is considered
22347 * only for the first time allocation of TYPE0 UE */
22348 dlSf->lstRbgDfct = 0;
/* Number of whole RBGs needed to satisfy the request plus filler. */
22352 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
22354 /* Abhinav to-do start */
22355 /* MS_FIX for ccpu00123919*/
22356 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Clamp to the remaining subframe bandwidth; noRbs is recomputed after
 * the (elided) adjustment of noRbgs. */
22357 if (dlSf->bwAlloced + noRbs > dlSf->bw)
22363 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22365 /* Abhinav to-do end */
22369 /* type0End would have been initially (during subfrm Init) at the bit position
22370 * (cell->noOfRbgs - 1), 0 being the most significant.
22371 * Getting DlAllocMsk for noRbgs and at the appropriate position */
22372 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - poolInfo->type0End));
22373 /* Move backwards the type0End pivot */
22374 poolInfo->type0End -= noRbgs;
22375 /*MS_FIX for ccpu00123919*/
22376 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
22377 /* Update the bwAlloced field accordingly */
22378 poolInfo->bwAlloced += noRbs + dlSf->lstRbgDfct;
22379 dlSf->bwAlloced += noRbs + dlSf->lstRbgDfct;
22381 /* Update Type0 Alloc Info */
22382 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
22383 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
22384 allocInfo->rbsAlloc = noRbs;
22386 /* Update Tb info for each scheduled TB */
22387 iTbs = allocInfo->tbInfo[0].iTbs;
22388 noLyr = allocInfo->tbInfo[0].noLyr;
22389 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
22390 * RETX TB Size is same as Init TX TB Size */
22391 if (allocInfo->tbInfo[0].tbCb->txCntr)
22393 allocInfo->tbInfo[0].bytesAlloc =
22394 allocInfo->tbInfo[0].bytesReq;
/* First transmission: TB size (bytes) from the (layers, iTbs, RBs) table. */
22398 allocInfo->tbInfo[0].bytesAlloc =
22399 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22402 if (allocInfo->tbInfo[1].schdlngForTb)
22404 iTbs = allocInfo->tbInfo[1].iTbs;
22405 noLyr = allocInfo->tbInfo[1].noLyr;
22406 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
22407 * RETX TB Size is same as Init TX TB Size */
22408 if (allocInfo->tbInfo[1].tbCb->txCntr)
22410 allocInfo->tbInfo[1].bytesAlloc =
22411 allocInfo->tbInfo[1].bytesReq;
22415 allocInfo->tbInfo[1].bytesAlloc =
22416 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22420 /* The last RBG which can be smaller than the RBG size is considered
22421 * only for the first time allocation of TYPE0 UE */
22422 dlSf->lstRbgDfct = 0;
22427 * @brief Computes RNTP Info for a subframe.
22431 * Function : rgSCHCmnNonDlfsDsfrRntpComp
22433 * Processing Steps:
22434 * - Computes RNTP info from individual pools.
22436 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): some lines (declarations of i/len, braces, the memory
 * free on the failure path) are elided in this listing. */
22441 static void rgSCHCmnNonDlfsDsfrRntpComp(RgSchCellCb *cell,RgSchDlSf *dlSf)
/* 'samples' persists across calls: counts how many subframes have been
 * OR-ed into the aggregate RNTP bitmap since the last indication. */
22443 static uint16_t samples = 0;
22445 uint16_t bwBytes = (dlSf->bw-1)/8;
22446 RgrLoadInfIndInfo *rgrLoadInf;
22448 uint16_t ret = ROK;
/* Length of the RNTP bitmap in bytes (one bit per RB, rounded up). */
22451 len = (dlSf->bw % 8 == 0) ? dlSf->bw/8 : dlSf->bw/8 + 1;
22453 /* RNTP info is ORed every TTI and the sample is stored in cell control block */
22454 for(i = 0; i <= bwBytes; i++)
22456 cell->rntpAggrInfo.val[i] |= dlSf->rntpInfo.val[i];
22458 samples = samples + 1;
22459 /* After every 1000 ms, the RNTP info will be sent to application to be further sent to all neighbouring eNB
22460 informing them about the load indication for cell edge users */
22461 if(RG_SCH_MAX_RNTP_SAMPLES == samples)
22464 ret = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&rgrLoadInf,
22465 sizeof(RgrLoadInfIndInfo));
22468 DU_LOG("\nERROR --> SCH : Could not "
22469 "allocate memory for sending LoadInfo");
/* Fill the load-information indication from the aggregated bitmap. */
22473 rgrLoadInf->u.rntpInfo.pres = cell->rntpAggrInfo.pres;
22474 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
22475 rgrLoadInf->u.rntpInfo.len = len;
22477 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
22478 rgrLoadInf->u.rntpInfo.val = cell->rntpAggrInfo.val;
22479 rgrLoadInf->cellId = cell->cellId;
22481 /* dsfr_pal_fixes ** 22-March-2013 ** SKS */
22482 rgrLoadInf->bw = dlSf->bw;
22483 rgrLoadInf->type = RGR_SFR;
/* Hand the aggregated load information up to the RGR user. */
22485 ret = rgSCHUtlRgrLoadInfInd(cell, rgrLoadInf);
22488 DU_LOG("\nERROR --> SCH : rgSCHCmnNonDlfsDsfrRntpComp():"
22489 "rgSCHUtlRgrLoadInfInd() returned RFAILED");
/* Restart aggregation for the next sampling window. */
22492 memset(cell->rntpAggrInfo.val,0,len);
22496 /* LTE_ADV_FLAG_REMOVED_END */
22498 /* LTE_ADV_FLAG_REMOVED_START */
22500 * @brief Performs RB allocation per UE from a pool.
22504 * Function : rgSCHCmnSFRNonDlfsUeRbAlloc
22506 * Processing Steps:
22507 * - Allocate consecutively available RBs.
22509 * @param[in] RgSchCellCb *cell
22510 * @param[in] RgSchUeCb *ue
22511 * @param[in] RgSchDlSf *dlSf
22512 * @param[out] uint8_t *isDlBwAvail
/* NOTE(review): elided lines hide the parameter list, braces and return
 * statements; the comments below cover visible statements only. */
22519 static S16 rgSCHCmnSFRNonDlfsUeRbAlloc
22524 uint8_t *isDlBwAvail
22527 RgSchDlRbAlloc *allocInfo;
22528 RgSchCmnDlUe *dlUe;
22530 RgSchSFRPoolInfo *sfrpoolInfo = NULLP;
22533 isUECellEdge = RG_SCH_CMN_IS_UE_CELL_EDGE(ue);
22535 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22536 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
22537 *isDlBwAvail = TRUE;
22539 /*Find which pool is available for this UE*/
22540 if (rgSCHCmnNonDlfsSFRBwAvlbl(cell, &sfrpoolInfo, dlSf, allocInfo, isUECellEdge) != TRUE)
22542 /* SFR_FIX - If this is CE UE there may be BW available in CC Pool
22543 So CC UEs will be scheduled */
22546 *isDlBwAvail = TRUE;
22550 *isDlBwAvail = FALSE;
/* DTX feedback on either TB requests the PDCCH with the DTX flag set. */
22555 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX || dlUe->proc->tbInfo[1].isAckNackDtx)
22557 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
22561 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
22564 if (!(allocInfo->pdcch))
22566 /* Returning ROK since PDCCH might be available for another UE and further allocations could be done */
22571 allocInfo->rnti = ue->ueId;
/* TYPE2 (contiguous) allocation from the chosen SFR pool. */
22574 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22576 allocInfo->allocInfo.raType2.isLocal = TRUE;
22577 /* rg004.201 patch - ccpu00109921 fix end */
22578 /* MS_FIX for ccpu00123918*/
22579 allocInfo->allocInfo.raType2.rbStart = (uint8_t)sfrpoolInfo->type2Start;
22580 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22581 /* rg007.201 - Changes for MIMO feature addition */
22582 /* rg008.201 - Removed dependency on MIMO compile-time flag */
22583 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrpoolInfo, \
22584 allocInfo->allocInfo.raType2.rbStart, \
22585 allocInfo->allocInfo.raType2.numRb);
22586 allocInfo->rbsAlloc = allocInfo->rbsReq;
22587 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22589 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
22591 rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, sfrpoolInfo, allocInfo);
/* Recompute the code rate for each scheduled TB after the allocation. */
22595 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,0);
22596 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
22598 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,1);
22603 #if defined(LTEMAC_SPS)
22604 /* Update the sub-frame with new allocation */
22605 dlSf->bwAlloced += allocInfo->rbsReq;
22611 /* LTE_ADV_FLAG_REMOVED_END */
22612 #endif /* LTE_TDD */
22615 * @brief Performs RB allocation per UE for frequency non-selective cell.
22619 * Function : rgSCHCmnNonDlfsUeRbAlloc
22621 * Processing Steps:
22622 * - Allocate consecutively available RBs.
22624 * @param[in] RgSchCellCb *cell
22625 * @param[in] RgSchUeCb *ue
22626 * @param[in] RgSchDlSf *dlSf
22627 * @param[out] uint8_t *isDlBwAvail
/* NOTE(review): 5GTF variant — allocation is done in VRBG units per beam.
 * Elided lines hide braces and return statements. */
22633 static S16 rgSCHCmnNonDlfsUeRbAlloc
22638 uint8_t *isDlBwAvail
22641 RgSchDlRbAlloc *allocInfo;
22642 RgSchCmnDlUe *dlUe;
22644 uint32_t dbgRbsReq = 0;
22648 RgSch5gtfUeCb *ue5gtfCb = &(ue->ue5gtfCb);
22649 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[ue5gtfCb->BeamId]);
22651 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22652 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
22653 *isDlBwAvail = TRUE;
/* Sanity guard: the beam must not already exceed its VRBG budget. */
22655 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
22657 DU_LOG("\nERROR --> SCH : 5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
22659 DU_LOG("\nERROR --> SCH : 5GTF_ERROR vrbg allocated > 25\n");
/* PDCCH allocation; TRUE marks DTX feedback on either TB. */
22663 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX
22664 || dlUe->proc->tbInfo[1].isAckNackDtx)
22666 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
22670 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
22672 if (!(allocInfo->pdcch))
22674 /* Returning ROK since PDCCH might be available for another UE and
22675 * further allocations could be done */
22676 DU_LOG("\nERROR --> SCH : 5GTF_ERROR : PDCCH allocation failed :ue (%u)",
22678 DU_LOG("\nERROR --> SCH : 5GTF_ERROR PDCCH allocation failed\n");
22682 //maxPrb = RGSCH_MIN((allocInfo->vrbgReq * MAX_5GTF_VRBG_SIZE), ue5gtfCb->maxPrb);
22683 //maxPrb = RGSCH_MIN(maxPrb,
22684 //((beamInfo->totVrbgAvail - beamInfo->vrbgStart)* MAX_5GTF_VRBG_SIZE)));
22685 //TODO_SID Need to check for vrbg available after scheduling for same beam.
/* Fill the DL grant: VRBG window, RIV, and the PRB equivalents. */
22686 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
22687 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
22688 //TODO_SID: Setting for max TP
22689 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
22690 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
22691 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
22692 allocInfo->tbInfo[0].tbCb->dlGrnt.SCID = 0;
22693 allocInfo->tbInfo[0].tbCb->dlGrnt.dciFormat = allocInfo->dciFormat;
22694 //Filling temporarily
22695 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
22696 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG cursor and usage counters. */
22698 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
22699 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
22700 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22708 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
22712 * Function : rgSCHCmnNonDlfsCcchSduAlloc
22714 * Processing Steps:
22715 * - For each element in the list, Call rgSCHCmnNonDlfsCcchSduRbAlloc().
22716 * - If allocation is successful, add the ueCb to scheduled list of CCCH
22718 * - else, add UeCb to non-scheduled list.
22720 * @param[in] RgSchCellCb *cell
22721 * @param[in, out] RgSchCmnCcchSduRbAlloc *allocInfo
22722 * @param[in] uint8_t isRetx
/* NOTE(review): the @brief above says "Msg4" but this routine handles the
 * CCCH SDU lists — likely a stale copy-paste in the original header. */
22726 static Void rgSCHCmnNonDlfsCcchSduAlloc
22729 RgSchCmnCcchSduRbAlloc *allocInfo,
22734 CmLListCp *ccchSduLst = NULLP;
22735 CmLListCp *schdCcchSduLst = NULLP;
22736 CmLListCp *nonSchdCcchSduLst = NULLP;
22737 CmLList *schdLnkNode = NULLP;
22738 CmLList *toBeSchdLnk = NULLP;
22739 RgSchDlSf *dlSf = allocInfo->ccchSduDlSf;
22740 RgSchUeCb *ueCb = NULLP;
22741 RgSchDlHqProcCb *hqP = NULLP;
/* isRetx (its test is elided here) selects between the retransmission
 * and first-transmission list triplets. */
22745 /* Initialize re-transmitting lists */
22746 ccchSduLst = &(allocInfo->ccchSduRetxLst);
22747 schdCcchSduLst = &(allocInfo->schdCcchSduRetxLst);
22748 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduRetxLst);
22752 /* Initialize transmitting lists */
22753 ccchSduLst = &(allocInfo->ccchSduTxLst);
22754 schdCcchSduLst = &(allocInfo->schdCcchSduTxLst);
22755 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduTxLst);
22758 /* Perform allocations for the list */
22759 toBeSchdLnk = cmLListFirst(ccchSduLst);
22760 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
22762 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
22763 ueCb = hqP->hqE->ue;
22764 schdLnkNode = &hqP->schdLstLnk;
22765 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
22766 ret = rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf);
22769 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
22770 * list and return */
/* Drain the rest of the list into the non-scheduled list (do/while,
 * braces elided in this listing). */
22773 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
22774 ueCb = hqP->hqE->ue;
22775 schdLnkNode = &hqP->schdLstLnk;
22776 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
22777 cmLListAdd2Tail(nonSchdCcchSduLst, schdLnkNode);
22778 toBeSchdLnk = toBeSchdLnk->next;
22779 } while(toBeSchdLnk);
22783 /* Allocation successful: Add UE to the scheduled list */
22784 cmLListAdd2Tail(schdCcchSduLst, schdLnkNode);
22792 * @brief Performs RB allocation for CcchSdu for frequency non-selective cell.
22796 * Function : rgSCHCmnNonDlfsCcchSduRbAlloc
22798 * Processing Steps:
22800 * - Allocate consecutively available RBs
22802 * @param[in] RgSchCellCb *cell
22803 * @param[in] RgSchUeCb *ueCb
22804 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): elided lines hide the parameter list, compile-time guards
 * and return statements; comments cover the visible code only. */
22809 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc
22816 RgSchDlRbAlloc *allocInfo;
22817 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
22821 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb,cell);
22823 /* [ccpu00138802]-MOD-If Bw is less than required, return fail
22824 It will be allocated in next TTI */
/* Two bandwidth checks appear below — presumably compile-time
 * alternatives (SPS-aware vs plain; the guard lines are elided). */
22826 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
22827 (dlSf->bwAlloced == dlSf->bw))
22829 if((dlSf->bwAlloced == dlSf->bw) ||
22830 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
22835 /* Retrieve PDCCH */
22836 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
22837 if (ueDl->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
22839 /* allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, dlSf, y, ueDl->cqi,
22840 * TFU_DCI_FORMAT_1A, TRUE);*/
22841 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, TRUE);
22845 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE);
22847 if (!(allocInfo->pdcch))
22849 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
22853 /* Update allocation information */
22854 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
22855 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
22856 allocInfo->allocInfo.raType2.isLocal = TRUE;
22858 /*Fix for ccpu00123918*/
22859 /* Push this harq process back to the free queue */
22860 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
22861 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22862 allocInfo->rbsAlloc = allocInfo->rbsReq;
22863 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22864 /* Update the sub-frame with new allocation */
22866 /* LTE_ADV_FLAG_REMOVED_START */
/* With SFR enabled, common-channel TYPE2 allocation must also update the
 * SFR pool bookkeeping; otherwise the plain TYPE2 update is used. */
22868 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
22870 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf,
22871 allocInfo->allocInfo.raType2.rbStart,
22872 allocInfo->allocInfo.raType2.numRb);
22875 #endif /* end of ifndef LTE_TDD*/
22877 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf,
22878 allocInfo->allocInfo.raType2.rbStart,
22879 allocInfo->allocInfo.raType2.numRb);
22882 /* LTE_ADV_FLAG_REMOVED_END */
22883 /* ccpu00131941 - bwAlloced is updated from SPS bandwidth */
22891 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
22895 * Function : rgSCHCmnNonDlfsMsg4RbAlloc
22897 * Processing Steps:
22899 * - Allocate consecutively available RBs
22901 * @param[in] RgSchCellCb *cell
22902 * @param[in] RgSchRaCb *raCb
22903 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): elided lines hide the parameter list, the compile-time
 * guards separating the legacy and RG_5GTF paths, and return statements. */
22908 static S16 rgSCHCmnNonDlfsMsg4RbAlloc
22915 RgSchDlRbAlloc *allocInfo;
22918 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_RACB(raCb);
/* 5GTF path: Msg4 always uses beam 0 of the subframe. */
22921 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
22922 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
22924 DU_LOG("\nERROR --> SCH : 5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
22926 DU_LOG("\nERROR --> SCH : 5GTF_ERROR vrbg allocated > 25\n");
/* Bandwidth availability checks (SPS-aware and plain variants — the
 * guard lines are elided in this listing). */
22931 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
22932 (dlSf->bwAlloced == dlSf->bw))
22934 if((dlSf->bwAlloced == dlSf->bw) ||
22935 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
22942 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
22943 if (raCb->dlHqE->msg4Proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
22945 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, TRUE);
22949 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, FALSE);
22951 if (!(allocInfo->pdcch))
22953 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
22958 /* SR_RACH_STATS : MSG4 TX Failed */
22959 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
22961 /* Update allocation information */
22962 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
22963 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
22964 allocInfo->allocInfo.raType2.isLocal = TRUE;
22967 /*Fix for ccpu00123918*/
22968 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
22969 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22970 /* LTE_ADV_FLAG_REMOVED_START */
22972 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
22974 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
22975 allocInfo->allocInfo.raType2.rbStart, \
22976 allocInfo->allocInfo.raType2.numRb);
22979 #endif /* end of ifndef LTE_TDD */
22981 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
22982 allocInfo->allocInfo.raType2.rbStart, \
22983 allocInfo->allocInfo.raType2.numRb);
22985 /* LTE_ADV_FLAG_REMOVED_END */
22987 allocInfo->rbsAlloc = allocInfo->rbsReq;
22988 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22992 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
/* 5GTF grant fill: VRBG window, RIV and PRB equivalents for Msg4. */
22994 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
22995 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
22997 /* Update allocation information */
22998 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23000 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
23001 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
23002 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
23004 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
23005 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG cursor and usage counters. */
23008 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
23009 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
23010 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23018 * @brief Performs RB allocation for Msg4 lists of frequency non-selective cell.
23022 * Function : rgSCHCmnNonDlfsMsg4Alloc
23024 * Processing Steps:
23025 * - For each element in the list, Call rgSCHCmnNonDlfsMsg4RbAlloc().
23026 * - If allocation is successful, add the raCb to scheduled list of MSG4.
23027 * - else, add RaCb to non-scheduled list.
23029 * @param[in] RgSchCellCb *cell
23030 * @param[in, out] RgSchCmnMsg4RbAlloc *allocInfo
23031 * @param[in] uint8_t isRetx
23035 static Void rgSCHCmnNonDlfsMsg4Alloc
23038 RgSchCmnMsg4RbAlloc *allocInfo,
23043 CmLListCp *msg4Lst = NULLP;
23044 CmLListCp *schdMsg4Lst = NULLP;
23045 CmLListCp *nonSchdMsg4Lst = NULLP;
23046 CmLList *schdLnkNode = NULLP;
23047 CmLList *toBeSchdLnk = NULLP;
23048 RgSchDlSf *dlSf = allocInfo->msg4DlSf;
23049 RgSchRaCb *raCb = NULLP;
23050 RgSchDlHqProcCb *hqP = NULLP;
/* isRetx (its test is elided here) picks the retransmission or the
 * first-transmission list triplet. */
23054 /* Initialize re-transmitting lists */
23055 msg4Lst = &(allocInfo->msg4RetxLst);
23056 schdMsg4Lst = &(allocInfo->schdMsg4RetxLst);
23057 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4RetxLst);
23061 /* Initialize transmitting lists */
23062 msg4Lst = &(allocInfo->msg4TxLst);
23063 schdMsg4Lst = &(allocInfo->schdMsg4TxLst);
23064 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4TxLst);
23067 /* Perform allocations for the list */
23068 toBeSchdLnk = cmLListFirst(msg4Lst);
23069 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
23071 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23072 raCb = hqP->hqE->raCb;
23073 schdLnkNode = &hqP->schdLstLnk;
23074 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23075 ret = rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf);
23078 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
23079 * list and return */
/* Drain the rest of the list into the non-scheduled list (do/while,
 * braces elided in this listing). */
23082 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23083 raCb = hqP->hqE->raCb;
23084 schdLnkNode = &hqP->schdLstLnk;
23085 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23086 cmLListAdd2Tail(nonSchdMsg4Lst, schdLnkNode);
23087 toBeSchdLnk = toBeSchdLnk->next;
23088 } while(toBeSchdLnk);
23092 /* Allocation successful: Add UE to the scheduled list */
23093 cmLListAdd2Tail(schdMsg4Lst, schdLnkNode);
23104 * @brief Performs RB allocation for the list of UEs of a frequency
23105 * non-selective cell.
23109 * Function : rgSCHCmnNonDlfsDedRbAlloc
23111 * Processing Steps:
23112 * - For each element in the list, Call rgSCHCmnNonDlfsUeRbAlloc().
23113 * - If allocation is successful, add the ueCb to scheduled list of UEs.
23114 * - else, add ueCb to non-scheduled list of UEs.
23116 * @param[in] RgSchCellCb *cell
23117 * @param[in, out] RgSchCmnUeRbAlloc *allocInfo
23118 * @param[in] CmLListCp *ueLst,
23119 * @param[in, out] CmLListCp *schdHqPLst,
23120 * @param[in, out] CmLListCp *nonSchdHqPLst
23124 Void rgSCHCmnNonDlfsDedRbAlloc
23127 RgSchCmnUeRbAlloc *allocInfo,
23129 CmLListCp *schdHqPLst,
23130 CmLListCp *nonSchdHqPLst
23134 CmLList *schdLnkNode = NULLP;
23135 CmLList *toBeSchdLnk = NULLP;
23136 RgSchDlSf *dlSf = allocInfo->dedDlSf;
23137 RgSchUeCb *ue = NULLP;
23138 RgSchDlHqProcCb *hqP = NULLP;
23139 uint8_t isDlBwAvail;
23142 /* Perform allocations for the list */
23143 toBeSchdLnk = cmLListFirst(ueLst);
23144 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
23146 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23148 schdLnkNode = &hqP->schdLstLnk;
23149 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23151 ret = rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, &isDlBwAvail);
/* When DL bandwidth is exhausted, everything still pending is moved to
 * the non-scheduled list and the walk stops (branch test elided). */
23154 /* Allocation failed: Add remaining UEs to non-scheduled
23155 * list and return */
23158 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23160 schdLnkNode = &hqP->schdLstLnk;
23161 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23162 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
23163 toBeSchdLnk = toBeSchdLnk->next;
23164 } while(toBeSchdLnk);
23170 #if defined (TENB_STATS) && defined (RG_5GTF)
23171 cell->tenbStats->sch.dl5gtfRbAllocPass++;
23173 /* Allocation successful: Add UE to the scheduled list */
23174 cmLListAdd2Tail(schdHqPLst, schdLnkNode);
23178 #if defined (TENB_STATS) && defined (RG_5GTF)
23179 cell->tenbStats->sch.dl5gtfRbAllocFail++;
23181 /* Allocation failed : Add UE to the non-scheduled list */
23182 DU_LOG("\nERROR --> SCH : 5GTF_ERROR Dl rb alloc failed adding nonSchdHqPLst\n");
23183 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
23191 * @brief Handles RB allocation for frequency non-selective cell.
23195 * Function : rgSCHCmnNonDlfsRbAlloc
23197 * Invoking Module Processing:
23198 * - SCH shall invoke this if downlink frequency selective is disabled for
23199 * the cell for RB allocation.
23200 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
23201 * estimate and subframe for each allocation to be made to SCH.
23203 * Processing Steps:
23204 * - Allocate sequentially for common channels.
23205 * - For transmitting and re-transmitting UE list.
23207 * - Perform wide-band allocations for UE in increasing order of
23209 * - Determine Imcs for the allocation.
23210 * - Determine RA type.
23211 * - Determine DCI format.
23213 * @param[in] RgSchCellCb *cell
23214 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Top-level non-DLFS allocator. Visible ordering: MSG4 (retx then tx),
 * CCCH SDU (retx then tx), RAR, then dedicated UEs (tx+retx, retx, tx). */
23218 Void rgSCHCmnNonDlfsRbAlloc
23221 RgSchCmnDlRbAllocInfo *allocInfo
23224 uint8_t raRspCnt = 0;
23225 RgSchDlRbAlloc *reqAllocInfo;
23227 /* Allocate for MSG4 retransmissions */
23228 if (allocInfo->msg4Alloc.msg4RetxLst.count)
23230 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc RetxLst\n");
23231 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), TRUE);
23234 /* Allocate for MSG4 transmissions */
23235 /* Assuming all the nodes in the list need allocations: rbsReq is valid */
23236 if (allocInfo->msg4Alloc.msg4TxLst.count)
23238 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc txLst\n");
23239 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), FALSE);
23242 /* Allocate for CCCH SDU (received after guard timer expiry)
23243 * retransmissions */
23244 if (allocInfo->ccchSduAlloc.ccchSduRetxLst.count)
23246 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
23247 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), TRUE);
23250 /* Allocate for CCCH SDU transmissions */
23251 /* Allocate for CCCH SDU (received after guard timer expiry) transmissions */
23252 if (allocInfo->ccchSduAlloc.ccchSduTxLst.count)
23254 DU_LOG("\nINFO --> SCH : 5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
23255 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), FALSE);
23259 /* Allocate for Random access response */
23260 for (raRspCnt = 0; raRspCnt < RG_SCH_CMN_MAX_CMN_PDCCH; ++raRspCnt)
23262 /* Assuming that the requests will be filled in sequentially */
23263 reqAllocInfo = &(allocInfo->raRspAlloc[raRspCnt]);
23264 if (!reqAllocInfo->rbsReq)
23268 DU_LOG("\nINFO --> SCH : 5GTF_ERROR calling RAR rgSCHCmnNonDlfsCmnRbAlloc\n");
23269 // if ((rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo)) != ROK)
23270 if ((rgSCHCmnNonDlfsCmnRbAllocRar(cell, reqAllocInfo)) != ROK)
23276 /* Allocate for RETX+TX UEs */
23277 if(allocInfo->dedAlloc.txRetxHqPLst.count)
23279 DU_LOG("\nDEBUG --> SCH : 5GTF_ERROR TX RETX rgSCHCmnNonDlfsDedRbAlloc\n");
23280 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23281 &(allocInfo->dedAlloc.txRetxHqPLst),
23282 &(allocInfo->dedAlloc.schdTxRetxHqPLst),
23283 &(allocInfo->dedAlloc.nonSchdTxRetxHqPLst));
23286 if((allocInfo->dedAlloc.retxHqPLst.count))
23288 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23289 &(allocInfo->dedAlloc.retxHqPLst),
23290 &(allocInfo->dedAlloc.schdRetxHqPLst),
23291 &(allocInfo->dedAlloc.nonSchdRetxHqPLst));
23294 /* Allocate for transmitting UEs */
23295 if((allocInfo->dedAlloc.txHqPLst.count))
23297 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23298 &(allocInfo->dedAlloc.txHqPLst),
23299 &(allocInfo->dedAlloc.schdTxHqPLst),
23300 &(allocInfo->dedAlloc.nonSchdTxHqPLst));
/* Diagnostic: warn when more UEs were selected than fit in one DL sf;
 * the %ld/%d variants track the list-counter width per build. */
23303 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cell);
23304 if ((allocInfo->dedAlloc.txRetxHqPLst.count +
23305 allocInfo->dedAlloc.retxHqPLst.count +
23306 allocInfo->dedAlloc.txHqPLst.count) >
23307 cmnCell->dl.maxUePerDlSf)
23309 #ifndef ALIGN_64BIT
23310 DU_LOG("\nERROR --> SCH : UEs selected by"
23311 " scheduler exceed maximumUePerDlSf(%u)tx-retx %ld retx %ld tx %ld\n",
23312 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
23313 allocInfo->dedAlloc.retxHqPLst.count,
23314 allocInfo->dedAlloc.txHqPLst.count);
23316 DU_LOG("\nERROR --> SCH : UEs selected by"
23317 " scheduler exceed maximumUePerDlSf(%u)tx-retx %d retx %d tx %d\n",
23318 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
23319 allocInfo->dedAlloc.retxHqPLst.count,
23320 allocInfo->dedAlloc.txHqPLst.count);
23325 /* LTE_ADV_FLAG_REMOVED_START */
/* With DSFR enabled, fold this subframe's RNTP bitmap into the aggregate. */
23326 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
23328 DU_LOG("\nINFO --> SCH : 5GTF_ERROR RETX rgSCHCmnNonDlfsDsfrRntpComp\n");
23329 rgSCHCmnNonDlfsDsfrRntpComp(cell, allocInfo->dedAlloc.dedDlSf);
23331 /* LTE_ADV_FLAG_REMOVED_END */
23332 #endif /* LTE_TDD */
/***********************************************************
 *
 *     Func : rgSCHCmnCalcRiv
 *
 *     Desc : Computes the Resource Indication Value (RIV) for a
 *            contiguous (localized Type-2) allocation of numRb RBs
 *            starting at rbStart, over a bandwidth of bw RBs
 *            (3GPP TS 36.213, Sec 7.1.6.3).
 *
 *     Ret  : RIV as uint32_t
 *
 *     Notes: Short allocations (length - 1 <= bw/2) use the direct
 *            encoding; longer ones use the mirrored encoding so every
 *            (start, length) pair maps to a unique value.
 *
 *     File : rg_sch_utl.c
 *
 **********************************************************/
uint32_t rgSCHCmnCalcRiv(uint8_t bw, uint8_t rbStart, uint8_t numRb)
{
   uint8_t lenMinus1 = numRb - 1;   /* L_CRBs - 1 per the spec formula */
   uint32_t riv;

   if (lenMinus1 <= bw / 2)
   {
      /* Direct encoding: RIV = N * (L-1) + RB_start */
      riv = (uint32_t)bw * lenMinus1 + rbStart;
   }
   else
   {
      /* Mirrored encoding: RIV = N * (N - L + 1) + (N - 1 - RB_start) */
      riv = (uint32_t)bw * (bw - lenMinus1) + (bw - rbStart - 1);
   }
   return riv;
} /* rgSCHCmnCalcRiv */
23382 * @brief This function allocates and copies the RACH response scheduling
23383 * related information into cell control block.
23387 * Function: rgSCHCmnDlCpyRachInfo
23388 * Purpose: This function allocates and copies the RACH response
23389 * scheduling related information into cell control block
23390 * for each DL subframe.
23393 * Invoked by: Scheduler
23395 * @param[in] RgSchCellCb* cell
23396 * @param[in] RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES]
23397 * @param[in] uint8_t raArrSz
/* NOTE(review): TDD-only helper; several lines (loop braces, returns,
 * declarations of sfNum/numRfs/sfcount/ret) are elided in this listing. */
23401 static S16 rgSCHCmnDlCpyRachInfo
23404 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
23408 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
23411 uint16_t subfrmIdx;
23413 uint8_t numSubfrms;
23418 /* Allocate RACH response information for each DL
23419 * subframe in a radio frame */
23420 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cell->rachRspLst,
23421 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1] *
23422 sizeof(RgSchTddRachRspLst));
/* Walk SFN entries from the highest index down; map each subframe to the
 * highest DL subframe index for this UL/DL configuration. */
23428 for(sfnIdx=raArrSz-1; sfnIdx>=0; sfnIdx--)
23430 for(subfrmIdx=0; subfrmIdx < RGSCH_NUM_SUB_FRAMES; subfrmIdx++)
/* NOTE(review): 'subfrmIdx' is reused as both the loop counter and the
 * looked-up value — intentional in the original but easy to misread. */
23432 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
23433 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
23438 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx],subfrmIdx);
23440 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
23442 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddNumDlSubfrmTbl[ulDlCfgIdx],subfrmIdx);
23443 sfNum = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][subfrmIdx]-1;
23444 numRfs = cell->rachRspLst[sfNum].numRadiofrms;
23445 /* For each DL subframe in which RACH response can
23446 * be sent is updated */
/* Append this radio frame's RACH-response window (sfnOffset + subframe
 * list) to the per-DL-subframe entry in the cell control block. */
23449 cell->rachRspLst[sfNum].rachRsp[numRfs].sfnOffset =
23450 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset;
23451 for(sfcount=0; sfcount < numSubfrms; sfcount++)
23453 cell->rachRspLst[sfNum].rachRsp[numRfs].\
23454 subframe[sfcount] =
23455 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].\
23458 cell->rachRspLst[sfNum].rachRsp[numRfs].numSubfrms =
23459 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
23460 cell->rachRspLst[sfNum].numRadiofrms++;
23463 /* Copy the subframes to be deleted at this subframe */
23465 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
23468 cell->rachRspLst[sfNum].delInfo.sfnOffset =
23469 rachRspLst[sfnIdx][subfrmIdx].delInfo.sfnOffset;
23470 for(sfcount=0; sfcount < numSubfrms; sfcount++)
23472 cell->rachRspLst[sfNum].delInfo.subframe[sfcount] =
23473 rachRspLst[sfnIdx][subfrmIdx].delInfo.subframe[sfcount];
23475 cell->rachRspLst[sfNum].delInfo.numSubfrms =
23476 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
23484 * @brief This function determines the iTbs based on the new CFI,
23485 * CQI and BLER based delta iTbs
23489 * Function: rgSchCmnFetchItbs
23490 * Purpose: Fetch the new iTbs when CFI changes.
23492 * @param[in] RgSchCellCb *cell
23493 * @param[in] RgSchCmnDlUe *ueDl
23494 * @param[in] uint8_t cqi
/* NOTE(review): the two prototypes below are alternate compile-time
 * variants (guard lines elided); the remaining parameters (subFrm, cfi,
 * cwIdx, noLyr) and several braces/returns are also elided. */
23500 static S32 rgSchCmnFetchItbs
23503 RgSchCmnDlUe *ueDl,
23511 static S32 rgSchCmnFetchItbs
23514 RgSchCmnDlUe *ueDl,
23523 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
23528 /* Special Handling for Spl Sf when CFI is 3 as
23529 * CFI in Spl Sf will be max 2 */
23530 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
23532 if((cellDl->currCfi == 3) ||
23533 ((cell->bwCfg.dlTotalBw <= 10) && (cellDl->currCfi == 1)))
23535 /* Use CFI 2 in this case */
/* deltaiTbs is the BLER-based correction in 1/100 iTbs units (per the
 * @brief above); combine it with the CQI->TBS mapping and clamp. */
23536 iTbs = (ueDl->laCb[cwIdx].deltaiTbs +
23537 ((*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][2]))[cqi])* 100)/100;
23539 RG_SCH_CHK_ITBS_RANGE(iTbs, RGSCH_NUM_ITBS - 1);
23543 iTbs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1];
23545 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
23547 else /* CFI Changed. Update with new iTbs Reset the BLER*/
23550 S32 tmpiTbs = (*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][cfi]))[cqi];
23552 iTbs = (ueDl->laCb[cwIdx].deltaiTbs + tmpiTbs*100)/100;
23554 RG_SCH_CHK_ITBS_RANGE(iTbs, tmpiTbs);
23556 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
/* Cache the result per layer and reset the BLER correction for the new CFI. */
23558 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1] = iTbs;
23560 ueDl->lastCfi = cfi;
23561 ueDl->laCb[cwIdx].deltaiTbs = 0;
23568 * @brief This function determines the RBs and Bytes required for BO
23569 * transmission for UEs configured with TM 1/2/6/7.
23573 * Function: rgSCHCmnDlAllocTxRb1Tb1Cw
23574 * Purpose: Allocate TB1 on CW1.
23576 * Reference Parameter effBo is filled with alloced bytes.
23577 * Returns RFAILED if BO not satisfied at all.
23579 * Invoked by: rgSCHCmnDlAllocTxRbTM1/2/6/7
23581 * @param[in] RgSchCellCb *cell
23582 * @param[in] RgSchDlSf *subFrm
23583 * @param[in] RgSchUeCb *ue
23584 * @param[in] uint32_t bo
23585 * @param[out] uint32_t *effBo
23586 * @param[in] RgSchDlHqProcCb *proc
23587 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Allocate one TB on one CW for a fresh TX; fills *effBo with the
 * alloced bytes and records the RB request in the UE's alloc CB. */
23591 static Void rgSCHCmnDlAllocTxRb1Tb1Cw
23598 RgSchDlHqProcCb *proc,
23599 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23602 RgSchDlRbAlloc *allocInfo;
23607 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* 5GTF path: DCI format chosen from the UE's configured rank;
 * otherwise the TM-specific format/RA-type selector is used
 * (the #if/#else lines are not visible in this extract). */
23609 if (ue->ue5gtfCb.rank == 2)
23611 allocInfo->dciFormat = TFU_DCI_FORMAT_B2;
23615 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23618 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
23619 allocInfo->raType);
/* TB1 allocation against the BO; numRb/effBo are outputs. */
23621 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
23622 bo, &numRb, effBo);
23623 if (ret == RFAILED)
23625 /* If allocation couldn't be made then return */
23628 /* Adding UE to RbAllocInfo TX Lst */
23629 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
23630 /* Fill UE alloc Info */
23631 allocInfo->rbsReq = numRb;
23632 allocInfo->dlSf = subFrm;
/* 5GTF: express the request in VRB groups as well. */
23634 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
23642 * @brief This function determines the RBs and Bytes required for BO
23643 * retransmission for UEs configured with TM 1/2/6/7.
23647 * Function: rgSCHCmnDlAllocRetxRb1Tb1Cw
23648 * Purpose: Allocate TB1 on CW1.
23650 * Reference Parameter effBo is filled with alloced bytes.
23651 * Returns RFAILED if BO not satisfied at all.
23653 * Invoked by: rgSCHCmnDlAllocRetxRbTM1/2/6/7
23655 * @param[in] RgSchCellCb *cell
23656 * @param[in] RgSchDlSf *subFrm
23657 * @param[in] RgSchUeCb *ue
23658 * @param[in] uint32_t bo
23659 * @param[out] uint32_t *effBo
23660 * @param[in] RgSchDlHqProcCb *proc
23661 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Allocate one TB on one CW for a HARQ retransmission; on failure the
 * UE+proc are parked on the non-scheduled RETX list for the specific
 * scheduler to handle at finalization. */
23665 static Void rgSCHCmnDlAllocRetxRb1Tb1Cw
23672 RgSchDlHqProcCb *proc,
23673 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23676 RgSchDlRbAlloc *allocInfo;
23681 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
23684 /* 5GTF: RETX DCI format same as TX */
23685 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
23686 &allocInfo->raType);
23689 /* Get the Allocation in terms of RBs that are required for
23690 * this retx of TB1 */
23691 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
23693 if (ret == RFAILED)
23695 /* Allocation couldn't be made for Retx */
23696 /* Fix : syed If TxRetx allocation failed then add the UE along with the proc
23697 * to the nonSchdTxRetxUeLst and let spfc scheduler take care of it during
23699 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* Success: queue the UE on the cell-wide RETX list and record the
 * request in the UE's alloc CB. */
23702 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
23703 /* Fill UE alloc Info */
23704 allocInfo->rbsReq = numRb;
23705 allocInfo->dlSf = subFrm;
23707 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
23715 * @brief This function determines the RBs and Bytes required for BO
23716 * transmission for UEs configured with TM 1.
23720 * Function: rgSCHCmnDlAllocTxRbTM1
23723 * Reference Parameter effBo is filled with alloced bytes.
23724 * Returns RFAILED if BO not satisfied at all.
23726 * Invoked by: rgSCHCmnDlAllocTxRb
23728 * @param[in] RgSchCellCb *cell
23729 * @param[in] RgSchDlSf *subFrm
23730 * @param[in] RgSchUeCb *ue
23731 * @param[in] uint32_t bo
23732 * @param[out] uint32_t *effBo
23733 * @param[in] RgSchDlHqProcCb *proc
23734 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23738 static Void rgSCHCmnDlAllocTxRbTM1
23745 RgSchDlHqProcCb *proc,
23746 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM1 carries a single TB on a single CW: delegate to the common
 * 1-TB/1-CW TX allocator. */
23749 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23755 * @brief This function determines the RBs and Bytes required for BO
23756 * retransmission for UEs configured with TM 1.
23760 * Function: rgSCHCmnDlAllocRetxRbTM1
23763 * Reference Parameter effBo is filled with alloced bytes.
23764 * Returns RFAILED if BO not satisfied at all.
23766 * Invoked by: rgSCHCmnDlAllocRetxRb
23768 * @param[in] RgSchCellCb *cell
23769 * @param[in] RgSchDlSf *subFrm
23770 * @param[in] RgSchUeCb *ue
23771 * @param[in] uint32_t bo
23772 * @param[out] uint32_t *effBo
23773 * @param[in] RgSchDlHqProcCb *proc
23774 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23778 static Void rgSCHCmnDlAllocRetxRbTM1
23785 RgSchDlHqProcCb *proc,
23786 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM1 RETX is likewise a single TB on a single CW: delegate to the
 * common 1-TB/1-CW RETX allocator. */
23789 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23795 * @brief This function determines the RBs and Bytes required for BO
23796 * transmission for UEs configured with TM 2.
23800 * Function: rgSCHCmnDlAllocTxRbTM2
23803 * Reference Parameter effBo is filled with alloced bytes.
23804 * Returns RFAILED if BO not satisfied at all.
23806 * Invoked by: rgSCHCmnDlAllocTxRb
23808 * @param[in] RgSchCellCb *cell
23809 * @param[in] RgSchDlSf *subFrm
23810 * @param[in] RgSchUeCb *ue
23811 * @param[in] uint32_t bo
23812 * @param[out] uint32_t *effBo
23813 * @param[in] RgSchDlHqProcCb *proc
23814 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23818 static Void rgSCHCmnDlAllocTxRbTM2
23825 RgSchDlHqProcCb *proc,
23826 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM2 (transmit diversity) carries a single TB on a single CW:
 * delegate to the common 1-TB/1-CW TX allocator. */
23829 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23835 * @brief This function determines the RBs and Bytes required for BO
23836 * retransmission for UEs configured with TM 2.
23840 * Function: rgSCHCmnDlAllocRetxRbTM2
23843 * Reference Parameter effBo is filled with alloced bytes.
23844 * Returns RFAILED if BO not satisfied at all.
23846 * Invoked by: rgSCHCmnDlAllocRetxRb
23848 * @param[in] RgSchCellCb *cell
23849 * @param[in] RgSchDlSf *subFrm
23850 * @param[in] RgSchUeCb *ue
23851 * @param[in] uint32_t bo
23852 * @param[out] uint32_t *effBo
23853 * @param[in] RgSchDlHqProcCb *proc
23854 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23858 static Void rgSCHCmnDlAllocRetxRbTM2
23865 RgSchDlHqProcCb *proc,
23866 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM2 RETX: single TB on single CW, via the common RETX allocator. */
23869 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23875 * @brief This function determines the RBs and Bytes required for BO
23876 * transmission for UEs configured with TM 3.
23880 * Function: rgSCHCmnDlAllocTxRbTM3
23883 * Reference Parameter effBo is filled with alloced bytes.
23884 * Returns RFAILED if BO not satisfied at all.
23886 * Invoked by: rgSCHCmnDlAllocTxRb
23888 * @param[in] RgSchCellCb *cell
23889 * @param[in] RgSchDlSf *subFrm
23890 * @param[in] RgSchUeCb *ue
23891 * @param[in] uint32_t bo
23892 * @param[out] uint32_t *effBo
23893 * @param[in] RgSchDlHqProcCb *proc
23894 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23898 static Void rgSCHCmnDlAllocTxRbTM3
23905 RgSchDlHqProcCb *proc,
23906 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23911 /* Both TBs free for TX allocation */
/* Fresh TX for TM3: both TBs of the HARQ proc are schedulable, so the
 * TxTx handler decides TD vs. spatial multiplexing. */
23912 rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo,\
23913 proc, cellWdAllocInfo);
23920 * @brief This function determines the RBs and Bytes required for BO
23921 * retransmission for UEs configured with TM 3.
23925 * Function: rgSCHCmnDlAllocRetxRbTM3
23928 * Reference Parameter effBo is filled with alloced bytes.
23929 * Returns RFAILED if BO not satisfied at all.
23931 * Invoked by: rgSCHCmnDlAllocRetxRb
23933 * @param[in] RgSchCellCb *cell
23934 * @param[in] RgSchDlSf *subFrm
23935 * @param[in] RgSchUeCb *ue
23936 * @param[in] uint32_t bo
23937 * @param[out] uint32_t *effBo
23938 * @param[in] RgSchDlHqProcCb *proc
23939 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 RETX dispatcher: route on the HARQ TB states - both NACKED goes
 * to the dual-CW RETX path, otherwise to the mixed TX+RETX path. */
23943 static Void rgSCHCmnDlAllocRetxRbTM3
23950 RgSchDlHqProcCb *proc,
23951 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23956 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
23957 (proc->tbInfo[1].state == HQ_TB_NACKED))
23960 DU_LOG("\nDEBUG --> SCH : RETX RB TM3 nack for both hqp %d cell %d \n", proc->procId, proc->hqE->cell->cellId);
23962 /* Both TBs require RETX allocation */
23963 rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo,\
23964 proc, cellWdAllocInfo);
23968 /* One of the TBs need RETX allocation. Other TB may/maynot
23969 * be available for new TX allocation. */
23970 rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo,\
23971 proc, cellWdAllocInfo);
23979 * @brief This function performs the DCI format selection in case of
23980 * Transmit Diversity scheme where there can be more
23981 * than 1 option for DCI format selection.
23985 * Function: rgSCHCmnSlctPdcchFrmt
23986 * Purpose: 1. If DLFS is enabled, then choose TM specific
23987 * DCI format for Transmit diversity. All the
23988 * TM Specific DCI Formats support Type0 and/or
23989 * Type1 resource allocation scheme. DLFS
23990 * supports only Type-0&1 Resource allocation.
23991 * 2. If DLFS is not enabled, select a DCI format
23992 * which is of smaller size. Since Non-DLFS
23993 * scheduler supports all Resource allocation
23994 * schemes, selection is based on efficiency.
23996 * Invoked by: DL UE Allocation by Common Scheduler.
23998 * @param[in] RgSchCellCb *cell
23999 * @param[in] RgSchUeCb *ue
24000 * @param[out] uint8_t *raType
24001 * @return TfuDciFormat
/* Select the DCI format and RA type for transmit-diversity scheduling:
 * with DLFS enabled (and TX-mode transition complete) use the
 * TM-specific option; otherwise fall back to the preferred (smaller)
 * option from the same per-TM table. */
24004 TfuDciFormat rgSCHCmnSlctPdcchFrmt
24011 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
24014 /* ccpu00140894- Selective DCI Format and RA type should be selected only
24015 * after TX Mode transition is completed*/
24016 if ((cellSch->dl.isDlFreqSel) && (ue->txModeTransCmplt))
/* txMode is 1-based; the options table is indexed from 0. */
24018 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciRAType;
24019 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciFrmt);
24023 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciRAType;
24024 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciFrmt);
24030 * @brief This function handles Retx allocation in case of TM3 UEs
24031 * where both the TBs were NACKED previously.
24035 * Function: rgSCHCmnDlTM3RetxRetx
24036 * Purpose: If forceTD flag enabled
24037 * TD for TB1 on CW1.
24039 * DCI Frmt 2A and RA Type 0
24040 * RI layered SM of both TBs on 2 CWs
24041 * Add UE to cell Alloc Info.
24042 * Fill UE alloc Info.
24045 * Successful allocation is indicated by non-zero effBo value.
24047 * Invoked by: rgSCHCmnDlAllocRbTM3
24049 * @param[in] RgSchCellCb *cell
24050 * @param[in] RgSchDlSf *subFrm
24051 * @param[in] RgSchUeCb *ue
24052 * @param[in] uint32_t bo
24053 * @param[out] uint32_t *effBo
24054 * @param[in] RgSchDlHqProcCb *proc
24055 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 RETX of both TBs: always retransmit two codewords with DCI
 * format 2A / RA type 0, regardless of the current reported rank
 * (fix ccpu00123927). Success is signalled via non-zero *effBo. */
24059 static Void rgSCHCmnDlTM3RetxRetx
24066 RgSchDlHqProcCb *proc,
24067 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24071 RgSchDlRbAlloc *allocInfo;
24076 uint8_t precInfoAntIdx;
24080 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24082 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
24084 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
24085 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
24087 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
24089 if (ret == RFAILED)
24091 /* Allocation couldn't be made for Retx */
24092 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24095 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
/* Total layer count is the sum of the layers each TB used before. */
24096 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
24097 #ifdef FOUR_TX_ANTENNA
24098 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1 should
24099 * have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
24100 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
24103 proc->cwSwpEnabled = TRUE;
/* Precoding-info lookup keyed by antenna-port count (2->0, 4->1);
 * row 0 of getPrecInfoFunc is the TM3 variant. */
24106 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24107 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are not queued on the cell-wide RETX list. */
24111 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24114 /* Adding UE to allocInfo RETX Lst */
24115 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24117 /* Fill UE alloc Info scratch pad */
24118 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24119 precInfo, noTxLyrs, subFrm);
24126 * @brief This function handles Retx allocation in case of TM4 UEs
24127 * where both the TBs were NACKED previously.
24131 * Function: rgSCHCmnDlTM4RetxRetx
24132 * Purpose: If forceTD flag enabled
24133 * TD for TB1 on CW1.
24135 * DCI Frmt 2 and RA Type 0
24137 * 1 layer SM of TB1 on CW1.
24139 * RI layered SM of both TBs on 2 CWs
24140 * Add UE to cell Alloc Info.
24141 * Fill UE alloc Info.
24144 * Successful allocation is indicated by non-zero effBo value.
24146 * Invoked by: rgSCHCmnDlAllocRbTM4
24148 * @param[in] RgSchCellCb *cell
24149 * @param[in] RgSchDlSf *subFrm
24150 * @param[in] RgSchUeCb *ue
24151 * @param[in] uint32_t bo
24152 * @param[out] uint32_t *effBo
24153 * @param[in] RgSchDlHqProcCb *proc
24154 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 RETX of both TBs: schedule both CWs irrespective of RI, with DCI
 * format 2 / RA type 0. Mirrors rgSCHCmnDlTM3RetxRetx but uses the TM4
 * precoding-info row of getPrecInfoFunc. */
24158 static Void rgSCHCmnDlTM4RetxRetx
24165 RgSchDlHqProcCb *proc,
24166 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24170 RgSchDlRbAlloc *allocInfo;
24172 Bool swpFlg = FALSE;
24174 #ifdef FOUR_TX_ANTENNA
24175 uint8_t precInfoAntIdx;
24181 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24183 /* Irrespective of RI Schedule both CWs */
24184 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
24185 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
24187 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
24189 if (ret == RFAILED)
24191 /* Allocation couldn't be made for Retx */
24192 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24195 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
24197 #ifdef FOUR_TX_ANTENNA
24198 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1
24199 * should have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
24200 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
24203 proc->cwSwpEnabled = TRUE;
/* Row 1 of getPrecInfoFunc selects the TM4 precoding-info helper. */
24205 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24206 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
24210 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24213 /* Adding UE to allocInfo RETX Lst */
24214 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24216 /* Fill UE alloc Info scratch pad */
24217 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24218 precInfo, noTxLyrs, subFrm);
24226 * @brief This function determines Transmission attributes
24227 * incase of Spatial multiplexing for TX and RETX TBs.
24231 * Function: rgSCHCmnDlSMGetAttrForTxRetx
24232 * Purpose: 1. Reached here for a TM3/4 UE's HqP whose one of the TBs is
24233 * NACKED and the other TB is either NACKED or WAITING.
24234 * 2. Select the NACKED TB for RETX allocation.
24235 * 3. Allocation preference for RETX TB by mapping it to a better
24236 * CW (better in terms of efficiency).
24237 * 4. Determine the state of the other TB.
24238 * Determine if swapFlag were to be set.
24239 * Swap flag would be set if Retx TB is cross
24241 * 5. If UE has new data available for TX and if the other TB's state
24242 * is ACKED then set furtherScope as TRUE.
24244 * Invoked by: rgSCHCmnDlTM3[4]TxRetx
24246 * @param[in] RgSchUeCb *ue
24247 * @param[in] RgSchDlHqProcCb *proc
24248 * @param[out] RgSchDlHqTbCb **retxTb
24249 * @param[out] RgSchDlHqTbCb **txTb
24250 * @param[out] Bool *frthrScp
24251 * @param[out] Bool *swpFlg
/* For a TM3/4 HARQ proc with one NACKED TB: select the NACKED TB as
 * the RETX TB and the other as the (potential) new-TX TB; set
 * *frthrScp when the other TB is ACKED and the UE has new TX data.
 * Swap-flag handling for TM4 is currently disabled (see notes below),
 * cwSwpEnabled is set on the proc instead. */
24255 static Void rgSCHCmnDlSMGetAttrForTxRetx
24258 RgSchDlHqProcCb *proc,
24259 RgSchDlHqTbCb **retxTb,
24260 RgSchDlHqTbCb **txTb,
24265 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,proc->hqE->cell);
24266 RgSchDlRbAlloc *allocInfo;
24269 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24271 *retxTb = &proc->tbInfo[0];
24272 *txTb = &proc->tbInfo[1];
24273 /* TENB_BRDCM_TM4- Currently disabling swapflag for TM3/TM4, since
24274 * HqFeedback processing does not consider a swapped hq feedback */
/* Retx TB maps to CW0 but the better CW is CW1: mark for swap. */
24275 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 1))
24278 proc->cwSwpEnabled = TRUE;
24280 if (proc->tbInfo[1].state == HQ_TB_ACKED)
24282 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
24283 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
/* Mirror case: TB2 is the NACKED one. */
24288 *retxTb = &proc->tbInfo[1];
24289 *txTb = &proc->tbInfo[0];
24290 /* TENB_BRDCM_TM4 - Currently disabling swapflag for TM3/TM4, since
24291 * HqFeedback processing does not consider a swapped hq feedback */
24292 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 0))
24295 proc->cwSwpEnabled = TRUE;
24297 if (proc->tbInfo[0].state == HQ_TB_ACKED)
24299 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
24300 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
24308 * @brief Determine Precoding information for TM3 2 TX Antenna.
24312 * Function: rgSCHCmnDlTM3PrecInf2
24315 * Invoked by: rgSCHCmnDlGetAttrForTM3
24317 * @param[in] RgSchUeCb *ue
24318 * @param[in] uint8_t numTxLyrs
24319 * @param[in] Bool bothCwEnbld
/* NOTE(review): function body not visible in this extract; per the
 * header above it derives the precoding information for TM3 with
 * 2 TX antennas - confirm against the full file. */
24323 static uint8_t rgSCHCmnDlTM3PrecInf2
24337 * @brief Determine Precoding information for TM4 2 TX Antenna.
24341 * Function: rgSCHCmnDlTM4PrecInf2
24342 * Purpose: To determine a logic of deriving precoding index
24343 * information from 36.212 table 5.3.3.1.5-4
24345 * Invoked by: rgSCHCmnDlGetAttrForTM4
24347 * @param[in] RgSchUeCb *ue
24348 * @param[in] uint8_t numTxLyrs
24349 * @param[in] Bool bothCwEnbld
/* Derive the TM4 precoding-information index for 2 TX antennas from
 * the UE's reported RI/PMI (36.212 Table 5.3.3.1.5-4 per the header).
 * NOTE(review): the puschFdbkVld==TRUE branch bodies are not visible
 * in this extract - only the PMI-based fallbacks are shown. */
24353 static uint8_t rgSCHCmnDlTM4PrecInf2
24361 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Case 1: scheduled layers match the reported rank. */
24365 if (ueDl->mimoInfo.ri == numTxLyrs)
24367 if (ueDl->mimoInfo.ri == 2)
24369 /* PrecInfo corresponding to 2 CW
24371 if (ue->mimoInfo.puschFdbkVld)
24377 precIdx = ueDl->mimoInfo.pmi - 1;
24382 /* PrecInfo corresponding to 1 CW
24384 if (ue->mimoInfo.puschFdbkVld)
24390 precIdx = ueDl->mimoInfo.pmi + 1;
/* Case 2: scheduling fewer layers than the reported rank. */
24394 else if (ueDl->mimoInfo.ri > numTxLyrs)
24396 /* In case of choosing among the columns of a
24397 * precoding matrix, choose the column corresponding
24398 * to the MAX-CQI */
24399 if (ue->mimoInfo.puschFdbkVld)
24405 precIdx = (ueDl->mimoInfo.pmi- 1)* 2 + 1;
/* Case 3: more layers than the reported rank. */
24408 else /* if RI < numTxLyrs */
24410 precIdx = (ueDl->mimoInfo.pmi < 2)? 0:1;
24417 * @brief Determine Precoding information for TM3 4 TX Antenna.
24421 * Function: rgSCHCmnDlTM3PrecInf4
24422 * Purpose: To determine a logic of deriving precoding index
24423 * information from 36.212 table 5.3.3.1.5A-2
24425 * Invoked by: rgSCHCmnDlGetAttrForTM3
24427 * @param[in] RgSchUeCb *ue
24428 * @param[in] uint8_t numTxLyrs
24429 * @param[in] Bool bothCwEnbld
/* Derive the TM3 precoding-information index for 4 TX antennas
 * (36.212 Table 5.3.3.1.5A-2 per the header). NOTE(review): several
 * lines, including the 2-CW condition and the 1-CW branch body, are
 * not visible in this extract. */
24433 static uint8_t rgSCHCmnDlTM3PrecInf4
/* 2-CW transmission: index derives directly from the layer count. */
24446 precIdx = numTxLyrs - 2;
24448 else /* one 1 CW transmission */
24457 * @brief Determine Precoding information for TM4 4 TX Antenna.
24461 * Function: rgSCHCmnDlTM4PrecInf4
24462 * Purpose: To determine a logic of deriving precoding index
24463 * information from 36.212 table 5.3.3.1.5-5
24465 * Invoked by: rgSCHCmnDlGetAttrForTM4
24467 * @param[in] RgSchUeCb *ue
24468 * @param[in] uint8_t numTxLyrs
24469 * @param[in] Bool bothCwEnbld
/* Derive the TM4 precoding-information index for 4 TX antennas
 * (36.212 Table 5.3.3.1.5-5 per the header): base index is 16 when
 * PUSCH feedback is valid, otherwise the reported PMI, then offset by
 * a 17-entry stride per additional layer. */
24473 static uint8_t rgSCHCmnDlTM4PrecInf4
24481 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24482 uint8_t precInfoBaseIdx, precIdx;
24485 precInfoBaseIdx = (ue->mimoInfo.puschFdbkVld)? (16):
24486 (ueDl->mimoInfo.pmi);
/* 2-CW transmission path. */
24489 precIdx = precInfoBaseIdx + (numTxLyrs-2)*17;
24491 else /* one 1 CW transmission */
24493 precInfoBaseIdx += 1;
24494 precIdx = precInfoBaseIdx + (numTxLyrs-1)*17;
24501 * @brief This function determines Transmission attributes
24502 * incase of TM3 scheduling.
24506 * Function: rgSCHCmnDlGetAttrForTM3
24507 * Purpose: Determine retx TB and tx TB based on TB states.
24508 * If forceTD enabled
24509 * perform only retx TB allocation.
24510 * If retxTB == TB2 then DCI Frmt = 2A, RA Type = 0.
24511 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
24513 * perform retxTB allocation on CW1.
24515 * Determine further Scope and Swap Flag attributes
24516 * assuming a 2 CW transmission of RetxTB and new Tx TB.
24517 * If no further scope for new TX allocation
24518 * Allocate only retx TB using 2 layers if
24519 * this TB was previously transmitted using 2 layers AND
24520 * number of Tx antenna ports == 4.
24521 * otherwise do single layer precoding.
24523 * Invoked by: rgSCHCmnDlTM3TxRetx
24525 * @param[in] RgSchUeCb *ue
24526 * @param[in] RgSchDlHqProcCb *proc
24527 * @param[out] uint8_t *numTxLyrs
24528 * @param[out] Bool *isTraDiv
24529 * @param[out] uint8_t *prcdngInf
24530 * @param[out] uint8_t *raType
/* Determine TM3 TX attributes for a mixed TX+RETX HARQ proc: picks the
 * RETX/TX TBs, DCI format, RA type, layer count and precoding info.
 * Falls back to transmit diversity for SPS procs, forced-TD UEs and
 * LAA SCells (LAA check compiled conditionally - surrounding #ifdef
 * lines are not visible in this extract). */
24534 static Void rgSCHCmnDlGetAttrForTM3
24538 RgSchDlHqProcCb *proc,
24539 uint8_t *numTxLyrs,
24540 TfuDciFormat *dciFrmt,
24541 uint8_t *prcdngInf,
24542 RgSchDlHqTbCb **retxTb,
24543 RgSchDlHqTbCb **txTb,
24549 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24550 uint8_t precInfoAntIdx;
24553 /* Avoiding Tx-Retx for LAA cell as firstSchedTime is associated with
24555 /* Integration_fix: SPS Proc shall always have only one Cw */
24557 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
24558 (ueDl->mimoInfo.forceTD))
24560 ||(TRUE == rgSCHLaaSCellEnabled(cell))
24564 if ((ueDl->mimoInfo.forceTD)
24566 || (TRUE == rgSCHLaaSCellEnabled(cell))
24571 /* Transmit Diversity. Format based on dlfsEnabled
24572 * No further scope */
24573 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24575 *retxTb = &proc->tbInfo[0];
24576 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* RETX of TB2 must keep format 2A so the TB index is signalled. */
24580 *retxTb = &proc->tbInfo[1];
24581 *dciFrmt = TFU_DCI_FORMAT_2A;
24582 *raType = RG_SCH_CMN_RA_TYPE0;
24590 /* Determine the 2 TB transmission attributes */
24591 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
24595 /* Prefer allocation of RETX TB over 2 layers rather than combining
24596 * it with a new TX. */
24597 if ((ueDl->mimoInfo.ri == 2)
24598 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
24600 /* Allocate TB on CW1, using 2 Lyrs,
24601 * Format 2, precoding accordingly */
/* Further scope exists: total layers = RETX TB layers + layers of the
 * other (better) CW; enforce the 36.211 CW-to-layer mapping via swap
 * when the 3-layer split would come out reversed. */
24607 *numTxLyrs= ((*retxTb)->numLyrs + ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)].noLyr);
24609 if((*retxTb)->tbIdx == 0 && ((*retxTb)->numLyrs == 2 ) && *numTxLyrs ==3)
24612 proc->cwSwpEnabled = TRUE;
24614 else if((*retxTb)->tbIdx == 1 && ((*retxTb)->numLyrs == 1) && *numTxLyrs ==3)
24617 proc->cwSwpEnabled = TRUE;
24621 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24622 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])\
24623 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
24624 *dciFrmt = TFU_DCI_FORMAT_2A;
24625 *raType = RG_SCH_CMN_RA_TYPE0;
24627 else /* frthrScp == FALSE */
24629 if (cell->numTxAntPorts == 2)
24631 /* Transmit Diversity */
24633 if ((*retxTb)->tbIdx == 0)
24635 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24639 /* If retxTB is TB2 then use format 2A */
24640 *dciFrmt = TFU_DCI_FORMAT_2A;
24641 *raType = RG_SCH_CMN_RA_TYPE0;
24646 else /* NumAntPorts == 4 */
24648 if ((*retxTb)->numLyrs == 2)
24650 /* Allocate TB on CW1, using 2 Lyrs,
24651 * Format 2A, precoding accordingly */
24653 *dciFrmt = TFU_DCI_FORMAT_2A;
24654 *raType = RG_SCH_CMN_RA_TYPE0;
24655 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24656 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, *numTxLyrs, *frthrScp);
24661 /* Transmit Diversity */
24663 if ((*retxTb)->tbIdx == 0)
24665 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24669 /* If retxTB is TB2 then use format 2A */
24670 *dciFrmt = TFU_DCI_FORMAT_2A;
24671 *raType = RG_SCH_CMN_RA_TYPE0;
24685 * @brief This function determines Transmission attributes
24686 * incase of TM4 scheduling.
24690 * Function: rgSCHCmnDlGetAttrForTM4
24691 * Purpose: Determine retx TB and tx TB based on TB states.
24692 * If forceTD enabled
24693 * perform only retx TB allocation.
24694 * If retxTB == TB2 then DCI Frmt = 2, RA Type = 0.
24695 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
24697 * perform retxTB allocation on CW1.
24699 * Determine further Scope and Swap Flag attributes
24700 * assuming a 2 CW transmission of RetxTB and new Tx TB.
24701 * If no further scope for new TX allocation
24702 * Allocate only retx TB using 2 layers if
24703 * this TB was previously transmitted using 2 layers AND
24704 * number of Tx antenna ports == 4.
24705 * otherwise do single layer precoding.
24707 * Invoked by: rgSCHCmnDlTM4TxRetx
24709 * @param[in] RgSchUeCb *ue
24710 * @param[in] RgSchDlHqProcCb *proc
24711 * @param[out] uint8_t *numTxLyrs
24712 * @param[out] Bool *isTraDiv
24713 * @param[out] uint8_t *prcdngInf
24714 * @param[out] uint8_t *raType
/* Determine TM4 TX attributes for a mixed TX+RETX HARQ proc. Parallel
 * to rgSCHCmnDlGetAttrForTM3 but uses DCI format 2 and the TM4 row
 * (index 1) of getPrecInfoFunc; additionally special-cases RI==1 with
 * single-layer precoding (prcdngInf = 0). */
24718 static Void rgSCHCmnDlGetAttrForTM4
24722 RgSchDlHqProcCb *proc,
24723 uint8_t *numTxLyrs,
24724 TfuDciFormat *dciFrmt,
24725 uint8_t *prcdngInf,
24726 RgSchDlHqTbCb **retxTb,
24727 RgSchDlHqTbCb **txTb,
24733 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24734 uint8_t precInfoAntIdx;
24738 /* Integration_fix: SPS Proc shall always have only one Cw */
24740 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
24741 (ueDl->mimoInfo.forceTD))
24743 ||(TRUE == rgSCHLaaSCellEnabled(cell))
24747 if ((ueDl->mimoInfo.forceTD)
24749 || (TRUE == rgSCHLaaSCellEnabled(cell))
24754 /* Transmit Diversity. Format based on dlfsEnabled
24755 * No further scope */
24756 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24758 *retxTb = &proc->tbInfo[0];
24759 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* RETX of TB2 keeps format 2 so the TB index is signalled. */
24763 *retxTb = &proc->tbInfo[1];
24764 *dciFrmt = TFU_DCI_FORMAT_2;
24765 *raType = RG_SCH_CMN_RA_TYPE0;
/* Rank-1 UE: only the NACKED TB is retransmitted, single layer. */
24773 if (ueDl->mimoInfo.ri == 1)
24775 /* single layer precoding. Format 2.
24776 * No further scope */
24777 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24779 *retxTb = &proc->tbInfo[0];
24783 *retxTb = &proc->tbInfo[1];
24786 *dciFrmt = TFU_DCI_FORMAT_2;
24787 *raType = RG_SCH_CMN_RA_TYPE0;
24789 *prcdngInf = 0; /*When RI= 1*/
24793 /* Determine the 2 TB transmission attributes */
24794 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
24796 *dciFrmt = TFU_DCI_FORMAT_2;
24797 *raType = RG_SCH_CMN_RA_TYPE0;
24800 /* Prefer allocation of RETX TB over 2 layers rather than combining
24801 * it with a new TX. */
24802 if ((ueDl->mimoInfo.ri == 2)
24803 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
24805 /* Allocate TB on CW1, using 2 Lyrs,
24806 * Format 2, precoding accordingly */
24810 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24811 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])
24812 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
24814 else /* frthrScp == FALSE */
24816 if (cell->numTxAntPorts == 2)
24818 /* single layer precoding. Format 2. */
24820 *prcdngInf = (getPrecInfoFunc[1][cell->numTxAntPorts/2 - 1])\
24821 (cell, ue, *numTxLyrs, *frthrScp);
24824 else /* NumAntPorts == 4 */
24826 if ((*retxTb)->numLyrs == 2)
24828 /* Allocate TB on CW1, using 2 Lyrs,
24829 * Format 2, precoding accordingly */
24831 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24832 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
24833 (cell, ue, *numTxLyrs, *frthrScp);
24838 /* Allocate TB with 1 lyr precoding,
24839 * Format 2, precoding info accordingly */
24841 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24842 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
24843 (cell, ue, *numTxLyrs, *frthrScp);
24854 * @brief This function handles Retx allocation in case of TM3 UEs
24855 * where previously one of the TBs was NACKED and the other
24856 * TB is either ACKED/WAITING.
24860 * Function: rgSCHCmnDlTM3TxRetx
24861 * Purpose: Determine the TX attributes for TM3 TxRetx Allocation.
24862 * If further Scope for New Tx Allocation on other TB
24863 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
24864 * Add UE to cell wide RetxTx List.
24866 * Perform only RETX alloc'n on CW1.
24867 * Add UE to cell wide Retx List.
24869 * effBo is set to a non-zero value if allocation is
24872 * Invoked by: rgSCHCmnDlAllocRbTM3
24874 * @param[in] RgSchCellCb *cell
24875 * @param[in] RgSchDlSf *subFrm
24876 * @param[in] RgSchUeCb *ue
24877 * @param[in] uint32_t bo
24878 * @param[out] uint32_t *effBo
24879 * @param[in] RgSchDlHqProcCb *proc
24880 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 mixed TX+RETX: fetch TX attributes, then either allocate 2 CWs
 * (RETX on one, fresh TX on the other) when further scope exists, or
 * only the RETX TB on CW1 otherwise. Non-zero *effBo signals success;
 * RFAILED paths park the proc on the non-scheduled RETX list. */
24884 static Void rgSCHCmnDlTM3TxRetx
24891 RgSchDlHqProcCb *proc,
24892 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24896 RgSchDlRbAlloc *allocInfo;
24898 RgSchDlHqTbCb *retxTb, *txTb;
24907 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24910 /* Determine the transmission attributes */
24911 rgSCHCmnDlGetAttrForTM3(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
24912 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
24913 &allocInfo->raType);
/* frthrScp branch: 2-CW TX+RETX allocation (the if-condition line is
 * not visible in this extract). */
24918 DU_LOG("\nDEBUG --> SCH : TX RETX called from proc %d cell %d \n",proc->procId, cell->cellId);
24920 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
24922 if (ret == RFAILED)
24924 /* Allocation couldn't be made for Retx */
24925 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24928 /* Adding UE to RbAllocInfo RETX-TX Lst */
24929 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: RETX-only allocation on a single CW. */
24933 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
24934 numTxLyrs, &numRb, effBo);
24935 if (ret == RFAILED)
24937 /* Allocation couldn't be made for Retx */
24938 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24942 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24945 /* Adding UE to allocInfo RETX Lst */
24946 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24949 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24950 prcdngInf, numTxLyrs, subFrm);
24957 * @brief This function handles Retx allocation in case of TM4 UEs
24958 * where previously one of the TBs was NACKED and the other
24959 * TB is either ACKED/WAITING.
24963 * Function: rgSCHCmnDlTM4TxRetx
24964 * Purpose: Determine the TX attributes for TM4 TxRetx Allocation.
24965 * If further Scope for New Tx Allocation on other TB
24966 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
24967 * Add UE to cell wide RetxTx List.
24969 * Perform only RETX alloc'n on CW1.
24970 * Add UE to cell wide Retx List.
24972 * effBo is set to a non-zero value if allocation is
24975 * Invoked by: rgSCHCmnDlAllocRbTM4
24977 * @param[in] RgSchCellCb *cell
24978 * @param[in] RgSchDlSf *subFrm
24979 * @param[in] RgSchUeCb *ue
24980 * @param[in] uint32_t bo
24981 * @param[out] uint32_t *effBo
24982 * @param[in] RgSchDlHqProcCb *proc
24983 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 mixed TX+RETX: same structure as rgSCHCmnDlTM3TxRetx but the
 * attributes come from rgSCHCmnDlGetAttrForTM4. Non-zero *effBo
 * signals success; failures go to the non-scheduled RETX list. */
24987 static Void rgSCHCmnDlTM4TxRetx
24994 RgSchDlHqProcCb *proc,
24995 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24999 RgSchDlRbAlloc *allocInfo;
25001 RgSchDlHqTbCb *retxTb, *txTb;
25009 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25012 /* Determine the transmission attributes */
25013 rgSCHCmnDlGetAttrForTM4(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
25014 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
25015 &allocInfo->raType);
/* frthrScp branch: 2-CW TX+RETX allocation (the if-condition line is
 * not visible in this extract). */
25019 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
25021 if (ret == RFAILED)
25023 /* Fix : syed If TxRetx allocation failed then add the UE along
25024 * with the proc to the nonSchdTxRetxUeLst and let spfc scheduler
25025 * take care of it during finalization. */
25026 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25029 /* Adding UE to RbAllocInfo RETX-TX Lst */
25030 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: RETX-only allocation on a single CW. */
25034 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
25035 numTxLyrs, &numRb, effBo);
25036 if (ret == RFAILED)
25038 /* Allocation couldn't be made for Retx */
25039 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25043 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25046 /* Adding UE to allocInfo RETX Lst */
25047 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
25050 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
25051 prcdngInf, numTxLyrs, subFrm)
25058 * @brief This function handles fresh TX allocation in case of TM3 UEs
25059 * where both the TBs are free for new transmission scheduling.
25064 * Function: rgSCHCmnDlTM3TxTx
25065 * Purpose: Reached here for a TM3 UE's HqP's fresh allocation
25066 * where both the TBs are free for TX scheduling.
25067 * If forceTD flag is set
25068 * perform TD on CW1 with TB1.
25073 * RI layered precoding 2 TB on 2 CW.
25074 * Set precoding info.
25075 * Add UE to cellAllocInfo.
25076 * Fill ueAllocInfo.
25078 * effBo is set to a non-zero value if allocation is
25081 * Invoked by: rgSCHCmnDlAllocRbTM3
25083 * @param[in] RgSchCellCb *cell
25084 * @param[in] RgSchDlSf *subFrm
25085 * @param[in] RgSchUeCb *ue
25086 * @param[in] uint32_t bo
25087 * @param[out] uint32_t *effBo
25088 * @param[in] RgSchDlHqProcCb *proc
25089 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3: fresh TX allocation where both TBs of "proc" are free.
 * Forced transmit diversity (or an SPS HARQ proc, which uses only one CW)
 * takes the 1-CW path; otherwise spatial multiplexing on 2 CWs with DCI
 * format 2A and RA type 0. */
25093 static Void rgSCHCmnDlTM3TxTx
25100 RgSchDlHqProcCb *proc,
25101 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25104 RgSchCmnDlUe *ueDl;
25105 RgSchDlRbAlloc *allocInfo;
25110 uint8_t precInfoAntIdx;
25114 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25115 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25117 /* Integration_fix: SPS Proc shall always have only one Cw */
25119 #ifdef FOUR_TX_ANTENNA
25120 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25121 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
25123 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25124 (ueDl->mimoInfo.forceTD))
25127 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
25130 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
25131 &allocInfo->raType);
25132 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25133 bo, &numRb, effBo);
25134 if (ret == RFAILED)
25136 /* If allocation couldn't be made then return */
25140 precInfo = 0; /* TD */
25142 else /* Precoding */
25144 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
25145 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
25147 /* Spatial Multiplexing using 2 CWs */
25148 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
25149 if (ret == RFAILED)
25151 /* If allocation couldn't be made then return */
/* Layer count follows the reported rank; precoding info is looked up per
 * antenna-port count (row 0 of getPrecInfoFunc selects the TM3 variant). */
25154 noTxLyrs = ueDl->mimoInfo.ri;
25155 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
25156 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, getPrecInfoFunc[0], precInfoAntIdx);
25157 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
25161 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25164 /* Adding UE to RbAllocInfo TX Lst */
25165 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25167 /* Fill UE allocInfo scratch pad */
25168 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
25169 precInfo, noTxLyrs, subFrm);
25176 * @brief This function handles fresh TX allocation in case of TM4 UEs
25177 * where previously both the TBs were ACKED (i.e. both free for TX)
25182 * Function: rgSCHCmnDlTM4TxTx
25183 * Purpose: Reached here for a TM4 UE's HqP's fresh allocation
25184 * where both the TBs are free for TX scheduling.
25185 * If forceTD flag is set
25186 * perform TD on CW1 with TB1.
25192 * Single layer precoding of TB1 on CW1.
25193 * Set precoding info.
25195 * RI layered precoding 2 TB on 2 CW.
25196 * Set precoding info.
25197 * Add UE to cellAllocInfo.
25198 * Fill ueAllocInfo.
25200 * effBo is set to a non-zero value if allocation is
25203 * Invoked by: rgSCHCmnDlAllocRbTM4
25205 * @param[in] RgSchCellCb *cell
25206 * @param[in] RgSchDlSf *subFrm
25207 * @param[in] RgSchUeCb *ue
25208 * @param[in] uint32_t bo
25209 * @param[out] uint32_t *effBo
25210 * @param[in] RgSchDlHqProcCb *proc
25211 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4: fresh TX allocation where both TBs of "proc" are free.
 * Forced TD (or SPS proc) -> 1-CW TD path; otherwise DCI format 2 with
 * RA type 0, using single-layer SM when RI == 1 or 2-CW SM when RI > 1. */
25215 static Void rgSCHCmnDlTM4TxTx
25222 RgSchDlHqProcCb *proc,
25223 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25226 RgSchCmnDlUe *ueDl;
25227 RgSchDlRbAlloc *allocInfo;
25231 uint8_t precInfoAntIdx;
25236 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25237 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25239 /* Integration_fix: SPS Proc shall always have only one Cw */
25241 #ifdef FOUR_TX_ANTENNA
25242 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25243 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
25245 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25246 (ueDl->mimoInfo.forceTD))
25249 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
25252 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
25253 &allocInfo->raType);
25255 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25256 bo, &numRb, effBo);
25257 if (ret == RFAILED)
25259 /* If allocation couldn't be made then return */
25263 precInfo = 0; /* TD */
25265 else /* Precoding */
25267 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
25268 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
25270 if (ueDl->mimoInfo.ri == 1)
25272 /* Single Layer SM using FORMAT 2 */
25273 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25274 bo, &numRb, effBo);
25275 if (ret == RFAILED)
25277 /* If allocation couldn't be made then return */
25281 precInfo = 0; /* PrecInfo as 0 for RI=1*/
25285 /* Spatial Multiplexing using 2 CWs */
25286 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
25287 if (ret == RFAILED)
25289 /* If allocation couldn't be made then return */
/* Layer count follows the reported rank; precoding info is looked up per
 * antenna-port count (row 1 of getPrecInfoFunc selects the TM4 variant). */
25292 noTxLyrs = ueDl->mimoInfo.ri;
25293 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
25294 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
25300 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25303 /* Adding UE to RbAllocInfo TX Lst */
25304 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25307 /* Fill UE allocInfo scratch pad */
25308 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
25309 precInfo, noTxLyrs, subFrm);
25316 * @brief This function determines the RBs and Bytes required for BO
25317 * transmission for UEs configured with TM 4.
25321 * Function: rgSCHCmnDlAllocTxRbTM4
25322 * Purpose: Invokes the functionality particular to the
25323 * current state of the TBs of the "proc".
25325 * Reference Parameter effBo is filled with alloced bytes.
25326 * Returns RFAILED if BO not satisfied at all.
25328 * Invoked by: rgSCHCmnDlAllocTxRb
25330 * @param[in] RgSchCellCb *cell
25331 * @param[in] RgSchDlSf *subFrm
25332 * @param[in] RgSchUeCb *ue
25333 * @param[in] uint32_t bo
25334 * @param[out] uint32_t *effBo
25335 * @param[in] RgSchDlHqProcCb *proc
25336 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Dispatch entry for TM4 fresh-TX RB allocation: both TBs of "proc" are
 * free here, so delegate straight to the two-TB TX allocator. Signature
 * matches the other dlAllocTxRbFunc[] entries. */
25340 static Void rgSCHCmnDlAllocTxRbTM4
25347 RgSchDlHqProcCb *proc,
25348 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25352 /* Both TBs free for TX allocation */
25353 rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo,\
25354 proc, cellWdAllocInfo);
25361 * @brief This function determines the RBs and Bytes required for BO
25362 * retransmission for UEs configured with TM 4.
25366 * Function: rgSCHCmnDlAllocRetxRbTM4
25367 * Purpose: Invokes the functionality particular to the
25368 * current state of the TBs of the "proc".
25370 * Reference Parameter effBo is filled with alloced bytes.
25371 * Returns RFAILED if BO not satisfied at all.
25373 * Invoked by: rgSCHCmnDlAllocRetxRb
25375 * @param[in] RgSchCellCb *cell
25376 * @param[in] RgSchDlSf *subFrm
25377 * @param[in] RgSchUeCb *ue
25378 * @param[in] uint32_t bo
25379 * @param[out] uint32_t *effBo
25380 * @param[in] RgSchDlHqProcCb *proc
25381 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Dispatch entry for TM4 RETX RB allocation: both TBs NACKED -> pure
 * RETX-RETX path, else the mixed TX+RETX path. */
25385 static Void rgSCHCmnDlAllocRetxRbTM4
25392 RgSchDlHqProcCb *proc,
25393 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25397 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
25398 (proc->tbInfo[1].state == HQ_TB_NACKED))
25400 /* Both TBs require RETX allocation */
25401 rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo,\
25402 proc, cellWdAllocInfo);
25406 /* One of the TBs needs RETX allocation. The other TB may or may not
25407 * be available for new TX allocation. */
25408 rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo,\
25409 proc, cellWdAllocInfo);
25418 * @brief This function determines the RBs and Bytes required for BO
25419 * transmission for UEs configured with TM 5.
25423 * Function: rgSCHCmnDlAllocTxRbTM5
25426 * Reference Parameter effBo is filled with alloced bytes.
25427 * Returns RFAILED if BO not satisfied at all.
25429 * Invoked by: rgSCHCmnDlAllocTxRb
25431 * @param[in] RgSchCellCb *cell
25432 * @param[in] RgSchDlSf *subFrm
25433 * @param[in] RgSchUeCb *ue
25434 * @param[in] uint32_t bo
25435 * @param[out] uint32_t *effBo
25436 * @param[in] RgSchDlHqProcCb *proc
25437 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM5 is not supported by this scheduler: reaching this TX entry is an
 * error, logged only in debug error-class builds. No allocation is made. */
25441 static Void rgSCHCmnDlAllocTxRbTM5
25448 RgSchDlHqProcCb *proc,
25449 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25452 #if (ERRCLASS & ERRCLS_DEBUG)
25453 DU_LOG("\nERROR --> SCH : Invalid TM 5 for CRNTI:%d",ue->ueId);
25460 * @brief This function determines the RBs and Bytes required for BO
25461 * retransmission for UEs configured with TM 5.
25465 * Function: rgSCHCmnDlAllocRetxRbTM5
25468 * Reference Parameter effBo is filled with alloced bytes.
25469 * Returns RFAILED if BO not satisfied at all.
25471 * Invoked by: rgSCHCmnDlAllocRetxRb
25473 * @param[in] RgSchCellCb *cell
25474 * @param[in] RgSchDlSf *subFrm
25475 * @param[in] RgSchUeCb *ue
25476 * @param[in] uint32_t bo
25477 * @param[out] uint32_t *effBo
25478 * @param[in] RgSchDlHqProcCb *proc
25479 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM5 is not supported by this scheduler: reaching this RETX entry is an
 * error, logged only in debug error-class builds. No allocation is made. */
25483 static Void rgSCHCmnDlAllocRetxRbTM5
25490 RgSchDlHqProcCb *proc,
25491 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25494 #if (ERRCLASS & ERRCLS_DEBUG)
25495 DU_LOG("\nERROR --> SCH : Invalid TM 5 for CRNTI:%d",ue->ueId);
25503 * @brief This function determines the RBs and Bytes required for BO
25504 * transmission for UEs configured with TM 6.
25508 * Function: rgSCHCmnDlAllocTxRbTM6
25511 * Reference Parameter effBo is filled with alloced bytes.
25512 * Returns RFAILED if BO not satisfied at all.
25514 * Invoked by: rgSCHCmnDlAllocTxRb
25516 * @param[in] RgSchCellCb *cell
25517 * @param[in] RgSchDlSf *subFrm
25518 * @param[in] RgSchUeCb *ue
25519 * @param[in] uint32_t bo
25520 * @param[out] uint32_t *effBo
25521 * @param[in] RgSchDlHqProcCb *proc
25522 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM6 fresh-TX allocation (single TB on one CW). Under forced transmit
 * diversity use DCI 1A / RA type 2; otherwise DCI 1B with the PMI and the
 * PUSCH-feedback-valid bit packed into precIdxInfo. */
25526 static Void rgSCHCmnDlAllocTxRbTM6
25533 RgSchDlHqProcCb *proc,
25534 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25537 RgSchDlRbAlloc *allocInfo;
25538 RgSchCmnDlUe *ueDl;
25544 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25545 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25547 if (ueDl->mimoInfo.forceTD)
25549 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25550 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25554 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
25555 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25556 /* Fill precoding information for FORMAT 1B */
25557 /* First 4 least significant bits to indicate PMI.
25558 * 4th most significant corresponds to pmi Confirmation. */
25560 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
25561 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
25563 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25564 bo, &numRb, effBo);
25565 if (ret == RFAILED)
25567 /* If allocation couldn't be made then return */
/* SPS HARQ procs are tracked separately; only non-SPS procs join the
 * cell-wide TX list. */
25572 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25575 /* Adding UE to RbAllocInfo TX Lst */
25576 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25578 /* Fill UE alloc Info */
25579 allocInfo->rbsReq = numRb;
25580 allocInfo->dlSf = subFrm;
25586 * @brief This function determines the RBs and Bytes required for BO
25587 * retransmission for UEs configured with TM 6.
25591 * Function: rgSCHCmnDlAllocRetxRbTM6
25594 * Reference Parameter effBo is filled with alloced bytes.
25595 * Returns RFAILED if BO not satisfied at all.
25597 * Invoked by: rgSCHCmnDlAllocRetxRb
25599 * @param[in] RgSchCellCb *cell
25600 * @param[in] RgSchDlSf *subFrm
25601 * @param[in] RgSchUeCb *ue
25602 * @param[in] uint32_t bo
25603 * @param[out] uint32_t *effBo
25604 * @param[in] RgSchDlHqProcCb *proc
25605 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM6 RETX allocation (single TB on one CW). DCI format / RA type and
 * 1B precoding bits are set exactly as in the TX path; a failed RETX
 * allocation parks the UE+proc on the non-scheduled RETX list. */
25609 static Void rgSCHCmnDlAllocRetxRbTM6
25616 RgSchDlHqProcCb *proc,
25617 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25620 RgSchDlRbAlloc *allocInfo;
25621 RgSchCmnDlUe *ueDl;
25627 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25628 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25630 if (ueDl->mimoInfo.forceTD)
25632 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25633 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25637 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
25638 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25639 /* Fill precoding information for FORMAT 1B */
25640 /* First 4 least significant bits to indicate PMI.
25641 * 4th most significant corresponds to pmi Confirmation. */
25643 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
25644 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
25647 /* Get the Allocation in terms of RBs that are required for
25648 * this retx of TB1 */
25649 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
25651 if (ret == RFAILED)
25653 /* Allocation couldn't be made for Retx */
25654 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25657 /* Adding UE to allocInfo RETX Lst */
25658 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
25659 /* Fill UE alloc Info */
25660 allocInfo->rbsReq = numRb;
25661 allocInfo->dlSf = subFrm;
25667 * @brief This function determines the RBs and Bytes required for BO
25668 * transmission for UEs configured with TM 7.
25672 * Function: rgSCHCmnDlAllocTxRbTM7
25675 * Reference Parameter effBo is filled with alloced bytes.
25676 * Returns RFAILED if BO not satisfied at all.
25678 * Invoked by: rgSCHCmnDlAllocTxRb
25680 * @param[in] RgSchCellCb *cell
25681 * @param[in] RgSchDlSf *subFrm
25682 * @param[in] RgSchUeCb *ue
25683 * @param[in] uint32_t bo
25684 * @param[out] uint32_t *effBo
25685 * @param[in] RgSchDlHqProcCb *proc
25686 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM7 fresh-TX allocation: delegates to the generic single-TB /
 * single-CW TX allocator. */
25690 static Void rgSCHCmnDlAllocTxRbTM7
25697 RgSchDlHqProcCb *proc,
25698 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25701 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
25707 * @brief This function determines the RBs and Bytes required for BO
25708 * retransmission for UEs configured with TM 7.
25712 * Function: rgSCHCmnDlAllocRetxRbTM7
25715 * Reference Parameter effBo is filled with alloced bytes.
25716 * Returns RFAILED if BO not satisfied at all.
25718 * Invoked by: rgSCHCmnDlAllocRetxRb
25720 * @param[in] RgSchCellCb *cell
25721 * @param[in] RgSchDlSf *subFrm
25722 * @param[in] RgSchUeCb *ue
25723 * @param[in] uint32_t bo
25724 * @param[out] uint32_t *effBo
25725 * @param[in] RgSchDlHqProcCb *proc
25726 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM7 RETX allocation: delegates to the generic single-TB /
 * single-CW RETX allocator. */
25730 static Void rgSCHCmnDlAllocRetxRbTM7
25737 RgSchDlHqProcCb *proc,
25738 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25741 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
25747 * @brief This function invokes the TM specific DL TX RB Allocation routine.
25751 * Function: rgSCHCmnDlAllocTxRb
25752 * Purpose: This function invokes the TM specific
25753 * DL TX RB Allocation routine.
25755 * Invoked by: Specific Schedulers
25757 * @param[in] RgSchCellCb *cell
25758 * @param[in] RgSchDlSf *subFrm
25759 * @param[in] RgSchUeCb *ue
25760 * @param[in] uint32_t bo
25761 * @param[out] uint32_t *effBo
25762 * @param[in] RgSchDlHqProcCb *proc
25763 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Entry point used by the specific schedulers for fresh-TX RB allocation.
 * Resets the per-TTI aggregate (ue->dl.aggTbBits) when a new TTI starts,
 * dispatches to the TM-specific allocator via dlAllocTxRbFunc[txMode - 1],
 * then adds the newly scheduled amount (converted to bits) to aggTbBits. */
25767 S16 rgSCHCmnDlAllocTxRb
25774 RgSchDlHqProcCb *proc,
25775 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25778 uint32_t newSchBits = 0;
25779 uint32_t prevSchBits = 0;
25780 RgSchDlRbAlloc *allocInfo;
/* New TTI for this UE: clear the amount aggregated in the previous TTI. */
25783 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
25785 ue->dl.aggTbBits = 0;
25789 /* Calculate totals bits previously allocated */
25790 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25791 if (allocInfo->tbInfo[0].schdlngForTb)
25793 prevSchBits += allocInfo->tbInfo[0].bytesReq;
25795 if (allocInfo->tbInfo[1].schdlngForTb)
25797 prevSchBits += allocInfo->tbInfo[1].bytesReq;
25800 /* Call TM specific RB allocation routine */
25801 (dlAllocTxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
25802 proc, cellWdAllocInfo);
25806 /* Calculate totals bits newly allocated */
25807 if (allocInfo->tbInfo[0].schdlngForTb)
25809 newSchBits += allocInfo->tbInfo[0].bytesReq;
25811 if (allocInfo->tbInfo[1].schdlngForTb)
25813 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* Note: despite their names, prevSchBits/newSchBits accumulate bytesReq
 * (bytes); the delta is converted to bits by the *8 below. */
25815 if (newSchBits > prevSchBits)
25817 ue->dl.aggTbBits += ((newSchBits - prevSchBits) * 8);
25818 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
25825 /* DwPTS Scheduling Changes Start */
25828 * @brief Retransmit decision for TDD. Retx is avoided in below cases
25829 * 1) DL Sf -> Spl Sf
25830 * 2) DL SF -> DL SF 0
25834 * Function: rgSCHCmnRetxAvoidTdd
25835 * Purpose: Avoid allocating RETX for cases 1, 2
25837 * Invoked by: rgSCHCmnRetxAvoidTdd
25839 * @param[in] RgSchDlSf *curSf
25840 * @param[in] RgSchCellCb *cell
25841 * @param[in] RgSchDlHqProcCb *proc
/* TDD: decide whether RETX of "proc" must be avoided on curSf because the
 * original TX used a subframe type with more REs than curSf offers
 * (ascending capacity: special SF < DL SF 0 < normal DL SF). */
25845 Bool rgSCHCmnRetxAvoidTdd
25849 RgSchDlHqProcCb *proc
25852 RgSchTddSfType txSfType = 0;
25855 /* Get the RBs of TB that will be retransmitted */
25856 if (proc->tbInfo[0].state == HQ_TB_NACKED)
25858 txSfType = proc->tbInfo[0].sfType;
25860 #ifdef XEON_SPECIFIC_CHANGES
25861 #ifndef XEON_TDD_SPCL
25862 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
25863 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
25870 if (proc->tbInfo[1].state == HQ_TB_NACKED)
25872 /* Select the TxSf with the highest num of possible REs
25873 * In ascending order -> 1) SPL SF 2) DL_SF_0 3) DL_SF */
25874 txSfType = RGSCH_MAX(txSfType, proc->tbInfo[1].sfType);
25876 #ifdef XEON_SPECIFIC_CHANGES
25877 #ifndef XEON_TDD_SPCL
25878 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
25879 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
/* RETX subframe must offer at least the capacity of the original TX SF. */
25887 if (txSfType > curSf->sfType)
25898 /* DwPTS Scheduling Changes End */
25901 * @brief Avoid allocating RETX incase of collision
25902 * with reserved resources for BCH/PSS/SSS occasions.
25906 * Function: rgSCHCmnRetxAllocAvoid
25907 * Purpose: Avoid allocating RETX incase of collision
25908 * with reserved resources for BCH/PSS/SSS occasions
25910 * Invoked by: rgSCHCmnDlAllocRetxRb
25912 * @param[in] RgSchDlSf *subFrm
25913 * @param[in] RgSchUeCb *ue
25914 * @param[in] RgSchDlHqProcCb *proc
/* Decide whether a RETX grant of the NACKED TB's size would collide with
 * the RB region reserved for BCH/PSS/SSS (cell->pbchRbStart..pbchRbEnd)
 * in the subframes where those signals are transmitted. */
25918 Bool rgSCHCmnRetxAllocAvoid
25922 RgSchDlHqProcCb *proc
25928 if (proc->tbInfo[0].state == HQ_TB_NACKED)
25930 reqRbs = proc->tbInfo[0].dlGrnt.numRb;
25934 reqRbs = proc->tbInfo[1].dlGrnt.numRb;
25936 /* Consider the dlGrnt.numRb of the Retransmitting proc->tbInfo
25937 * and current available RBs to determine if this RETX TB
25938 * will collide with the BCH/PSS/SSS occasion */
/* sfNum % 5 == 0 -> subframe 0 or 5, the BCH/PSS/SSS subframes */
25939 if (subFrm->sfNum % 5 == 0)
25941 if ((subFrm->bwAssigned < cell->pbchRbEnd) &&
25942 (((subFrm->bwAssigned + reqRbs) - cell->pbchRbStart) > 0))
25954 * @brief This function invokes the TM specific DL RETX RB Allocation routine.
25958 * Function: rgSCHCmnDlAllocRetxRb
25959 * Purpose: This function invokes the TM specific
25960 * DL RETX RB Allocation routine.
25962 * Invoked by: Specific Schedulers
25964 * @param[in] RgSchCellCb *cell
25965 * @param[in] RgSchDlSf *subFrm
25966 * @param[in] RgSchUeCb *ue
25967 * @param[in] uint32_t bo
25968 * @param[out] uint32_t *effBo
25969 * @param[in] RgSchDlHqProcCb *proc
25970 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Entry point used by the specific schedulers for RETX RB allocation.
 * Resets the per-TTI aggregate on a new TTI, bails out if the subframe
 * bandwidth is already exhausted, dispatches to the TM-specific RETX
 * allocator, then accounts the newly scheduled amount into aggTbBits. */
25974 S16 rgSCHCmnDlAllocRetxRb
25981 RgSchDlHqProcCb *proc,
25982 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25985 uint32_t newSchBits = 0;
25986 RgSchDlRbAlloc *allocInfo;
/* New TTI for this UE: clear the amount aggregated in the previous TTI. */
25989 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
25991 ue->dl.aggTbBits = 0;
25995 /* Check for DL BW exhaustion */
25996 if (subFrm->bw <= subFrm->bwAssigned)
26000 /* Call TM specific RB allocation routine */
26001 (dlAllocRetxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
26002 proc, cellWdAllocInfo);
26006 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26007 /* Calculate totals bits newly allocated */
26008 if (allocInfo->tbInfo[0].schdlngForTb)
26010 newSchBits += allocInfo->tbInfo[0].bytesReq;
26012 if (allocInfo->tbInfo[1].schdlngForTb)
26014 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* bytesReq totals are converted to bits here (*8). */
26016 ue->dl.aggTbBits += (newSchBits * 8);
26017 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
26025 * @brief This function determines the RBs and Bytes required for
26026 * Transmission on 1 CW.
26030 * Function: rgSCHCmnDlAlloc1CwTxRb
26031 * Purpose: This function determines the RBs and Bytes required
26032 * for Transmission of DL SVC BO on 1 CW.
26033 * Also, takes care of SVC by SVC allocation by tracking
26034 * previous SVCs allocations.
26035 * Returns RFAILED if BO not satisfied at all.
26037 * Invoked by: DL UE Allocation
26039 * @param[in] RgSchCellCb *cell
26040 * @param[in] RgSchDlSf *subFrm
26041 * @param[in] RgSchUeCb *ue
26042 * @param[in] RgSchDlHqTbCb *tbInfo
26043 * @param[in] uint32_t bo
26044 * @param[out] uint8_t *numRb
26045 * @param[out] uint32_t *effBo
/* Compute RBs and bytes for a 1-CW fresh TX of "tbInfo". This build sizes
 * the grant with 5GTF parameters: iTbs from the configured MCS, TB size
 * capped by rank-scaled MAX_5GTF_TB_SIZE, and the grant spanning all
 * MAX_5GTF_PRBS. outStndAlloc accumulates across per-SVC calls so *effBo
 * reports only the increment over what was already promised. */
26049 static S16 rgSCHCmnDlAlloc1CwTxRb
26054 RgSchDlHqTbCb *tbInfo,
26063 RgSchCmnDlUe *ueDl;
26064 RgSchDlRbAlloc *allocInfo;
26067 /* Correcting wrap around issue.
26068 * This change has been done at multiple places in this function.*/
26069 uint32_t tempNumRb;
26072 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26073 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* Bytes already promised before this call; used to size *effBo below. */
26074 oldReq = ueDl->outStndAlloc;
26077 //TODO_SID: Currently setting max Tb size wrt to 5GTF TM3
26078 iTbs = ue->ue5gtfCb.mcs;
26079 ueDl->maxTbSz = MAX_5GTF_TB_SIZE * ue->ue5gtfCb.rank;
26080 ueDl->maxRb = MAX_5GTF_PRBS;
26082 ueDl->outStndAlloc += bo;
26083 /* consider Cumulative amount of this BO and bytes so far allocated */
26084 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbSz/8);
26085 /* Get the number of REs needed for this bo. */
26086 //noRes = ((bo * 8 * 1024) / eff);
26088 /* Get the number of RBs needed for this transmission */
26089 /* Number of RBs = No of REs / No of REs per RB */
26090 //tempNumRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
/* 5GTF: the grant always spans the full PRB allotment. */
26091 tempNumRb = MAX_5GTF_PRBS;
26092 tbSz = RGSCH_MIN(bo, (rgSch5gtfTbSzTbl[iTbs]/8) * ue->ue5gtfCb.rank);
26094 /* DwPts Scheduling Changes End */
26095 *effBo = RGSCH_MIN(tbSz - oldReq, reqBytes);
26098 //RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs);
26103 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbSz, \
26104 iTbs, imcs, tbInfo, ue->ue5gtfCb.rank);
26105 *numRb = (uint8_t) tempNumRb;
26107 /* Update the subframe Allocated BW field */
26108 subFrm->bwAssigned = subFrm->bwAssigned + tempNumRb - allocInfo->rbsReq;
26115 * @brief This function is invoked in the event of any TB's allocation
26116 * being underutilized by the specific scheduler. Here we reduce iMcs
26117 * to increase redundancy and hence increase reception quality at UE.
26121 * Function: rgSCHCmnRdcImcsTxTb
26122 * Purpose: This function shall reduce the iMcs in accordance with
26123 * the total consumed bytes by the UE at allocation
26126 * Invoked by: UE DL Allocation finalization routine
26127 * of specific scheduler.
26129 * @param[in] RgSchDlRbAlloc *allocInfo
26130 * @param[in] uint8_t tbInfoIdx
26131 * @param[in] uint32_t cnsmdBytes
/* Reduce the grant's iMcs when the specific scheduler consumed fewer
 * bytes (cnsmdBytes) than the TB can carry, trading rate for redundancy
 * and hence better reception quality at the UE. */
26135 Void rgSCHCmnRdcImcsTxTb
26137 RgSchDlRbAlloc *allocInfo,
26139 uint32_t cnsmdBytes
26143 /*The below functionality is not needed.*/
/* NOTE(review): the comment above suggests the body below is disabled by
 * an early return and retained only for reference - confirm. */
26149 iTbs = allocInfo->tbInfo[tbInfoIdx].iTbs;
26150 noLyr = allocInfo->tbInfo[tbInfoIdx].noLyr;
26151 numRb = allocInfo->rbsAlloc;
/* Exact fit: the current iTbs already matches the consumed bytes. */
26154 if ((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) == cnsmdBytes)
26159 /* Get iTbs as suitable for the consumed bytes */
26160 while((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) > cnsmdBytes)
26164 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].\
26165 tbCb->dlGrnt.iMcs);
26171 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].tbCb->dlGrnt.iMcs);
26178 * @brief This function determines the RBs and Bytes required for
26179 * Transmission on 2 CWs.
26183 * Function: rgSCHCmnDlAlloc2CwTxRb
26184 * Purpose: This function determines the RBs and Bytes required
26185 * for Transmission of DL SVC BO on 2 CWs.
26186 * Also, takes care of SVC by SVC allocation by tracking
26187 * previous SVCs allocations.
26188 * Returns RFAILED if BO not satisfied at all.
26190 * Invoked by: TM3 and TM4 DL UE Allocation
26192 * @param[in] RgSchCellCb *cell
26193 * @param[in] RgSchDlSf *subFrm
26194 * @param[in] RgSchUeCb *ue
26195 * @param[in] RgSchDlHqProcCb *proc
26196 * @param[in] RgSchDlHqProcCb bo
26197 * @param[out] uint8_t *numRb
26198 * @param[out] uint32_t *effBo
/* Compute RBs and per-TB byte counts for a fresh 2-CW (spatial
 * multiplexing) TX. The requested BO is split across the two codewords
 * weighted by their spectral efficiencies (eff1/eff2), converted to REs
 * and then RBs, and clipped against per-UE limits (maxRb, maxTbSz,
 * maxTbBits) and the subframe's remaining bandwidth. TDD DwPTS (special
 * subframe) grants get a dedicated sizing path. */
26202 static S16 rgSCHCmnDlAlloc2CwTxRb
26207 RgSchDlHqProcCb *proc,
26214 uint32_t eff1, eff2;
26215 uint32_t tb1Sz, tb2Sz;
26216 uint8_t imcs1, imcs2;
26217 uint8_t noLyr1, noLyr2;
26218 uint8_t iTbs1, iTbs2;
26219 RgSchCmnDlCell *cellDl;
26220 RgSchCmnDlUe *ueDl;
26221 RgSchDlRbAlloc *allocInfo;
26224 /* Fix: MUE_PERTTI_DL */
26226 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
26227 uint8_t cfi = cellSch->dl.currCfi;
26229 uint32_t availBits = 0;
26231 uint32_t boTmp = bo;
26236 cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26237 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26238 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* Bytes already promised before this call; used to size *effBo below. */
26239 oldReq = ueDl->outStndAlloc;
26242 if (ueDl->maxTbBits > ue->dl.aggTbBits)
26244 availBits = ueDl->maxTbBits - ue->dl.aggTbBits;
26246 /* check if we can further allocate to this UE */
26247 if ((ue->dl.aggTbBits >= ueDl->maxTbBits) ||
26248 (allocInfo->tbInfo[0].bytesReq >= ueDl->maxTbSz/8) ||
26249 (allocInfo->tbInfo[1].bytesReq >= ueDl->maxTbSz/8) ||
26250 (allocInfo->rbsReq >= ueDl->maxRb))
26252 DU_LOG("\nDEBUG --> SCH : rgSCHCmnDlAllocRb(): UEs max allocation exceed");
26256 noLyr1 = ueDl->mimoInfo.cwInfo[0].noLyr;
26257 noLyr2 = ueDl->mimoInfo.cwInfo[1].noLyr;
26259 /* If there is no CFI change, continue to use the BLER based
26261 if (ueDl->lastCfi == cfi)
26263 iTbs1 = ueDl->mimoInfo.cwInfo[0].iTbs[noLyr1 - 1];
26264 iTbs2 = ueDl->mimoInfo.cwInfo[1].iTbs[noLyr2 - 1];
/* CFI changed: re-derive per-CW iTbs from the reported CQIs. */
26268 uint8_t cqi = ueDl->mimoInfo.cwInfo[0].cqi;
26270 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 0, noLyr1);
26272 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 0, noLyr1);
26275 cqi = ueDl->mimoInfo.cwInfo[1].cqi;
26277 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 1, noLyr2);
26279 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 1, noLyr2);
26283 /*ccpu00131191 and ccpu00131317 - Fix for RRC Reconfig failure
26284 * issue for VoLTE call */
26285 //if ((proc->hasDcch) || (TRUE == rgSCHLaaSCellEnabled(cell)))
26305 else if(!cellSch->dl.isDlFreqSel)
26308 /* for Tdd reduce iTbs only for SF0. SF5 contains only
26309 * SSS and can be ignored */
26310 if (subFrm->sfNum == 0)
26312 (iTbs1 > 1)? (iTbs1 -= 1) : (iTbs1 = 0);
26313 (iTbs2 > 1)? (iTbs2 -= 1) : (iTbs2 = 0);
26315 /* For SF 3 and 8 CRC is getting failed in DL.
26316 Need to do proper fix after the replay from the PHY team */
26318 #ifdef CA_PHY_BRDCM_61765
26319 if ((subFrm->sfNum == 3) || (subFrm->sfNum == 8))
26321 (iTbs1 > 2)? (iTbs1 -= 2) : (iTbs1 = 0);
26322 (iTbs2 > 2)? (iTbs2 -= 2) : (iTbs2 = 0);
26330 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26332 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
/* Per-CW spectral efficiency for the chosen iTbs at this CFI. */
26336 eff1 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr1 - 1][cfi]))[iTbs1];
26337 eff2 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr2 - 1][cfi]))[iTbs2];
26340 bo = RGSCH_MIN(bo,availBits/8);
26341 ueDl->outStndAlloc += bo;
26342 /* consider Cumulative amount of this BO and bytes so far allocated */
26343 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbBits/8);
/* Split BO across the CWs weighted by eff1/eff2, each share floored at
 * RGSCH_CMN_MIN_GRNT_HDR and capped by maxTbSz. */
26344 bo = RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff1)/(eff1+eff2)),
26346 RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff2)/(eff1+eff2)),
26347 (ueDl->maxTbSz)/8) +
26348 1; /* Add 1 to adjust the truncation at weighted averaging */
26349 /* Get the number of REs needed for this bo. */
26350 noRes = ((bo * 8 * 1024) / (eff1 + eff2));
26352 /* Get the number of RBs needed for this transmission */
26353 /* Number of RBs = No of REs / No of REs per RB */
26354 numRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
26355 /* Cannot exceed the maximum number of RBs per UE */
26356 if (numRb > ueDl->maxRb)
26358 numRb = ueDl->maxRb;
26363 if(RFAILED == rgSCHLaaCmn2CwAdjustPrb(allocInfo, boTmp, &numRb, ueDl, noLyr1, noLyr2, iTbs1, iTbs2))
/* Grow numRb while both TB sizes stay within maxTbSz and their sum still
 * fits the (weighted) BO target. */
26366 while ((numRb <= ueDl->maxRb) &&
26367 (rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1] <= ueDl->maxTbSz) &&
26368 (rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1] <= ueDl->maxTbSz) &&
26369 ((rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8 +
26370 rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8) <= bo))
26376 availBw = subFrm->bw - subFrm->bwAssigned;
26377 /* Cannot exceed the total number of RBs in the cell */
26378 if ((S16)(numRb - allocInfo->rbsReq) > availBw)
26380 numRb = availBw + allocInfo->rbsReq;
26382 tb1Sz = rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8;
26383 tb2Sz = rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8;
26384 /* DwPts Scheduling Changes Start */
26386 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26388 /* Max Rb for Special Sf is approximated as 4/3 of maxRb */
26389 rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, (uint8_t*)&numRb, ueDl->maxRb*4/3,
26390 &iTbs1, &iTbs2, noLyr1,
26391 noLyr2, &tb1Sz, &tb2Sz, cfi);
26392 /* Check for available Bw */
26393 if ((S16)numRb - allocInfo->rbsReq > availBw)
26395 numRb = availBw + allocInfo->rbsReq;
26396 tb1Sz = rgTbSzTbl[noLyr1-1][iTbs1][RGSCH_MAX(numRb*3/4,1)-1]/8;
26397 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs2][RGSCH_MAX(numRb*3/4,1)-1]/8;
26401 /* DwPts Scheduling Changes End */
26402 /* Update the subframe Allocated BW field */
26403 subFrm->bwAssigned = subFrm->bwAssigned + numRb - \
/* Report only the increment over the previously outstanding allocation. */
26406 *effBo = RGSCH_MIN((tb1Sz + tb2Sz) - oldReq, reqBytes);
26409 if (ROK != rgSCHLaaCmn2TBPrbCheck(allocInfo, tb1Sz, tb2Sz, boTmp, effBo, iTbs1, iTbs2, numRb, proc))
26415 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs1, imcs1);
26416 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs2, imcs2);
26417 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tb1Sz, \
26418 iTbs1, imcs1, &proc->tbInfo[0], noLyr1);
26419 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
26420 iTbs2, imcs2, &proc->tbInfo[1], noLyr2);
26421 *numRbRef = (uint8_t)numRb;
26429 * @brief This function determines the RBs and Bytes required for
26430 * Transmission & Retransmission on 2 CWs.
26434 * Function: rgSCHCmnDlAlloc2CwTxRetxRb
26435 * Purpose: This function determines the RBs and Bytes required
26436 * for Transmission & Retransmission on 2 CWs. Allocate
26437 * RETX TB on a better CW and restrict new TX TB by
26439 * Returns RFAILED if BO not satisfied at all.
26441 * Invoked by: TM3 and TM4 DL UE Allocation
26443 * @param[in] RgSchCellCb *cell
26444 * @param[in] RgSchDlSf *subFrm
26445 * @param[in] RgSchUeCb *ue
26446 * @param[in] RgSchDlHqTbCb *reTxTb
26447 * @param[in] RgSchDlHqTbCb *txTb
26448 * @param[out] uint8_t *numRb
26449 * @param[out] uint32_t *effBo
/* Size a 2-CW grant pairing a RETX TB (reTxTb, kept at its original grant
 * size) with a fresh TX TB (txTb) on the other, non-better codeword. The
 * RETX grant is not recomputed; the new TB is sized from the other CW's
 * iTbs for the same RB count. *effBo returns the combined bytes. */
26453 static S16 rgSCHCmnDlAlloc2CwTxRetxRb
26458 RgSchDlHqTbCb *reTxTb,
26459 RgSchDlHqTbCb *txTb,
26464 RgSchCmnDlUe *ueDl;
26465 RgSchDlRbAlloc *allocInfo;
26466 uint8_t imcs1, imcs2;
26469 RgSchCmnDlUeCwInfo *otherCw;
26471 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26472 uint8_t cfi = cellDl->currCfi;
26476 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26477 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* The codeword NOT marked as "better" carries the fresh TX TB. */
26478 otherCw = &ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)];
26481 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26482 * and Tbs. Set all parameters same as Init TX except RV (only for NACKED). */
26484 availBw = subFrm->bw - subFrm->bwAssigned;
26485 *numRb = reTxTb->dlGrnt.numRb;
26487 #ifdef XEON_TDD_SPCL
26488 *numRb = (reTxTb->initTxNumRbs);
/* Special-SF-to-normal-SF RETX: scale the RB count down by 3/4. */
26489 if(reTxTb->sfType == RG_SCH_SPL_SF_DATA && subFrm->sfType != RG_SCH_SPL_SF_DATA)
26491 *numRb = (reTxTb->initTxNumRbs*3/4);
26495 DU_LOG("\nERROR --> SCH : Number of RBs [%d] are less than or equal to 3",*numRb);
26501 if ((S16)*numRb > availBw)
26505 /* Update the subframe Allocated BW field */
26506 subFrm->bwAssigned += *numRb;
26507 noLyr2 = otherCw->noLyr;
26508 RG_SCH_CMN_GET_MCS_FOR_RETX(reTxTb, imcs1);
26510 /* If there is no CFI change, continue to use the BLER based iTbs */
26512 if (ueDl->lastCfi == cfi)
26514 iTbs = otherCw->iTbs[noLyr2-1];
/* CFI changed: re-derive the other CW's iTbs from its reported CQI. */
26519 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, otherCw->cqi, cfi,
26520 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
26522 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, otherCw->cqi, cfi,
26523 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
26526 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs][*numRb-1]/8;
26527 /* DwPts Scheduling Changes Start */
26530 /* DwPts Scheduling Changes End */
26531 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs2);
/* TB0 carries the RETX (MCS from original grant); TB1 the new TX. */
26533 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], reTxTb->tbSz, \
26534 0, imcs1, reTxTb, reTxTb->numLyrs);
26536 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
26537 iTbs, imcs2, txTb, noLyr2);
26539 *effBo = reTxTb->tbSz + tb2Sz;
26546 * @brief This function determines the RBs and Bytes required for BO
26547 * Retransmission on 2 CWs.
26551 * Function: rgSCHCmnDlAlloc2CwRetxRb
26552 * Purpose: This function determines the RBs and Bytes required
26553 * for BO Retransmission on 2 CWs. Allocate larger TB
26554 * on a better CW and check if the smaller TB can be
26555 * accommodated on the other CW.
26556 * Returns RFAILED if BO not satisfied at all.
26558 * Invoked by: Common Scheduler
26560 * @param[in] RgSchCellCb *cell
26561 * @param[in] RgSchDlSf *subFrm
26562 * @param[in] RgSchUeCb *ue
26563 * @param[in] RgSchDlHqProcCb *proc
26564 * @param[out] uint8_t *numRb
26565 * @param[out] Bool *swpFlg
26566 * @param[out] uint32_t *effBo
/* Allocates RBs for the retransmission of BOTH TBs of a HARQ process (2-CW
 * RETX). Both TBs reuse their original grant parameters; only the subframe
 * bandwidth availability is re-checked here. */
26570 static S16 rgSCHCmnDlAlloc2CwRetxRb
26575 RgSchDlHqProcCb *proc,
26581 RgSchDlRbAlloc *allocInfo;
26584 RgSchDlHqTbCb *lrgTbInfo, *othrTbInfo;
26587 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26590 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26591 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26593 lrgTbInfo = &proc->tbInfo[0];
26594 othrTbInfo = &proc->tbInfo[1];
26595 *numRb = lrgTbInfo->dlGrnt.numRb;
/* XEON_TDD_SPCL: if either TB was first transmitted in a special (DwPTS)
 * subframe, re-derive the RB count from the initial TX; shrink to 3/4 when
 * the retransmission lands in a normal subframe. */
26596 #ifdef XEON_TDD_SPCL
26597 if((lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA || othrTbInfo->sfType == RG_SCH_SPL_SF_DATA))
26599 if(lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA)
26601 *numRb = (lrgTbInfo->initTxNumRbs);
26605 *numRb = (othrTbInfo->initTxNumRbs);
26608 if(subFrm->sfType != RG_SCH_SPL_SF_DATA)
26610 *numRb = (*numRb)*3/4;
26615 DU_LOG("\nERROR --> SCH : Number of RBs [%d] are less than or equal to 3",*numRb);
/* Fail when the subframe does not have enough unassigned bandwidth. */
26620 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
26624 /* Update the subframe Allocated BW field */
26625 subFrm->bwAssigned += *numRb;
26626 RG_SCH_CMN_GET_MCS_FOR_RETX(lrgTbInfo, imcs1);
26627 RG_SCH_CMN_GET_MCS_FOR_RETX(othrTbInfo, imcs2);
/* For RETX TBs iTbs is irrelevant, hence 0 in both fills. */
26628 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], lrgTbInfo->tbSz, \
26629 0, imcs1, lrgTbInfo, lrgTbInfo->numLyrs);
26630 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], othrTbInfo->tbSz, \
26631 0, imcs2, othrTbInfo, othrTbInfo->numLyrs);
26632 *effBo = lrgTbInfo->tbSz + othrTbInfo->tbSz;
26641 * @brief This function determines the RBs and Bytes required for BO
26642 * Retransmission on 1 CW.
26646 * Function: rgSCHCmnDlAlloc1CwRetxRb
26647 * Purpose: This function determines the RBs and Bytes required
26648 * for BO Retransmission on 1 CW, the first CW.
26649 * Returns RFAILED if BO not satisfied at all.
26651 * Invoked by: Common Scheduler
26653 * @param[in] RgSchCellCb *cell
26654 * @param[in] RgSchDlSf *subFrm
26655 * @param[in] RgSchUeCb *ue
26656 * @param[in] RgSchDlHqTbCb *tbInfo
26657 * @param[in] uint8_t noLyr
26658 * @param[out] uint8_t *numRb
26659 * @param[out] uint32_t *effBo
/* Allocates RBs for the retransmission of a single TB (1-CW RETX). The TB
 * reuses its original grant (RB count, MCS, DCI format); only subframe
 * bandwidth availability is re-checked. */
26663 static S16 rgSCHCmnDlAlloc1CwRetxRb
26668 RgSchDlHqTbCb *tbInfo,
26674 RgSchDlRbAlloc *allocInfo;
26678 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26681 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26682 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26684 *numRb = tbInfo->dlGrnt.numRb;
/* Fail when the subframe does not have enough unassigned bandwidth. */
26685 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
26689 /* Update the subframe Allocated BW field */
26690 subFrm->bwAssigned += *numRb;
26691 imcs = tbInfo->dlGrnt.iMcs;
26692 allocInfo->dciFormat = tbInfo->dlGrnt.dciFormat;
26693 /* Fix: For a RETX TB the iTbs is irrelevant, hence setting 0 */
26694 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbInfo->tbSz, \
26695 0, imcs, tbInfo, tbInfo->numLyrs);
26696 *effBo = tbInfo->tbSz;
26704 * @brief This function is called to handle Release PDCCH feedback for SPS UE
26708 * Function: rgSCHCmnDlRelPdcchFbk
26709 * Purpose: Invokes SPS module to handle release PDCCH feedback
26713 * @param[in] RgSchCellCb *cell
26714 * @param[in] RgSchUeCb *ue
26715 * @param[in] Bool isAck
/* Forwards HARQ feedback (isAck) for an SPS release PDCCH to the DL SPS
 * module for this UE. */
26719 Void rgSCHCmnDlRelPdcchFbk
26727 rgSCHCmnSpsDlRelPdcchFbk(cell, ue, isAck);
26734 * @brief This function is invoked to handle Ack processing for a HARQ proc.
26738 * Function: rgSCHCmnDlProcAck
26739 * Purpose: DTX processing for HARQ proc
26743 * @param[in] RgSchCellCb *cell
26744 * @param[in] RgSchDlHqProcCb *hqP
/* ACK/DTX processing hook for a DL HARQ process: delegates to the DL SPS
 * module when the HARQ proc carried an SPS service. */
26748 Void rgSCHCmnDlProcAck
26751 RgSchDlHqProcCb *hqP
26756 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
26758 /* Invoke SPS module if SPS service was scheduled for this HARQ proc */
26759 rgSCHCmnSpsDlProcAck(cell, hqP);
/* Debug counter (RGSCH_SPS_STATS builds only): number of C-RNTI MAC CEs
 * received; incremented in rgSCHCmnHdlCrntiCE below. */
26763 #ifdef RGSCH_SPS_STATS
26764 uint32_t rgSchStatCrntiCeRcvCnt;
26767 * @brief This function is invoked to handle CRNTI CE reception for an UE
26771 * Function: rgSCHCmnHdlCrntiCE
26772 * Purpose: Handle CRNTI CE reception
26776 * @param[in] RgSchCellCb *cell
26777 * @param[in] RgSchDlHqProcCb *hqP
/* Handles reception of a C-RNTI MAC CE from a UE: re-activates the UE if it
 * was parked in the PDCCH-order inactive list, then resets DL/UL SPS state
 * (same handling as a UE RESET) where SPS is configured. */
26781 Void rgSCHCmnHdlCrntiCE
26788 #ifdef RGSCH_SPS_STATS
26789 rgSchStatCrntiCeRcvCnt++;
26792 /* When UL sync lost happened due to TA timer expiry UE is being moved to
26793 PDCCH order inactivity list.But when CRNTI CE received in msg3 from UE
26794 we are not moving UE into active state due to that RRC Reconfiguration is
26796 So here we are moving UE to active list whenever we receive the CRNTI CE and
26798 /* CR ccpu00144525 */
26799 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
26801 /* Activate this UE if it was inactive */
26802 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
26803 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
26806 /* Handling is same as reception of UE RESET for both DL and UL */
26807 if (ue->dl.dlSpsCfg.isDlSpsEnabled)
26809 rgSCHCmnSpsDlUeReset(cell, ue);
26811 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26813 rgSCHCmnSpsUlUeReset(cell, ue);
26821 * @brief This function is called to handle relInd from MAC for a UE
26825 * Function: rgSCHCmnUlSpsRelInd
26826 * Purpose: Invokes SPS module to handle UL SPS release for a UE
26828 * Invoked by: SCH_UTL
26830 * @param[in] RgSchCellCb *cell
26831 * @param[in] RgSchUeCb *ue
26832 * @param[in] Bool isExplRel
/* Forwards an UL SPS release indication from MAC to the UL SPS module;
 * isExplRel distinguishes an explicit release. */
26836 Void rgSCHCmnUlSpsRelInd
26844 rgSCHCmnSpsUlProcRelInd(cell, ue, isExplRel);
26847 } /* end of rgSCHCmnUlSpsRelInd */
26850 * @brief This function is called to handle SPS Activate Ind from MAC for a UE
26854 * Function: rgSCHCmnUlSpsActInd
26855 * Purpose: Invokes SPS module to handle UL SPS activate for a UE
26857 * Invoked by: SCH_UTL
26859 * @param[in] RgSchCellCb *cell
26860 * @param[in] RgSchUeCb *ue
/* Forwards an UL SPS activate indication from MAC to the UL SPS module,
 * but only when UL SPS is enabled for this UE. */
26864 Void rgSCHCmnUlSpsActInd
26868 uint16_t spsSduSize
26873 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26875 rgSCHCmnSpsUlProcActInd(cell, ue,spsSduSize);
26879 } /* end of rgSCHCmnUlSpsActInd */
26882 * @brief This function is called to handle CRC in UL for UEs
26883 * undergoing SPS release
26887 * Function: rgSCHCmnUlCrcInd
26888 * Purpose: Invokes SPS module to handle CRC in UL for SPS UE
26890 * Invoked by: SCH_UTL
26892 * @param[in] RgSchCellCb *cell
26893 * @param[in] RgSchUeCb *ue
26894 * @param[in] CmLteTimingInfo crcTime
/* Forwards an UL CRC-pass indication (for a UE undergoing SPS release) to
 * the UL SPS module, gated on UL SPS being enabled for the UE. */
26898 Void rgSCHCmnUlCrcInd
26902 CmLteTimingInfo crcTime
26906 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26908 rgSCHCmnSpsUlProcCrcInd(cell, ue, crcTime);
26912 } /* end of rgSCHCmnUlCrcInd */
26915 * @brief This function is called to handle CRC failure in UL
26919 * Function: rgSCHCmnUlCrcFailInd
26920 * Purpose: Invokes SPS module to handle CRC failure in UL for SPS UE
26922 * Invoked by: SCH_UTL
26924 * @param[in] RgSchCellCb *cell
26925 * @param[in] RgSchUeCb *ue
26926 * @param[in] CmLteTimingInfo crcTime
/* Forwards an UL CRC-failure (treated as DTX by the SPS module) indication
 * to the UL SPS module, gated on UL SPS being enabled for the UE. */
26930 Void rgSCHCmnUlCrcFailInd
26934 CmLteTimingInfo crcTime
26938 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26940 rgSCHCmnSpsUlProcDtxInd(cell, ue, crcTime);
26944 } /* end of rgSCHCmnUlCrcFailInd */
26946 #endif /* LTEMAC_SPS */
26949 * @brief BCH,BCCH,PCCH Downlink Scheduling Handler.
26953 * Function: rgSCHCmnDlBcchPcchAlloc
26954 * Purpose: This function calls common scheduler APIs to
26955 * schedule for BCCH/PCCH.
26956 * It then invokes Allocator for actual RB
26957 * allocations. It processes on the actual resources allocated
26958 * against requested to the allocator module.
26960 * Invoked by: Common Scheduler
26962 * @param[in] RgSchCellCb *cell
/* Per-TTI entry point for BCH/BCCH/PCCH DL scheduling: resets the next
 * subframe's BCCH/PCCH allocation bitmask, refreshes SI configuration and
 * selection, then schedules, RB-allocates, and finalizes BCCH/PCCH. */
26965 static Void rgSCHCmnDlBcchPcchAlloc(RgSchCellCb *cell)
/* nextSfIdx: index of the subframe-allocation entry being prepared; the
 * three definitions below are alternative compile-time variants (HDFDD
 * needs a HARQ-interval lookahead). */
26968 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
26970 #ifdef LTEMAC_HDFDD
26971 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
26973 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
26976 RgInfSfAlloc *nextsfAlloc = &(cell->sfAllocArr[nextSfIdx]);
26977 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
26978 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
26982 /*Reset the bitmask for BCCH/PCCH*/
26983 rgSCHUtlResetSfAlloc(nextsfAlloc,TRUE,FALSE);
26984 #ifndef DISABLE_MIB_SIB /* Not sending MIB and SIB to CL */
26986 rgSCHChkNUpdSiCfg(cell);
26987 rgSCHSelectSi(cell);
26990 /*Perform the scheduling for BCCH,PCCH*/
26991 rgSCHCmnDlBcchPcch(cell, allocInfo, nextsfAlloc);
26993 /* Call common allocator for RB Allocation */
26994 rgSCHBcchPcchDlRbAlloc(cell, allocInfo);
26996 /* Finalize the Allocations for requested Against alloced */
26997 rgSCHCmnDlBcchPcchFnlz(cell, allocInfo);
26998 #endif /* DISABLE_MIB_SIB */
27003 * @brief Handles RB allocation for BCCH/PCCH for downlink.
27007 * Function : rgSCHBcchPcchDlRbAlloc
27009 * Invoking Module Processing:
27010 * - This function is invoked for DL RB allocation of BCCH/PCCH
27012 * Processing Steps:
27013 * - If cell is frequency selecive,
27014 * - Call rgSCHDlfsBcchPcchAllocRb().
27016 * - Do the processing
27018 * @param[in] RgSchCellCb *cell
27019 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* Dispatches BCCH/PCCH DL RB allocation: frequency-selective cells go
 * through the DLFS API table, otherwise the non-DLFS allocator is used. */
27023 static Void rgSCHBcchPcchDlRbAlloc
27026 RgSchCmnDlRbAllocInfo *allocInfo
27029 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27033 if (cellSch->dl.isDlFreqSel)
27035 cellSch->apisDlfs->rgSCHDlfsBcchPcchAllocRb(cell, allocInfo);
27039 rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo);
27046 * @brief Handles RB allocation for BCCH,PCCH for frequency
27047 * non-selective cell.
27051 * Function : rgSCHCmnNonDlfsBcchPcchRbAlloc
27053 * Invoking Module Processing:
27054 * - SCH shall invoke this if downlink frequency selective is disabled for
27055 * the cell for RB allocation.
27056 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
27057 * estimate and subframe for each allocation to be made to SCH.
27059 * Processing Steps:
27060 * - Allocate sequentially for BCCH,PCCH common channels.
27062 * @param[in] RgSchCellCb *cell
27063 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Non-frequency-selective RB allocation for common channels: allocates
 * sequentially, PCCH first and then BCCH-on-DLSCH, skipping either when it
 * has no RB requirement this TTI. */
27067 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc
27070 RgSchCmnDlRbAllocInfo *allocInfo
27073 RgSchDlRbAlloc *reqAllocInfo;
27077 /* Allocate for PCCH */
27078 reqAllocInfo = &(allocInfo->pcchAlloc);
27079 if (reqAllocInfo->rbsReq)
27081 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
27083 /* Allocate for BCCH on DLSCH */
27084 reqAllocInfo = &(allocInfo->bcchAlloc);
27085 if (reqAllocInfo->rbsReq)
27087 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
27095 * @brief This function implements the handling to check and
27096 * update the SI cfg at the start of the modification period.
27100 * Function: rgSCHChkNUpdSiCfg
27101 * Purpose: This function implements handling for update of SI Cfg
27102 * at the start of modification period.
27104 * Invoked by: Scheduler
27106 * @param[in] RgSchCellCb* cell
/* Checks for pending System Information updates flagged in siCb.siBitMask
 * and swaps the "new" MIB/SIB1/SI/SI-cfg copies into the current ones.
 * PWS (warning) SIB1 updates are applied immediately at the next SIB1
 * repetition occasion; all other updates are applied only at the start of
 * a modification period. */
27111 static Void rgSCHChkNUpdSiCfg
27116 CmLteTimingInfo pdSchTmInfo;
27120 pdSchTmInfo = cell->crntTime;
27121 #ifdef LTEMAC_HDFDD
27122 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27123 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27124 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27126 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA);
27130 /* Updating the SIB1 for Warning SI message immediately after it is received
27131 * from application. No need to wait for next modification period.
27133 if((pdSchTmInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
27134 && (RGSCH_SIB1_TX_SF_NUM == (pdSchTmInfo.slot % RGSCH_NUM_SUB_FRAMES)))
27136 /*Check whether SIB1 with PWS has been updated*/
27137 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_PWS_UPD)
27139 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
27140 cell->siCb.newSiInfo.sib1Info.sib1);
27141 cell->siCb.crntSiInfo.sib1Info.mcs =
27142 cell->siCb.newSiInfo.sib1Info.mcs;
27143 cell->siCb.crntSiInfo.sib1Info.nPrb =
27144 cell->siCb.newSiInfo.sib1Info.nPrb;
27145 cell->siCb.crntSiInfo.sib1Info.msgLen =
27146 cell->siCb.newSiInfo.sib1Info.msgLen;
27147 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_PWS_UPD;
27151 /*Check if this SFN and SF No marks the start of next modification
27152 period. If current SFN,SF No doesn't mark the start of next
27153 modification period, then return. */
27154 if(!((pdSchTmInfo.sfn % cell->siCfg.modPrd == 0)
27155 && (0 == pdSchTmInfo.slot)))
27156 /*if(!((((pdSchTmInfo.hSfn * 1024) + pdSchTmInfo.sfn) % cell->siCfg.modPrd == 0)
27157 && (0 == pdSchTmInfo.slot)))*/
27162 /*Check whether MIB has been updated*/
27163 if(cell->siCb.siBitMask & RGSCH_SI_MIB_UPD)
27165 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.mib,
27166 cell->siCb.newSiInfo.mib);
27167 cell->siCb.siBitMask &= ~RGSCH_SI_MIB_UPD;
27170 /*Check whether SIB1 has been updated*/
27171 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_UPD)
27173 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
27174 cell->siCb.newSiInfo.sib1Info.sib1);
27175 cell->siCb.crntSiInfo.sib1Info.mcs = cell->siCb.newSiInfo.sib1Info.mcs;
27176 cell->siCb.crntSiInfo.sib1Info.nPrb = cell->siCb.newSiInfo.sib1Info.nPrb;
27177 cell->siCb.crntSiInfo.sib1Info.msgLen =
27178 cell->siCb.newSiInfo.sib1Info.msgLen;
27179 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_UPD;
27182 /*Check whether SIs have been updated*/
27183 if(cell->siCb.siBitMask & RGSCH_SI_SI_UPD)
27187 /*Check if SI cfg have been modified And Check if numSi have
27188 been changed, if yes then we would need to update the
27189 pointers for all the SIs */
27190 if((cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD) &&
27191 (cell->siCfg.numSi != cell->siCb.newSiCfg.numSi))
27193 for(idx = 0;idx < cell->siCb.newSiCfg.numSi;idx++)
27195 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
27196 cell->siCb.newSiInfo.siInfo[idx].si);
27197 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
27198 cell->siCb.siArray[idx].isWarningSi = FALSE;
27200 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
27201 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
27202 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
27205 /*If numSi have been reduced then we need to free the
27206 pointers at the indexes in crntSiInfo which haven't
27207 been exercised. If numSi has increased then nothing
27208 additional is required as above handling has taken
27210 if(cell->siCfg.numSi > cell->siCb.newSiCfg.numSi)
27212 for(idx = cell->siCb.newSiCfg.numSi;
27213 idx < cell->siCfg.numSi;idx++)
27215 RGSCH_FREE_MSG(cell->siCb.crntSiInfo.siInfo[idx].si);
27216 cell->siCb.siArray[idx].si = NULLP;
27222 /*numSi has not been updated, we just need to update the
27223 pointers for the SIs which are set to NON NULLP */
27224 /*ccpu00118260 - Correct Update of SIB2 */
27225 for(idx = 0;idx < cell->siCfg.numSi;idx++)
27227 if(NULLP != cell->siCb.newSiInfo.siInfo[idx].si)
27229 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
27230 cell->siCb.newSiInfo.siInfo[idx].si);
27232 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
27233 cell->siCb.siArray[idx].isWarningSi = FALSE;
27234 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
27235 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
27236 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
27240 cell->siCb.siBitMask &= ~RGSCH_SI_SI_UPD;
27243 /*Check whether SI cfg have been updated*/
27244 if(cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD)
27246 cell->siCfg = cell->siCb.newSiCfg;
27247 cell->siCb.siBitMask &= ~RGSCH_SI_SICFG_UPD;
27255 * @brief This function implements the selection of the SI
27256 * that is to be scheduled.
27260 * Function: rgSCHSelectSi
27261 * Purpose: This function implements the selection of SI
27262 * that is to be scheduled.
27264 * Invoked by: Scheduler
27266 * @param[in] RgSchCellCb* cell
/* Selects which SI message (if any) is to be transmitted in the upcoming
 * SI window. The selection is made once per window: while inside a window
 * the inWindow counter is just maintained; at a window boundary the SI-set
 * id and window id are computed and siCtx is (re)initialized for the SI
 * whose periodicity matches, or invalidated (siId = 0). */
27271 static Void rgSCHSelectSi
27276 CmLteTimingInfo crntTmInfo;
27283 crntTmInfo = cell->crntTime;
27284 #ifdef LTEMAC_HDFDD
27285 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27286 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27287 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27289 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA);
27292 siWinSize = cell->siCfg.siWinSize;
27294 /* Select SI only once at the starting of the new window */
27295 if(cell->siCb.inWindow)
27297 if ((crntTmInfo.sfn % cell->siCfg.minPeriodicity) == 0 &&
27298 crntTmInfo.slot == 0)
27300 /* Reinit inWindow at the beginning of every SI window */
27301 cell->siCb.inWindow = siWinSize - 1;
27305 cell->siCb.inWindow--;
27309 else /* New window. Re-init the winSize counter with the window length */
/* Drop an un-transmitted warning-SI PDU left over from the previous
 * window before starting a new selection. */
27311 if((cell->siCb.siArray[cell->siCb.siCtx.siId - 1].isWarningSi == TRUE)&&
27312 (cell->siCb.siCtx.retxCntRem != 0))
27314 rgSCHUtlFreeWarningSiPdu(cell);
27315 cell->siCb.siCtx.warningSiFlag = FALSE;
27318 cell->siCb.inWindow = siWinSize - 1;
/* x = SI-set id for the current time (see rgSCHCmnGetSiSetId). */
27321 x = rgSCHCmnGetSiSetId(crntTmInfo.sfn, crntTmInfo.slot,
27322 cell->siCfg.minPeriodicity);
27324 /* Window Id within a SI set. This window Id directly maps to a
27326 windowId = (((crntTmInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) +
27327 crntTmInfo.slot) - (x * (cell->siCfg.minPeriodicity * 10)))
27330 if(windowId >= RGR_MAX_NUM_SI)
27333 /* Update the siCtx if there is a valid SI and its periodicity
27335 if (NULLP != cell->siCb.siArray[windowId].si)
27337 /* Warning SI Periodicity is same as SIB2 Periodicity */
27338 if(((cell->siCb.siArray[windowId].isWarningSi == FALSE) &&
27339 (x % (cell->siCfg.siPeriodicity[windowId]
27340 /cell->siCfg.minPeriodicity) == 0)) ||
27341 ((cell->siCb.siArray[windowId].isWarningSi == TRUE) &&
27342 (x % (cell->siCfg.siPeriodicity[0]
27343 /cell->siCfg.minPeriodicity) == 0)))
27345 cell->siCb.siCtx.siId = windowId+1;
27346 cell->siCb.siCtx.retxCntRem = cell->siCfg.retxCnt;
27347 cell->siCb.siCtx.warningSiFlag = cell->siCb.siArray[windowId].
27349 cell->siCb.siCtx.timeToTx.sfn = crntTmInfo.sfn;
27350 cell->siCb.siCtx.timeToTx.slot = crntTmInfo.slot;
/* maxTimeToTx = window start + (siWinSize - 1) subframes. */
27352 RG_SCH_ADD_TO_CRNT_TIME(cell->siCb.siCtx.timeToTx,
27353 cell->siCb.siCtx.maxTimeToTx, (siWinSize - 1))
27357 {/* Update the siCtx with invalid si Id */
27358 cell->siCb.siCtx.siId = 0;
27366 * @brief This function implements scheduler DL allocation for
27371 * Function: rgSCHDlSiSched
27372 * Purpose: This function implements scheduler for DL allocation
27375 * Invoked by: Scheduler
27377 * @param[in] RgSchCellCb* cell
/* DL scheduler for broadcast system information. In one pass it:
 *   1) schedules MIB at its periodicity (non-eMTC path), patching the SFN
 *      bits into the first two MIB octets and copying the PDU to the MAC
 *      interface buffer;
 *   2) schedules SIB1 at its repetition occasions;
 *   3) otherwise schedules the SI selected in siCtx within its SI window,
 *      honoring ABS muting, and fills allocInfo->bcchAlloc with the RB/MCS
 *      requirement (RB count derived from msgLen via the TBS table, with a
 *      DwPTS compensation for special subframes). */
27382 static Void rgSCHDlSiSched
27385 RgSchCmnDlRbAllocInfo *allocInfo,
27386 RgInfSfAlloc *subfrmAlloc
27389 CmLteTimingInfo crntTimInfo;
27395 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27396 /* DwPTS Scheduling Changes Start */
27399 uint8_t cfi = cellDl->currCfi;
27401 /* DwPTS Scheduling Changes End */
27405 crntTimInfo = cell->crntTime;
27406 #ifdef LTEMAC_HDFDD
27407 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27408 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27409 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27411 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA);
27414 /* Compute the subframe for which allocation is being made.
27415 Essentially, we need pointer to the dl frame for this subframe */
27416 sf = rgSCHUtlSubFrmGet(cell, crntTimInfo);
27418 /*Check if scheduling of MIB is required */
27420 /* since we are adding the MIB repetition logic for EMTC UEs, checking if
27421 * emtcEnabled or not, If enabled MIB would be repeted at as part of EMTC
27422 * feature, otherwise scheduling at (n,0) */
27423 if(0 == cell->emtcEnable)
27426 if((crntTimInfo.sfn % RGSCH_MIB_PERIODICITY == 0)
27427 && (RGSCH_MIB_TX_SF_NUM == crntTimInfo.slot))
27430 uint8_t sfnOctet, mibOct2 = 0;
27431 uint8_t mibOct1 = 0;
27432 /*If MIB has not been yet setup by Application, return*/
27433 if(NULLP == cell->siCb.crntSiInfo.mib)
27436 SFndLenMsg(cell->siCb.crntSiInfo.mib, &mibLen);
27437 sf->bch.tbSize = mibLen;
27438 /*Fill the interface information */
27439 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, NULLD, NULLD);
27441 /*Set the bits of MIB to reflect SFN */
27442 /*First get the Most signficant 8 bits of SFN */
27443 sfnOctet = (uint8_t)(crntTimInfo.sfn >> 2);
27444 /*Get the first two octets of MIB, and then update them
27445 using the SFN octet value obtained above.*/
27446 if(ROK != SExamMsg((Data *)(&mibOct1),
27447 cell->siCb.crntSiInfo.mib, 0))
27450 if(ROK != SExamMsg((Data *)(&mibOct2),
27451 cell->siCb.crntSiInfo.mib, 1))
27454 /* ccpu00114572- Fix for improper way of MIB Octet setting for SFN */
27455 mibOct1 = (mibOct1 & 0xFC) | (sfnOctet >> 6);
27456 mibOct2 = (mibOct2 & 0x03) | (sfnOctet << 2);
27457 /* ccpu00114572- Fix ends*/
27459 /*Now, replace the two octets in MIB */
27460 if(ROK != SRepMsg((Data)(mibOct1),
27461 cell->siCb.crntSiInfo.mib, 0))
27464 if(ROK != SRepMsg((Data)(mibOct2),
27465 cell->siCb.crntSiInfo.mib, 1))
27468 /*Copy the MIB msg buff into interface buffer */
27469 SCpyMsgMsg(cell->siCb.crntSiInfo.mib,
27470 rgSchCb[cell->instIdx].rgSchInit.region,
27471 rgSchCb[cell->instIdx].rgSchInit.pool,
27472 &subfrmAlloc->cmnLcInfo.bchInfo.pdu);
27473 /* Added Dl TB count for MIB message transmission
27474 * This counter is incremented 4 times to consider
27475 * the retransmission at the PHY level on PBCH channel*/
27477 cell->dlUlTbCnt.tbTransDlTotalCnt += RG_SCH_MIB_CNT;
27484 allocInfo->bcchAlloc.schdFirst = FALSE;
27485 /*Check if scheduling of SIB1 is required.
27486 Check of (crntTimInfo.sfn % RGSCH_SIB1_PERIODICITY == 0)
27487 is not required here since the below check takes care
27488 of SFNs applicable for this one too.*/
27489 if((crntTimInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
27490 && (RGSCH_SIB1_TX_SF_NUM == crntTimInfo.slot))
27492 /*If SIB1 has not been yet setup by Application, return*/
27493 if(NULLP == (cell->siCb.crntSiInfo.sib1Info.sib1))
27498 allocInfo->bcchAlloc.schdFirst = TRUE;
27499 mcs = cell->siCb.crntSiInfo.sib1Info.mcs;
27500 nPrb = cell->siCb.crntSiInfo.sib1Info.nPrb;
27501 msgLen = cell->siCb.crntSiInfo.sib1Info.msgLen;
27505 /*Check if scheduling of SI can be performed.*/
27506 Bool invalid = FALSE;
27508 if(cell->siCb.siCtx.siId == 0)
27511 /*Check if the Si-Window for the current Si-Context is completed*/
27512 invalid = rgSCHCmnChkPastWin(crntTimInfo, cell->siCb.siCtx.maxTimeToTx);
27515 /* LTE_ADV_FLAG_REMOVED_START */
27516 if(cell->siCb.siCtx.retxCntRem)
27518 DU_LOG("\nERROR --> SCH : rgSCHDlSiSched(): SI not scheduled and window expired");
27520 /* LTE_ADV_FLAG_REMOVED_END */
27521 if(cell->siCb.siCtx.warningSiFlag == TRUE)
27523 rgSCHUtlFreeWarningSiPdu(cell);
27524 cell->siCb.siCtx.warningSiFlag = FALSE;
27529 /*Check the timinginfo of the current SI-Context to see if its
27530 transmission can be scheduled. */
27531 if(FALSE == (rgSCHCmnChkInWin(crntTimInfo,
27532 cell->siCb.siCtx.timeToTx,
27533 cell->siCb.siCtx.maxTimeToTx)))
27538 /*Check if retransmission count has become 0*/
27539 if(0 == cell->siCb.siCtx.retxCntRem)
27544 /* LTE_ADV_FLAG_REMOVED_START */
27545 /* Check if ABS is enabled/configured */
27546 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
27548 /* The pattern type is RGR_ABS_MUTE, then eNB need to blank the subframe */
27549 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
27551 /* Determine next scheduling subframe is ABS or not */
27552 if(RG_SCH_ABS_ENABLED_ABS_SF == (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern
27553 [((crntTimInfo.sfn*RGSCH_NUM_SUB_FRAMES) + crntTimInfo.slot) % RGR_ABS_PATTERN_LEN]))
27555 /* Skip the SI scheduling to next tti */
27560 /* LTE_ADV_FLAG_REMOVED_END */
27562 /*Schedule the transmission of the current SI-Context */
27563 /*Find out the messg length for the SI message */
27564 /* warningSiFlag is to differentiate between Warning SI
27566 if((rgSCHUtlGetMcsAndNPrb(cell, &nPrb, &mcs, &msgLen)) != ROK)
27571 cell->siCb.siCtx.i = RGSCH_CALC_SF_DIFF(crntTimInfo,
27572 cell->siCb.siCtx.timeToTx);
27576 /*Get the number of rb required */
27577 /*rgSCHCmnClcRbAllocForFxdTb(cell, msgLen, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: walk the TBS table; otherwise divide directly. */
27578 if(cellDl->bitsPerRb==0)
27580 while ((rgTbSzTbl[0][0][rb]) < (uint32_t) (msgLen*8))
27588 rb = RGSCH_CEIL((msgLen*8), cellDl->bitsPerRb);
27590 /* DwPTS Scheduling Changes Start */
27592 if (sf->sfType == RG_SCH_SPL_SF_DATA)
27594 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
27596 /* Calculate the less RE's because of DwPTS */
27597 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
27599 /* Increase number of RBs in Spl SF to compensate for lost REs */
27600 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
27603 /* DwPTS Scheduling Changes End */
27604 /*ccpu00115595- end*/
27605 /* Additional check to see if required RBs
27606 * exceeds the available */
27607 if (rb > sf->bw - sf->bwAssigned)
27609 DU_LOG("\nERROR --> SCH : rgSCHDlSiSched(): "
27610 "BW allocation failed CRNTI:%d",RGSCH_SI_RNTI);
27614 /* Update the subframe Allocated BW field */
27615 sf->bwAssigned = sf->bwAssigned + rb;
27617 /*Fill the parameters in allocInfo */
27618 allocInfo->bcchAlloc.rnti = RGSCH_SI_RNTI;
27619 allocInfo->bcchAlloc.dlSf = sf;
27620 allocInfo->bcchAlloc.rbsReq = rb;
27621 /*ccpu00116710- MCS is not getting assigned */
27622 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
27624 /* ccpu00117510 - ADD - Assignment of nPrb and other information */
27625 allocInfo->bcchAlloc.nPrb = nPrb;
27626 allocInfo->bcchAlloc.tbInfo[0].bytesReq = msgLen;
27627 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
27630 #endif /*RGR_SI_SCH*/
27633 /* ccpu00117452 - MOD - Changed macro name from
27634 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
27635 #ifdef RGR_CQI_REPT
27637 * @brief This function Updates the DL CQI for the UE.
27641 * Function: rgSCHCmnUeDlPwrCtColltCqiRept
27642 * Purpose: Manages PUSH N CQI reporting
27643 * Step 1: Store the CQI in collation array
27644 * Step 2: Increment the tracking count
27645 * Step 3: Check is it time to to send the report
27646 * Step 4: if yes, Send StaInd to RRM
27647 * Step 4.1: Fill StaInd for sending collated N CQI reports
27648 * Step 4.2: Call utility function (rgSCHUtlRgrStaInd) to send rpts to RRM
27649 * Step 4.2.1: If sending was not successful, return RFAILED
27650 * Step 4.2.2: If sending was successful, return ROK
27651 * Step 5: If no, return
27652 * Invoked by: rgSCHCmnDlCqiInd
27654 * @param[in] RgSchCellCb *cell
27655 * @param[in] RgSchUeCb *ue
27656 * @param[in] RgrUeCqiRept *ueCqiRpt
/* Collates DL CQI reports for PUSH-N reporting: stores the new report in
 * the UE's collation array and, once the configured count is reached,
 * allocates and sends a StaInd carrying the collected reports to RRM. */
27660 static S16 rgSCHCmnUeDlPwrCtColltCqiRept
27664 RgrUeCqiRept *ueCqiRpt
27667 uint8_t *cqiCount = NULLP;
27669 RgrStaIndInfo *staInfo = NULLP;
27672 /* Step 1: Store the CQI in collation array */
27673 /* Step 2: Increment the tracking count */
27674 cqiCount = &(ue->schCqiInfo.cqiCount);
27675 ue->schCqiInfo.cqiRept[(*cqiCount)++] =
27679 /* Step 3: Check is it time to send the report */
27680 if(RG_SCH_CQIR_IS_TIMTOSEND_CQIREPT(ue))
27682 /* Step 4: if yes, Send StaInd to RRM */
27683 retVal = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&staInfo,
27684 sizeof(RgrStaIndInfo));
27687 DU_LOG("\nERROR --> SCH : Could not "
27688 "allocate memory for sending StaInd CRNTI:%d",ue->ueId);
27692 /* Step 4.1: Fill StaInd for sending collated N CQI reports */
/* Debug-only counter block; local shadows the global of the same name —
 * NOTE(review): incrementing an uninitialized local is suspect; confirm
 * against the elided #ifdef context. */
27695 uint32_t gCqiReptToAppCount;
27696 gCqiReptToAppCount++;
27701 retVal = rgSCHUtlFillSndStaInd(cell, ue, staInfo,
27702 ue->cqiReptCfgInfo.numColltdCqiRept);
27708 } /* End of rgSCHCmnUeDlPwrCtColltCqiRept */
27710 #endif /* End of RGR_CQI_REPT */
27713 * @brief This function checks for the retransmisson
27714 * for a DTX scenario.
27721 * @param[in] RgSchCellCb *cell
27722 * @param[in] RgSchUeCb *ue
/* Disallows retransmission scheduling when the first TB's HARQ feedback
 * was DTX (the UE likely missed the PDCCH grant). */
27727 Void rgSCHCmnChkRetxAllowDtx
27731 RgSchDlHqProcCb *proc,
27739 if ((proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX))
27741 *reTxAllwd = FALSE;
27748 * @brief API for calculating the SI Set Id
27752 * Function: rgSCHCmnGetSiSetId
27754 * This API is used for calculating the SI Set Id, as shown below
27756 * siSetId = 0 siSetId = 1
27757 * |******************|******************|---------------->
27758 * (0,0) (8,0) (16,0) (SFN, SF)
27761 * @param[in] uint16_t sfn
27762 * @param[in] uint8_t sf
27763 * @return uint16_t siSetId
/* Returns the SI-set id for the given (sfn, sf): absolute subframe number
 * divided by the minimum SI periodicity expressed in subframes. */
27765 uint16_t rgSCHCmnGetSiSetId
27769 uint16_t minPeriodicity
27772 /* 80 is the minimum SI periodicity in sf. Also
27773 * all other SI periodicities are multiples of 80 */
27774 return (((sfn * RGSCH_NUM_SUB_FRAMES_5G) + sf) / (minPeriodicity * 10));
27778 * @brief API for calculating the DwPts Rb, Itbs and tbSz
27782 * Function: rgSCHCmnCalcDwPtsTbSz
27784 * @param[in] RgSchCellCb *cell
27785 * @param[in] uint32_t bo
27786 * @param[in/out] uint8_t *rb
27787 * @param[in/out] uint8_t *iTbs
27788 * @param[in] uint8_t lyr
27789 * @param[in] uint8_t cfi
27790 * @return uint32_t tbSz
/* Computes the DwPTS (special subframe) RB count, adjusted iTbs, and TB
 * size for a single codeword: converts the normal-subframe RB requirement
 * into DwPTS RBs based on the reduced REs per RB, applies the static iTbs
 * delta, then grows the RB count until the TBS (looked up at 3/4 of the
 * DwPTS RBs) covers the buffer occupancy or the UE BW cap is hit. */
27792 static uint32_t rgSCHCmnCalcDwPtsTbSz
27803 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27804 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
27805 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
27808 /* DwPts Rb cannot exceed the cell Bw */
27809 numDwPtsRb = RGSCH_MIN(numDwPtsRb, cellDl->maxDlBwPerUe);
27811 /* Adjust the iTbs for optimum usage of the DwPts region.
27812 * Using the same iTbs adjustment will not work for all
27813 * special subframe configurations and iTbs levels. Hence use the
27814 * static iTbs Delta table for adjusting the iTbs */
27815 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs);
27819 while(rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1] < bo*8 &&
27820 numDwPtsRb < cellDl->maxDlBwPerUe)
27825 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
27829 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
27837 * @brief API for calculating the DwPts Rb, Itbs and tbSz
27841 * Function: rgSCHCmnCalcDwPtsTbSz2Cw
27843 * @param[in] RgSchCellCb *cell
27844 * @param[in] uint32_t bo
27845 * @param[in/out] uint8_t *rb
27846 * @param[in] uint8_t maxRb
27847 * @param[in/out] uint8_t *iTbs1
27848 * @param[in/out] uint8_t *iTbs2
27849 * @param[in] uint8_t lyr1
27850 * @param[in] uint8_t lyr2
27851 * @return[in/out] uint32_t *tb1Sz
27852 * @return[in/out] uint32_t *tb2Sz
27853 * @param[in] uint8_t cfi
/* Two-codeword variant of rgSCHCmnCalcDwPtsTbSz: derives the DwPTS RB
 * count, adjusts both codewords' iTbs via the static delta table, grows
 * the RB count until the combined TBS of both CWs (each looked up at 3/4
 * of the DwPTS RBs) covers the buffer occupancy or maxRb is reached, and
 * returns both TB sizes in bytes via tb1Sz/tb2Sz. */
27855 static Void rgSCHCmnCalcDwPtsTbSz2Cw
27870 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27871 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
27872 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
27875 /* DwPts Rb cannot exceed the cell Bw */
27876 numDwPtsRb = RGSCH_MIN(numDwPtsRb, maxRb);
27878 /* Adjust the iTbs for optimum usage of the DwPts region.
27879 * Using the same iTbs adjustment will not work for all
27880 * special subframe configurations and iTbs levels. Hence use the
27881 * static iTbs Delta table for adjusting the iTbs */
27882 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs1);
27883 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs2);
27885 while((rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1] +
27886 rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1])< bo*8 &&
27887 numDwPtsRb < maxRb)
27892 *tb1Sz = rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
27893 *tb2Sz = rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
27903  * @brief Updates the GBR LCGs when datInd is received from MAC
27907  *     Function: rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
27908  *     Purpose:  This function updates the GBR LCGs
27909  *               when datInd is received from MAC.
27913  *  @param[in]  RgSchCellCb      *cell
27914  *  @param[in]  RgSchUeCb        *ue
27915  *  @param[in]  RgInfUeDatInd    *datInd
/* For every LCG reported in the data indication, debit the received
 * byte count from the UE's scheduler-side buffer/rate bookkeeping
 * (GBR: effGbr then effDeltaMbr; non-GBR: effAmbr and the per-UE
 * aggregates), then invoke the UL scheduler-specific LCG update hook. */
Void rgSCHCmnUpdUeDataIndLcg
RgInfUeDatInd  *datInd
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   /* NOTE(review): the bound is RGINF_MAX_LCG_PER_UE - 1, so the last
    * lcgInfo[] entry is never examined — confirm this is intentional. */
   for (idx = 0; (idx < RGINF_MAX_LCG_PER_UE - 1); idx++)
      if (datInd->lcgInfo[idx].bytesRcvd != 0)
         uint8_t  lcgId     = datInd->lcgInfo[idx].lcgId;
         uint32_t bytesRcvd = datInd->lcgInfo[idx].bytesRcvd;
         /* Only update LCGs that are actually configured for this UE. */
         if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
            RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
            /* GBR LCG: drain effGbr first; any excess drains effDeltaMbr.
             * All subtractions below saturate at zero. */
            if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
               if(bytesRcvd > cmnLcg->effGbr)
                  bytesRcvd -= cmnLcg->effGbr;
                  cmnLcg->effDeltaMbr = (cmnLcg->effDeltaMbr > bytesRcvd) ? \
                                        (cmnLcg->effDeltaMbr - bytesRcvd) : (0);
                  cmnLcg->effGbr = 0;
                  cmnLcg->effGbr -= bytesRcvd;
               /* To keep BS updated with the amount of data received for the GBR */
               cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
                                    (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
               /* Schedulable BS is capped by the remaining GBR+MBR budget. */
               cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr+cmnLcg->effDeltaMbr);
            /* Non-GBR LCG (LCG0 excluded): debit the per-UE AMBR budget
             * and the aggregate non-GBR / non-LCG0 buffer counters. */
            else if(lcgId != 0)
               ue->ul.effAmbr = (ue->ul.effAmbr > datInd->lcgInfo[idx].bytesRcvd) ? \
                                (ue->ul.effAmbr - datInd->lcgInfo[idx].bytesRcvd) : (0);
               cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
                                    (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
               cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
               ue->ul.nonGbrLcgBs = (ue->ul.nonGbrLcgBs > datInd->lcgInfo[idx].bytesRcvd) ? \
                                    (ue->ul.nonGbrLcgBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
            ue->ul.nonLcg0Bs = (ue->ul.nonLcg0Bs > datInd->lcgInfo[idx].bytesRcvd) ? \
                               (ue->ul.nonLcg0Bs - datInd->lcgInfo[idx].bytesRcvd) : (0);
   /* Dispatch to the EMTC or legacy UL scheduler's LCG-update callback;
    * failures are only logged, not propagated. */
   if(TRUE == ue->isEmtcUe)
      if (cellSch->apisEmtcUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
         DU_LOG("\nERROR  -->  SCH : rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure");
      if (cellSch->apisUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
         DU_LOG("\nERROR  -->  SCH : rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure");
27996 /** @brief This function initializes DL allocation lists and prepares
28001  *     Function: rgSCHCmnInitRbAlloc
28003  *  @param [in]  RgSchCellCb    *cell
/* Per-TTI reset of DL allocation bookkeeping: clears the common RB-alloc
 * info, fetches the subframe being scheduled, re-initialises the 5GTF
 * per-beam VRBG counters, records the subframe in the dedicated / msg4 /
 * CCCH-SDU alloc structures, evaluates the LTE-Adv ABS pattern for this
 * subframe, and folds in any SPS allocations already made on it. */
static Void rgSCHCmnInitRbAlloc
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   CmLteTimingInfo frm;
   /* Initializing RgSchCmnDlRbAllocInfo structure.*/
   rgSCHCmnInitDlRbAllocInfo(&cellSch->allocInfo);
   frm = cellSch->dl.time;
   dlSf = rgSCHUtlSubFrmGet(cell, frm);
   /* Reset 5GTF group-scheduling limits and per-beam VRBG usage. */
   dlSf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
   dlSf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
   for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
      dlSf->sfBeamInfo[idx].totVrbgAllocated = 0;
      dlSf->sfBeamInfo[idx].totVrbgRequired = 0;
      dlSf->sfBeamInfo[idx].vrbgStart = 0;
   dlSf->remUeCnt = cellSch->dl.maxUePerDlSf;
   /* Updating the Subframe information in RBAllocInfo */
   cellSch->allocInfo.dedAlloc.dedDlSf = dlSf;
   cellSch->allocInfo.msg4Alloc.msg4DlSf = dlSf;
   /* LTE_ADV_FLAG_REMOVED_START */
   /* Determine next scheduling subframe is ABS or not:
    * index = (sfn * subframes-per-frame + slot) mod pattern length. */
   if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
      cell->lteAdvCb.absPatternDlIdx =
         ((frm.sfn*RGSCH_NUM_SUB_FRAMES_5G) + frm.slot) % RGR_ABS_PATTERN_LEN;
      cell->lteAdvCb.absDlSfInfo = (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern[
            cell->lteAdvCb.absPatternDlIdx]);
      cell->lteAdvCb.absDlSfInfo = RG_SCH_ABS_DISABLED;
   /* LTE_ADV_FLAG_REMOVED_END */
   cellSch->allocInfo.ccchSduAlloc.ccchSduDlSf = dlSf;
   /* Update subframe-wide allocation information with SPS allocation */
   rgSCHCmnSpsDlUpdDlSfAllocWithSps(cell, frm, dlSf);
28070  * @brief Sends a transmission-mode (TM) change indication to RRM.
28075  *     Function: rgSCHCmnSendTxModeInd(cell, ueUl, newTxMode)
28076  *     Purpose:  This function sends the TX mode Change
28077  *               indication to RRM
28082  *  @param[in]  RgSchCellCb          *cell
28083  *  @param[in]  RgSchUeCb            *ue
28084  *  @param[in]  uint8_t              newTxMode
static Void rgSCHCmnSendTxModeInd
   RgmTransModeInd *txModeChgInd;
   RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
   /* Send only when a TM-reconfig transmit-diversity force is NOT
    * already in progress for this UE. */
   if(!(ueDl->mimoInfo.forceTD & RG_SCH_CMN_TD_TXMODE_RECFG))
      /* Allocate the indication message from the RGM SAP's pst
       * region/pool; on allocation failure no indication is sent
       * (early return in lines elided from this view). */
      if(SGetSBuf(cell->rgmSap->sapCfg.sapPst.region,
               cell->rgmSap->sapCfg.sapPst.pool, (Data**)&txModeChgInd,
               sizeof(RgmTransModeInd)) != ROK)
      RG_SCH_FILL_RGM_TRANSMODE_IND(ue->ueId, cell->cellId, newTxMode, txModeChgInd);
      RgUiRgmChangeTransModeInd(&(cell->rgmSap->sapCfg.sapPst),
            cell->rgmSap->sapCfg.suId, txModeChgInd);
   /* Reset the mode-change hysteresis counters and the link-adaptation
    * iTbs delta so accumulation restarts after the indication. */
   ue->mimoInfo.txModUpChgFactor = 0;
   ue->mimoInfo.txModDownChgFactor = 0;
   ueDl->laCb[0].deltaiTbs = 0;
28120  * @brief Checks and updates the TM mode change threshold based on cqiiTbs and
28125  *     Function: rgSchCheckAndTriggerModeChange(cell, ueUl, iTbsNew)
28126  *     Purpose:  This function updates and checks the threshold for TM mode
28131  *  @param[in]  RgSchCellCb          *cell
28132  *  @param[in]  RgSchUeCb            *ue
28133  *  @param[in]  uint8_t              iTbs
/* Hysteresis-based TM4 <-> TM3 switching: the gap between the reported
 * iTbs and the previous iTbs credits/debits a per-direction counter;
 * when a counter reaches its threshold, rgSCHCmnSendTxModeInd() is
 * invoked to request the new transmission mode from RRM. */
Void rgSchCheckAndTriggerModeChange
uint8_t  reportediTbs,
   RgrTxMode    txMode;        /*!< UE's Transmission Mode */
   RgrTxMode    modTxMode;     /*!< UE's Transmission Mode */
   txMode = ue->mimoInfo.txMode;
   /* Check for Step down */
   /* Step down only when TM4 is configured. */
   if(RGR_UE_TM_4 == txMode)
      /* Reported iTbs at least STEPDOWN_CHECK_FACTOR above the previous
       * one credits the step-down counter.
       * NOTE(review): the credit uses RG_SCH_MODE_CHNG_STEPUP_FACTOR —
       * verify the intended factor constant. */
      if((previTbs <= reportediTbs) && ((reportediTbs - previTbs) >= RG_SCH_MODE_CHNG_STEPDOWN_CHECK_FACTOR))
         ue->mimoInfo.txModDownChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
         ue->mimoInfo.txModDownChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
      /* Counter is floored at -STEPDOWN_THRSHD so one good report cannot
       * be outweighed by an unbounded negative history. */
      ue->mimoInfo.txModDownChgFactor =
         RGSCH_MAX(ue->mimoInfo.txModDownChgFactor, -(RG_SCH_MODE_CHNG_STEPDOWN_THRSHD));
      if(ue->mimoInfo.txModDownChgFactor >= RG_SCH_MODE_CHNG_STEPDOWN_THRSHD)
         /* Trigger Mode step down */
         modTxMode = RGR_UE_TM_3;
         rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
   /* Check for Step up */
   /* Step Up only when TM3 is configured, Max possible Mode is TM4*/
   if(RGR_UE_TM_3 == txMode)
      /* Credit the step-up counter when the reported iTbs regressed
       * below the previous one, or the previous iTbs is already at max. */
      if((previTbs > reportediTbs) || (maxiTbs == previTbs))
         ue->mimoInfo.txModUpChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
         ue->mimoInfo.txModUpChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
      /* Floor the counter at -STEPUP_THRSHD (same rationale as above). */
      ue->mimoInfo.txModUpChgFactor =
         RGSCH_MAX(ue->mimoInfo.txModUpChgFactor, -(RG_SCH_MODE_CHNG_STEPUP_THRSHD));
      /* Check if TM step up need to be triggered */
      if(ue->mimoInfo.txModUpChgFactor >= RG_SCH_MODE_CHNG_STEPUP_THRSHD)
         /* Trigger mode change */
         modTxMode = RGR_UE_TM_4;
         rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
28205  * @brief Returns whether CSG UEs currently have DL scheduling priority
28209  *     Function: rgSCHCmnIsDlCsgPrio (cell)
28210  *     Purpose:  This function returns if csg UEs are
28211  *               having priority at current time
28213  *     Invoked by: Scheduler
28215  *  @param[in]  RgSchCellCb      *cell
28216  *  @return     Bool  TRUE when CSG UEs are prioritized
28217  *  (the actual return statements are in lines elided from this view)
Bool rgSCHCmnIsDlCsgPrio(RgSchCellCb *cell)
   RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
   /* Calculating the percentage resource allocated */
   /* CSG prioritization only applies to hybrid-access cells. */
   if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
   /* Non-CSG UEs got less than their guaranteed percentage of DL PRBs:
    * keep serving them; otherwise CSG UEs take priority.
    * NOTE(review): divides by totPrbCnt — no zero check visible here. */
   if(((cmnDlCell->ncsgPrbCnt * 100) / cmnDlCell->totPrbCnt) < cell->minDlResNonCsg)
28244  * @brief Returns whether CSG UEs currently have UL scheduling priority
28248  *     Function: rgSCHCmnIsUlCsgPrio (cell)
28249  *     Purpose:  This function returns if csg UEs are
28250  *               having priority at current time
28252  *     Invoked by: Scheduler
28254  *  @param[in]  RgSchCellCb      *cell
28255  *  @return     Bool  TRUE when CSG UEs are prioritized
28256  *  (the actual return statements are in lines elided from this view)
Bool rgSCHCmnIsUlCsgPrio(RgSchCellCb *cell)
   RgSchCmnUlCell  *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
   /* Calculating the percentage resource allocated */
   /* CSG prioritization only applies to hybrid-access cells (mirrors
    * rgSCHCmnIsDlCsgPrio for the uplink). */
   if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
   /* Non-CSG UEs below their guaranteed UL PRB share keep priority.
    * NOTE(review): divides by totPrbCnt — no zero check visible here. */
   if (((cmnUlCell->ncsgPrbCnt * 100) /cmnUlCell->totPrbCnt) < cell->minUlResNonCsg)
28282 /** @brief DL scheduler for SPS, and all other downlink data
28286  *     Function: rgSchCmnPreDlSch
28288  *  @param [in]  Inst        schInst;
/* Pre-scheduling pass for carrier aggregation: runs the DL-specific
 * pre-scheduler on the primary cell (cell[0]) when DL data scheduling
 * is enabled, then fills cellLst with all nCell cells ordered by
 * ascending remUeCnt of their scheduling subframe (insertion sort), so
 * downstream processing handles the most-loaded subframes last. */
Void rgSchCmnPreDlSch
RgSchCellCb         **cell,
RgSchCellCb         **cellLst
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell[0]);
   /* Guard against an out-of-range carrier count (error handling in
    * lines elided from this view). */
   if(nCell > CM_LTE_MAX_CELLS)
   if (cell[0]->isDlDataAllwd && (cell[0]->stopDlSch == FALSE))
      /* Specific DL scheduler to perform UE scheduling */
      cellSch->apisDl->rgSCHDlPreSched(cell[0]);
      /* Rearranging the cell entries based on their remueCnt in SF.
       * cells will be processed in the order of number of ue scheduled
       */
      for (idx = 0; idx < nCell; idx++)
         cellSch = RG_SCH_CMN_GET_CELL(cell[idx]);
         sf = cellSch->allocInfo.dedAlloc.dedDlSf;
         /* Provisionally append, then find the insertion point. */
         cellLst[idx] = cell[idx];
         for(j = 0; j < idx; j++)
            RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cellLst[j]);
            RgSchDlSf *subfrm = cmnCell->allocInfo.dedAlloc.dedDlSf;
            /* Smaller remUeCnt sorts earlier: shift the tail right and
             * insert this cell at position j. */
            if(sf->remUeCnt < subfrm->remUeCnt)
               for(k = idx; k > j; k--)
                  cellLst[k] = cellLst[k-1];
               cellLst[j] = cell[idx];
      /* DL scheduling disabled for the primary cell: plain copy of the
       * cell list, no reordering. */
      for (idx = 0; idx < nCell; idx++)
         cellLst[idx] = cell[idx];
28357 /** @brief DL scheduler for SPS, and all other downlink data
28360  *     Function: rgSchCmnPstDlSch
28362  *  @param [in]  Inst        schInst;
/* Post-scheduling hook: forwards to the DL scheduler's post-schedule
 * callback, but only while DL data scheduling is enabled and not
 * stopped for the cell (same gating as rgSchCmnPreDlSch). */
Void rgSchCmnPstDlSch(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
      cellSch->apisDl->rgSCHDlPstSched(cell->instIdx);
/* Computes the periodic CQI (PUCCH) report payload size in bits for a
 * UE, selected by its configured periodic reporting mode and Tx antenna
 * count; for TM3/TM4 the last reported rank (ri) also shapes the size.
 * Several mode/antenna branch bodies are in lines elided from this
 * view — the exact bit counts follow the 36.213 periodic CSI tables
 * (TODO confirm sizes there). */
uint8_t rgSCHCmnCalcPcqiBitSz(RgSchUeCb *ueCb, uint8_t numTxAnt)
   uint8_t confRepMode;
   RgSchUePCqiCb *cqiCb = ueCb->nPCqiCb;
   confRepMode = cqiCb->cqiCfg.cqiSetup.prdModeEnum;
   /* Rank reporting only applies in TM3/TM4; other TMs take the non-RI
    * path (handling in elided lines). */
   if((ueCb->mimoInfo.txMode != RGR_UE_TM_3) &&
         (ueCb->mimoInfo.txMode != RGR_UE_TM_4))
   ri = cqiCb->perRiVal;
   /* Per-mode sizing; cqiCb->label carries the mode/bandwidth-dependent
    * sub-band label bits added on top of the base CQI field. */
   switch(confRepMode)
      case RGR_PRD_CQI_MOD10:
      case RGR_PRD_CQI_MOD11:
         else if(numTxAnt == 4)
            /* This is number of antenna case 1.
             * This is not applicable for Mode 1-1.
             * So setting it to invalid value */
      case RGR_PRD_CQI_MOD20:
         pcqiSz = 4 + cqiCb->label;
      case RGR_PRD_CQI_MOD21:
         else if(numTxAnt == 4)
            /* This might be number of antenna case 1.
             * For mode 2-1 wideband case only antenna port 2 or 4 is supported.
             * So setting invalid value.*/
         pcqiSz = 4 + cqiCb->label;
         pcqiSz = 7 + cqiCb->label;
28505 /** @brief DL scheduler for SPS, and all other downlink data
28509  *     Function: rgSCHCmnDlSch
28511  *  @param [in]  RgSchCellCb *cell
/* Top-level common DL scheduling entry for one TTI: aborts the TTI when
 * dynamic TDD has already marked the target subframe as UL; otherwise
 * runs new-transmission scheduling, the common RB allocator, allocation
 * finalization, PDCCH-order allocation and PUCCH group power control. */
Void rgSCHCmnDlSch(RgSchCellCb *cell)
   RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
   RgSchDynTddCb *rgSchDynTddInfo = &(rgSchCb[cell->instIdx].rgSchDynTdd);
   uint16_t dlCntrlSfIdx;
   dlSf = rgSCHUtlSubFrmGet(cell, cellSch->dl.time);
   /* Dynamic TDD: look RG_SCH_CMN_DL_DELTA subframes ahead; if that
    * subframe is flagged DL-control/UL-data, count and log the clash
    * (counter logged for cell 1 only) and skip this TTI's DL scheduling
    * (the early return is in lines elided from this view). */
   if (rgSchDynTddInfo->isDynTddEnbld)
      RG_SCH_DYN_TDD_GET_SFIDX(dlCntrlSfIdx, rgSchDynTddInfo->crntDTddSfIdx,
            RG_SCH_CMN_DL_DELTA);
      if(RG_SCH_DYNTDD_DLC_ULD == rgSchDynTddInfo->sfInfo[dlCntrlSfIdx].sfType)
         if(1 == cell->cellId)
            ul5gtfsidDlAlreadyMarkUl++;
            DU_LOG("\nINFO  -->  SCH : ul5gtfsidDlAlreadyMarkUl: %d, [sfn:sf] [%04d:%02d]\n",
                  ul5gtfsidDlAlreadyMarkUl, cellSch->dl.time.sfn,
                  cellSch->dl.time.slot);
   /* Specific DL scheduler to perform UE scheduling */
   cellSch->apisDl->rgSCHDlNewSched(cell, &cellSch->allocInfo);
   /* LTE_ADV_FLAG_REMOVED_END */
   /* call common allocator for RB Allocation */
   rgSCHCmnDlRbAlloc(cell, &cellSch->allocInfo);
   /* Finalize the Allocations for requested Against alloced */
   rgSCHCmnDlAllocFnlz(cell);
   /* Perform Pdcch allocations for PDCCH Order Q.
    * As of now, giving this the least preference.
    * This func call could be moved above other allocations
    */
   rgSCHCmnGenPdcchOrder(cell, dlSf);
   /* Do group power control for PUCCH */
   rgSCHCmnGrpPwrCntrlPucch(cell, dlSf);
28570 /**********************************************************************
28573 **********************************************************************/