1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /************************************************************************
25 Desc: C source code for Entry point functions
29 **********************************************************************/
31 /** @file rg_sch_cmn.c
32 @brief This file implements the scheduler's main access to MAC layer code. */
/* Identifiers consumed by the RLOG logging framework to tag log records
 * emitted from this file (module name/id and a per-file id). */
35 static const char* RLOG_MODULE_NAME="MAC";
36 static int RLOG_FILE_ID=187;
37 static int RLOG_MODULE_ID=4096;
39 /* header include files -- defines (.h) */
40 #include "common_def.h"
46 #include "rg_sch_err.h"
47 #include "rg_sch_inf.h"
49 #include "rg_sch_cmn.h"
50 #include "rl_interface.h"
51 #include "rl_common.h"
53 /* header/extern include files (.x) */
54 #include "tfu.x" /* TFU types */
55 #include "lrg.x" /* layer management typedefs for MAC */
56 #include "rgr.x" /* layer management typedefs for MAC */
57 #include "rgm.x" /* layer management typedefs for MAC */
58 #include "rg_sch_inf.x" /* typedefs for Scheduler */
59 #include "rg_sch.x" /* typedefs for Scheduler */
60 #include "rg_sch_cmn.x" /* typedefs for Scheduler */
62 #include "lrg.x" /* Stats Structures */
63 #endif /* MAC_SCH_STATS */
66 #endif /* __cplusplus */
/* eMTC UL statistics counters; names suggest they count TOM SR indications
 * and BSR-timer-driven transmissions — incremented outside this chunk,
 * TODO confirm against the full file. */
69 uint32_t emtcStatsUlTomSrInd;
70 uint32_t emtcStatsUlBsrTmrTxp;
/* Absolute difference of two iTBS values. NOTE: function-like macro —
 * both arguments may be evaluated twice; do not pass expressions with
 * side effects. */
73 #define RG_ITBS_DIFF(_x, _y) ((_x) > (_y) ? (_x) - (_y) : (_y) - (_x))
74 Void rgSCHSc1UlInit ARGS((RgUlSchdApis *apis));
75 #ifdef RG_PHASE2_SCHED
76 Void rgSCHRrUlInit ARGS((RgUlSchdApis *apis));
78 Void rgSCHEmtcHqInfoFree ARGS((RgSchCellCb *cell, RgSchDlHqProcCb *hqP));
79 Void rgSCHEmtcRrUlInit ARGS((RgUlSchdApis *apis));
80 Void rgSCHEmtcCmnDlInit ARGS((Void));
81 Void rgSCHEmtcCmnUlInit ARGS((Void));
82 Void rgSCHEmtcCmnUeNbReset ARGS((RgSchUeCb *ueCb));
83 RgSchCmnCqiToTbs *rgSchEmtcCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
85 Void rgSCHMaxciUlInit ARGS((RgUlSchdApis *apis));
86 Void rgSCHPfsUlInit ARGS((RgUlSchdApis *apis));
88 Void rgSCHSc1DlInit ARGS((RgDlSchdApis *apis));
89 #ifdef RG_PHASE2_SCHED
90 Void rgSCHRrDlInit ARGS((RgDlSchdApis *apis));
92 Void rgSCHEmtcRrDlInit ARGS((RgDlEmtcSchdApis *apis));
94 Void rgSCHMaxciDlInit ARGS((RgDlSchdApis *apis));
95 Void rgSCHPfsDlInit ARGS((RgDlSchdApis *apis));
97 Void rgSCHDlfsInit ARGS((RgDlfsSchdApis *apis));
101 Void rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl ARGS((RgSchCellCb *cell));
102 Void rgSCHCmnGetEmtcDciFrmtSizes ARGS((RgSchCellCb *cell));
103 Void rgSCHEmtcRrUlProcRmvFrmRetx ARGS((RgSchCellCb *cell, RgSchUlHqProcCb *proc));
104 S16 rgSCHCmnPrecompEmtcMsg3Vars
106 RgSchCmnUlCell *cellUl,
112 Void rgSCHEmtcCmnUeCcchSduDel
117 Void rgSCHEmtcRmvFrmTaLst
119 RgSchCmnDlCell *cellDl,
122 Void rgSCHEmtcInitTaLst
124 RgSchCmnDlCell *cellDl
126 Void rgSCHEmtcAddToTaLst
128 RgSchCmnDlCell *cellDl,
135 static Void rgSCHDlSiSched ARGS((RgSchCellCb *cell,
136 RgSchCmnDlRbAllocInfo *allocInfo,
137 RgInfSfAlloc *subfrmAlloc));
138 static Void rgSCHChkNUpdSiCfg ARGS((RgSchCellCb *cell));
139 static Void rgSCHSelectSi ARGS((RgSchCellCb *cell));
140 #endif /*RGR_SI_SCH*/
141 /* LTE_ADV_FLAG_REMOVED_START */
144 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
152 static S16 rgSCHCmnBuildRntpInfo (
160 static Void rgSCHCmnNonDlfsType0Alloc
164 RgSchDlRbAlloc *allocInfo,
/* Maps UL HARQ redundancy-version index (0..3) to the IMCS value that
 * signals it; 29/30/31 are the reserved MCS indices for RV 1/2/3 per
 * 36.213 Table 8.6.1-1. The entry 32 for RV 0 looks like an out-of-range
 * sentinel — TODO confirm how callers treat it. */
167 static uint8_t rgSchCmnUlRvIdxToIMcsTbl[4] = {32, 30, 31, 29};
168 static Void rgSCHCmnUlNonadapRetx ARGS((
169 RgSchCmnUlCell *cellUl,
173 static Void rgSCHCmnUlSfRlsRetxProcs ARGS((
179 static S16 rgSCHCmnUlMdfyGrntForCqi ARGS((
186 uint32_t stepDownItbs,
190 static Void rgSCHCmnFillHqPPdcchDciFrmt1 ARGS((
192 RgSchDlRbAlloc *rbAllocInfo,
193 RgSchDlHqProcCb *hqP,
197 static Void rgSCHCmnFillHqPPdcchDciFrmt1A ARGS((
199 RgSchDlRbAlloc *rbAllocInfo,
200 RgSchDlHqProcCb *hqP,
204 static Void rgSCHCmnFillHqPPdcchDciFrmt1B ARGS((
206 RgSchDlRbAlloc *rbAllocInfo,
207 RgSchDlHqProcCb *hqP,
211 static Void rgSCHCmnFillHqPPdcchDciFrmt2 ARGS((
213 RgSchDlRbAlloc *rbAllocInfo,
214 RgSchDlHqProcCb *hqP,
218 static Void rgSCHCmnFillHqPPdcchDciFrmt2A ARGS((
220 RgSchDlRbAlloc *rbAllocInfo,
221 RgSchDlHqProcCb *hqP,
228 Void rgSCHCmnDlSpsSch
232 /* LTE_ADV_FLAG_REMOVED_END */
234 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc ARGS((
236 RgSchCmnDlRbAllocInfo *allocInfo
238 static Void rgSCHBcchPcchDlRbAlloc ARGS((
240 RgSchCmnDlRbAllocInfo *allocInfo
242 static Void rgSCHCmnDlBcchPcchAlloc ARGS((
246 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
249 TfuDlCqiPucch *pucchCqi,
250 RgrUeCqiRept *ueCqiRept,
252 Bool *is2ndCwCqiAvail
254 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
257 TfuDlCqiPusch *puschCqi,
258 RgrUeCqiRept *ueCqiRept,
260 Bool *is2ndCwCqiAvail
263 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
266 TfuDlCqiPucch *pucchCqi
268 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
271 TfuDlCqiPusch *puschCqi
274 /* ccpu00117452 - MOD - Changed macro name from
275 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
277 static S16 rgSCHCmnUeDlPwrCtColltCqiRept ARGS((
280 RgrUeCqiRept *ueCqiRept));
281 #endif /* End of RGR_CQI_REPT */
282 /* Fix: syed align multiple UEs to refresh at same time */
283 static Void rgSCHCmnGetRefreshPer ARGS((
287 static S16 rgSCHCmnApplyUeRefresh ARGS((
291 Void rgSCHCmnDlSetUeAllocLmtLa ARGS
296 static Void rgSCHCheckAndSetTxScheme ARGS
304 static uint32_t rgSCHCmnCalcDwPtsTbSz ARGS
314 static Void rgSCHCmnCalcDwPtsTbSz2Cw ARGS
330 static Void rgSCHCmnInitRbAlloc ARGS
336 #endif /* __cplusplus */
/* Common-scheduler entry points exposed to the MAC layer. */
340 RgSchdApis rgSchCmnApis;
/* Per-scheduler-type UL/DL API dispatch tables, populated at init
 * (presumably via the *_INITS tables below — confirm in rgSCHCmnInit). */
341 static RgUlSchdApis rgSchUlSchdTbl[RGSCH_NUM_SCHEDULERS];
342 static RgDlSchdApis rgSchDlSchdTbl[RGSCH_NUM_SCHEDULERS];
/* eMTC counterparts of the dispatch tables above. */
344 static RgUlSchdApis rgSchEmtcUlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
345 static RgDlEmtcSchdApis rgSchEmtcDlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
347 #ifdef RG_PHASE2_SCHED
348 static RgDlfsSchdApis rgSchDlfsSchdTbl[RGSCH_NUM_DLFS_SCHEDULERS];
/* Init-function lists for the available UL/DL schedulers; the macros are
 * defined elsewhere and expand to arrays of initializer callbacks. */
350 RgUlSchdInits rgSchUlSchdInits = RGSCH_ULSCHED_INITS;
351 RgDlSchdInits rgSchDlSchdInits = RGSCH_DLSCHED_INITS;
/* eMTC scheduler init lists. */
353 static RgEmtcUlSchdInits rgSchEmtcUlSchdInits = RGSCH_EMTC_ULSCHED_INITS;
354 static RgEmtcDlSchdInits rgSchEmtcDlSchdInits = RGSCH_EMTC_DLSCHED_INITS;
/* DL frequency-selective scheduler inits, only with phase-2 scheduling
 * and TFU upgrade enabled. */
356 #if (defined (RG_PHASE2_SCHED) && defined (TFU_UPGRADE))
357 static RgDlfsSchdInits rgSchDlfsSchdInits = RGSCH_DLFSSCHED_INITS;
/* Signature of a per-transmission-mode DL RB allocation routine: attempts
 * to allocate RBs for 'bo' bytes on 'subFrm', reporting the satisfied
 * amount via 'effBo'. */
360 typedef Void (*RgSchCmnDlAllocRbFunc) ARGS((RgSchCellCb *cell, RgSchDlSf *subFrm,
361 RgSchUeCb *ue, uint32_t bo, uint32_t *effBo, RgSchDlHqProcCb *proc,
362 RgSchCmnDlRbAllocInfo *cellWdAllocInfo));
/* Signature of a routine that derives precoding information from the
 * layer count and whether both codewords are enabled. */
363 typedef uint8_t (*RgSchCmnDlGetPrecInfFunc) ARGS((RgSchCellCb *cell, RgSchUeCb *ue,
364 uint8_t numLyrs, Bool bothCwEnbld));
365 static Void rgSCHCmnDlAllocTxRbTM1 ARGS((
371 RgSchDlHqProcCb *proc,
372 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
374 static Void rgSCHCmnDlAllocTxRbTM2 ARGS((
380 RgSchDlHqProcCb *proc,
381 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
383 static Void rgSCHCmnDlAllocTxRbTM3 ARGS((
389 RgSchDlHqProcCb *proc,
390 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
392 static Void rgSCHCmnDlAllocTxRbTM4 ARGS((
398 RgSchDlHqProcCb *proc,
399 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
402 static Void rgSCHCmnDlAllocTxRbTM5 ARGS((
408 RgSchDlHqProcCb *proc,
409 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
412 static Void rgSCHCmnDlAllocTxRbTM6 ARGS((
418 RgSchDlHqProcCb *proc,
419 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
421 static Void rgSCHCmnDlAllocTxRbTM7 ARGS((
427 RgSchDlHqProcCb *proc,
428 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
430 static Void rgSCHCmnDlAllocRetxRbTM1 ARGS((
436 RgSchDlHqProcCb *proc,
437 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
439 static Void rgSCHCmnDlAllocRetxRbTM2 ARGS((
445 RgSchDlHqProcCb *proc,
446 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
448 static Void rgSCHCmnDlAllocRetxRbTM3 ARGS((
454 RgSchDlHqProcCb *proc,
455 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
457 static Void rgSCHCmnDlAllocRetxRbTM4 ARGS((
463 RgSchDlHqProcCb *proc,
464 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
467 static Void rgSCHCmnDlAllocRetxRbTM5 ARGS((
473 RgSchDlHqProcCb *proc,
474 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
477 static Void rgSCHCmnDlAllocRetxRbTM6 ARGS((
483 RgSchDlHqProcCb *proc,
484 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
486 static Void rgSCHCmnDlAllocRetxRbTM7 ARGS((
492 RgSchDlHqProcCb *proc,
493 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
497 static uint8_t rgSchGetN1ResCount ARGS ((
501 Bool rgSchCmnChkDataOnlyOnPcell
507 uint8_t rgSCHCmnCalcPcqiBitSz
/* Dispatch tables of DL RB-allocation routines indexed by
 * (transmission mode - 1). TM5 (index 4) is unsupported: NULLP.
 * NOTE(review): both a [7] and a [9] variant of each table are defined
 * below; they are presumably selected by mutually exclusive preprocessor
 * conditionals (TM8/TM9 support) whose #if lines are not visible in this
 * chunk — confirm against the full file. */
514 /* Functions specific to each transmission mode for DL Tx RB Allocation*/
515 RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[7] = {rgSCHCmnDlAllocTxRbTM1,
516 rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
517 NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7};
519 /* Functions specific to each transmission mode for DL Retx RB Allocation*/
520 RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[7] = {rgSCHCmnDlAllocRetxRbTM1,
521 rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
522 NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7};
524 /* Functions specific to each transmission mode for DL Tx RB Allocation*/
525 RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[9] = {rgSCHCmnDlAllocTxRbTM1,
526 rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
527 NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7, NULLP, NULLP};
529 /* Functions specific to each transmission mode for DL Retx RB Allocation*/
530 RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[9] = {rgSCHCmnDlAllocRetxRbTM1,
531 rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
532 NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7, NULLP, NULLP};
537 static uint8_t rgSCHCmnDlTM3PrecInf2 ARGS((
543 static uint8_t rgSCHCmnDlTM3PrecInf4 ARGS((
549 static uint8_t rgSCHCmnDlTM4PrecInf2 ARGS((
555 static uint8_t rgSCHCmnDlTM4PrecInf4 ARGS((
561 /* Functions specific to each transmission mode for DL RB Allocation*/
562 RgSchCmnDlGetPrecInfFunc getPrecInfoFunc[2][2] = {
563 {rgSCHCmnDlTM3PrecInf2, rgSCHCmnDlTM3PrecInf4},
564 {rgSCHCmnDlTM4PrecInf2, rgSCHCmnDlTM4PrecInf4}
567 static S16 rgSCHCmnDlAlloc1CwRetxRb ARGS((
571 RgSchDlHqTbCb *tbInfo,
576 static S16 rgSCHCmnDlAlloc2CwRetxRb ARGS((
580 RgSchDlHqProcCb *proc,
585 static Void rgSCHCmnDlTM3TxTx ARGS((
591 RgSchDlHqProcCb *proc,
592 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
594 static Void rgSCHCmnDlTM3TxRetx ARGS((
600 RgSchDlHqProcCb *proc,
601 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
603 static Void rgSCHCmnDlTM3RetxRetx ARGS((
609 RgSchDlHqProcCb *proc,
610 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
613 static Void rgSCHCmnNonDlfsUpdTyp2Alloc ARGS((
619 /* LTE_ADV_FLAG_REMOVED_START */
621 static Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc ARGS((
628 /* LTE_ADV_FLAG_REMOVED_END */
629 static Void rgSCHCmnDlRbInfoAddUeTx ARGS((
631 RgSchCmnDlRbAllocInfo *allocInfo,
633 RgSchDlHqProcCb *proc
635 static Void rgSCHCmnDlRbInfoAddUeRetx ARGS((
637 RgSchCmnDlRbAllocInfo *allocInfo,
641 static Void rgSCHCmnDlAdd2NonSchdRetxLst ARGS((
642 RgSchCmnDlRbAllocInfo *allocInfo,
644 RgSchDlHqProcCb *proc
646 static S16 rgSCHCmnDlAlloc2CwTxRetxRb ARGS((
650 RgSchDlHqTbCb *reTxTb,
655 static S16 rgSCHCmnDlAlloc2CwTxRb ARGS((
659 RgSchDlHqProcCb *proc,
664 static S16 rgSCHCmnDlAlloc1CwTxRb ARGS((
668 RgSchDlHqTbCb *tbInfo,
674 static Void rgSCHCmnFillHqPTb ARGS((
676 RgSchDlRbAlloc *rbAllocInfo,
682 static Void rgSCHCmnDlGetBestFitHole ARGS((
685 uint32_t *crntAllocMask,
688 uint8_t *allocNumRbs,
691 #ifdef RGSCH_SPS_UNUSED
692 static uint32_t rgSCHCmnGetRaType1Mask ARGS((
698 static uint32_t rgSCHCmnGetRaType0Mask ARGS((
702 static uint32_t rgSCHCmnGetRaType2Mask ARGS((
708 Bool rgSCHCmnRetxAllocAvoid ARGS((
711 RgSchDlHqProcCb *proc
714 uint16_t rgSCHCmnGetSiSetId ARGS((
717 uint16_t minPeriodicity
722 //TODO_SID: Currently table is only for 100 Prbs. Need to modify wrt VRBG table 8.1.5.2.1-1 V5G_213
/* 5GTF transport-block size (bits) per MCS index, fixed at 100 PRBs. */
723 uint32_t rgSch5gtfTbSzTbl[MAX_5GTF_MCS] =
724 {1864, 5256, 8776, 13176, 17576, 21976, 26376, 31656, 35176, 39576, 43976, 47496, 52776, 59376, 66392};
/* Debug/statistics counters for the 5GTF scheduling path; each name
 * indicates the event counted. Incremented by scheduler code outside
 * this chunk. */
725 uint32_t g5gtfTtiCnt = 0;
726 uint32_t gUl5gtfSrRecv = 0;
727 uint32_t gUl5gtfBsrRecv = 0;
728 uint32_t gUl5gtfUeSchPick = 0;
729 uint32_t gUl5gtfPdcchSchd = 0;
730 uint32_t gUl5gtfAllocAllocated = 0;
731 uint32_t gUl5gtfUeRbAllocDone = 0;
732 uint32_t gUl5gtfUeRmvFnlzZeroBo = 0;
733 uint32_t gUl5gtfUeFnlzReAdd = 0;
734 uint32_t gUl5gtfPdcchSend = 0;
735 uint32_t gUl5gtfRbAllocFail = 0;
736 uint32_t ul5gtfsidUlMarkUl = 0;
737 uint32_t ul5gtfsidDlSchdPass = 0;
738 uint32_t ul5gtfsidDlAlreadyMarkUl = 0;
739 uint32_t ul5gtfTotSchdCnt = 0;
742 /* CQI Offset Index to Beta CQI Offset value mapping,
743 * stored as parts per 1000. Reserved is set to 0.
744 * Refer 36.213 sec 8.6.3 Tbl 8.6.3-3 */
745 uint32_t rgSchCmnBetaCqiOffstTbl[16] = {0, 0, 1125,
746 1250, 1375, 1625, 1750, 2000, 2250, 2500, 2875,
747 3125, 3500, 4000, 5000, 6250};
/* HARQ-ACK offset index to beta value, parts per 1000; last entry is
 * reserved (0). Refer 36.213 Tbl 8.6.3-1. */
748 uint32_t rgSchCmnBetaHqOffstTbl[16] = {2000, 2500, 3125,
749 4000, 5000, 6250, 8000,10000, 12625, 15875, 20000,
750 31000, 50000,80000,126000,0};
751 uint32_t rgSchCmnBetaRiOffstTbl[16] = {1250, 1625, 2000,
752 2500, 3125, 4000, 5000, 6250, 8000, 10000,12625,
/* 3-bit differential CQI value -> signed offset level (0..3, -4..-1),
 * matching the spatial differential CQI mapping of 36.213 Tbl 7.2.1-2. */
754 S8 rgSchCmnDlCqiDiffOfst[8] = {0, 1, 2, 3, -4, -3, -2, -1};
/* Include CRS REs while calculating Efficiency */
/* Maps antenna-port count (valid counts 1, 2, 4) to a compact index used
 * for CRS-overhead lookups; slots 0 and 3 are unused placeholders.
 * Note: specifier order fixed to the idiomatic `static const`. */
static const uint8_t rgSchCmnAntIdx[5] = {0, 0, 1, 0, 2};
/* Resource elements occupied by Cell-specific Reference Signals,
 * indexed by antenna-port count: 1 port -> 6, 2 -> 12, 4 -> 16. */
static const uint8_t rgSchCmnNumResForCrs[5] = {0, 6, 12, 0, 16};
/* Counter of CFI switch events; file-scope, so zero-initialized. */
uint32_t cfiSwitchCnt;
/* Differential-CQI step tables; the "Ap" prefix suggests these drive
 * aperiodic CQI handling (UE-selected vs eNB-configured reporting) —
 * TODO confirm semantics against the code that indexes them. */
765 S8 rgSchCmnApUeSelDiffCqi[4] = {1, 2, 3, 4};
766 S8 rgSchCmnApEnbConfDiffCqi[4] = {0, 1, 2, -1};
769 typedef struct rgSchCmnDlUeDciFrmtOptns
771 TfuDciFormat spfcDciFrmt; /* TM(Transmission Mode) specific DCI format.
772 * Search space : UE Specific by C-RNTI only. */
773 uint8_t spfcDciRAType; /* Resource Alloctn(RA) type for spfcDciFrmt */
774 TfuDciFormat prfrdDciFrmt; /* Preferred DCI format among the available
775 * options for TD (Transmit Diversity) */
776 uint8_t prfrdDciRAType; /* Resource Alloctn(RA) type for prfrdDciFrmt */
777 }RgSchCmnDlUeDciFrmtOptns;
780 /* DCI Format options for each Transmission Mode */
781 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[7] = {
782 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
783 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
784 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
785 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
786 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
787 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
788 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
792 /* DCI Format options for each Transmission Mode */
793 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[9] = {
794 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
795 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
796 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
797 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
798 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
799 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
800 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
805 typedef struct rgSchCmnDlImcsTbl
807 uint8_t modOdr; /* Modulation Order */
808 uint8_t iTbs; /* ITBS */
809 }RgSchCmnDlImcsTbl[29];
811 const struct rgSchCmnMult235Info
813 uint8_t match; /* Closest number satisfying 2^a.3^b.5^c, with a bias
814 * towards the smaller number */
815 uint8_t prvMatch; /* Closest number not greater than array index
816 * satisfying 2^a.3^b.5^c */
817 } rgSchCmnMult235Tbl[110+1] = {
819 {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {6, 6}, {8, 8},
820 {9, 9}, {10, 10}, {10, 10}, {12, 12}, {12, 12}, {15, 12}, {15, 15},
821 {16, 16}, {16, 16}, {18, 18}, {18, 18}, {20, 20}, {20, 20}, {20, 20},
822 {24, 20}, {24, 24}, {25, 25}, {25, 25}, {27, 27}, {27, 27}, {30, 27},
823 {30, 30}, {30, 30}, {32, 32}, {32, 32}, {32, 32}, {36, 32}, {36, 36},
824 {36, 36}, {36, 36}, {40, 36}, {40, 40}, {40, 40}, {40, 40}, {45, 40},
825 {45, 40}, {45, 45}, {45, 45}, {48, 45}, {48, 48}, {48, 48}, {50, 50},
826 {50, 50}, {50, 50}, {54, 50}, {54, 54}, {54, 54}, {54, 54}, {54, 54},
827 {60, 54}, {60, 54}, {60, 60}, {60, 60}, {60, 60}, {64, 60}, {64, 64},
828 {64, 64}, {64, 64}, {64, 64}, {64, 64}, {72, 64}, {72, 64}, {72, 64},
829 {72, 72}, {72, 72}, {75, 72}, {75, 75}, {75, 75}, {75, 75}, {80, 75},
830 {80, 75}, {80, 80}, {81, 81}, {81, 81}, {81, 81}, {81, 81}, {81, 81},
831 {90, 81}, {90, 81}, {90, 81}, {90, 81}, {90, 90}, {90, 90}, {90, 90},
832 {90, 90}, {96, 90}, {96, 90}, {96, 96}, {96, 96}, {96, 96}, {100, 96},
833 {100, 100}, {100, 100}, {100, 100}, {100, 100}, {100, 100}, {108, 100},
834 {108, 100}, {108, 100}, {108, 108}, {108, 108}, {108, 108}
838 /* BI table from 36.321 Table 7.2.1 */
839 const static S16 rgSchCmnBiTbl[RG_SCH_CMN_NUM_BI_VAL] = {
840 0, 10, 20, 30,40,60,80,120,160,240,320,480,960};
841 RgSchCmnUlCqiInfo rgSchCmnUlCqiTbl[RG_SCH_CMN_UL_NUM_CQI] = {
843 {RGSCH_CMN_QM_CQI_1,RGSCH_CMN_UL_EFF_CQI_1 },
844 {RGSCH_CMN_QM_CQI_2,RGSCH_CMN_UL_EFF_CQI_2 },
845 {RGSCH_CMN_QM_CQI_3,RGSCH_CMN_UL_EFF_CQI_3 },
846 {RGSCH_CMN_QM_CQI_4,RGSCH_CMN_UL_EFF_CQI_4 },
847 {RGSCH_CMN_QM_CQI_5,RGSCH_CMN_UL_EFF_CQI_5 },
848 {RGSCH_CMN_QM_CQI_6,RGSCH_CMN_UL_EFF_CQI_6 },
849 {RGSCH_CMN_QM_CQI_7,RGSCH_CMN_UL_EFF_CQI_7 },
850 {RGSCH_CMN_QM_CQI_8,RGSCH_CMN_UL_EFF_CQI_8 },
851 {RGSCH_CMN_QM_CQI_9,RGSCH_CMN_UL_EFF_CQI_9 },
852 {RGSCH_CMN_QM_CQI_10,RGSCH_CMN_UL_EFF_CQI_10 },
853 {RGSCH_CMN_QM_CQI_11,RGSCH_CMN_UL_EFF_CQI_11 },
854 {RGSCH_CMN_QM_CQI_12,RGSCH_CMN_UL_EFF_CQI_12 },
855 {RGSCH_CMN_QM_CQI_13,RGSCH_CMN_UL_EFF_CQI_13 },
856 {RGSCH_CMN_QM_CQI_14,RGSCH_CMN_UL_EFF_CQI_14 },
857 {RGSCH_CMN_QM_CQI_15,RGSCH_CMN_UL_EFF_CQI_15 },
861 /* This table maps a (delta_offset * 2 + 2) to a (beta * 8)
862 * where beta is 10^-(delta_offset/10) rounded off to nearest 1/8 */
864 static uint16_t rgSchCmnUlBeta8Tbl[29] = {
865 6, RG_SCH_CMN_UL_INVALID_BETA8, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23,
866 25, 28, 32, RG_SCH_CMN_UL_INVALID_BETA8, 40, RG_SCH_CMN_UL_INVALID_BETA8,
867 50, RG_SCH_CMN_UL_INVALID_BETA8, 64, RG_SCH_CMN_UL_INVALID_BETA8, 80,
868 RG_SCH_CMN_UL_INVALID_BETA8, 101, RG_SCH_CMN_UL_INVALID_BETA8, 127,
869 RG_SCH_CMN_UL_INVALID_BETA8, 160
873 /* QCI to SVC priority mapping. Index specifies the Qci*/
/* The mapping itself lives in the RG_SCH_CMN_QCI_TO_PRIO macro. */
874 static uint8_t rgSchCmnDlQciPrio[RG_SCH_CMN_MAX_QCI] = RG_SCH_CMN_QCI_TO_PRIO;
876 /* The configuration is efficiency measured per 1024 REs. */
877 /* The first element stands for when CQI is not known */
/* This table is used to translate CQI to its corresponding */
/* allocation parameters. These are currently from 36.213 */
/* Just this table needs to be edited for modifying */
/* the resource allocation behaviour */
883 /* ADD CQI to MCS mapping correction
884 * single dimensional array is replaced by 2 dimensions for different CFI*/
/* CQI (0..15) -> PDSCH spectral efficiency, one row per CFI value. */
885 static uint16_t rgSchCmnCqiPdschEff[4][16] = {RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI1,
886 RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI2,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI3};
/* Two-layer (spatial multiplexing) variant of the table above. */
888 static uint16_t rgSchCmn2LyrCqiPdschEff[4][16] = {RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI1,
889 RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI2, RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI3};
891 /* This configuration determines the translation of a UEs CQI to its */
892 /* PDCCH coding efficiency. This may be edited based on the installation */
/* NOTE(review): the comment above appears to describe the PDCCH
 * efficiency table defined further below; the next definition is the
 * DL redundancy-version transmission order (0, 2, 3, 1). */
893 static uint8_t rgSchCmnDlRvTbl[4] = {0, 2, 3, 1}; /* RVIdx sequence is corrected*/
895 /* Indexed by [DciFrmt].
896 * Considering the following definition in determining the dciFrmt index. */
/* Per-DCI-format payload sizes in bits; no initializer here, so
 * presumably computed at cell configuration time — confirm in the
 * DCI-size calculation routines. */
911 static uint16_t rgSchCmnDciFrmtSizes[10];
/* CQI (0..15) -> PDCCH coding efficiency. */
913 static uint16_t rgSchCmnCqiPdcchEff[16] = RG_SCH_CMN_CQI_TO_PDCCH_EFF;
917 RgSchTddUlDlSubfrmTbl rgSchTddUlDlSubfrmTbl = {
918 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME},
919 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
920 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
921 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
922 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
923 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
924 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME}
929 uint8_t rgSchTddSpsDlMaxRetxTbl[RGSCH_MAX_TDD_UL_DL_CFG] = {
941 /* Special Subframes in OFDM symbols */
942 /* ccpu00134197-MOD-Correct the number of symbols */
943 RgSchTddSplSubfrmInfoTbl rgSchTddSplSubfrmInfoTbl = {
947 {11, 1, 1, 10, 1, 1},
955 /* PHICH 'm' value Table */
956 RgSchTddPhichMValTbl rgSchTddPhichMValTbl = {
957 {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
958 {0, 1, 0, 0, 1, 0, 1, 0, 0, 1},
959 {0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
960 {1, 0, 0, 0, 0, 0, 0, 0, 1, 1},
961 {0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
962 {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
963 {1, 1, 0, 0, 0, 1, 1, 0, 0, 1}
966 /* PHICH 'K' value Table */
967 RgSchTddKPhichTbl rgSchTddKPhichTbl = {
968 {0, 0, 4, 7, 6, 0, 0, 4, 7, 6},
969 {0, 0, 4, 6, 0, 0, 0, 4, 6, 0},
970 {0, 0, 6, 0, 0, 0, 0, 6, 0, 0},
971 {0, 0, 6, 6, 6, 0, 0, 0, 0, 0},
972 {0, 0, 6, 6, 0, 0, 0, 0, 0, 0},
973 {0, 0, 6, 0, 0, 0, 0, 0, 0, 0},
974 {0, 0, 4, 6, 6, 0, 0, 4, 7, 0}
977 /* Uplink association index 'K' value Table */
978 RgSchTddUlAscIdxKDashTbl rgSchTddUlAscIdxKDashTbl = {
979 {0, 0, 6, 4, 0, 0, 0, 6, 4, 0},
980 {0, 0, 4, 0, 0, 0, 0, 4, 0, 0},
981 {0, 0, 4, 4, 4, 0, 0, 0, 0, 0},
982 {0, 0, 4, 4, 0, 0, 0, 0, 0, 0},
983 {0, 0, 4, 0, 0, 0, 0, 0, 0, 0},
984 {0, 0, 7, 7, 5, 0, 0, 7, 7, 0}
988 /* PUSCH 'K' value Table */
989 RgSchTddPuschTxKTbl rgSchTddPuschTxKTbl = {
990 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
991 {0, 6, 0, 0, 4, 0, 6, 0, 0, 4},
992 {0, 0, 0, 4, 0, 0, 0, 0, 4, 0},
993 {4, 0, 0, 0, 0, 0, 0, 0, 4, 4},
994 {0, 0, 0, 0, 0, 0, 0, 0, 4, 4},
995 {0, 0, 0, 0, 0, 0, 0, 0, 4, 0},
996 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
999 /* PDSCH to PUCCH Table for DL Harq Feed back. Based on the
1000 Downlink association set index 'K' table */
1001 uint8_t rgSchTddPucchTxTbl[7][10] = {
1002 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1003 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1004 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1005 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1006 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1007 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1008 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1011 /* Table to fetch the next DL sf idx for applying the
1012 new CFI. The next Dl sf Idx at which the new CFI
1013 is applied is always the starting Sf of the next ACK/NACK
1016 Ex: In Cfg-2, sf4 and sf9 are the only subframes at which
1017 a new ACK/NACK bundle of DL subframes can start
1019 D S U D D D S U D D D S U D D D S U D D
1022 dlSf Array for Cfg-2:
1023 sfNum: 0 1 3 4 5 6 8 9 0 1 3 4 5 6 8 9
1024 sfIdx: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1026 If CFI changes at sf0, nearest DL SF bundle >= 4 TTI is sf4
1027 So at sf4 the new CFI can be applied. To arrive at sf4 from
1028 sf0, the sfIdx has to be increased by 3 */
1030 uint8_t rgSchTddPdcchSfIncTbl[7][10] = {
1031 /* A/N Bundl: 0,1,5,6*/ {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
1032 /* A/N Bundl: 0,4,5,9*/ {2, 2, 0, 0, 3, 2, 2, 0, 0, 3},
1033 /* A/N Bundl: 4,9*/ {3, 6, 0, 5, 4, 3, 6, 0, 5, 4},
1034 /* A/N Bundl: 1,7,9*/ {4, 3, 0, 0, 0, 4, 5, 4, 6, 5},
1035 /* A/N Bundl: 0,6*/ {4, 3, 0, 0, 6, 5, 4, 7, 6, 5},
1036 /* A/N Bundl: 9*/ {8, 7, 0, 6, 5, 4, 12, 11, 10, 9},
1037 /* A/N Bundl: 0,1,5,6,9*/ {2, 1, 0, 0, 0, 2, 2, 0, 0, 3}
1041 /* combine compilation fixes */
1043 /* subframe offset values to be used when twoIntervalsConfig is enabled in UL */
1045 RgSchTddSfOffTbl rgSchTddSfOffTbl = {
1046 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1047 {0, 0, 1, -1, 0, 0, 0, 1, -1, 0},
1048 {0, 0, 5, 0, 0, 0, 0, -5, 0, 0},
1049 {0, 0, 1, 1, -2, 0, 0, 0, 0, 0},
1050 {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
1051 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1052 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1056 /* Table to determine when uplink SPS configured grants should
1057 * explicitly be reserved in a subframe. When entries are same
1058 * as that of Msg3SubfrmTbl, indicates competition with msg3.
1059 * As of now, this is same as Msg3SubfrmTbl (leaving out uldlcfg 2),
1060 * except that all 255s are now zeros. */
1061 RgSchTddSpsUlRsrvTbl rgSchTddSpsUlRsrvTbl = {
1062 {0, 0, 0, 6, 8, 0, 0, 0, 6, 8},
1063 {0, 0, 6, 9, 0, 0, 0, 6, 9, 0},
1064 {0, 0, 10, 0, 0, 0, 0, 10, 0, 0},
1065 {0, 0, 0, 0, 8, 0, 7, 7, 14, 0},
1066 {0, 0, 0, 9, 0, 0, 7, 15, 0, 0},
1067 {0, 0, 10, 0, 0, 0, 16, 0, 0, 0},
1068 {0, 0, 0, 0, 8, 0, 0, 0, 9, 0}
1071 /* Inverse DL Assoc Set index Table */
1072 RgSchTddInvDlAscSetIdxTbl rgSchTddInvDlAscSetIdxTbl = {
1073 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1074 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1075 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1076 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1077 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1078 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1079 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1082 #endif /* (LTEMAC_SPS ) */
1084 /* Number of Uplink subframes Table */
/* One entry per TDD UL/DL configuration 0..6. */
1085 static uint8_t rgSchTddNumUlSf[] = {6, 4, 2, 3, 2, 1, 5};
/* Uplink HARQ processes Table: number of UL HARQ processes per TDD
 * UL/DL configuration 0..6 (36.213 Table 8-1). The original comment
 * said "Downlink" — it was swapped with the DL table's comment. */
1088 RgSchTddUlNumHarqProcTbl rgSchTddUlNumHarqProcTbl = { 7, 4, 2, 3, 2, 1, 6};
/* Downlink HARQ processes Table: maximum number of DL HARQ processes
 * per TDD UL/DL configuration 0..6 (36.213 Table 7-1). The original
 * comment said "Uplink" — it was swapped with the UL table's comment. */
1091 RgSchTddDlNumHarqProcTbl rgSchTddDlNumHarqProcTbl = { 4, 7, 10, 9, 12, 15, 6};
1093 /* Downlink association index set 'K' value Table */
1094 RgSchTddDlAscSetIdxKTbl rgSchTddDlAscSetIdxKTbl = {
1095 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1097 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1099 { {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}} },
1101 { {0, {0}}, {0, {0}}, {3, {7, 6, 11}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1103 { {0, {0}}, {0, {0}}, {4, {12, 8, 7, 11}}, {4, {6, 5, 4, 7}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1105 { {0, {0}}, {0, {0}}, {9, {13, 12, 9, 8, 7, 5, 4, 11, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1107 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1110 /* ccpu132282-ADD-the table rgSchTddDlAscSetIdxKTbl is rearranged in
1111 * decreasing order of Km, this is used to calculate the NCE used for
1112 * calculating N1Pucch Resource for Harq*/
1113 RgSchTddDlAscSetIdxKTbl rgSchTddDlHqPucchResCalTbl = {
1114 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1116 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1118 { {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}} },
1120 { {0, {0}}, {0, {0}}, {3, {11, 7, 6}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1122 { {0, {0}}, {0, {0}}, {4, {12, 11, 8, 7}}, {4, {7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1124 { {0, {0}}, {0, {0}}, {9, {13, 12, 11, 9, 8, 7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1126 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1129 /* Minimum number of Ack/Nack feedback information to be
1130 stored for each UL-DL configuration */
1131 RgSchTddANFdbkMapTbl rgSchTddANFdbkMapTbl = {4, 4, 2, 3, 2, 1, 5};
1133 /* Uplink switch points and number of UL subframes Table */
1134 RgSchTddMaxUlSubfrmTbl rgSchTddMaxUlSubfrmTbl = {
1135 {2,3,3}, {2,2,2}, {2,1,1}, {1,3,0}, {1,2,0}, {1,1,0}, {2,3,2}
1138 /* Uplink switch points and number of DL subframes Table */
1139 RgSchTddMaxDlSubfrmTbl rgSchTddMaxDlSubfrmTbl = {
1140 {2,2,2}, {2,3,3}, {2,4,4}, {1,7,0}, {1,8,0}, {1,9,0}, {2,2,3}
1143 /* Number of UL subframes present before a particular subframe */
1144 RgSchTddNumUlSubfrmTbl rgSchTddNumUlSubfrmTbl = {
1145 {0, 0, 1, 2, 3, 3, 3, 4, 5, 6},
1146 {0, 0, 1, 2, 2, 2, 2, 3, 4, 4},
1147 {0, 0, 1, 1, 1, 1, 1, 2, 2, 2},
1148 {0, 0, 1, 2, 3, 3, 3, 3, 3, 3},
1149 {0, 0, 1, 2, 2, 2, 2, 2, 2, 2},
1150 {0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
1151 {0, 0, 1, 2, 3, 3, 3, 4, 5, 5}
1154 /* Number of DL subframes present till a particular subframe */
1155 RgSchTddNumDlSubfrmTbl rgSchTddNumDlSubfrmTbl = {
1156 {1, 2, 2, 2, 2, 3, 4, 4, 4, 4},
1157 {1, 2, 2, 2, 3, 4, 5, 5, 5, 6},
1158 {1, 2, 2, 3, 4, 5, 6, 6, 7, 8},
1159 {1, 2, 2, 2, 2, 3, 4, 5, 6, 7},
1160 {1, 2, 2, 2, 3, 4, 5, 6, 7, 8},
1161 {1, 2, 2, 3, 4, 5, 6, 7, 8, 9},
1162 {1, 2, 2, 2, 2, 3, 4, 4, 4, 5}
1166 /* Nearest possible UL subframe Index from UL subframe
1167 * DL Index < UL Index */
1168 RgSchTddLowDlSubfrmIdxTbl rgSchTddLowDlSubfrmIdxTbl = {
1169 {0, 1, 1, 1, 1, 5, 6, 6, 6, 6},
1170 {0, 1, 1, 1, 4, 5, 6, 6, 6, 9},
1171 {0, 1, 1, 3, 4, 5, 6, 6, 8, 9},
1172 {0, 1, 1, 1, 1, 5, 6, 7, 8, 9},
1173 {0, 1, 1, 1, 4, 5, 6, 7, 8, 9},
1174 {0, 1, 1, 3, 4, 5, 6, 7, 8, 9},
1175 {0, 1, 1, 1, 1, 5, 6, 6, 6, 9}
1178 /* Nearest possible DL subframe Index from UL subframe
1179 * DL Index > UL Index
1180 * 10 represents Next SFN low DL Idx */
1181 RgSchTddHighDlSubfrmIdxTbl rgSchTddHighDlSubfrmIdxTbl = {
1182 {0, 1, 5, 5, 5, 5, 6, 10, 10, 10},
1183 {0, 1, 4, 4, 4, 5, 6, 9, 9, 9},
1184 {0, 1, 3, 3, 4, 5, 6, 8, 8, 9},
1185 {0, 1, 5, 5, 5, 5, 6, 7, 8, 9},
1186 {0, 1, 4, 4, 4, 5, 6, 7, 8, 9},
1187 {0, 1, 3, 3, 4, 5, 6, 7, 8, 9},
1188 {0, 1, 5, 5, 5, 5, 6, 9, 9, 9}
1191 /* RACH Message3 related information */
1192 RgSchTddMsg3SubfrmTbl rgSchTddMsg3SubfrmTbl = {
1193 {7, 6, 255, 255, 255, 7, 6, 255, 255, 255},
1194 {7, 6, 255, 255, 8, 7, 6, 255, 255, 8},
1195 {7, 6, 255, 9, 8, 7, 6, 255, 9, 8},
1196 {12, 11, 255, 255, 255, 7, 6, 6, 6, 13},
1197 {12, 11, 255, 255, 8, 7, 6, 6, 14, 13},
1198 {12, 11, 255, 9, 8, 7, 6, 15, 14, 13},
1199 {7, 6, 255, 255, 255, 7, 6, 255, 255, 8}
/* ccpu00132341-DEL: removed rgSchTddRlsDlSubfrmTbl; the Kset table is used
 * instead for releasing DL HARQs */
/* DwPTS Scheduling Changes Start */
/* Number of Cell-specific Reference Signal (CRS) resource elements in DwPTS,
 * indexed as [special-subframe cfg group][antenna-port count index]. */
static uint8_t rgSchCmnDwptsCrs[2][3] = { /* [Spl Sf cfg][Ant Port] */
   {4,  8, 16},   /* Spl Sf cfg 1,2,3,6,7,8 */
   {6, 12, 20}    /* Spl Sf cfg 4 */
};
1213 static S8 rgSchCmnSplSfDeltaItbs[9] = RG_SCH_DWPTS_ITBS_ADJ;
1214 /* DwPTS Scheduling Changes End */
/* Buffer Status Report index -> buffer size (bytes) lookup.
 * NOTE(review): matches the shape of 3GPP TS 36.321 Table 6.1.3.1-1
 * (6-bit BSR, 64 levels); the last two entries diverge from the spec's
 * ">150000" open-ended level — confirm intentional. */
static uint32_t rgSchCmnBsrTbl[64] = {
   0,     10,    12,    14,    17,     19,     22,     26,
   31,    36,    42,    49,    57,     67,     78,     91,
   107,   125,   146,   171,   200,    234,    274,    321,
   376,   440,   515,   603,   706,    826,    967,    1132,
   1326,  1552,  1817,  2127,  2490,   2915,   3413,   3995,
   4677,  5476,  6411,  7505,  8787,   10287,  12043,  14099,
   16507, 19325, 22624, 26487, 31009,  36304,  42502,  49759,
   58255, 68201, 79846, 93479, 109439, 128125, 150000, 220000
};
/* Extended BSR index -> buffer size (bytes) lookup.
 * NOTE(review): matches the shape of 3GPP TS 36.321 Table 6.1.3.1-2
 * (extended BSR for large buffer sizes); the last two entries diverge from
 * the spec's ">3000000" open-ended level — confirm intentional. */
static uint32_t rgSchCmnExtBsrTbl[64] = {
   0,      10,      13,      16,      19,      23,      29,      35,
   43,     53,      65,      80,      98,      120,     147,     181,
   223,    274,     337,     414,     509,     625,     769,     945,
   1162,   1429,    1757,    2161,    2657,    3267,    4017,    4940,
   6074,   7469,    9185,    11294,   13888,   17077,   20999,   25822,
   31752,  39045,   48012,   59039,   72598,   89272,   109774,  134986,
   165989, 204111,  250990,  308634,  379519,  466683,  573866,  705666,
   867737, 1067031, 1312097, 1613447, 1984009, 2439678, 3000000, 3100000
};
1240 uint8_t rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_MAX_CP][RG_SCH_CMN_UL_NUM_CQI];
1242 RgSchTbSzTbl rgTbSzTbl = {
1244 {16, 32, 56, 88, 120, 152, 176, 208, 224, 256, 288, 328, 344, 376, 392, 424, 456, 488, 504, 536, 568, 600, 616, 648, 680, 712, 744, 776, 776, 808, 840, 872, 904, 936, 968, 1000, 1032, 1032, 1064, 1096, 1128, 1160, 1192, 1224, 1256, 1256, 1288, 1320, 1352, 1384, 1416, 1416, 1480, 1480, 1544, 1544, 1608, 1608, 1608, 1672, 1672, 1736, 1736, 1800, 1800, 1800, 1864, 1864, 1928, 1928, 1992, 1992, 2024, 2088, 2088, 2088, 2152, 2152, 2216, 2216, 2280, 2280, 2280, 2344, 2344, 2408, 2408, 2472, 2472, 2536, 2536, 2536, 2600, 2600, 2664, 2664, 2728, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 2984, 2984, 3112},
1245 {24, 56, 88, 144, 176, 208, 224, 256, 328, 344, 376, 424, 456, 488, 520, 568, 600, 632, 680, 712, 744, 776, 808, 872, 904, 936, 968, 1000, 1032, 1064, 1128, 1160, 1192, 1224, 1256, 1288, 1352, 1384, 1416, 1416, 1480, 1544, 1544, 1608, 1608, 1672, 1736, 1736, 1800, 1800, 1864, 1864, 1928, 1992, 1992, 2024, 2088, 2088, 2152, 2152, 2216, 2280, 2280, 2344, 2344, 2408, 2472, 2472, 2536, 2536, 2600, 2600, 2664, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008},
1246 {32, 72, 144, 176, 208, 256, 296, 328, 376, 424, 472, 520, 568, 616, 648, 696, 744, 776, 840, 872, 936, 968, 1000, 1064, 1096, 1160, 1192, 1256, 1288, 1320, 1384, 1416, 1480, 1544, 1544, 1608, 1672, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2024, 2088, 2088, 2152, 2216, 2216, 2280, 2344, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2664, 2728, 2792, 2856, 2856, 2856, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968},
1247 {40, 104, 176, 208, 256, 328, 392, 440, 504, 568, 616, 680, 744, 808, 872, 904, 968, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 1992, 2024, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6200, 6456, 6456},
1248 {56, 120, 208, 256, 328, 408, 488, 552, 632, 696, 776, 840, 904, 1000, 1064, 1128, 1192, 1288, 1352, 1416, 1480, 1544, 1608, 1736, 1800, 1864, 1928, 1992, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2600, 2664, 2728, 2792, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992},
1249 {72, 144, 224, 328, 424, 504, 600, 680, 776, 872, 968, 1032, 1128, 1224, 1320, 1384, 1480, 1544, 1672, 1736, 1864, 1928, 2024, 2088, 2216, 2280, 2344, 2472, 2536, 2664, 2728, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4264, 4392, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9528},
1250 {328, 176, 256, 392, 504, 600, 712, 808, 936, 1032, 1128, 1224, 1352, 1480, 1544, 1672, 1736, 1864, 1992, 2088, 2216, 2280, 2408, 2472, 2600, 2728, 2792, 2984, 2984, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3880, 4008, 4136, 4136, 4264, 4392, 4584, 4584, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448},
1251 {104, 224, 328, 472, 584, 712, 840, 968, 1096, 1224, 1320, 1480, 1608, 1672, 1800, 1928, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536},
1252 {120, 256, 392, 536, 680, 808, 968, 1096, 1256, 1384, 1544, 1672, 1800, 1928, 2088, 2216, 2344, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15264},
1253 {136, 296, 456, 616, 776, 936, 1096, 1256, 1416, 1544, 1736, 1864, 2024, 2216, 2344, 2536, 2664, 2856, 2984, 3112, 3368, 3496, 3624, 3752, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 5160, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568},
1254 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080},
1255 {176, 376, 584, 776, 1000, 1192, 1384, 1608, 1800, 2024, 2216, 2408, 2600, 2792, 2984, 3240, 3496, 3624, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152},
1256 {208, 440, 680, 904, 1128, 1352, 1608, 1800, 2024, 2280, 2472, 2728, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 23688, 24496, 24496, 24496, 24496, 25456},
1257 {224, 488, 744, 1000, 1256, 1544, 1800, 2024, 2280, 2536, 2856, 3112, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 26416, 27376, 27376, 27376, 27376, 28336, 28336},
1258 {256, 552, 840, 1128, 1416, 1736, 1992, 2280, 2600, 2856, 3112, 3496, 3752, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 5992, 6200, 6456, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704},
1259 {280, 600, 904, 1224, 1544, 1800, 2152, 2472, 2728, 3112, 3368, 3624, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 6200, 6456, 6712, 6968, 7224, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008},
1260 {328, 632, 968, 1288, 1608, 1928, 2280, 2600, 2984, 3240, 3624, 3880, 4264, 4584, 4968, 5160, 5544, 5992, 6200, 6456, 6712, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 35160},
1261 {336, 696, 1064, 1416, 1800, 2152, 2536, 2856, 3240, 3624, 4008, 4392, 4776, 5160, 5352, 5736, 6200, 6456, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 39232},
1262 {376, 776, 1160, 1544, 1992, 2344, 2792, 3112, 3624, 4008, 4392, 4776, 5160, 5544, 5992, 6200, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14112, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816},
1263 {408, 840, 1288, 1736, 2152, 2600, 2984, 3496, 3880, 4264, 4776, 5160, 5544, 5992, 6456, 6968, 7224, 7736, 8248, 8504, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19080, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888},
1264 {440, 904, 1384, 1864, 2344, 2792, 3240, 3752, 4136, 4584, 5160, 5544, 5992, 6456, 6968, 7480, 7992, 8248, 8760, 9144, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 20616, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 48936, 51024, 51024, 51024},
1265 {488, 1000, 1480, 1992, 2472, 2984, 3496, 4008, 4584, 4968, 5544, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 9912, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056},
1266 {520, 1064, 1608, 2152, 2664, 3240, 3752, 4264, 4776, 5352, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 10296, 10680, 11448, 11832, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 19080, 19080, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256},
1267 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776},
1268 {584, 1192, 1800, 2408, 2984, 3624, 4264, 4968, 5544, 5992, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 22920, 23688, 24496, 25456, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592},
1269 {616, 1256, 1864, 2536, 3112, 3752, 4392, 5160, 5736, 6200, 6968, 7480, 8248, 8760, 9528, 10296, 10680, 11448, 12216, 12576, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592, 68808, 68808, 68808, 71112},
1270 {712, 1480, 2216, 2984, 3752, 4392, 5160, 5992, 6712, 7480, 8248, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 14688, 15264, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376}
1273 {32, 88, 152, 208, 256, 328, 376, 424, 488, 536, 600, 648, 712, 776, 808, 872, 936, 1000, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2088, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 5992, 6200},
1274 {56, 144, 208, 256, 344, 424, 488, 568, 632, 712, 776, 872, 936, 1000, 1064, 1160, 1224, 1288, 1384, 1416, 1544, 1608, 1672, 1736, 1800, 1864, 1992, 2024, 2088, 2152, 2280, 2344, 2408, 2472, 2536, 2600, 2728, 2792, 2856, 2856, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5352, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992},
1275 {72, 176, 256, 328, 424, 520, 616, 696, 776, 872, 968, 1064, 1160, 1256, 1320, 1416, 1544, 1608, 1672, 1800, 1864, 1992, 2088, 2152, 2216, 2344, 2408, 2536, 2600, 2664, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3368, 3496, 3624, 3624, 3752, 3880, 4008, 4008, 4136, 4264, 4264, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912},
1276 {104, 208, 328, 440, 568, 680, 808, 904, 1032, 1160, 1256, 1384, 1480, 1608, 1736, 1864, 1992, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2856, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4392, 4584, 4776, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12576, 12960, 12960},
1277 {120, 256, 408, 552, 696, 840, 1000, 1128, 1288, 1416, 1544, 1736, 1864, 1992, 2152, 2280, 2408, 2600, 2728, 2856, 2984, 3112, 3240, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840},
1278 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19080},
1279 {176, 392, 600, 808, 1032, 1224, 1480, 1672, 1864, 2088, 2280, 2472, 2728, 2984, 3112, 3368, 3496, 3752, 4008, 4136, 4392, 4584, 4776, 4968, 5160, 5352, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7992, 8248, 8248, 8504, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920},
1280 {224, 472, 712, 968, 1224, 1480, 1672, 1928, 2216, 2472, 2664, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376},
1281 {256, 536, 808, 1096, 1384, 1672, 1928, 2216, 2536, 2792, 3112, 3368, 3624, 3880, 4264, 4584, 4776, 4968, 5352, 5544, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576},
1282 {296, 616, 936, 1256, 1544, 1864, 2216, 2536, 2856, 3112, 3496, 3752, 4136, 4392, 4776, 5160, 5352, 5736, 5992, 6200, 6712, 6968, 7224, 7480, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160},
1283 {328, 680, 1032, 1384, 1736, 2088, 2472, 2792, 3112, 3496, 3880, 4264, 4584, 4968, 5352, 5736, 5992, 6200, 6712, 6968, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 21384, 21384, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888},
1284 {376, 776, 1192, 1608, 2024, 2408, 2792, 3240, 3624, 4008, 4392, 4776, 5352, 5736, 5992, 6456, 6968, 7224, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816},
1285 {440, 904, 1352, 1800, 2280, 2728, 3240, 3624, 4136, 4584, 4968, 5544, 5992, 6456, 6712, 7224, 7736, 8248, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 17568, 17568, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024},
1286 {488, 1000, 1544, 2024, 2536, 3112, 3624, 4136, 4584, 5160, 5736, 6200, 6712, 7224, 7736, 8248, 8760, 9144, 9912, 10296, 10680, 11448, 11832, 12216, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336},
1287 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776},
1288 {600, 1224, 1800, 2472, 3112, 3624, 4264, 4968, 5544, 6200, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 23688, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808},
1289 {632, 1288, 1928, 2600, 3240, 3880, 4584, 5160, 5992, 6456, 7224, 7736, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 68808, 71112, 71112, 71112, 71112},
1290 {696, 1416, 2152, 2856, 3624, 4392, 5160, 5736, 6456, 7224, 7992, 8760, 9528, 10296, 10680, 11448, 12216, 12960, 13536, 14688, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 71112, 73712, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 78704},
1291 {776, 1544, 2344, 3112, 4008, 4776, 5544, 6200, 7224, 7992, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 27376, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936},
1292 {840, 1736, 2600, 3496, 4264, 5160, 5992, 6968, 7736, 8504, 9528, 10296, 11064, 12216, 12960, 13536, 14688, 15264, 16416, 16992, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800},
1293 {904, 1864, 2792, 3752, 4584, 5544, 6456, 7480, 8248, 9144, 10296, 11064, 12216, 12960, 14112, 14688, 15840, 16992, 17568, 18336, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 42368, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 97896, 101840, 101840, 101840},
1294 {1000, 1992, 2984, 4008, 4968, 5992, 6968, 7992, 9144, 9912, 11064, 12216, 12960, 14112, 15264, 15840, 16992, 18336, 19080, 19848, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 31704, 32856, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136},
1295 {1064, 2152, 3240, 4264, 5352, 6456, 7480, 8504, 9528, 10680, 11832, 12960, 14112, 15264, 16416, 16992, 18336, 19080, 20616, 21384, 22920, 23688, 24496, 25456, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816},
1296 {1128, 2280, 3496, 4584, 5736, 6968, 7992, 9144, 10296, 11448, 12576, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 45352, 45352, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840,101840,101840,101840,105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496},
1297 {1192, 2408, 3624, 4968, 5992, 7224, 8504, 9912, 11064, 12216, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 42368, 43816, 45352, 46888, 46888, 48936, 51024, 51024, 52752, 52752, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208},
1298 {1256, 2536, 3752, 5160, 6200, 7480, 8760, 10296, 11448, 12576, 14112, 15264, 16416, 17568, 19080, 20616, 21384, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 43816, 43816, 45352, 46888, 48936, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040,115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208, 137792, 137792, 137792, 142248},
1299 {1480, 2984, 4392, 5992, 7480, 8760, 10296, 11832, 13536, 14688, 16416, 17568, 19080, 20616, 22152, 23688, 25456, 26416, 28336, 29296, 30576, 32856, 34008, 35160, 36696, 37888, 40576, 40576, 42368, 43816, 45352, 46888, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 63776, 63776, 66592, 68808, 68808, 71112, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 81176, 84760, 84760, 87936, 87936, 90816, 90816, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 128496, 128496, 128496, 133208, 133208, 133208, 137792, 137792, 137792, 142248, 142248, 142248, 146856, 146856,149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776}
/* UL I-MCS table: each entry pairs a modulation value {2,4,6} with a TBS
 * index for successive UL MCS indices. NOTE(review): appears to follow
 * 3GPP TS 36.213 Table 8.6.1-1 (2=QPSK, 4=16QAM, 6=64QAM) — confirm. */
1302 RgSchUlIMcsTbl rgUlIMcsTbl = {
1303 {2, 0}, {2, 1}, {2, 2}, {2, 3}, {2, 4}, {2, 5},
1304 {2, 6}, {2, 7}, {2, 8}, {2, 9}, {2, 10},
1305 {4, 10}, {4, 11}, {4, 12}, {4, 13}, {4, 14},
1306 {4, 15}, {4, 16}, {4, 17}, {4, 18}, {4, 19},
1307 {6, 19}, {6, 20}, {6, 21}, {6, 22}, {6, 23},
1308 {6, 24}, {6, 25}, {6, 26}
/* UE category capability table, one row per UE category (row index =
 * category - 1); column meanings are documented below.
 * NOTE(review): figures resemble 3GPP TS 36.306 Table 4.1-1 — confirm. */
1310 RgSchUeCatTbl rgUeCatTbl = {
1311 /*Column1:Maximum number of bits of an UL-SCH
1312 transport block transmitted within a TTI
1314 Column2:Maximum number of bits of a DLSCH
1315 transport block received within a TTI
1317 Column3:Total number of soft channel bits
1319 Column4:Support for 64QAM in UL
1321 Column5:Maximum number of DL-SCH transport
1322 block bits received within a TTI
1324 Column6:Maximum number of supported layers for
1325 spatial multiplexing in DL
1327 {5160, {10296,0}, 250368, FALSE, 10296, 1},
1328 {25456, {51024,0}, 1237248, FALSE, 51024, 2},
1329 {51024, {75376,0}, 1237248, FALSE, 102048, 2},
1330 {51024, {75376,0}, 1827072, FALSE, 150752, 2},
1331 {75376, {149776,0}, 3667200, TRUE, 299552, 4},
1332 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1333 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1334 {149776,{299856,0}, 35982720,TRUE, 2998560, 8}
1337 /* [ccpu00138532]-ADD-The below table stores the min HARQ RTT time
1338 in Downlink for TDD and FDD. Indices 0 to 6 map to TDD UL-DL configs 0-6.
1339 Index 7 maps to FDD */
1340 uint8_t rgSchCmnHarqRtt[8] = {4,7,10,9,12,15,6,8};
1341 /* Number of CFI switchover window entries equals 7 TDD indexes + 1 FDD index */
1342 uint8_t rgSchCfiSwitchOvrWinLen[] = {7, 4, 2, 3, 2, 1, 6, 8};
/* Backing storage and lookup-pointer tables for DL/UL spectral-efficiency
 * and CQI-to-TBS mapping, per CFI (1..4), per cyclic prefix (normal/extended),
 * and per layer index (0 = single layer, 1 = two layers). These are filled
 * at init time (see rgSCHCmnDlInit/rgSCHCmnCompEff). */
1344 /* EffTbl is calculated for single layer and two layers.
1345 * CqiToTbs is calculated for single layer and two layers */
1346 RgSchCmnTbSzEff rgSchCmnNorCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1347 RgSchCmnTbSzEff rgSchCmnNorCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1348 /* New variable to store UL efficiency values for normal and extended CP*/
1349 RgSchCmnTbSzEff rgSchCmnNorUlEff[1],rgSchCmnExtUlEff[1];
1350 RgSchCmnCqiToTbs rgSchCmnNorCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1351 RgSchCmnCqiToTbs rgSchCmnNorCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1352 RgSchCmnCqiToTbs *rgSchCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
1353 RgSchCmnTbSzEff rgSchCmnExtCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1354 RgSchCmnTbSzEff rgSchCmnExtCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1355 RgSchCmnCqiToTbs rgSchCmnExtCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1356 RgSchCmnCqiToTbs rgSchCmnExtCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1357 /* Include CRS REs while calculating Efficiency */
1358 RgSchCmnTbSzEff *rgSchCmnEffTbl[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_ANT_CONF][RG_SCH_CMN_MAX_CFI];
1359 RgSchCmnTbSzEff *rgSchCmnUlEffTbl[RG_SCH_CMN_MAX_CP];
/* NOTE(review): two initializers for rgRaPrmblToRaFrmTbl appear back to back;
 * in the full source these are presumably selected by conditional compilation
 * (TDD variant with 5 entries vs FDD variant with 4) — as shown here the
 * second would be a redefinition. Verify the elided #ifdef guards. */
1361 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3, 1};
1363 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA sub-frames from RA preamble */
1364 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3};
/* Scheduler-strategy init-function dispatch tables (UL, DL, DLFS).
 * NOTE(review): the EMTC entries below are presumably guarded by an elided
 * EMTC_ENABLE conditional (gap in original numbering at 1370) — confirm. */
1367 RgUlSchdInits rgSchUlSchdInits;
1368 RgDlSchdInits rgSchDlSchdInits;
1369 RgDlfsSchdInits rgSchDlfsSchdInits;
1371 RgEmtcUlSchdInits rgSchEmtcUlSchdInits;
1372 RgEmtcDlSchdInits rgSchEmtcDlSchdInits;
/* Forward declarations: RACH/handover/PDCCH-order handling, group power
 * control, timers, and CCCH SDU scheduling helpers.
 * NOTE(review): parameter lists appear truncated in this extract — verify
 * each prototype against the original file before editing. */
1376 static S16 rgSCHCmnUeIdleExdThrsld ARGS((
1380 RgSchUeCb* rgSCHCmnGetHoUe ARGS((
1384 static Void rgSCHCmnDelDedPreamble ARGS((
1388 RgSchUeCb* rgSCHCmnGetPoUe ARGS((
1391 CmLteTimingInfo timingInfo
1393 static Void rgSCHCmnDelRachInfo ARGS((
1397 static S16 rgSCHCmnUlRbAllocForPoHoUe ARGS((
1403 static Void rgSCHCmnHdlHoPo ARGS((
1405 CmLListCp *raRspLst,
1406 RgSchRaReqInfo *raReq
1408 static Void rgSCHCmnAllocPoHoGrnt ARGS((
1410 CmLListCp *raRspLst,
1412 RgSchRaReqInfo *raReq
1414 static Void rgSCHCmnFillPdcchOdr2Sf ARGS((
1421 static Void rgSCHCmnDlAdd2PdcchOdrQ ARGS((
1425 static Void rgSCHCmnDlRmvFrmPdcchOdrQ ARGS((
1429 static Void rgSCHCmnUpdNxtPrchMskIdx ARGS((
1432 static Void rgSCHCmnUpdRachParam ARGS((
1435 static S16 rgSCHCmnAllocPOParam ARGS((
1441 uint8_t *prachMskIdx
1443 static Void rgSCHCmnGenPdcchOrder ARGS((
1447 static Void rgSCHCmnCfgRachDedPrm ARGS((
1452 static Void rgSCHCmnHdlUlInactUes ARGS((
1455 static Void rgSCHCmnHdlDlInactUes ARGS((
1458 static Void rgSCHCmnUlInit ARGS((Void
1460 static Void rgSCHCmnDlInit ARGS((Void
1462 static Void rgSCHCmnInitDlRbAllocInfo ARGS((
1463 RgSchCmnDlRbAllocInfo *allocInfo
1465 static Void rgSCHCmnUpdUlCompEffBsr ARGS((
1469 static Void rgSCHCmnUlSetAllUnSched ARGS((
1470 RgSchCmnUlRbAllocInfo *allocInfo
1472 static Void rgSCHCmnUlUpdSf ARGS((
1474 RgSchCmnUlRbAllocInfo *allocInfo,
1477 static Void rgSCHCmnUlHndlAllocRetx ARGS((
1479 RgSchCmnUlRbAllocInfo *allocInfo,
1484 static Void rgSCHCmnGrpPwrCntrlPucch ARGS((
1488 static Void rgSCHCmnGrpPwrCntrlPusch ARGS((
1492 static Void rgSCHCmnDelUeFrmRefreshQ ARGS((
1496 static S16 rgSCHCmnTmrExpiry ARGS((
1497 PTR cb, /* Pointer to timer control block */
1498 S16 tmrEvnt /* Timer Event */
1500 static S16 rgSCHCmnTmrProc ARGS((
1503 static Void rgSCHCmnAddUeToRefreshQ ARGS((
1508 static Void rgSCHCmnDlCcchRetx ARGS((
1510 RgSchCmnDlRbAllocInfo *allocInfo
1512 static Void rgSCHCmnUpdUeMimoInfo ARGS((
1516 RgSchCmnCell *cellSchd
1518 static Void rgSCHCmnUpdUeUlCqiInfo ARGS((
1522 RgSchCmnUe *ueSchCmn,
1523 RgSchCmnCell *cellSchd,
1527 static Void rgSCHCmnDlCcchSduRetx ARGS((
1529 RgSchCmnDlRbAllocInfo *allocInfo
1531 static Void rgSCHCmnDlCcchSduTx ARGS((
1533 RgSchCmnDlRbAllocInfo *allocInfo
1535 static S16 rgSCHCmnCcchSduAlloc ARGS((
1538 RgSchCmnDlRbAllocInfo *allocInfo
1540 static S16 rgSCHCmnCcchSduDedAlloc ARGS((
1544 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc ARGS((
1550 static Void rgSCHCmnInitVars ARGS((
/* Forward declarations: UL RB allocation, Msg3/Msg4 grants, RAR, broadcast
 * (BCCH/PCCH) scheduling, UL adaptive retransmission, and DL RB allocation.
 * NOTE(review): parameter lists appear truncated in this extract — verify
 * each prototype against the original file before editing. */
1554 /*ccpu00117180 - DEL - Moved rgSCHCmnUpdVars to .x as its access is now */
1555 static Void rgSCHCmnUlRbAllocForLst ARGS((
1561 CmLListCp *nonSchdLst,
1564 static S16 rgSCHCmnUlRbAllocForUe ARGS((
1571 static Void rgSCHCmnMsg3GrntReq ARGS((
1575 RgSchUlHqProcCb *hqProc,
1576 RgSchUlAlloc **ulAllocRef,
1577 uint8_t *hqProcIdRef
1579 static Void rgSCHCmnDlCcchRarAlloc ARGS((
1582 static Void rgSCHCmnDlCcchTx ARGS((
1584 RgSchCmnDlRbAllocInfo *allocInfo
1586 static Void rgSCHCmnDlBcchPcch ARGS((
1588 RgSchCmnDlRbAllocInfo *allocInfo,
1589 RgInfSfAlloc *subfrmAlloc
1591 Bool rgSCHCmnChkInWin ARGS((
1592 CmLteTimingInfo frm,
1593 CmLteTimingInfo start,
1596 Bool rgSCHCmnChkPastWin ARGS((
1597 CmLteTimingInfo frm,
1600 static Void rgSCHCmnClcAlloc ARGS((
1603 RgSchClcDlLcCb *lch,
1605 RgSchCmnDlRbAllocInfo *allocInfo
1608 static Void rgSCHCmnClcRbAlloc ARGS((
1619 static S16 rgSCHCmnMsg4Alloc ARGS((
1622 RgSchCmnDlRbAllocInfo *allocInfo
1624 static S16 rgSCHCmnMsg4DedAlloc ARGS((
1628 static Void rgSCHCmnDlRaRsp ARGS((
1630 RgSchCmnDlRbAllocInfo *allocInfo
1632 static S16 rgSCHCmnRaRspAlloc ARGS((
1638 RgSchCmnDlRbAllocInfo *allocInfo
1640 static Void rgSCHCmnUlUeDelAllocs ARGS((
1644 static Void rgSCHCmnDlSetUeAllocLmt ARGS((
1649 static S16 rgSCHCmnDlRgrCellCfg ARGS((
1654 static Void rgSCHCmnUlAdapRetx ARGS((
1655 RgSchUlAlloc *alloc,
1656 RgSchUlHqProcCb *proc
1658 static Void rgSCHCmnUlUpdAllocRetx ARGS((
1662 static Void rgSCHCmnUlSfReTxAllocs ARGS((
1666 /* Fix: syed Adaptive Msg3 Retx crash. */
1668 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1672 RgrUeRecfg *ueRecfg,
1676 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1686 * DL RB allocation specific functions
1689 static Void rgSCHCmnDlRbAlloc ARGS((
1691 RgSchCmnDlRbAllocInfo *allocInfo
1693 static Void rgSCHCmnNonDlfsRbAlloc ARGS((
1695 RgSchCmnDlRbAllocInfo *allocInfo
1697 static S16 rgSCHCmnNonDlfsCmnRbAlloc ARGS((
1699 RgSchDlRbAlloc *cmnAllocInfo));
/* Forward declarations: PBCH-overlap RB/TB-size adjustment, RIV computation,
 * HARQ/DAI update, TDD timing init, and MIMO rank/wideband-CQI computation.
 * NOTE(review): parameter lists appear truncated in this extract — verify
 * each prototype against the original file before editing. */
1702 static Void rgSCHCmnNonDlfsPbchRbAllocAdj ARGS((
1704 RgSchDlRbAlloc *cmnAllocInfo,
1705 uint8_t pbchSsRsSym,
1708 /* Added function to adjust TBSize*/
1709 static Void rgSCHCmnNonDlfsPbchTbSizeAdj ARGS((
1710 RgSchDlRbAlloc *allocInfo,
1711 uint8_t numOvrlapgPbchRb,
1712 uint8_t pbchSsRsSym,
1717 /* Added function to find num of overlapping PBCH rb*/
1718 static Void rgSCHCmnFindNumPbchOvrlapRbs ARGS((
1721 RgSchDlRbAlloc *allocInfo,
1722 uint8_t *numOvrlapgPbchRb
1725 static uint8_t rgSCHCmnFindNumAddtlRbsAvl ARGS((
1728 RgSchDlRbAlloc *allocInfo
1732 static Void rgSCHCmnFindCodeRate ARGS((
1735 RgSchDlRbAlloc *allocInfo,
1741 static Void rgSCHCmnNonDlfsMsg4Alloc ARGS((
1743 RgSchCmnMsg4RbAlloc *msg4AllocInfo,
1746 static S16 rgSCHCmnNonDlfsMsg4RbAlloc ARGS((
1752 static S16 rgSCHCmnNonDlfsUeRbAlloc ARGS((
1756 uint8_t *isDlBwAvail
1759 static uint32_t rgSCHCmnCalcRiv ARGS(( uint8_t bw,
1765 static Void rgSCHCmnUpdHqAndDai ARGS((
1766 RgSchDlHqProcCb *hqP,
1768 RgSchDlHqTbCb *tbCb,
1771 static S16 rgSCHCmnUlCalcAvailBw ARGS((
1773 RgrCellCfg *cellCfg,
1775 uint8_t *rbStartRef,
1778 static S16 rgSCHCmnDlKdashUlAscInit ARGS((
1781 static S16 rgSCHCmnDlANFdbkInit ARGS((
1784 static S16 rgSCHCmnDlNpValInit ARGS((
1787 static S16 rgSCHCmnDlCreateRachPrmLst ARGS((
1790 static S16 rgSCHCmnDlCpyRachInfo ARGS((
1792 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
1795 static S16 rgSCHCmnDlRachInfoInit ARGS((
1798 static S16 rgSCHCmnDlPhichOffsetInit ARGS((
1803 static Void rgSCHCmnFindUlCqiUlTxAnt ARGS
1809 static RgSchCmnRank rgSCHCmnComputeRank ARGS
1812 uint32_t *pmiBitMap,
1816 static RgSchCmnRank rgSCHCmnComp2TxMode3 ARGS
1821 static RgSchCmnRank rgSCHCmnComp4TxMode3 ARGS
1826 static RgSchCmnRank rgSCHCmnComp2TxMode4 ARGS
1831 static RgSchCmnRank rgSCHCmnComp4TxMode4 ARGS
1836 static uint8_t rgSCHCmnCalcWcqiFrmSnr ARGS
1843 /* comcodsepa : start */
1846 * @brief This function computes efficiency and stores in a table.
1850 * Function: rgSCHCmnCompEff
1851 * Purpose: this function computes the efficiency as number of
1852 * bytes per 1024 symbols. The CFI table is also filled
1853 * with the same information such that comparison is valid
1855 * Invoked by: Scheduler
1857 * @param[in] uint8_t noPdcchSym
1858 * @param[in] uint8_t cpType
1859 * @param[in] uint8_t txAntIdx
1860 * @param[in] RgSchCmnTbSzEff* effTbl
1864 static Void rgSCHCmnCompEff(uint8_t noPdcchSym,uint8_t cpType,uint8_t txAntIdx,RgSchCmnTbSzEff *effTbl)
1868 uint8_t resOfCrs; /* Effective REs occupied by CRS */
/* Symbols per RB depend on the cyclic prefix length */
1874 case RG_SCH_CMN_NOR_CP:
1877 case RG_SCH_CMN_EXT_CP:
1881 /* Generate a log error. This case should never be executed */
1885 /* Depending on the Tx Antenna Index, deduct the
1886 * Resource elements for the CRS */
1890 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
1893 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
1896 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
1899 /* Generate a log error. This case should never be executed */
/* Usable REs per RB = data symbols x subcarriers per RB, minus CRS REs */
1902 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
1903 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1906 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1908 /* This line computes the coding efficiency per 1024 REs */
/* rgTbSzTbl[0] is the single-layer TB size table; (j+1) is the RB count */
1909 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
/* Average the per-RB-count efficiencies over all allocation sizes */
1911 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1916 * @brief This function computes efficiency and stores in a table.
1920 * Function: rgSCHCmnCompUlEff
1921 * Purpose: this function computes the UL efficiency as number of
1922 * bytes per 1024 symbols, after excluding the UL reference
1923 * signal symbols from the per-RB resource budget
1925 * Invoked by: Scheduler
1927 * @param[in] uint8_t noUlRsSym
1928 * @param[in] uint8_t cpType
1929 * (NOTE(review): stale doc — this function takes no txAntIdx parameter)
1930 * @param[in] RgSchCmnTbSzEff* effTbl
1934 static Void rgSCHCmnCompUlEff(uint8_t noUlRsSym,uint8_t cpType,RgSchCmnTbSzEff *effTbl)
1943 case RG_SCH_CMN_NOR_CP:
1946 case RG_SCH_CMN_EXT_CP:
1950 /* Generate a log error. This case should never be executed */
/* UL has no CRS deduction; only RS symbols are removed from the budget */
1954 noResPerRb = ((noSymPerRb - noUlRsSym) * RB_SCH_CMN_NUM_SCS_PER_RB);
1955 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1958 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1960 /* This line computes the coding efficiency per 1024 REs */
1961 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
/* Average over all RB allocation sizes */
1963 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1969 * @brief This function computes efficiency for 2 layers and stores in a table.
1973 * Function: rgSCHCmn2LyrCompEff
1974 * Purpose: this function computes the efficiency as number of
1975 * bytes per 1024 symbols. The CFI table is also filled
1976 * with the same information such that comparison is valid
1978 * Invoked by: Scheduler
1980 * @param[in] uint8_t noPdcchSym
1981 * @param[in] uint8_t cpType
1982 * @param[in] uint8_t txAntIdx
1983 * @param[in] RgSchCmnTbSzEff* effTbl2Lyr
1987 static Void rgSCHCmn2LyrCompEff(uint8_t noPdcchSym,uint8_t cpType,uint8_t txAntIdx,RgSchCmnTbSzEff *effTbl2Lyr)
1991 uint8_t resOfCrs; /* Effective REs occupied by CRS */
1997 case RG_SCH_CMN_NOR_CP:
2000 case RG_SCH_CMN_EXT_CP:
2004 /* Generate a log error. This case should never be executed */
2008 /* Depending on the Tx Antenna Index, deduct the
2009 * Resource elements for the CRS */
2013 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
2016 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
2019 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
2022 /* Generate a log error. This case should never be executed */
2026 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
2027 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
2029 (*effTbl2Lyr)[i] = 0;
2030 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
2032 /* This line computes the coding efficiency per 1024 REs */
/* rgTbSzTbl[1] is the two-layer TB size table (cf. rgTbSzTbl[0] in the
 * single-layer variant rgSCHCmnCompEff) */
2033 (*effTbl2Lyr)[i] += (rgTbSzTbl[1][i][j] * 1024) / (noResPerRb * (j+1));
/* Average over all RB allocation sizes */
2035 (*effTbl2Lyr)[i] /= RG_SCH_CMN_NUM_RBS;
2042 * @brief This function initializes the rgSchCmnDciFrmtSizes table.
2046 * Function: rgSCHCmnGetDciFrmtSizes
2047 * Purpose: This function determines the sizes of all
2048 * the available DCI Formats. The order of
2049 * bits addition for each format is in accordance
2051 * Invoked by: rgSCHCmnRgrCellCfg
2056 static Void rgSCHCmnGetDciFrmtSizes(RgSchCellCb *cell)
2060 /* DCI Format 0 size determination */
2061 rgSchCmnDciFrmtSizes[0] = 1 +
2063 rgSCHUtlLog32bitNbase2((cell->bwCfg.ulTotalBw * \
2064 (cell->bwCfg.ulTotalBw + 1))/2) +
2074 /* DCI Format 1 size determination */
2075 rgSchCmnDciFrmtSizes[1] = 1 +
2076 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2081 4 + 2 + /* HqProc Id and DAI */
2087 /* DCI Format 1A size determination */
2088 rgSchCmnDciFrmtSizes[2] = 1 + /* Flag for format0/format1a differentiation */
2089 1 + /* Localized/distributed VRB assignment flag */
/* NOTE(review): the 3-bit and 4-bit HARQ-process-Id terms below appear to be
 * FDD/TDD alternatives selected by an elided #ifdef — verify. */
2092 3 + /* Harq process Id */
2094 4 + /* Harq process Id */
2095 2 + /* UL Index or DAI */
2097 1 + /* New Data Indicator */
2100 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2101 (cell->bwCfg.dlTotalBw + 1))/2);
2102 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
2103 Since VRB is local */
2105 /* DCI Format 1B size determination */
2106 rgSchCmnDciFrmtSizes[3] = 1 +
2107 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2108 (cell->bwCfg.dlTotalBw + 1))/2) +
2118 ((cell->numTxAntPorts == 4)? 4:2) +
2121 /* DCI Format 1C size determination */
2122 /* Approximation: NDLVrbGap1 ~= Nprb for DL */
/* NOTE(review): the conditional below is NOT parenthesized like the Format 2
 * and 2A cases; `cond ? 0 : 1 + ...` parses as `cond ? 0 : (1 + ...)`, so for
 * dlTotalBw < 50 the whole remainder collapses to 0. This looks like a
 * missing-parentheses defect — confirm intended grouping before changing. */
2123 rgSchCmnDciFrmtSizes[4] = (cell->bwCfg.dlTotalBw < 50)? 0:1 +
2124 (cell->bwCfg.dlTotalBw < 50)?
2125 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/2 * \
2126 (cell->bwCfg.dlTotalBw/2 + 1))/2)) :
2127 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/4 * \
2128 (cell->bwCfg.dlTotalBw/4 + 1))/2)) +
2131 /* DCI Format 1D size determination */
2132 rgSchCmnDciFrmtSizes[5] = 1 +
2133 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2134 (cell->bwCfg.dlTotalBw + 1))/2) +
2143 ((cell->numTxAntPorts == 4)? 4:2) +
2146 /* DCI Format 2 size determination */
2147 rgSchCmnDciFrmtSizes[6] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2148 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2156 ((cell->numTxAntPorts == 4)? 6:3);
2158 /* DCI Format 2A size determination */
2159 rgSchCmnDciFrmtSizes[7] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2160 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2168 ((cell->numTxAntPorts == 4)? 2:0);
2170 /* DCI Format 3 size determination */
/* Formats 3/3A are defined to be the same size as Format 0 */
2171 rgSchCmnDciFrmtSizes[8] = rgSchCmnDciFrmtSizes[0];
2173 /* DCI Format 3A size determination */
2174 rgSchCmnDciFrmtSizes[9] = rgSchCmnDciFrmtSizes[0];
2181 * @brief This function initializes the cmnCell->dciAggrLvl table.
2185 * Function: rgSCHCmnGetCqiDciFrmt2AggrLvl
2186 * Purpose: This function determines the Aggregation level
2187 * for each CQI level against each DCI format.
2188 * Invoked by: rgSCHCmnRgrCellCfg
2193 static Void rgSCHCmnGetCqiDciFrmt2AggrLvl(RgSchCellCb *cell)
2195 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2200 for (i = 0; i < RG_SCH_CMN_MAX_CQI; i++)
/* 10 = number of entries sized in rgSchCmnDciFrmtSizes (formats 0..3A) */
2202 for (j = 0; j < 10; j++)
2204 uint32_t pdcchBits; /* Actual number of phy bits needed for a given DCI Format
2205 * for a given CQI Level */
/* Scale the format's payload size by the per-CQI PDCCH efficiency
 * (efficiency is expressed per 1024, hence the *1024) */
2206 pdcchBits = (rgSchCmnDciFrmtSizes[j] * 1024)/rgSchCmnCqiPdcchEff[i];
/* Threshold ladder: pick the smallest aggregation level (2/4/8/16 CCEs)
 * whose bit budget covers pdcchBits. NOTE(review): the 192/384/768
 * thresholds' derivation is not visible here — confirm against the
 * PDCCH CCE capacity used elsewhere in the scheduler. */
2208 if (pdcchBits < 192)
2210 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL2;
2213 if (pdcchBits < 384)
2215 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL4;
2218 if (pdcchBits < 768)
2220 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL8;
2223 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL16;
2230 * @brief This function initializes all the data for the scheduler.
2234 * Function: rgSCHCmnDlInit
2235 * Purpose: This function initializes the following information:
2236 * 1. Efficiency table
2237 * 2. CQI to table index - It is one row for upto 3 RBs
2238 * and another row for greater than 3 RBs
2239 * currently extended prefix is compiled out.
2240 * Invoked by: MAC initialization code (possibly ActvInit)
2245 static Void rgSCHCmnDlInit()
2251 RgSchCmnTbSzEff *effTbl;
2252 RgSchCmnCqiToTbs *tbsTbl;
2255 /* 0 corresponds to Single layer case, 1 corresponds to 2 layers case*/
2256 /* Init Efficiency table for normal cyclic prefix */
2257 /*Initialize Efficiency table for Layer Index 0 */
2258 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2259 /*Initialize Efficiency table for each of the CFI indices. The
2260 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2261 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[0];
2262 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[0];
2263 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[0];
2264 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[0];
2265 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2266 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[0];
2267 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[0];
2268 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[0];
2269 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[0];
2270 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2271 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[0];
2272 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[0];
2273 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[0];
2274 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[0];
2276 /*Initialize CQI to TBS table for Layer Index 0 for Normal CP */
2277 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[0];
2278 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[0];
2279 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[0];
2280 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[0];
2282 /*Initialize Efficiency table for Layer Index 1 */
2283 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2284 /*Initialize Efficiency table for each of the CFI indices. The
2285 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2286 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[1];
2287 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[1];
2288 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[1];
2289 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[1];
2290 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2291 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[1];
2292 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[1];
2293 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[1];
2294 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[1];
2295 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2296 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[1];
2297 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[1];
2298 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[1];
2299 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[1];
2301 /*Initialize CQI to TBS table for Layer Index 1 for Normal CP */
2302 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[1];
2303 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[1];
2304 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[1];
2305 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[1];
2307 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2309 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2311 /* EfficiencyTbl calculation incase of 2 layers for normal CP */
2312 rgSCHCmnCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx,\
2313 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i]);
2314 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx, \
2315 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i]);
2319 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2321 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2323 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i];
2324 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][i];
2325 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2326 (j >= 0) && (k > 0); --j)
2328 /* ADD CQI to MCS mapping correction
2329 * single dimensional array is replaced by 2 dimensions for different CFI*/
2330 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2332 (*tbsTbl)[k--] = (uint8_t)j;
2339 /* effTbl,tbsTbl calculation incase of 2 layers for normal CP */
2340 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i];
2341 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][i];
2342 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2343 (j >= 0) && (k > 0); --j)
2345 /* ADD CQI to MCS mapping correction
2346 * single dimensional array is replaced by 2 dimensions for different CFI*/
2347 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2349 (*tbsTbl)[k--] = (uint8_t)j;
2359 /* Efficiency Table for Extended CP */
2360 /*Initialize Efficiency table for Layer Index 0 */
2361 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2362 /*Initialize Efficiency table for each of the CFI indices. The
2363 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2364 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[0];
2365 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[0];
2366 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[0];
2367 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[0];
2368 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2369 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[0];
2370 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[0];
2371 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[0];
2372 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[0];
2373 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2374 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[0];
2375 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[0];
2376 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[0];
2377 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[0];
2379 /*Initialize CQI to TBS table for Layer Index 0 for Extended CP */
2380 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[0];
2381 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[0];
2382 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[0];
2383 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[0];
2385 /*Initialize Efficiency table for Layer Index 1 */
2386 /*Initialize Efficiency table for each of the CFI indices. The
2387 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2388 /*Initialize Efficency table for Tx Antenna Port Index 0 */
2389 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[1];
2390 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[1];
2391 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[1];
2392 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[1];
2393 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2394 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[1];
2395 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[1];
2396 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[1];
2397 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[1];
2398 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2399 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[1];
2400 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[1];
2401 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[1];
2402 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[1];
2404 /*Initialize CQI to TBS table for Layer Index 1 for Extended CP */
2405 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[1];
2406 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[1];
2407 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[1];
2408 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[1];
2409 /* Activate this code when extended cp is supported */
2410 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2412 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2414 /* EfficiencyTbl calculation incase of 2 layers for extendedl CP */
2415 rgSCHCmnCompEff( (uint8_t)(i + 1 ), (uint8_t)RG_SCH_CMN_EXT_CP, idx,\
2416 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i]);
2417 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), (uint8_t) RG_SCH_CMN_EXT_CP,idx, \
2418 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i]);
2422 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2424 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2426 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i];
2427 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][i];
2428 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2429 (j >= 0) && (k > 0); --j)
2431 /* ADD CQI to MCS mapping correction
2432 * single dimensional array is replaced by 2 dimensions for different CFI*/
2433 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2435 (*tbsTbl)[k--] = (uint8_t)j;
2442 /* effTbl,tbsTbl calculation incase of 2 layers for extended CP */
2443 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i];
2444 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][i];
2445 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2446 (j >= 0) && (k > 0); --j)
2448 /* ADD CQI to MCS mapping correction
2449 * single dimensional array is replaced by 2 dimensions for different CFI*/
2450 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2452 (*tbsTbl)[k--] = (uint8_t)j;
2465 * @brief This function initializes all the data for the scheduler.
2469 * Function: rgSCHCmnUlInit
2470 * Purpose: This function initializes the following information:
2471 * 1. Efficiency table
2472 * 2. CQI to table index - It is one row for upto 3 RBs
2473 * and another row for greater than 3 RBs
2474 * currently extended prefix is compiled out.
2475 * Invoked by: MAC initialization code..may be ActvInit
2480 static Void rgSCHCmnUlInit()
/* Builds the UL efficiency tables and the UL CQI -> iTBS mapping for both
 * normal and extended cyclic prefix. Invoked once at scheduler init.
 * No parameters, no return value; writes only module-level tables.
 * NOTE(review): this excerpt omits some original lines (braces/decls);
 * only visible lines are annotated here. */
2482 uint8_t *mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_NOR_CP][0];
2483 RgSchCmnTbSzEff *effTbl = &rgSchCmnNorUlEff[0];
2484 const RgSchCmnUlCqiInfo *cqiTbl = &rgSchCmnUlCqiTbl[0];
2488 /* Initializing new variable added for UL eff */
2489 rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP] = &rgSchCmnNorUlEff[0];
2490 /* Reason behind using 3 as the number of symbols to rule out for
2491 * efficiency table computation would be that we are using 2 symbols for
2492 * DMRS(1 in each slot) and 1 symbol for SRS*/
2493 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_NOR_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP]);
/* Walk iTBS from highest to lowest; for each CQI record the largest iTBS
 * whose efficiency does not exceed that CQI's efficiency. */
2495 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2496 i >= 0 && j > 0; --i)
2498 if ((*effTbl)[i] <= cqiTbl[j].eff)
2500 mapTbl[j--] = (uint8_t)i;
/* Same computation repeated for extended cyclic prefix. */
2507 effTbl = &rgSchCmnExtUlEff[0];
2508 mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_EXT_CP][0];
2510 /* Initializing new variable added for UL eff */
2511 rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP] = &rgSchCmnExtUlEff[0];
2512 /* Reason behind using 3 as the number of symbols to rule out for
2513 * efficiency table computation would be that we are using 2 symbols for
2514 * DMRS(1 in each slot) and 1 symbol for SRS*/
/* Fix: use the named constant instead of the magic literal 3, matching the
 * normal-CP call above (identical DMRS+SRS symbol accounting per the
 * comment retained above). */
2515 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_EXT_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP]);
2517 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2518 i >= 0 && j > 0; --i)
2520 if ((*effTbl)[i] <= cqiTbl[j].eff)
2522 mapTbl[j--] = (uint8_t)i;
2534 * @brief This function initializes all the data for the scheduler.
2538 * Function: rgSCHCmnInit
2539 * Purpose: This function initializes the following information:
2540 * 1. Efficiency table
2541 * 2. CQI to table index - It is one row for upto 3 RBs
2542 * and another row for greater than 3 RBs
2543 * currently extended prefix is compiled out.
2544 * Invoked by: MAC initialization code..may be ActvInit
/* Body fragment of rgSCHCmnInit (entry point of common scheduler init;
 * the signature line is not visible in this excerpt). Wires up the
 * rgSchCmnApis function-pointer table and runs per-scheduler init hooks. */
/* EMTC-specific DL/UL initialization first. */
2556 rgSCHEmtcCmnDlInit();
2557 rgSCHEmtcCmnUlInit();
2563 /* Init the function pointers */
/* UE / cell / logical-channel configuration hooks. */
2564 rgSchCmnApis.rgSCHRgrUeCfg = rgSCHCmnRgrUeCfg;
2565 rgSchCmnApis.rgSCHRgrUeRecfg = rgSCHCmnRgrUeRecfg;
2566 rgSchCmnApis.rgSCHFreeUe = rgSCHCmnUeDel;
2567 rgSchCmnApis.rgSCHRgrCellCfg = rgSCHCmnRgrCellCfg;
2568 rgSchCmnApis.rgSCHRgrCellRecfg = rgSCHCmnRgrCellRecfg;
2569 rgSchCmnApis.rgSCHFreeCell = rgSCHCmnCellDel;
2570 rgSchCmnApis.rgSCHRgrLchCfg = rgSCHCmnRgrLchCfg;
2571 rgSchCmnApis.rgSCHRgrLcgCfg = rgSCHCmnRgrLcgCfg;
2572 rgSchCmnApis.rgSCHRgrLchRecfg = rgSCHCmnRgrLchRecfg;
2573 rgSchCmnApis.rgSCHRgrLcgRecfg = rgSCHCmnRgrLcgRecfg;
2574 rgSchCmnApis.rgSCHFreeDlLc = rgSCHCmnFreeDlLc;
2575 rgSchCmnApis.rgSCHFreeLcg = rgSCHCmnLcgDel;
2576 rgSchCmnApis.rgSCHRgrLchDel = rgSCHCmnRgrLchDel;
/* UE activation and indication handlers. */
2577 rgSchCmnApis.rgSCHActvtUlUe = rgSCHCmnActvtUlUe;
2578 rgSchCmnApis.rgSCHActvtDlUe = rgSCHCmnActvtDlUe;
2579 rgSchCmnApis.rgSCHHdlUlTransInd = rgSCHCmnHdlUlTransInd;
2580 rgSchCmnApis.rgSCHDlDedBoUpd = rgSCHCmnDlDedBoUpd;
2581 rgSchCmnApis.rgSCHUlRecMsg3Alloc = rgSCHCmnUlRecMsg3Alloc;
2582 rgSchCmnApis.rgSCHUlCqiInd = rgSCHCmnUlCqiInd;
2583 rgSchCmnApis.rgSCHPucchDeltaPwrInd = rgSCHPwrPucchDeltaInd;
2584 rgSchCmnApis.rgSCHUlHqProcForUe = rgSCHCmnUlHqProcForUe;
2586 rgSchCmnApis.rgSCHUpdUlHqProc = rgSCHCmnUpdUlHqProc;
/* BSR / PHR reporting hooks. */
2588 rgSchCmnApis.rgSCHUpdBsrShort = rgSCHCmnUpdBsrShort;
2589 rgSchCmnApis.rgSCHUpdBsrTrunc = rgSCHCmnUpdBsrTrunc;
2590 rgSchCmnApis.rgSCHUpdBsrLong = rgSCHCmnUpdBsrLong;
2591 rgSchCmnApis.rgSCHUpdPhr = rgSCHCmnUpdPhr;
2592 rgSchCmnApis.rgSCHUpdExtPhr = rgSCHCmnUpdExtPhr;
2593 rgSchCmnApis.rgSCHContResUlGrant = rgSCHCmnContResUlGrant;
2594 rgSchCmnApis.rgSCHSrRcvd = rgSCHCmnSrRcvd;
2595 rgSchCmnApis.rgSCHFirstRcptnReq = rgSCHCmnFirstRcptnReq;
2596 rgSchCmnApis.rgSCHNextRcptnReq = rgSCHCmnNextRcptnReq;
2597 rgSchCmnApis.rgSCHFirstHqFdbkAlloc = rgSCHCmnFirstHqFdbkAlloc;
2598 rgSchCmnApis.rgSCHNextHqFdbkAlloc = rgSCHCmnNextHqFdbkAlloc;
2599 rgSchCmnApis.rgSCHDlProcAddToRetx = rgSCHCmnDlProcAddToRetx;
2600 rgSchCmnApis.rgSCHDlCqiInd = rgSCHCmnDlCqiInd;
/* presumably under an EMTC compile guard on omitted lines — TODO confirm */
2602 rgSchCmnApis.rgSCHUlProcAddToRetx = rgSCHCmnEmtcUlProcAddToRetx;
2605 rgSchCmnApis.rgSCHSrsInd = rgSCHCmnSrsInd;
2607 rgSchCmnApis.rgSCHDlTARpt = rgSCHCmnDlTARpt;
2608 rgSchCmnApis.rgSCHDlRlsSubFrm = rgSCHCmnDlRlsSubFrm;
2609 rgSchCmnApis.rgSCHUeReset = rgSCHCmnUeReset;
2611 rgSchCmnApis.rgSCHHdlCrntiCE = rgSCHCmnHdlCrntiCE;
2612 rgSchCmnApis.rgSCHDlProcAck = rgSCHCmnDlProcAck;
2613 rgSchCmnApis.rgSCHDlRelPdcchFbk = rgSCHCmnDlRelPdcchFbk;
2614 rgSchCmnApis.rgSCHUlSpsRelInd = rgSCHCmnUlSpsRelInd;
2615 rgSchCmnApis.rgSCHUlSpsActInd = rgSCHCmnUlSpsActInd;
2616 rgSchCmnApis.rgSCHUlCrcFailInd = rgSCHCmnUlCrcFailInd;
2617 rgSchCmnApis.rgSCHUlCrcInd = rgSCHCmnUlCrcInd;
2619 rgSchCmnApis.rgSCHDrxStrtInActvTmrInUl = rgSCHCmnDrxStrtInActvTmrInUl;
2620 rgSchCmnApis.rgSCHUpdUeDataIndLcg = rgSCHCmnUpdUeDataIndLcg;
/* Run each registered UL/DL scheduler's init routine against its table. */
2622 for (idx = 0; idx < RGSCH_NUM_SCHEDULERS; ++idx)
2624 rgSchUlSchdInits[idx](&rgSchUlSchdTbl[idx]);
2625 rgSchDlSchdInits[idx](&rgSchDlSchdTbl[idx]);
/* Same for the EMTC scheduler tables. */
2628 for (idx = 0; idx < RGSCH_NUM_EMTC_SCHEDULERS; ++idx)
2630 rgSchEmtcUlSchdInits[idx](&rgSchEmtcUlSchdTbl[idx]);
2631 rgSchEmtcDlSchdInits[idx](&rgSchEmtcDlSchdTbl[idx]);
/* DLFS (frequency-selective) scheduler init, phase-2 + TFU upgrade only. */
2634 #if (defined (RG_PHASE2_SCHED) && defined(TFU_UPGRADE))
2635 for (idx = 0; idx < RGSCH_NUM_DLFS_SCHEDULERS; ++idx)
2637 rgSchDlfsSchdInits[idx](&rgSchDlfsSchdTbl[idx]);
/* Carrier-aggregation secondary-cell UE hooks. */
2641 rgSchCmnApis.rgSCHRgrSCellUeCfg = rgSCHCmnRgrSCellUeCfg;
2642 rgSchCmnApis.rgSCHRgrSCellUeDel = rgSCHCmnRgrSCellUeDel;
2649 * @brief This function is a wrapper to call scheduler specific API.
2653 * Function: rgSCHCmnDlRlsSubFrm
2654 * Purpose: Releases scheduler Information from DL SubFrm.
2658 * @param[in] RgSchCellCb *cell
2659 * @param[out] CmLteTimingInfo frm
2663 Void rgSCHCmnDlRlsSubFrm(RgSchCellCb *cell,CmLteTimingInfo frm)
/* Releases scheduler information held in the DL subframe identified by
 * 'frm', then lets the DLFS scheduler re-initialize its per-subframe state.
 * @param[in] cell  cell control block owning the subframe
 * @param[in] frm   timing info selecting the subframe to release */
2665 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2669 /* Get the pointer to the subframe */
2670 sf = rgSCHUtlSubFrmGet(cell, frm);
/* Return the subframe's allocations back to the pool. */
2672 rgSCHUtlSubFrmPut(cell, sf);
2675 /* Re-initialize DLFS specific information for the sub-frame */
2676 cellSch->apisDlfs->rgSCHDlfsReinitSf(cell, sf);
2684 * @brief This function is the starting function for DL allocation.
2688 * Function: rgSCHCmnDlCmnChAlloc
2689 * Purpose: Scheduling for downlink. It performs allocation in the order
2690 * of priority with BCCH/PCH first, CCCH, Random Access and TA.
2692 * Invoked by: Scheduler
2694 * @param[in] RgSchCellCb* cell
2695 * @param[out] RgSchCmnDlRbAllocInfo* allocInfo
2699 static Void rgSCHCmnDlCcchRarAlloc(RgSchCellCb *cell)
/* Orders DL common-channel scheduling for one TTI: Msg4 retx, Msg4 tx,
 * CCCH-SDU retx, CCCH-SDU tx, then RA response (Msg3 grant), each gated
 * by the LTE-Advanced ABS (almost-blank subframe) configuration.
 * @param[in] cell  cell to schedule for; results go into cellSch->allocInfo */
2701 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Msg4 retransmissions get first priority. */
2704 rgSCHCmnDlCcchRetx(cell, &cellSch->allocInfo);
2705 /* LTE_ADV_FLAG_REMOVED_START */
2706 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2708 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2710 /*eNodeB need to blank the subframe */
/* Non-muting ABS pattern: new Msg4 transmissions still allowed. */
2714 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
/* ABS not active in this subframe: schedule Msg4 transmissions normally. */
2719 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2721 /* LTE_ADV_FLAG_REMOVED_END */
2725 /*Added these function calls for processing CCCH SDU arriving
2726 * after guard timer expiry. Functions differ from above two functions
2727 * in using ueCb instead of raCb.*/
2728 rgSCHCmnDlCcchSduRetx(cell, &cellSch->allocInfo);
2729 /* LTE_ADV_FLAG_REMOVED_START */
2730 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2732 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2734 /*eNodeB need to blank the subframe */
2738 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2743 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2745 /* LTE_ADV_FLAG_REMOVED_END */
/* RA response scheduling, only when a Msg3 subframe index is pending. */
2749 if(cellSch->ul.msg3SchdIdx != RGSCH_INVALID_INFO)
2751 /* Do not schedule msg3 if there is a CFI change ongoing */
2752 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2754 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2758 /* LTE_ADV_FLAG_REMOVED_START */
2759 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2761 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2763 /*eNodeB need to blank the subframe */
2767 /* Do not schedule msg3 if there is a CFI change ongoing */
2768 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2770 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2776 /* Do not schedule msg3 if there is a CFI change ongoing */
2777 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2779 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2782 /* LTE_ADV_FLAG_REMOVED_END */
2790 * @brief Scheduling for CCCH SDU.
2794 * Function: rgSCHCmnCcchSduAlloc
2795 * Purpose: Scheduling for CCCH SDU
2797 * Invoked by: Scheduler
2799 * @param[in] RgSchCellCb* cell
2800 * @param[in] RgSchUeCb* ueCb
2801 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2805 static S16 rgSCHCmnCcchSduAlloc(RgSchCellCb *cell,RgSchUeCb *ueCb,RgSchCmnDlRbAllocInfo *allocInfo)
/* Allocates DL resources for one UE's CCCH SDU transmission: checks
 * subframe bandwidth, obtains a HARQ process, performs dedicated
 * allocation and queues the process on the CCCH-SDU tx list.
 * @param[in]  cell       cell control block
 * @param[in]  ueCb       UE whose CCCH SDU is to be scheduled
 * @param[out] allocInfo  per-TTI RB allocation aggregate
 * @return ROK on success; failure paths return early (return lines
 *         are on omitted source lines in this excerpt) */
2807 RgSchDlRbAlloc *rbAllocInfo;
2808 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2809 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2812 /* Return if subframe BW exhausted */
2813 if (allocInfo->ccchSduAlloc.ccchSduDlSf->bw <=
2814 allocInfo->ccchSduAlloc.ccchSduDlSf->bwAssigned)
2816 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2817 "bw<=bwAssigned for UEID:%d",ueCb->ueId);
/* Acquire a HARQ process for the CCCH SDU at the current DL time. */
2821 if (rgSCHDhmGetCcchSduHqProc(ueCb, cellSch->dl.time, &(ueDl->proc)) != ROK)
2823 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2824 "rgSCHDhmGetCcchSduHqProc failed UEID:%d",ueCb->ueId);
2828 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
2829 rbAllocInfo->dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2831 if (rgSCHCmnCcchSduDedAlloc(cell, ueCb) != ROK)
2833 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
2834 rgSCHDhmRlsHqpTb(ueDl->proc, 0, FALSE);
2835 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2836 "rgSCHCmnCcchSduDedAlloc failed UEID:%d",ueCb->ueId);
/* Success: queue the HARQ proc on the tx list and count this CCCH UE. */
2839 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduTxLst, &ueDl->proc->reqLnk);
2840 ueDl->proc->reqLnk.node = (PTR)ueDl->proc;
2841 allocInfo->ccchSduAlloc.ccchSduDlSf->schdCcchUe++;
2846 * @brief This function scheduler for downlink CCCH messages.
2850 * Function: rgSCHCmnDlCcchSduTx
2851 * Purpose: Scheduling for downlink CCCH
2853 * Invoked by: Scheduler
2855 * @param[in] RgSchCellCb *cell
2856 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2860 static Void rgSCHCmnDlCcchSduTx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Walks the cell's CCCH-SDU UE list and attempts a fresh allocation for
 * each eligible UE, up to the per-subframe CCCH cap.
 * @param[in]  cell       cell control block
 * @param[out] allocInfo  per-TTI RB allocation aggregate */
2864 RgSchCmnDlUe *ueCmnDl;
2865 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2866 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2869 node = cell->ccchSduUeLst.first;
/* Stop once the configured CCCH-UEs-per-subframe limit is reached
 * (0 means no limit). */
2872 if(cellSch->dl.maxCcchPerDlSf &&
2873 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2879 ueCb = (RgSchUeCb *)(node->node);
2880 ueCmnDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2882 /* Fix : syed postpone scheduling for this
2883 * until msg4 is done */
2884 /* Fix : syed RLC can erroneously send CCCH SDU BO
2885 * twice. Hence an extra guard to avoid if already
2886 * scheduled for RETX */
2887 if ((!(ueCb->dl.dlInactvMask & RG_HQENT_INACTIVE)) &&
2890 if ((rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)) != ROK)
/* Defensive branch: condition on an omitted line; logs an invariant
 * violation for this UE. */
2897 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"ERROR!! THIS SHOULD "
2898 "NEVER HAPPEN for UEID:%d", ueCb->ueId);
2908 * @brief This function scheduler for downlink CCCH messages.
2912 * Function: rgSCHCmnDlCcchTx
2913 * Purpose: Scheduling for downlink CCCH
2915 * Invoked by: Scheduler
2917 * @param[in] RgSchCellCb *cell
2918 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2922 static Void rgSCHCmnDlCcchTx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Walks the RA to-be-scheduled list and attempts a Msg4 allocation for
 * each RA control block, up to the per-subframe CCCH cap.
 * @param[in]  cell       cell control block
 * @param[out] allocInfo  per-TTI RB allocation aggregate */
2926 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2927 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
2929 node = cell->raInfo.toBeSchdLst.first;
/* Stop once the configured CCCH-UEs-per-subframe limit is reached
 * (0 means no limit). */
2932 if(cellSch->dl.maxCcchPerDlSf &&
2933 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2940 raCb = (RgSchRaCb *)(node->node);
2942 /* Address allocation for this UE for MSG 4 */
2943 /* Allocation for Msg4 */
2944 if ((rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)) != ROK)
2955 * @brief This function scheduler for downlink CCCH messages.
2959 * Function: rgSCHCmnDlCcchSduRetx
2960 * Purpose: Scheduling for downlink CCCH
2962 * Invoked by: Scheduler
2964 * @param[in] RgSchCellCb *cell
2965 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2969 static Void rgSCHCmnDlCcchSduRetx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Schedules pending CCCH SDU HARQ retransmissions: walks the retx list,
 * skips procs that cannot fit (TDD DwPTS constraint or insufficient BW),
 * fills the UE's RB-alloc block from the stored HARQ TB info, and queues
 * the proc on the CCCH-SDU retx list. Accumulated RBs are committed to
 * dlSf->bwAssigned at the end.
 * @param[in]  cell       cell control block
 * @param[out] allocInfo  per-TTI RB allocation aggregate */
2971 RgSchDlRbAlloc *rbAllocInfo;
2973 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2975 RgSchDlHqProcCb *hqP;
2978 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2981 node = cellSch->dl.ccchSduRetxLst.first;
/* Respect the per-subframe CCCH UE cap (0 means no limit). */
2984 if(cellSch->dl.maxCcchPerDlSf &&
2985 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2992 hqP = (RgSchDlHqProcCb *)(node->node);
2995 /* DwPts Scheduling Changes Start */
2997 if (rgSCHCmnRetxAvoidTdd(allocInfo->ccchSduAlloc.ccchSduDlSf,
3003 /* DwPts Scheduling Changes End */
/* Skip if the retx grant would exceed the remaining subframe BW. */
3005 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3009 ueCb = (RgSchUeCb*)(hqP->hqE->ue);
3010 ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
3012 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
3013 /* Fill RB Alloc Info */
3014 rbAllocInfo->dlSf = dlSf;
3015 rbAllocInfo->tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3016 rbAllocInfo->rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3017 /* Fix : syed iMcs setting did not correspond to RETX */
3018 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3019 rbAllocInfo->tbInfo[0].imcs);
3020 rbAllocInfo->rnti = ueCb->ueId;
3021 rbAllocInfo->tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3022 /* Fix : syed Copying info in entirety without depending on stale TX information */
3023 rbAllocInfo->tbInfo[0].tbCb = &hqP->tbInfo[0];
3024 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
3025 /* Fix : syed Assigning proc to scratchpad */
3028 retxBw += rbAllocInfo->rbsReq;
3030 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduRetxLst, \
3032 hqP->reqLnk.node = (PTR)hqP;
/* Commit the total RBs claimed by all retransmissions in this pass. */
3036 dlSf->bwAssigned += retxBw;
3042 * @brief This function scheduler for downlink CCCH messages.
3046 * Function: rgSCHCmnDlCcchRetx
3047 * Purpose: Scheduling for downlink CCCH
3049 * Invoked by: Scheduler
3051 * @param[in] RgSchCellCb *cell
3052 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3056 static Void rgSCHCmnDlCcchRetx(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
/* Schedules pending Msg4 HARQ retransmissions; mirrors
 * rgSCHCmnDlCcchSduRetx but operates on RA control blocks (raCb) rather
 * than UE control blocks, and queues onto the msg4 retx list.
 * @param[in]  cell       cell control block
 * @param[out] allocInfo  per-TTI RB allocation aggregate */
3059 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3061 RgSchDlHqProcCb *hqP;
3063 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
3066 node = cellSch->dl.msg4RetxLst.first;
/* Respect the per-subframe CCCH UE cap (0 means no limit). */
3069 if(cellSch->dl.maxCcchPerDlSf &&
3070 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3076 hqP = (RgSchDlHqProcCb *)(node->node);
3080 /* DwPts Scheduling Changes Start */
3082 if (rgSCHCmnRetxAvoidTdd(allocInfo->msg4Alloc.msg4DlSf,
3088 /* DwPts Scheduling Changes End */
/* Skip if the retx grant would exceed the remaining subframe BW. */
3090 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3094 raCb = (RgSchRaCb*)(hqP->hqE->raCb);
3095 /* Fill RB Alloc Info */
3096 raCb->rbAllocInfo.dlSf = dlSf;
3097 raCb->rbAllocInfo.tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3098 raCb->rbAllocInfo.rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3099 /* Fix : syed iMcs setting did not correspond to RETX */
3100 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3101 raCb->rbAllocInfo.tbInfo[0].imcs);
/* Msg4 is addressed with the temporary C-RNTI assigned during RA. */
3102 raCb->rbAllocInfo.rnti = raCb->tmpCrnti;
3103 raCb->rbAllocInfo.tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3104 /* Fix : syed Copying info in entirety without depending on stale TX information */
3105 raCb->rbAllocInfo.tbInfo[0].tbCb = &hqP->tbInfo[0];
3106 raCb->rbAllocInfo.tbInfo[0].schdlngForTb = TRUE;
3108 retxBw += raCb->rbAllocInfo.rbsReq;
3110 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4RetxLst, \
3112 hqP->reqLnk.node = (PTR)hqP;
/* Commit the total RBs claimed by all retransmissions in this pass. */
3116 dlSf->bwAssigned += retxBw;
3122 * @brief This function implements scheduler DL allocation for
3123 * for broadcast (on PDSCH) and paging.
3127 * Function: rgSCHCmnDlBcchPcch
3128 * Purpose: This function implements scheduler for DL allocation
3129 * for broadcast (on PDSCH) and paging.
3131 * Invoked by: Scheduler
3133 * @param[in] RgSchCellCb* cell
3138 static Void rgSCHCmnDlBcchPcch(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo,RgSchInfSfAlloc - see note)
/* NOTE(review): the line above is reproduced verbatim below; this header
 * comment only summarizes. Allocates DL resources for broadcast (BCCH on
 * BCH and on DLSCH) and paging (PCCH) for the target subframe, which is
 * computed RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL subframes ahead
 * of the cell's current time.
 * @param[in]  cell        cell control block
 * @param[out] allocInfo   per-TTI RB allocation aggregate
 * @param[out] subfrmAlloc subframe allocation info passed towards MAC */
3140 CmLteTimingInfo frm;
3142 RgSchClcDlLcCb *pcch;
3146 RgSchClcDlLcCb *bcch, *bch;
3147 #endif/*RGR_SI_SCH*/
3150 frm = cell->crntTime;
3152 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
3153 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
3154 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
3156 // RGSCH_SUBFRAME_INDEX(frm);
3157 //RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
3160 /* Compute the subframe for which allocation is being made */
3161 /* essentially, we need pointer to the dl frame for this subframe */
3162 sf = rgSCHUtlSubFrmGet(cell, frm);
/* ---- BCCH carried on BCH (MIB) ---- */
3166 bch = rgSCHDbmGetBcchOnBch(cell);
3167 #if (ERRCLASS & ERRCLS_DEBUG)
3170 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on BCH is not configured");
3174 if (bch->boLst.first != NULLP)
3176 bo = (RgSchClcBoRpt *)(bch->boLst.first->node);
/* Transmit only in the exact subframe the BO report was stamped for. */
3177 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3179 sf->bch.tbSize = bo->bo;
3180 cmLListDelFrm(&bch->boLst, bch->boLst.first);
3181 /* ccpu00117052 - MOD - Passing double pointer
3182 for proper NULLP assignment*/
3183 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(*bo));
3184 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, bch->lcId,TRUE);
/* MIB repeats every 4 frames in subframe 0. */
3189 if ((frm.sfn % 4 == 0) && (frm.subframe == 0))
/* ---- BCCH carried on DLSCH (SIB1 / SI messages) ---- */
3194 allocInfo->bcchAlloc.schdFirst = FALSE;
3195 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
3196 #if (ERRCLASS & ERRCLS_DEBUG)
3199 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3203 if (bcch->boLst.first != NULLP)
3205 bo = (RgSchClcBoRpt *)(bcch->boLst.first->node);
3207 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3209 allocInfo->bcchAlloc.schdFirst = TRUE;
3210 /* Time to perform allocation for this BCCH transmission */
3211 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
/* First BCCH instance not due: try the second DLSCH BCCH, whose BO
 * reports carry a (timeToTx, maxTimeToTx) transmission window. */
3215 if(!allocInfo->bcchAlloc.schdFirst)
3218 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
3219 #if (ERRCLASS & ERRCLS_DEBUG)
3222 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3226 lnk = bcch->boLst.first;
3227 while (lnk != NULLP)
3229 bo = (RgSchClcBoRpt *)(lnk->node);
3231 valid = rgSCHCmnChkInWin(frm, bo->timeToTx, bo->maxTimeToTx);
3235 bo->i = RGSCH_CALC_SF_DIFF(frm, bo->timeToTx);
3236 /* Time to perform allocation for this BCCH transmission */
3237 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
/* Window already passed: discard the stale BO report. */
3242 valid = rgSCHCmnChkPastWin(frm, bo->maxTimeToTx);
3245 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
3246 /* ccpu00117052 - MOD - Passing double pointer
3247 for proper NULLP assignment*/
3248 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo,
3249 sizeof(RgSchClcBoRpt));
/* RGR_SI_SCH build path: SI scheduling handled by dedicated routine. */
3255 rgSCHDlSiSched(cell, allocInfo, subfrmAlloc);
3256 #endif/*RGR_SI_SCH*/
/* ---- PCCH (paging) ---- */
3258 pcch = rgSCHDbmGetPcch(cell);
3262 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"PCCH on DLSCH is not configured");
3266 if (pcch->boLst.first != NULLP)
3268 bo = (RgSchClcBoRpt *)(pcch->boLst.first->node);
3270 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3272 /* Time to perform allocation for this PCCH transmission */
3273 rgSCHCmnClcAlloc(cell, sf, pcch, RGSCH_P_RNTI, allocInfo);
3281 * Fun: rgSCHCmnChkInWin
3283 * Desc: This function checks if frm occurs in window
3285 * Ret: TRUE - if in window
3290 * File: rg_sch_cmn.c
3293 Bool rgSCHCmnChkInWin(CmLteTimingInfo frm,CmLteTimingInfo start,CmLteTimingInfo end)
/* Returns TRUE when timing 'frm' lies inside the window [start, end],
 * handling the three cases: normal window (end.sfn > start.sfn), SFN
 * wrap-around (end.sfn < start.sfn), and single-frame window. */
3298 if (end.sfn > start.sfn)
3300 if (frm.sfn > start.sfn
3301 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3303 if (frm.sfn < end.sfn
3305 || (frm.sfn == end.sfn && frm.slot <= end.slot))
/* NOTE(review): the next line compares against start.slot while testing
 * end.sfn — likely an alternate condition under a compile guard on
 * omitted lines, or a defect. Verify against the full source before
 * relying on this branch. */
3307 || (frm.sfn == end.sfn && frm.slot <= start.slot))
3314 /* Testing for wrap around, sfn wraparound check should be enough */
3315 else if (end.sfn < start.sfn)
/* In the wrap case frm is in-window if it is at/after start ... */
3317 if (frm.sfn > start.sfn
3318 || (frm.sfn == start.sfn && frm.slot >= start.slot))
/* ... or at/before end (after the wrap). */
3324 if (frm.sfn < end.sfn
3325 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3331 else /* start.sfn == end.sfn */
3333 if (frm.sfn == start.sfn
3334 && (frm.slot >= start.slot
3335 && frm.slot <= end.slot))
3342 } /* end of rgSCHCmnChkInWin*/
3346 * Fun: rgSCHCmnChkPastWin
3348 * Desc: This function checks if frm has gone past window edge
3350 * Ret: TRUE - if past window edge
3355 * File: rg_sch_cmn.c
3358 Bool rgSCHCmnChkPastWin(CmLteTimingInfo frm,CmLteTimingInfo end)
/* Returns TRUE when 'frm' has already passed the window edge 'end'.
 * Implemented by testing membership of frm in the one-frame-long window
 * that starts immediately after 'end'. */
3360 CmLteTimingInfo refFrm = end;
/* refFrm = end advanced by one full frame; end advanced by one subframe,
 * so the "past" window is (end, end + 1 frame]. */
3364 RGSCH_INCR_FRAME(refFrm.sfn);
3365 RGSCH_INCR_SUB_FRAME(end, 1);
3366 pastWin = rgSCHCmnChkInWin(frm, end, refFrm);
3369 } /* end of rgSCHCmnChkPastWin*/
3372 * @brief This function implements allocation of the resources for common
3373 * channels BCCH, PCCH.
3377 * Function: rgSCHCmnClcAlloc
3378 * Purpose: This function implements selection of number of RBs based
3379 * the allowed grant for the service. It is also responsible
3380 * for selection of MCS for the transmission.
3382 * Invoked by: Scheduler
3384 * @param[in] RgSchCellCb *cell,
3385 * @param[in] RgSchDlSf *sf,
3386 * @param[in] RgSchClcDlLcCb *lch,
3387 * @param[in] uint16_t rnti,
3388 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3392 static Void rgSCHCmnClcAlloc(RgSchCellCb *cell,RgSchDlSf *sf,RgSchClcDlLcCb *lch,uint16_t rnti,RgSchCmnDlRbAllocInfo *allocInfo)
/* Computes the RB count and MCS for a common-channel (BCCH/PCCH)
 * transmission sized from the first BO report on 'lch', compensates for
 * DwPTS special subframes, checks subframe BW, and records the result in
 * either bcchAlloc (SI-RNTI) or pcchAlloc (P-RNTI).
 * @param[in]  cell       cell control block
 * @param[in]  sf         target DL subframe
 * @param[in]  lch        common logical channel carrying the BO report
 * @param[in]  rnti       RGSCH_SI_RNTI or RGSCH_P_RNTI
 * @param[out] allocInfo  per-TTI RB allocation aggregate */
3394 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3401 uint8_t cfi = cellDl->currCfi;
3405 bo = (RgSchClcBoRpt *)(lch->boLst.first->node);
3409 /* rgSCHCmnClcRbAllocForFxdTb(cell, bo->bo, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: derive RB count by scanning the TB-size table;
 * otherwise divide the payload bits by the per-RB capacity. */
3410 if(cellDl->bitsPerRb==0)
3412 while ((rgTbSzTbl[0][0][rb]) < (tbs*8))
3420 rb = RGSCH_CEIL((tbs*8), cellDl->bitsPerRb);
3422 /* DwPTS Scheduling Changes Start */
3424 if(sf->sfType == RG_SCH_SPL_SF_DATA)
3426 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
3428 /* Calculate the less RE's because of DwPTS */
3429 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
3431 /* Increase number of RBs in Spl SF to compensate for lost REs */
3432 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
3435 /* DwPTS Scheduling Changes End */
3436 /*ccpu00115595- end*/
3437 /* additional check to see if required RBs
3438 * exceeds the available */
3439 if (rb > sf->bw - sf->bwAssigned)
3441 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"BW allocation "
3442 "failed for CRNTI:%d",rnti);
3446 /* Update the subframe Allocated BW field */
3447 sf->bwAssigned = sf->bwAssigned + rb;
3448 /* Fill in the BCCH/PCCH transmission info to the RBAllocInfo struct */
3449 if (rnti == RGSCH_SI_RNTI)
3451 allocInfo->bcchAlloc.rnti = rnti;
3452 allocInfo->bcchAlloc.dlSf = sf;
3453 allocInfo->bcchAlloc.tbInfo[0].bytesReq = tbs;
3454 allocInfo->bcchAlloc.rbsReq = rb;
3455 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
3456 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
3457 /* Nprb indication at PHY for common Ch */
3458 allocInfo->bcchAlloc.nPrb = bo->nPrb;
/* else: paging allocation (P-RNTI path). */
3462 allocInfo->pcchAlloc.rnti = rnti;
3463 allocInfo->pcchAlloc.dlSf = sf;
3464 allocInfo->pcchAlloc.tbInfo[0].bytesReq = tbs;
3465 allocInfo->pcchAlloc.rbsReq = rb;
3466 allocInfo->pcchAlloc.tbInfo[0].imcs = mcs;
3467 allocInfo->pcchAlloc.tbInfo[0].noLyr = 1;
3468 allocInfo->pcchAlloc.nPrb = bo->nPrb;
3475 * @brief This function implements PDCCH allocation for common channels.
3479 * Function: rgSCHCmnCmnPdcchAlloc
3480 * Purpose: This function implements allocation of PDCCH for a UE.
3481 * 1. This uses index 0 of PDCCH table for efficiency.
3482 * 2. Uses the candidate PDCCH count for the aggr level.
3483 * 3. Look for availability for each candidate and choose
3484 * the first one available.
3486 * Invoked by: Scheduler
3488 * @param[in] RgSchCellCb *cell
3489 * @param[in] RgSchDlSf *sf
3490 * @return RgSchPdcch *
3491 * -# NULLP when unsuccessful
3494 RgSchPdcch *rgSCHCmnCmnPdcchAlloc(RgSchCellCb *cell,RgSchDlSf *subFrm)
/* Allocates a PDCCH from the common search space of 'subFrm' at the
 * cell-configured common-channel aggregation level.
 * @param[in] cell    cell control block
 * @param[in] subFrm  DL subframe whose PDCCH resources are used
 * @return pointer to the allocated PDCCH, or NULLP on failure (failure
 *         also sets subFrm->isCceFailure) */
3496 CmLteAggrLvl aggrLvl;
3497 RgSchPdcchInfo *pdcchInfo;
3499 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3500 uint8_t numCce; /*store num CCEs based on
3501 aggregation level */
3503 aggrLvl = cellSch->dl.cmnChAggrLvl;
3505 pdcchInfo = &(subFrm->pdcchInfo);
3507 /* Updating the no. of nCce in pdcchInfo, in case if CFI
3510 if(subFrm->nCce != pdcchInfo->nCce)
/* Re-init PDCCH bookkeeping when the CCE count changed (CFI change). */
3512 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3515 if(cell->nCce != pdcchInfo->nCce)
3517 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Map aggregation level to CCE count (switch arms; numCce assignments
 * are on omitted lines in this excerpt). */
3523 case CM_LTE_AGGR_LVL4:
3526 case CM_LTE_AGGR_LVL8:
3529 case CM_LTE_AGGR_LVL16:
3536 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3539 pdcch->isSpsRnti = FALSE;
3541 /* Increment the CCE used counter in the current subframe */
3542 subFrm->cceCnt += numCce;
3543 pdcch->pdcchSearchSpace = RG_SCH_CMN_SEARCH_SPACE;
3548 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3549 subFrm->isCceFailure = TRUE;
3551 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
3552 "PDCCH ERR: NO PDDCH AVAIL IN COMMON SEARCH SPACE aggr:%u",
3559 * @brief This function implements bandwidth allocation for common channels.
3563 * Function: rgSCHCmnClcRbAlloc
3564 * Purpose: This function implements bandwith allocation logic
3565 * for common control channels.
3567 * Invoked by: Scheduler
3569 * @param[in] RgSchCellCb* cell
3570 * @param[in] uint32_t bo
3571 * @param[in] uint8_t cqi
3572 * @param[in] uint8_t *rb
3573 * @param[in] uint32_t *tbs
3574 * @param[in] uint8_t *mcs
3575 * @param[in] RgSchDlSf *sf
/* Two signature variants selected by conditional compilation (the
 * #endif below names LTEMAC_SPS); parameter lists are on omitted lines. */
3580 Void rgSCHCmnClcRbAlloc
3593 static Void rgSCHCmnClcRbAlloc
3603 #endif /* LTEMAC_SPS */
/* Computes RB count, TBS (bytes) and MCS needed to carry 'bo' bytes on a
 * common control channel at the given CQI, capped at the cell's
 * per-UE max DL bandwidth. Outputs via the *rb/*tbs/*mcs pointers. */
3606 RgSchCmnTbSzEff *effTbl;
3609 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3610 uint8_t cfi = cellSch->dl.currCfi;
3613 /* first get the CQI to MCS table and determine the number of RBs */
3614 effTbl = (RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]);
3615 iTbsVal = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[cqi];
3616 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3618 /* Efficiency is number of bits per 1024 REs */
3619 eff = (*effTbl)[iTbsVal];
3621 /* Get the number of REs needed for this bo */
3622 noRes = ((bo * 8 * 1024) / eff );
3624 /* Get the number of RBs needed for this transmission */
3625 /* Number of RBs = No of REs / No of REs per RB */
3626 tmpRb = RGSCH_CEIL(noRes, cellSch->dl.noResPerRb[cfi]);
3627 /* KWORK_FIX: added check to see if rb has crossed maxRb*/
3628 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3629 if (tmpRb > cellSch->dl.maxDlBwPerUe)
3631 tmpRb = cellSch->dl.maxDlBwPerUe;
/* Grow RB count until the TB size covers 'bo' or the per-UE cap is hit. */
3633 while ((rgTbSzTbl[0][iTbsVal][tmpRb-1]/8) < bo &&
3634 (tmpRb < cellSch->dl.maxDlBwPerUe))
3637 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3639 *tbs = rgTbSzTbl[0][iTbsVal][tmpRb-1]/8;
3640 *rb = (uint8_t)tmpRb;
3641 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3649 * @brief Scheduling for MSG4.
3653 * Function: rgSCHCmnMsg4Alloc
3654 * Purpose: Scheduling for MSG4
3656 * Invoked by: Scheduler
3658 * @param[in] RgSchCellCb* cell
3659 * @param[in] RgSchRaCb* raCb
3660 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3664 static S16 rgSCHCmnMsg4Alloc(RgSchCellCb *cell,RgSchRaCb *raCb,RgSchCmnDlRbAllocInfo *allocInfo)
/* Allocates DL resources for one RA procedure's Msg4: checks subframe BW,
 * obtains the Msg4 HARQ process, performs dedicated allocation and queues
 * the process on the msg4 tx list.
 * @param[in]  cell       cell control block
 * @param[in]  raCb       RA control block for the contention-based UE
 * @param[out] allocInfo  per-TTI RB allocation aggregate
 * @return ROK on success; failure paths return early (return lines are
 *         on omitted source lines in this excerpt) */
3666 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3669 /* SR_RACH_STATS : MSG4 TO BE TXED */
3671 /* Return if subframe BW exhausted */
3672 if (allocInfo->msg4Alloc.msg4DlSf->bw <=
3673 allocInfo->msg4Alloc.msg4DlSf->bwAssigned)
3675 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId ,
/* Acquire the Msg4 HARQ process at the current DL time. */
3680 if (rgSCHDhmGetMsg4HqProc(raCb, cellSch->dl.time) != ROK)
3682 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3683 "rgSCHDhmGetMsg4HqProc failed");
3687 raCb->rbAllocInfo.dlSf = allocInfo->msg4Alloc.msg4DlSf;
3689 if (rgSCHCmnMsg4DedAlloc(cell, raCb) != ROK)
3691 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
3692 rgSCHDhmRlsHqpTb(raCb->dlHqE->msg4Proc, 0, FALSE);
3693 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3694 "rgSCHCmnMsg4DedAlloc failed.");
/* Success: queue the Msg4 HARQ proc and count this CCCH UE. */
3697 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4TxLst, &raCb->dlHqE->msg4Proc->reqLnk);
3698 raCb->dlHqE->msg4Proc->reqLnk.node = (PTR)raCb->dlHqE->msg4Proc;
3699 allocInfo->msg4Alloc.msg4DlSf->schdCcchUe++;
3706 * @brief This function implements PDCCH allocation for an UE.
3710 * Function: PdcchAlloc
3711 * Purpose: This function implements allocation of PDCCH for an UE.
3712 * 1. Get the aggregation level for the CQI of the UE.
3713 * 2. Get the candidate PDCCH count for the aggr level.
3714 * 3. Look for availability for each candidate and choose
3715 * the first one available.
3717 * Invoked by: Scheduler
3722 * @param[in] dciFrmt
3723 * @return RgSchPdcch *
3724 * -# NULLP when unsuccessful
/* Allocates a PDCCH in the UE-specific search space for one UE:
 * 1. picks an aggregation level (currently hard-coded to LVL2, then bumped
 *    one step for extra decoding redundancy),
 * 2. optionally forces a common-search-space allocation for DCI 1A,
 * 3. checks CCE availability and reserves the first free candidate.
 * Returns the allocated RgSchPdcch*, or NULLP when no CCEs are available
 * (in which case subFrm->isCceFailure is set). */
3727 RgSchPdcch *rgSCHCmnPdcchAlloc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlSf *subFrm,uint8_t cqi,TfuDciFormat dciFrmt,Bool isDtx)
3729 CmLteAggrLvl aggrLvl;
3730 RgSchPdcchInfo *pdcchInfo;
3734 /* 3.1 consider the selected DCI format size in determining the
3735 * aggregation level */
3736 //TODO_SID Need to update. Currently using 4 aggregation level
3737 aggrLvl = CM_LTE_AGGR_LVL2;//cellSch->dciAggrLvl[cqi][dciFrmt];
/* DCI 1A for a UE flagged for common UL PDCCH goes through the common
 * search space allocator instead */
3740 if((dciFrmt == TFU_DCI_FORMAT_1A) &&
3741 ((ue) && (ue->allocCmnUlPdcch)) )
3743 pdcch = rgSCHCmnCmnPdcchAlloc(cell, subFrm);
3744 /* Since CRNTI Scrambled */
3747 pdcch->dciNumOfBits = ue->dciSize.cmnSize[dciFrmt];
3748 // prc_trace_format_string(PRC_TRACE_GROUP_PS, PRC_TRACE_INFO_LOW,"Forcing alloc in CMN search spc size %d fmt %d \n",
3749 // pdcch->dciNumOfBits, dciFrmt);
3755 /* Incrementing aggrLvl by 1 if it is not AGGR_LVL16 (MAX SIZE)
3756 * in order to increase the redundancy bits for better decoding at the UE */
3759 if (aggrLvl != CM_LTE_AGGR_LVL16)
3763 case CM_LTE_AGGR_LVL2:
3764 aggrLvl = CM_LTE_AGGR_LVL4;
3766 case CM_LTE_AGGR_LVL4:
3767 aggrLvl = CM_LTE_AGGR_LVL8;
3769 case CM_LTE_AGGR_LVL8:
3770 aggrLvl = CM_LTE_AGGR_LVL16;
3779 pdcchInfo = &subFrm->pdcchInfo;
3781 /* Updating the no. of nCce in pdcchInfo, in case if CFI
/* NOTE(review): the two nCce-refresh branches below appear to belong to
 * alternate (conditionally compiled) CFI sources — confirm against full file */
3784 if(subFrm->nCce != pdcchInfo->nCce)
3786 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3789 if(cell->nCce != pdcchInfo->nCce)
3791 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Not enough CCEs in this subframe for the chosen aggregation level */
3795 if (pdcchInfo->nCce < (1 << (aggrLvl - 1)))
3797 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3798 subFrm->isCceFailure = TRUE;
3799 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
3800 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
3806 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3808 /* SR_RACH_STATS : Reset isTBMsg4 */
3809 pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4= FALSE;
3810 pdcch->dci.u.format0Info.isSrGrant = FALSE;
3812 pdcch->isSpsRnti = FALSE;
3814 /* Increment the CCE used counter in the current subframe */
3815 subFrm->cceCnt += aggrLvl;
3816 pdcch->pdcchSearchSpace = RG_SCH_UE_SPECIFIC_SEARCH_SPACE;
/* Pick the DCI size: secondary-cell UEs and 5GTF UEs use the fixed
 * A1/B1 size; otherwise the cell's per-format size table applies */
3820 if (ue->cell != cell)
3822 /* Secondary Cell */
3823 //pdcch->dciNumOfBits = ue->dciSize.noUlCcSize[dciFrmt];
3824 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
3829 //pdcch->dciNumOfBits = ue->dciSize.dedSize[dciFrmt];
3830 //TODO_SID Need to update dci size.
3831 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
3837 pdcch->dciNumOfBits = cell->dciSize.size[dciFrmt];
/* No free candidate found among the available CCEs */
3842 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3843 subFrm->isCceFailure = TRUE;
3845 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
3846 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
3853 * @brief This function implements BW allocation for CCCH SDU
3857 * Function: rgSCHCmnCcchSduDedAlloc
3858 * Purpose: Downlink bandwidth Allocation for CCCH SDU.
3860 * Invoked by: Scheduler
3862 * @param[in] RgSchCellCb* cell
3863 * @param[out] RgSchUeCb *ueCb
/* Computes the DL RB/TBS/MCS allocation for a CCCH SDU destined to a
 * connected UE: derives the effective BO (SDU + header), runs the common
 * channel RB allocator, clamps the request to the remaining subframe BW
 * (falling back to "whole BW, raise iTbs until BO fits" when nothing has
 * been assigned yet), then fills the alloc CB with HARQ TB and RNTI info. */
3867 static S16 rgSCHCmnCcchSduDedAlloc(RgSchCellCb *cell,RgSchUeCb *ueCb)
3869 RgSchDlHqEnt *hqE = NULLP;
3871 RgSchDlRbAlloc *rbAllocinfo = NULLP;
3872 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3876 uint8_t cfi = cellDl->currCfi;
3880 rbAllocinfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
/* Effective BO includes the CCCH SDU MAC header */
3882 effBo = ueCb->dlCcchInfo.bo + RGSCH_CCCH_SDU_HDRSIZE;
3885 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
3886 &rbAllocinfo->tbInfo[0].bytesReq,
3887 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
3888 #else /* LTEMAC_SPS */
3889 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
3890 &rbAllocinfo->tbInfo[0].bytesReq,\
3891 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
3893 #endif /* LTEMAC_SPS */
3896 /* Cannot exceed the total number of RBs in the cell */
3897 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
3898 rbAllocinfo->dlSf->bwAssigned)))
3900 /* Check if atleast one allocation was possible.
3901 This may be the case where the Bw is very less and
3902 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
3903 if (rbAllocinfo->dlSf->bwAssigned == 0)
3905 numRb = rbAllocinfo->dlSf->bw;
3906 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
/* Raise iTbs until the TB (bits -> bytes, hence /8) covers the BO */
3907 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
3911 rbAllocinfo->rbsReq = numRb;
3912 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
3913 /* DwPTS Scheduling Changes Start */
3915 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
3917 rbAllocinfo->tbInfo[0].bytesReq =
3918 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1,cfi);
3921 /* DwPTS Scheduling Changes End */
3922 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
3930 /* Update the subframe Allocated BW field */
3931 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
3932 rbAllocinfo->rbsReq;
/* Bind the allocation to the UE's CCCH SDU HARQ transport block */
3933 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
3934 rbAllocinfo->tbInfo[0].tbCb = &hqE->ccchSduProc->tbInfo[0];
3935 rbAllocinfo->rnti = ueCb->ueId;
3936 rbAllocinfo->tbInfo[0].noLyr = 1;
3943 * @brief This function implements BW allocation for MSG4
3947 * Function: rgSCHCmnMsg4DedAlloc
3948 * Purpose: Downlink bandwidth Allocation for MSG4.
3950 * Invoked by: Scheduler
3952 * @param[in] RgSchCellCb* cell
3953 * @param[out] RgSchRaCb *raCb
/* Computes the DL RB/TBS/MCS allocation for MSG4: effective BO is the CCCH
 * BO plus MSG4 header and contention-resolution ID sizes. Mirrors
 * rgSCHCmnCcchSduDedAlloc's clamping logic, then fills the RA control
 * block's alloc info with the temporary C-RNTI and MSG4 HARQ TB. */
3957 static S16 rgSCHCmnMsg4DedAlloc(RgSchCellCb *cell,RgSchRaCb *raCb)
3960 RgSchDlRbAlloc *rbAllocinfo = &raCb->rbAllocInfo;
3964 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3965 uint8_t cfi = cellDl->currCfi;
/* MSG4 carries the contention resolution ID in addition to the MAC header */
3969 effBo = raCb->dlCcchInfo.bo + RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE;
3972 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
3973 &rbAllocinfo->tbInfo[0].bytesReq,\
3974 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
3975 #else /* LTEMAC_SPS */
3976 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
3977 &rbAllocinfo->tbInfo[0].bytesReq,\
3978 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
3980 #endif /* LTEMAC_SPS */
3983 /* Cannot exceed the total number of RBs in the cell */
3984 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
3985 rbAllocinfo->dlSf->bwAssigned)))
3987 /* Check if atleast one allocation was possible.
3988 This may be the case where the Bw is very less and
3989 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
3990 if (rbAllocinfo->dlSf->bwAssigned == 0)
3992 numRb = rbAllocinfo->dlSf->bw;
3993 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
/* Raise iTbs until the TB (bits -> bytes, hence /8) covers the BO */
3994 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
3998 rbAllocinfo->rbsReq = numRb;
3999 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
4000 /* DwPTS Scheduling Changes Start */
4002 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4004 rbAllocinfo->tbInfo[0].bytesReq =
4005 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1, cfi);
4008 /* DwPTS Scheduling Changes End */
4009 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4017 /* Update the subframe Allocated BW field */
4018 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4019 rbAllocinfo->rbsReq;
/* MSG4 is addressed by the temporary C-RNTI assigned during RA */
4020 rbAllocinfo->rnti = raCb->tmpCrnti;
4021 rbAllocinfo->tbInfo[0].tbCb = &raCb->dlHqE->msg4Proc->tbInfo[0];
4022 rbAllocinfo->tbInfo[0].schdlngForTb = TRUE;
4023 rbAllocinfo->tbInfo[0].noLyr = 1;
4030 * @brief This function implements scheduling for RA Response.
4034 * Function: rgSCHCmnDlRaRsp
4035 * Purpose: Downlink scheduling for RA responses.
4037 * Invoked by: Scheduler
4039 * @param[in] RgSchCellCb* cell
/* TDD variant: schedules RA Responses for the DL subframe at
 * (current time + RG_SCH_CMN_DL_DELTA). Walks the cell's per-subframe
 * RACH response list over SFNs and subframes, derives each RA-RNTI and
 * its request-list index, and hands every non-empty RA-RNTI to
 * rgSCHCmnRaRspAlloc until resources run out. */
4043 static Void rgSCHCmnDlRaRsp(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
4045 CmLteTimingInfo frm;
4046 CmLteTimingInfo schFrm;
4052 RgSchTddRachRspLst *rachRsp;
4053 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
4058 frm = cell->crntTime;
4059 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4061 /* Compute the subframe for which allocation is being made */
4062 /* essentially, we need pointer to the dl frame for this subframe */
4063 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4065 /* Get the RACH Response scheduling related information
4066 * for the subframe with RA index */
4067 raIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][frm.subframe]-1;
4069 rachRsp = &cell->rachRspLst[raIdx];
4071 for(sfnIdx = 0; sfnIdx < rachRsp->numRadiofrms; sfnIdx++)
4073 /* For all scheduled RACH Responses in SFNs */
4075 RG_SCH_CMN_DECR_FRAME(schFrm.sfn, rachRsp->rachRsp[sfnIdx].sfnOffset);
4076 /* For all scheduled RACH Responses in subframes */
4078 subfrmIdx < rachRsp->rachRsp[sfnIdx].numSubfrms; subfrmIdx++)
4080 schFrm.subframe = rachRsp->rachRsp[sfnIdx].subframe[subfrmIdx];
4081 /* compute the last RA RNTI used in the previous subframe */
4082 raIdx = (((schFrm.sfn % cell->raInfo.maxRaSize) * \
4083 RGSCH_NUM_SUB_FRAMES * RGSCH_MAX_RA_RNTI_PER_SUBFRM) \
4086 /* For all RA RNTIs within a subframe */
4088 for(i=0; (i < RGSCH_MAX_RA_RNTI_PER_SUBFRM) && \
4089 (noRaRnti < RGSCH_MAX_TDD_RA_RSP_ALLOC); i++)
/* RA-RNTI derivation: 1 + subframe + 10*i (see 36.321 RA-RNTI formula) */
4091 rarnti = (schFrm.subframe + RGSCH_NUM_SUB_FRAMES*i + 1);
4092 rntiIdx = (raIdx + RGSCH_NUM_SUB_FRAMES*i);
4094 if (cell->raInfo.raReqLst[rntiIdx].first != NULLP)
4096 /* compute the next RA RNTI */
4097 if (rgSCHCmnRaRspAlloc(cell, subFrm, rntiIdx,
4098 rarnti, noRaRnti, allocInfo) != ROK)
4100 /* The resources are exhausted */
4114 * @brief This function implements scheduling for RA Response.
4118 * Function: rgSCHCmnDlRaRsp
4119 * Purpose: Downlink scheduling for RA responses.
4121 * Invoked by: Scheduler
4123 * @param[in] RgSchCellCb* cell
4124 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* FDD variant: schedules RA Responses for the DL subframe at
 * (current time + RG_SCH_CMN_DL_DELTA). Computes the RAR window start
 * from the RAR window size, wait period and preamble occupancy, then
 * walks the window's raReqLst slots, allocating a response per non-empty
 * RA-RNTI (up to RG_SCH_CMN_MAX_CMN_PDCCH) via rgSCHCmnRaRspAlloc. */
4128 static Void rgSCHCmnDlRaRsp(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
4130 CmLteTimingInfo frm;
4131 CmLteTimingInfo winStartFrm;
4133 uint8_t winStartIdx;
4137 RgSchCmnCell *sched;
4138 uint8_t i,noRaRnti=0;
4140 frm = cell->crntTime;
4141 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4143 /* Compute the subframe for which allocation is being made */
4144 /* essentially, we need pointer to the dl frame for this subframe */
4145 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4146 sched = RG_SCH_CMN_GET_CELL(cell);
4148 /* ccpu00132523 - Window Start calculated by considering RAR window size,
4149 * RAR Wait period, Subframes occuppied for respective preamble format*/
4150 winGap = (sched->dl.numRaSubFrms-1) + (cell->rachCfg.raWinSize-1)
4151 +RGSCH_RARSP_WAIT_PERIOD;
4153 /* Window starting occassion is retrieved using the gap and tried to
4154 * fit to the size of raReqLst array*/
4155 RGSCHDECRFRMCRNTTIME(frm, winStartFrm, winGap);
4157 //5G_TODO TIMING update. Need to check
/* Slot index folded into a 2-frame (sfn parity) request array layout */
4158 winStartIdx = (winStartFrm.sfn & 1) * RGSCH_MAX_RA_RNTI+ winStartFrm.slot;
4160 for(i = 0; ((i < cell->rachCfg.raWinSize) && (noRaRnti < RG_SCH_CMN_MAX_CMN_PDCCH)); i++)
4162 raIdx = (winStartIdx + i) % RGSCH_RAREQ_ARRAY_SIZE;
4164 if (cell->raInfo.raReqLst[raIdx].first != NULLP)
/* A Backoff Indicator subheader is only budgeted for the first slot */
4166 allocInfo->raRspAlloc[noRaRnti].biEstmt = \
4167 (!i * RGSCH_ONE_BIHDR_SIZE);
4168 rarnti = raIdx % RGSCH_MAX_RA_RNTI+ 1;
4169 if (rgSCHCmnRaRspAlloc(cell, subFrm, raIdx,
4170 rarnti, noRaRnti, allocInfo) != ROK)
4172 /* The resources are exhausted */
4175 /* ccpu00132523- If all the RAP ids are not scheduled then need not
4176 * proceed for next RA RNTIs*/
4177 if(allocInfo->raRspAlloc[noRaRnti].numRapids < cell->raInfo.raReqLst[raIdx].count)
4181 noRaRnti++; /* Max of RG_SCH_CMN_MAX_CMN_PDCCH RARNTIs
4182 for response allocation */
4191 * @brief This function allocates the resources for an RARNTI.
4195 * Function: rgSCHCmnRaRspAlloc
4196 * Purpose: Allocate resources to a RARNTI.
4197 * 0. Allocate PDCCH for sending the response.
4198 * 1. Locate the number of RA requests pending for the RARNTI.
4199 * 2. Compute the size of data to be built.
4200 * 3. Using common channel CQI, compute the number of RBs.
4202 * Invoked by: Scheduler
4204 * @param[in] RgSchCellCb *cell,
4205 * @param[in] RgSchDlSf *subFrm,
4206 * @param[in] uint16_t rarnti,
4207 * @param[in] uint8_t noRaRnti
4208 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* Allocates DL resources for one RA-RNTI's RA Response:
 * checks subframe BW and the pending-request list, limits the number of
 * RAPIDs by the per-UL-subframe MSG3 budget (tracked across calls in the
 * function-static schdNumRapid), sizes the RAR TB (plus optional BI
 * subheader), converts it to RBs (with DwPTS compensation on special
 * subframes) and fills allocInfo->raRspAlloc[noRaRnti].
 * Returns non-ROK when the subframe cannot take the allocation. */
4212 static S16 rgSCHCmnRaRspAlloc(RgSchCellCb *cell,RgSchDlSf *subFrm,uint16_t raIndex,uint16_t rarnti,uint8_t noRaRnti,RgSchCmnDlRbAllocInfo *allocInfo)
4214 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4215 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4219 /*ccpu00116700,ccpu00116708- Corrected the wrong type for mcs*/
4222 /* RACH handling related changes */
4223 Bool isAlloc = FALSE;
/* NOTE(review): function-static counter carries scheduled-RAPID count across
 * RA-RNTIs of one scheduling subframe — not thread-safe; confirm single
 * scheduler-thread usage */
4224 static uint8_t schdNumRapid = 0;
4225 uint8_t remNumRapid = 0;
4230 uint8_t cfi = cellDl->currCfi;
4237 /* ccpu00132523: Resetting the schdRap Id count in every scheduling subframe*/
4244 if (subFrm->bw == subFrm->bwAssigned)
4246 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4247 "bw == bwAssigned RARNTI:%d",rarnti);
4251 reqLst = &cell->raInfo.raReqLst[raIndex];
4252 if (reqLst->count == 0)
4254 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4255 "reqLst Count=0 RARNTI:%d",rarnti);
4258 remNumRapid = reqLst->count;
4261 /* Limit number of rach rsps to maxMsg3PerUlsf */
4262 if ( schdNumRapid+remNumRapid > cellUl->maxMsg3PerUlSf )
4264 remNumRapid = cellUl->maxMsg3PerUlSf-schdNumRapid;
4270 /* Try allocating for as many RAPIDs as possible */
4271 /* BI sub-header size to the tbSize requirement */
4272 noBytes = RGSCH_GET_RAR_BYTES(remNumRapid) +\
4273 allocInfo->raRspAlloc[noRaRnti].biEstmt;
4274 if ((allwdTbSz = rgSCHUtlGetAllwdCchTbSz(noBytes*8, &nPrb, &mcs)) == -1)
4280 /* rgSCHCmnClcRbAllocForFxdTb(cell, allwdTbSz/8, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0 => derive RB count by scanning the TB-size table */
4281 if(cellDl->bitsPerRb==0)
4283 while ((rgTbSzTbl[0][0][rb]) <(uint32_t) allwdTbSz)
4291 rb = RGSCH_CEIL(allwdTbSz, cellDl->bitsPerRb);
4293 /* DwPTS Scheduling Changes Start */
4295 if (subFrm->sfType == RG_SCH_SPL_SF_DATA)
4297 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
4299 /* Calculate the less RE's because of DwPTS */
4300 lostRe = rb * (cellDl->noResPerRb[cfi] -
4301 cellDl->numReDwPts[cfi]);
4303 /* Increase number of RBs in Spl SF to compensate for lost REs */
4304 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
4307 /* DwPTS Scheduling Changes End */
4309 /*ccpu00115595- end*/
4310 if (rb > subFrm->bw - subFrm->bwAssigned)
4315 /* Allocation succeeded for 'remNumRapid' */
4318 printf("\n!!!RAR alloc noBytes:%u,allwdTbSz:%u,tbs:%u,rb:%u\n",
4319 noBytes,allwdTbSz,tbs,rb);
4324 RLOG_ARG0(L_INFO,DBG_CELLID,cell->cellId,"BW alloc Failed");
4328 subFrm->bwAssigned = subFrm->bwAssigned + rb;
4330 /* Fill AllocInfo structure */
4331 allocInfo->raRspAlloc[noRaRnti].rnti = rarnti;
4332 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].bytesReq = tbs;
4333 allocInfo->raRspAlloc[noRaRnti].rbsReq = rb;
4334 allocInfo->raRspAlloc[noRaRnti].dlSf = subFrm;
4335 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].imcs = mcs;
4336 allocInfo->raRspAlloc[noRaRnti].raIndex = raIndex;
4337 /* RACH changes for multiple RAPID handling */
4338 allocInfo->raRspAlloc[noRaRnti].numRapids = remNumRapid;
4339 allocInfo->raRspAlloc[noRaRnti].nPrb = nPrb;
4340 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].noLyr = 1;
4341 allocInfo->raRspAlloc[noRaRnti].vrbgReq = RGSCH_CEIL(nPrb,MAX_5GTF_VRBG_SIZE);
4342 schdNumRapid += remNumRapid;
4346 /***********************************************************
4348 * Func : rgSCHCmnUlAllocFillRbInfo
4350 * Desc : Fills the start RB and the number of RBs for
4351 * uplink allocation.
4359 **********************************************************/
4360 Void rgSCHCmnUlAllocFillRbInfo(RgSchCellCb *cell,RgSchUlSf *sf,RgSchUlAlloc *alloc)
4362 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4363 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4364 uint8_t cfi = cellDl->currCfi;
4367 alloc->grnt.rbStart = (alloc->sbStart * cellUl->sbSize) +
4368 cell->dynCfiCb.bwInfo[cfi].startRb;
4370 /* Num RBs = numSbAllocated * sbSize - less RBs in the last SB */
4371 alloc->grnt.numRb = (alloc->numSb * cellUl->sbSize);
4377 * @brief Grant request for Msg3.
4381 * Function : rgSCHCmnMsg3GrntReq
4383 * This is invoked by downlink scheduler to request allocation
4386 * - Attempt to allocate msg3 in the current msg3 subframe
4387 * Allocation attempt based on whether preamble is from group A
4388 * and the value of MESSAGE_SIZE_GROUP_A
4389 * - Link allocation with passed RNTI and msg3 HARQ process
4390 * - Set the HARQ process ID (*hqProcIdRef)
4392 * @param[in] RgSchCellCb *cell
4393 * @param[in] CmLteRnti rnti
4394 * @param[in] Bool preamGrpA
4395 * @param[in] RgSchUlHqProcCb *hqProc
4396 * @param[out] RgSchUlAlloc **ulAllocRef
4397 * @param[out] uint8_t *hqProcIdRef
/* Allocates an UL grant for MSG3 in the current MSG3 scheduling subframe:
 * selects subband count and MCS by preamble group (A or B), reinitializes
 * the subframe's hole database on the first allocation (to absorb CFI
 * changes), carves the grant out of the first hole, fills grant fields
 * (data size from the TB-size table, modulation order, DMRS/hop/delay
 * defaults), binds the MSG3 HARQ process and starts a new HARQ tx.
 * Outputs: *ulAllocRef (NULLP on failure) and *hqProcIdRef. */
4400 static Void rgSCHCmnMsg3GrntReq
4405 RgSchUlHqProcCb *hqProc,
4406 RgSchUlAlloc **ulAllocRef,
4407 uint8_t *hqProcIdRef
4410 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4411 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
4413 RgSchUlAlloc *alloc;
4418 *ulAllocRef = NULLP;
4420 /* Fix: ccpu00120610 Use remAllocs from subframe during msg3 allocation */
4421 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Preamble group selects the MSG3 sizing: group B uses the larger grant */
4425 if (preamGrpA == FALSE)
4427 numSb = cellUl->ra.prmblBNumSb;
4428 iMcs = cellUl->ra.prmblBIMcs;
4432 numSb = cellUl->ra.prmblANumSb;
4433 iMcs = cellUl->ra.prmblAIMcs;
4436 if ((hole = rgSCHUtlUlHoleFirst(sf)) != NULLP)
4438 if(*sf->allocCountRef == 0)
4440 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4441 /* Reinitialize the hole */
4442 if (sf->holeDb->count == 1 && (hole->start == 0)) /* Sanity check of holeDb */
4444 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4445 /* Re-Initialize available subbands because of CFI change*/
4446 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4450 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4451 "Error! holeDb sanity check failed RNTI:%d",rnti);
4454 if (numSb <= hole->num)
4457 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
4458 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
4459 alloc->grnt.iMcs = iMcs;
4460 alloc->grnt.iMcsCrnt = iMcs;
4461 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
4462 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
4463 /* To include the length and ModOrder in DataRecp Req.*/
4464 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
4465 RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
4466 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
4467 alloc->grnt.nDmrs = 0;
4468 alloc->grnt.hop = 0;
4469 alloc->grnt.delayBit = 0;
4470 alloc->grnt.isRtx = FALSE;
4471 *ulAllocRef = alloc;
4472 *hqProcIdRef = (cellUl->msg3SchdHqProcIdx);
4473 hqProc->procId = *hqProcIdRef;
4474 hqProc->ulSfIdx = (cellUl->msg3SchdIdx);
/* MSG3 grant is conveyed in the RAR, not on PDCCH */
4477 alloc->pdcch = FALSE;
4478 alloc->forMsg3 = TRUE;
4479 alloc->hqProc = hqProc;
4480 rgSCHUhmNewTx(hqProc, (uint8_t)(cell->rachCfg.maxMsg3Tx - 1), alloc);
4481 //RLOG_ARG4(L_DEBUG,DBG_CELLID,cell->cellId,
4483 "\nRNTI:%d MSG3 ALLOC proc(%lu)procId(%d)schdIdx(%d)\n",
4485 ((PTR)alloc->hqProc),
4486 alloc->hqProc->procId,
4487 alloc->hqProc->ulSfIdx);
4488 RLOG_ARG2(L_DEBUG,DBG_CELLID,cell->cellId,
4489 "alloc(%p)maxMsg3Tx(%d)",
4491 cell->rachCfg.maxMsg3Tx);
4500 * @brief This function determines the allocation limits and
4501 * parameters that aid in DL scheduling.
4505 * Function: rgSCHCmnDlSetUeAllocLmt
4506 * Purpose: This function determines the Maximum RBs
4507 * a UE is eligible to get based on softbuffer
4508 * limitation and cell->>>maxDlBwPerUe. The Codeword
4509 * specific parameters like iTbs, eff and noLyrs
4510 * are also set in this function. This function
4511 * is called while UE configuration and UeDlCqiInd.
4513 * Invoked by: Scheduler
4515 * @param[in] RgSchCellCb *cellCb
4516 * @param[in] RgSchCmnDlUe *ueDl
/* Derives a UE's DL allocation limits from its reported CQI:
 * looks up per-codeword iTbs and spectral efficiency for 1- and 2-layer
 * transmission (separate tables for eMTC UEs), splits layers across
 * codewords from the reported RI, caps maxRb by HARQ soft-buffer size and
 * cell maxDlBwPerUe (RBG-aligned when frequency-selective scheduling is
 * on), and records which codeword is more efficient (btrCwIdx).
 * Called on UE configuration and on DL CQI indication. */
4520 static Void rgSCHCmnDlSetUeAllocLmt(RgSchCellCb *cell,RgSchCmnDlUe *ueDl,Bool isEmtcUe)
4524 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4525 uint8_t cfi = cellSch->dl.currCfi;
4529 if(TRUE == isEmtcUe)
4531 /* ITbs for CW0 for 1 Layer Tx */
4532 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4533 [ueDl->mimoInfo.cwInfo[0].cqi];
4534 /* ITbs for CW0 for 2 Layer Tx */
4535 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4536 [ueDl->mimoInfo.cwInfo[0].cqi];
4537 /* Eff for CW0 for 1 Layer Tx */
4538 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4539 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4540 /* Eff for CW0 for 2 Layer Tx */
4541 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4542 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4544 /* ITbs for CW1 for 1 Layer Tx */
4545 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4546 [ueDl->mimoInfo.cwInfo[1].cqi];
4547 /* ITbs for CW1 for 2 Layer Tx */
4548 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4549 [ueDl->mimoInfo.cwInfo[1].cqi];
4550 /* Eff for CW1 for 1 Layer Tx */
4551 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4552 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4553 /* Eff for CW1 for 2 Layer Tx */
4554 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4555 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4560 /* ITbs for CW0 for 1 Layer Tx */
4561 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4562 [ueDl->mimoInfo.cwInfo[0].cqi];
4563 /* ITbs for CW0 for 2 Layer Tx */
4564 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4565 [ueDl->mimoInfo.cwInfo[0].cqi];
4566 /* Eff for CW0 for 1 Layer Tx */
4567 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4568 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4569 /* Eff for CW0 for 2 Layer Tx */
4570 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4571 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4573 /* ITbs for CW1 for 1 Layer Tx */
4574 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4575 [ueDl->mimoInfo.cwInfo[1].cqi];
4576 /* ITbs for CW1 for 2 Layer Tx */
4577 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4578 [ueDl->mimoInfo.cwInfo[1].cqi];
4579 /* Eff for CW1 for 1 Layer Tx */
4580 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4581 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4582 /* Eff for CW1 for 2 Layer Tx */
4583 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4584 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4588 // ueDl->laCb.cqiBasediTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0] * 100;
4590 /* Assigning noLyrs to each CW assuming optimal Spatial multiplexing
/* CW0 gets floor(ri/2) layers (min 1); CW1 gets the remainder */
4592 (ueDl->mimoInfo.ri/2 == 0)? (ueDl->mimoInfo.cwInfo[0].noLyr = 1) : \
4593 (ueDl->mimoInfo.cwInfo[0].noLyr = ueDl->mimoInfo.ri/2);
4594 ueDl->mimoInfo.cwInfo[1].noLyr = ueDl->mimoInfo.ri - ueDl->mimoInfo.cwInfo[0].noLyr;
4595 /* rg002.101:ccpu00102106: correcting DL harq softbuffer limitation logic.
4596 * The maxTbSz is the maximum number of PHY bits a harq process can
4597 * hold. Hence we limit our allocation per harq process based on this.
4598 * Earlier implementation we misinterpreted the maxTbSz to be per UE
4599 * per TTI, but in fact it is per Harq per TTI. */
4600 /* rg002.101:ccpu00102106: cannot exceed the harq Tb Size
4601 * and harq Soft Bits limit.*/
4603 /* Considering iTbs corresponding to 2 layer transmission for
4604 * codeword0(approximation) and the maxLayers supported by
4605 * this UE at this point of time. */
4606 RG_SCH_CMN_TBS_TO_MODODR(ueDl->mimoInfo.cwInfo[0].iTbs[1], modOrder);
4608 /* Bits/modOrder gives #REs, #REs/noResPerRb gives #RBs */
4609 /* rg001.301 -MOD- [ccpu00119213] : avoiding wraparound */
4610 maxRb = ((ueDl->maxSbSz)/(cellSch->dl.noResPerRb[cfi] * modOrder *\
4611 ueDl->mimoInfo.ri));
4612 if (cellSch->dl.isDlFreqSel)
4614 /* Rounding off to left nearest multiple of RBG size */
4615 maxRb -= maxRb % cell->rbgSize;
4617 ueDl->maxRb = RGSCH_MIN(maxRb, cellSch->dl.maxDlBwPerUe);
4618 if (cellSch->dl.isDlFreqSel)
4620 /* Rounding off to right nearest multiple of RBG size */
4621 if (ueDl->maxRb % cell->rbgSize)
4623 ueDl->maxRb += (cell->rbgSize -
4624 (ueDl->maxRb % cell->rbgSize));
4628 /* Set the index of the cwInfo, which is better in terms of
4629 * efficiency. If RI<2, only 1 CW, hence btrCwIdx shall be 0 */
4630 if (ueDl->mimoInfo.ri < 2)
4632 ueDl->mimoInfo.btrCwIdx = 0;
4636 if (ueDl->mimoInfo.cwInfo[0].eff[ueDl->mimoInfo.cwInfo[0].noLyr-1] <\
4637 ueDl->mimoInfo.cwInfo[1].eff[ueDl->mimoInfo.cwInfo[1].noLyr-1])
4639 ueDl->mimoInfo.btrCwIdx = 1;
4643 ueDl->mimoInfo.btrCwIdx = 0;
4653 * @brief This function updates TX Scheme.
4657 * Function: rgSCHCheckAndSetTxScheme
4658 * Purpose: This function determines the Maximum RBs
4659 * a UE is eligible to get based on softbuffer
4660 * limitation and cell->>>maxDlBwPerUe. The Codeword
4661 * specific parameters like iTbs, eff and noLyrs
4662 * are also set in this function. This function
4663 * is called while UE configuration and UeDlCqiInd.
4665 * Invoked by: Scheduler
4667 * @param[in] RgSchCellCb *cell
4668 * @param[in] RgSchUeCb *ue
/* Forces (or releases) transmit diversity for a UE based on link
 * adaptation: if the iTbs currently in use lags the CQI-based iTbs by more
 * than RG_SCH_TXSCHEME_CHNG_THRSHD (while below the change factor), force
 * TD; once the in-use iTbs reaches the max-CQI iTbs, unset the force. */
4672 static Void rgSCHCheckAndSetTxScheme(RgSchCellCb *cell,RgSchUeCb *ue)
4674 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4675 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue ,cell);
4676 uint8_t cfi = cellSch->dl.currCfi;
4678 uint8_t cqiBasediTbs;
/* iTbs corresponding to the highest CQI for the current CFI */
4682 maxiTbs = (*(RgSchCmnCqiToTbs*)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4683 [RG_SCH_CMN_MAX_CQI - 1];
/* cqiBasediTbs is stored scaled by 100 in laCb */
4684 cqiBasediTbs = (ueDl->laCb[0].cqiBasediTbs)/100;
4685 actualiTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0];
4687 if((actualiTbs < RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR) && (cqiBasediTbs >
4688 actualiTbs) && ((cqiBasediTbs - actualiTbs) > RG_SCH_TXSCHEME_CHNG_THRSHD))
4690 RG_SCH_CMN_SET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
4693 if(actualiTbs >= maxiTbs)
4695 RG_SCH_CMN_UNSET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
4702 * @brief This function determines the allocation limits and
4703 * parameters that aid in DL scheduling.
4707 * Function: rgSCHCmnDlSetUeAllocLmtLa
4708 * Purpose: This function determines the Maximum RBs
4709 * a UE is eligible to get based on softbuffer
4710 * limitation and cell->>>maxDlBwPerUe. The Codeword
4711 * specific parameters like iTbs, eff and noLyrs
4712 * are also set in this function. This function
4713 * is called while UE configuration and UeDlCqiInd.
4715 * Invoked by: Scheduler
4717 * @param[in] RgSchCellCb *cell
4718 * @param[in] RgSchUeCb *ue
/* Link-adaptation update of a UE's DL iTbs per codeword:
 * on each CQI report (ueDl->cqiFlag) blends the reported iTbs into a
 * 20/80 exponential average (cqiBasediTbs, scaled by 100), ignoring
 * outlier reports that differ from the in-use iTbs by more than 5 and
 * resetting the filter after 10 consecutive ignored reports, then clamps
 * the resulting iTbs to the cell's max and writes it back to the UE. */
4722 Void rgSCHCmnDlSetUeAllocLmtLa(RgSchCellCb *cell,RgSchUeCb *ue)
4726 uint8_t reportediTbs;
4727 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4728 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
4729 uint8_t cfi = cellSch->dl.currCfi;
4734 maxiTbs = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[RG_SCH_CMN_MAX_CQI - 1];
4735 if(ueDl->cqiFlag == TRUE)
4737 for(cwIdx=0; cwIdx < RG_SCH_CMN_MAX_CW_PER_UE; cwIdx++)
4741 /* Calculating the reported iTbs for code word 0 */
4742 reportediTbs = ue->ue5gtfCb.mcs;
4744 iTbsNew = (S32) reportediTbs;
4746 if(!ueDl->laCb[cwIdx].notFirstCqi)
4748 /* This is the first CQI report from UE */
4749 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
4750 ueDl->laCb[cwIdx].notFirstCqi = TRUE;
4752 else if ((RG_ITBS_DIFF(reportediTbs, ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0])) > 5)
4754 /* Ignore this iTBS report and mark that last iTBS report was */
4755 /* ignored so that subsequently we reset the LA algorithm */
4756 ueDl->laCb[cwIdx].lastiTbsIgnored = TRUE;
4757 ueDl->laCb[cwIdx].numLastiTbsIgnored++;
4758 if( ueDl->laCb[cwIdx].numLastiTbsIgnored > 10)
4760 /* CQI reported by UE is not catching up. Reset the LA algorithm */
4761 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
4762 ueDl->laCb[cwIdx].deltaiTbs = 0;
4763 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
4764 ueDl->laCb[cwIdx].numLastiTbsIgnored = 0;
4769 if (ueDl->laCb[cwIdx].lastiTbsIgnored != TRUE)
/* 20/80 exponential smoothing of the CQI-based iTbs (x100 scale) */
4771 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
4772 (80 * ueDl->laCb[cwIdx].cqiBasediTbs))/100;
4776 /* Reset the LA as iTbs in use caught up with the value */
4777 /* reported by UE. */
4778 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
4779 (80 * ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] * 100))/100;
4780 ueDl->laCb[cwIdx].deltaiTbs = 0;
4781 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
4785 iTbsNew = (ueDl->laCb[cwIdx].cqiBasediTbs + ueDl->laCb[cwIdx].deltaiTbs)/100;
4787 RG_SCH_CHK_ITBS_RANGE(iTbsNew, maxiTbs);
4789 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] = RGSCH_MIN(iTbsNew, cell->thresholds.maxDlItbs);
4790 //ueDl->mimoInfo.cwInfo[cwIdx].iTbs[1] = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
4792 ue->ue5gtfCb.mcs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
4794 printf("reportediTbs[%d] cqiBasediTbs[%d] deltaiTbs[%d] iTbsNew[%d] mcs[%d] cwIdx[%d]\n",
4795 reportediTbs, ueDl->laCb[cwIdx].cqiBasediTbs, ueDl->laCb[cwIdx].deltaiTbs,
4796 iTbsNew, ue->ue5gtfCb.mcs, cwIdx);
/* Non-spatial-multiplexing transmission modes track a single codeword */
4800 if((ue->mimoInfo.txMode != RGR_UE_TM_3) && (ue->mimoInfo.txMode != RGR_UE_TM_4))
4805 ueDl->cqiFlag = FALSE;
4812 /***********************************************************
4814 * Func : rgSCHCmnDlHqPResetTemp
4816 * Desc : Reset whatever variables were temporarily used
4817 * during DL HARQ process scheduling.
4825 **********************************************************/
4826 Void rgSCHCmnDlHqPResetTemp(RgSchDlHqProcCb *hqP)
4829 /* Fix: syed having a hqP added to Lists for RB assignment rather than
4830 * a UE, as adding UE was limiting handling some scenarios */
4831 hqP->reqLnk.node = (PTR)NULLP;
4832 hqP->schdLstLnk.node = (PTR)NULLP;
4835 } /* rgSCHCmnDlHqPResetTemp */
4837 /***********************************************************
4839 * Func : rgSCHCmnDlUeResetTemp
4841 * Desc : Reset whatever variables were temporarily used
4842 * during UE scheduling.
4850 **********************************************************/
/* Resets a UE's temporary DL scheduling state after a TTI: clears the
 * scheduled-proc pointer and zeroes the UE's alloc CB (preserving laaCb),
 * but only when hqP is the proc that was actually scheduled (or none was),
 * so a non-scheduled HARQ proc cannot wipe another proc's state. Always
 * clears the outstanding-allocation flag and the hqP's list links. */
4851 Void rgSCHCmnDlUeResetTemp(RgSchUeCb *ue,RgSchDlHqProcCb *hqP)
4853 RgSchDlRbAlloc *allocInfo;
4854 RgSchCmnDlUe *cmnUe = RG_SCH_CMN_GET_DL_UE(ue,hqP->hqE->cell);
4860 /* Fix : syed check for UE's existence was useless.
4861 * Instead we need to check that reset is done only for the
4862 * information of a scheduled harq proc, which is cmnUe->proc.
4863 * Reset should not be done for non-scheduled hqP */
4864 if((cmnUe->proc == hqP) || (cmnUe->proc == NULLP))
4866 cmnUe->proc = NULLP;
4867 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, hqP->hqE->cell);
/* laaCb must survive the memset below; stash and restore it */
4869 tmpCb = allocInfo->laaCb;
4871 memset(allocInfo, 0, sizeof(RgSchDlRbAlloc));
4872 allocInfo->rnti = ue->ueId;
4874 allocInfo->laaCb = tmpCb;
4876 /* Fix: syed moving this to a common function for both scheduled
4877 * and non-scheduled UEs */
4878 cmnUe->outStndAlloc = 0;
4880 rgSCHCmnDlHqPResetTemp(hqP);
4883 } /* rgSCHCmnDlUeResetTemp */
4885 /***********************************************************
4887 * Func : rgSCHCmnUlUeResetTemp
4889 * Desc : Reset whatever variables were temporarily used
4890 * during UE scheduling.
4898 **********************************************************/
4899 Void rgSCHCmnUlUeResetTemp(RgSchCellCb *cell,RgSchUeCb *ue)
4901 RgSchCmnUlUe *cmnUlUe = RG_SCH_CMN_GET_UL_UE(ue,cell);
4903 memset(&cmnUlUe->alloc, 0, sizeof(cmnUlUe->alloc));
4906 } /* rgSCHCmnUlUeResetTemp */
4911 * @brief This function fills the PDCCH information from dlProc.
4915 * Function: rgSCHCmnFillPdcch
4916 * Purpose: This function fills in the PDCCH information
4917 * obtained from the RgSchDlRbAlloc
4918 * during common channel scheduling(P, SI, RA - RNTI's).
4920 * Invoked by: Downlink Scheduler
4922 * @param[out] RgSchPdcch* pdcch
4923 * @param[in] RgSchDlRbAlloc* rbAllocInfo
4927 Void rgSCHCmnFillPdcch(RgSchCellCb *cell,RgSchPdcch *pdcch,RgSchDlRbAlloc *rbAllocInfo)
4930 /* common channel pdcch filling,
4931 * only 1A and Local is supported */
4932 pdcch->rnti = rbAllocInfo->rnti;
4933 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
4934 switch(rbAllocInfo->dciFormat)
4936 #ifdef RG_5GTF /* ANOOP: ToDo: DCI format B1/B2 filling */
4937 case TFU_DCI_FORMAT_B1:
/* 5GTF format B1: most fields are hard-coded defaults for common
 * channels; hqProcId/ndi are 0 since no HARQ retx applies here. */
4940 pdcch->dci.u.formatB1Info.formatType = 0;
4941 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].cmnGrnt.xPDSCHRange;
4942 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].cmnGrnt.rbAssign;
4943 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = 0;
4944 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
4945 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = 0;
4946 //pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].ndi;
4947 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].cmnGrnt.rv;
4948 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
4949 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
4950 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
4951 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
4952 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
4953 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
4954 //TODO_SID: Need to update
4955 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
4956 pdcch->dci.u.formatB1Info.beamSwitch = 0;
4957 pdcch->dci.u.formatB1Info.SRS_Config = 0;
4958 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
4959 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
4960 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
4961 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].cmnGrnt.SCID;
4962 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
4963 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
4964 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
4966 break; /* case TFU_DCI_FORMAT_B1: */
4969 case TFU_DCI_FORMAT_B2:
4971 //printf(" RG_5GTF:: Pdcch filling with DCI format B2\n");
4973 break; /* case TFU_DCI_FORMAT_B2: */
4976 case TFU_DCI_FORMAT_1A:
/* Format 1A: compact RIV-based (RA type 2, localized) allocation,
 * the normal case for common channels. */
4977 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
4979 /*Nprb indication at PHY for common Ch
4980 *setting least significant bit of tpc field to 1 if
4981 nPrb=3 and 0 otherwise. */
4982 if (rbAllocInfo->nPrb == 3)
4984 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 1;
4988 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 0;
4990 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
4991 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
4992 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
4993 rbAllocInfo->tbInfo[0].imcs;
/* ndi/rv fixed at 0: common-channel transmissions are always fresh. */
4994 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = 0;
4995 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = 0;
4997 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
4999 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5000 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5001 rbAllocInfo->allocInfo.raType2.rbStart,
5002 rbAllocInfo->allocInfo.raType2.numRb);
5005 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = \
5008 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5009 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5012 break; /* case TFU_DCI_FORMAT_1A: */
5013 case TFU_DCI_FORMAT_1:
5014 pdcch->dci.u.format1Info.tpcCmd = 0;
5015 /* Avoiding this check,as we dont support Type1 RA */
5017 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* RA type 0: split the 32-bit RBG bitmask into 4 bytes, MSB first. */
5020 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5021 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5022 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5024 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5025 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5027 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5028 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5030 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5031 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
5035 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5036 pdcch->dci.u.format1Info.allocInfo.ndi = 0;
5037 pdcch->dci.u.format1Info.allocInfo.mcs = rbAllocInfo->tbInfo[0].imcs;
5038 pdcch->dci.u.format1Info.allocInfo.rv = 0;
5040 pdcch->dci.u.format1Info.dai = 1;
5044 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Allocator's icorrect "
5045 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
5053 * @brief This function finds whether the subframe is special subframe or not.
5057 * Function: rgSCHCmnIsSplSubfrm
5058 * Purpose: This function finds the subframe index of the special subframe
5059 * and finds whether the current DL index matches it or not.
5061 * Invoked by: Scheduler
5063 * @param[in] uint8_t splfrmCnt
5064 * @param[in] uint8_t curSubfrmIdx
5065 * @param[in] uint8_t periodicity
5066 * @param[in] RgSchTddSubfrmInfo *subfrmInfo
5070 static Bool rgSCHCmnIsSplSubfrm(uint8_t splfrmCnt,uint8_t curSubfrmIdx,uint8_t periodicity,RgSchTddSubfrmInfo *subfrmInfo)
5072 uint8_t dlSfCnt = 0;
5073 uint8_t splfrmIdx = 0;
/* For 5ms switch-point periodicity there are two special subframes per
 * radio frame, so the DL subframe count alternates between the two
 * half-frame counts depending on whether splfrmCnt is odd or even. */
5077 if(periodicity == RG_SCH_CMN_5_MS_PRD)
5081 dlSfCnt = ((splfrmCnt-1)/2) *\
5082 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5083 dlSfCnt = dlSfCnt + subfrmInfo->numFrmHf1;
5087 dlSfCnt = (splfrmCnt/2) * \
5088 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
/* NOTE(review): this arm appears to handle the 10ms periodicity case
 * (one special subframe per frame) -- confirm against missing lines. */
5093 dlSfCnt = splfrmCnt * subfrmInfo->numFrmHf1;
5095 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1 +\
5096 (periodicity*splfrmCnt - dlSfCnt);
5100 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1;
/* Special iff the computed special-subframe index matches the current
 * DL subframe index. */
5103 if(splfrmIdx == curSubfrmIdx)
5112 * @brief This function updates DAI or UL index.
5116 * Function: rgSCHCmnUpdHqAndDai
5117 * Purpose: Updates the DAI based on UL-DL Configuration
5118 * index and UE. It also updates the HARQ feedback
5119 * time and 'm' index.
5123 * @param[in] RgDlHqProcCb *hqP
5124 * @param[in] RgSchDlSf *subFrm
5125 * @param[in] RgSchDlHqTbCb *tbCb
5126 * @param[in] uint8_t tbAllocIdx
5130 static Void rgSCHCmnUpdHqAndDai(RgSchDlHqProcCb *hqP,RgSchDlSf *subFrm,RgSchDlHqTbCb *tbCb,uint8_t tbAllocIdx)
5132 RgSchUeCb *ue = hqP->hqE->ue;
/* NOTE(review): two feedback-time branches below; the first presumably
 * runs when subFrm is supplied, the second falls back to hqP->subFrm --
 * the selecting condition is outside this excerpt, confirm. */
5137 /* set the time at which UE shall send the feedback
5138 * for this process */
5139 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5140 subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5141 tbCb->fdbkTime.subframe = subFrm->dlFdbkInfo.subframe;
5142 tbCb->m = subFrm->dlFdbkInfo.m;
5146 /* set the time at which UE shall send the feedback
5147 * for this process */
5148 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5149 hqP->subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5150 tbCb->fdbkTime.subframe = hqP->subFrm->dlFdbkInfo.subframe;
5151 tbCb->m = hqP->subFrm->dlFdbkInfo.m;
5154 /* ccpu00132340-MOD- DAI need to be updated for first TB only*/
5155 if(ue && !tbAllocIdx)
5157 Bool havePdcch = (tbCb->hqP->pdcch ? TRUE : FALSE);
5160 dlDai = rgSCHCmnUpdDai(ue, &tbCb->fdbkTime, tbCb->m, havePdcch,tbCb->hqP,
5163 {/* Non SPS occasions */
/* Propagate the freshly computed DL DAI into the PDCCH and the HARQ
 * proc; ulDai mirrors the TB's DAI for N1 resource selection. */
5164 tbCb->hqP->pdcch->dlDai = dlDai;
5165 /* hqP->ulDai is used for N1 resource filling
5166 * when SPS occaions present in a bundle */
5167 tbCb->hqP->ulDai = tbCb->dai;
5168 tbCb->hqP->dlDai = dlDai;
5172 /* Updating pucchFdbkIdx for both PUCCH or PUSCH
5174 tbCb->pucchFdbkIdx = tbCb->hqP->ulDai;
5181 * @brief This function updates DAI or UL index.
5185 * Function: rgSCHCmnUpdDai
5186 * Purpose: Updates the DAI in the ack-nack info, a valid
5187 * ue should be passed
5191 * @param[in] RgDlHqProcCb *hqP
5192 * @param[in] RgSchDlSf *subFrm
5193 * @param[in] RgSchDlHqTbCb *tbCb
5194 * @return uint8_t dlDai
5197 uint8_t rgSCHCmnUpdDai
5200 CmLteTimingInfo *fdbkTime,
5203 RgSchDlHqProcCb *hqP,
5207 RgSchTddANInfo *anInfo;
5208 uint8_t servCellIdx;
5209 uint8_t ackNackFdbkArrSize;
/* Resolve the serving-cell index and A/N feedback array size; the
 * SPS-on-PCell path below forces the primary-cell index instead. */
5214 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5215 hqP->hqE->cell->cellId,
5218 servCellIdx = RGSCH_PCELL_INDEX;
5220 ackNackFdbkArrSize = hqP->hqE->cell->ackNackFdbkArrSize;
5222 {/* SPS on primary cell */
5223 servCellIdx = RGSCH_PCELL_INDEX;
5224 ackNackFdbkArrSize = ue->cell->ackNackFdbkArrSize;
5228 anInfo = rgSCHUtlGetUeANFdbkInfo(ue, fdbkTime,servCellIdx);
5230 /* If no ACK/NACK feedback already present, create a new one */
5233 anInfo = &ue->cellInfo[servCellIdx]->anInfo[ue->cellInfo[servCellIdx]->nextFreeANIdx];
5234 anInfo->sfn = fdbkTime->sfn;
5235 anInfo->subframe = fdbkTime->subframe;
5236 anInfo->latestMIdx = m;
5237 /* Fixing DAI value - ccpu00109162 */
5238 /* Handle TDD case as in MIMO definition of the function */
5244 anInfo->isSpsOccasion = FALSE;
5245 /* set the free Index to store Ack/Nack Information*/
5246 ue->cellInfo[servCellIdx]->nextFreeANIdx = (ue->cellInfo[servCellIdx]->nextFreeANIdx + 1) %
/* Existing entry: refresh the 'm' index and bump the running DAIs. */
5252 anInfo->latestMIdx = m;
5253 /* Fixing DAI value - ccpu00109162 */
5254 /* Handle TDD case as in MIMO definition of the function */
5255 anInfo->ulDai = anInfo->ulDai + 1;
5258 anInfo->dlDai = anInfo->dlDai + 1;
5262 /* ignoring the Scell check,
5263 * for primary cell this field is unused*/
5266 anInfo->n1ResTpcIdx = hqP->tpc;
5270 {/* As this not required for release pdcch */
5271 *ulDai = anInfo->ulDai;
5274 return (anInfo->dlDai);
5277 #endif /* ifdef LTE_TDD */
5279 uint32_t rgHqRvRetxCnt[4][2]; /* Retransmission counters indexed by [RV][TB index] (debug stats) */
5280 uint32_t rgUlrate_grant; /* NOTE(review): appears to be a UL grant-rate debug counter; not used in this excerpt -- confirm */
5283 * @brief This function fills the HqP TB with rbAllocInfo.
5287 * Function: rgSCHCmnFillHqPTb
5288 * Purpose: This function fills in the HqP TB with rbAllocInfo.
5290 * Invoked by: rgSCHCmnFillHqPTb
5292 * @param[in] RgSchCellCb* cell
5293 * @param[in] RgSchDlRbAlloc *rbAllocInfo,
5294 * @param[in] uint8_t tbAllocIdx
5295 * @param[in] RgSchPdcch *pdcch
5300 Void rgSCHCmnFillHqPTb
5303 RgSchDlRbAlloc *rbAllocInfo,
/* Non-SPS build uses a static variant of the same function. */
5308 static Void rgSCHCmnFillHqPTb
5311 RgSchDlRbAlloc *rbAllocInfo,
5315 #endif /* LTEMAC_SPS */
5317 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
5318 RgSchDlTbAllocInfo *tbAllocInfo = &rbAllocInfo->tbInfo[tbAllocIdx];
5319 RgSchDlHqTbCb *tbInfo = tbAllocInfo->tbCb;
5320 RgSchDlHqProcCb *hqP = tbInfo->hqP;
5323 /*ccpu00120365-ADD-if tb is disabled, set mcs=0,rv=1.
5324 * Relevant for DCI format 2 & 2A as per 36.213-7.1.7.2
5326 if ( tbAllocInfo->isDisabled)
5329 tbInfo->dlGrnt.iMcs = 0;
5330 tbInfo->dlGrnt.rv = 1;
5332 /* Fill for TB retransmission */
5333 else if (tbInfo->txCntr > 0)
5336 tbInfo->timingInfo = cmnCellDl->time;
/* On DTX, retransmit with the original MCS; otherwise advance the RV
 * through the standard RV cycling table. */
5338 if ((tbInfo->isAckNackDtx == TFU_HQFDB_DTX))
5340 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5341 rgHqRvRetxCnt[tbInfo->dlGrnt.rv][tbInfo->tbIdx]++;
5345 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[++(tbInfo->ccchSchdInfo.rvIdx) & 0x03];
5348 /* fill the scheduler information of hqProc */
5349 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5350 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx,hqP->tbInfo,tbInfo->tbIdx );
5351 rgSCHDhmHqTbRetx(hqP->hqE, tbInfo->timingInfo, hqP, tbInfo->tbIdx);
5353 /* Fill for TB transmission */
5356 /* Fill the HqProc */
5357 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5358 tbInfo->tbSz = tbAllocInfo->bytesAlloc;
5359 tbInfo->timingInfo = cmnCellDl->time;
5361 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[0];
5362 /* fill the scheduler information of hqProc */
5363 tbInfo->ccchSchdInfo.rvIdx = 0;
5364 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5365 /* DwPts Scheduling Changes Start */
5366 /* DwPts Scheduling Changes End */
5367 cell->measurements.dlBytesCnt += tbAllocInfo->bytesAlloc;
5370 /*ccpu00120365:-ADD-only add to subFrm list if tb is not disabled */
5371 if ( tbAllocInfo->isDisabled == FALSE )
5373 /* Set the number of transmitting SM layers for this TB */
5374 tbInfo->numLyrs = tbAllocInfo->noLyr;
5375 /* Set the TB state as WAITING to indicate TB has been
5376 * considered for transmission */
5377 tbInfo->state = HQ_TB_WAITING;
5378 hqP->subFrm = rbAllocInfo->dlSf;
5379 tbInfo->hqP->pdcch = pdcch;
5380 //tbInfo->dlGrnt.numRb = rbAllocInfo->rbsAlloc;
5381 rgSCHUtlDlHqPTbAddToTx(hqP->subFrm, hqP, tbInfo->tbIdx);
5387 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
5391 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
5392 * Purpose: This function fills in the PDCCH information
5393 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5394 * for dedicated service scheduling. It also
5395 * obtains TPC to be filled in from the power module.
5396 * Assign the PDCCH to HQProc.
5398 * Invoked by: Downlink Scheduler
5400 * @param[in] RgSchCellCb* cell
5401 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5402 * @param[in] RgDlHqProc* hqP
5403 * @param[out] RgSchPdcch *pdcch
5404 * @param[in] uint8_t tpc
5408 static Void rgSCHCmnFillHqPPdcchDciFrmtB1B2
5411 RgSchDlRbAlloc *rbAllocInfo,
5412 RgSchDlHqProcCb *hqP,
/* Bind TB 0 to this PDCCH before filling DCI content. */
5419 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5420 //Currently hardcoding values here.
5421 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
5422 switch(rbAllocInfo->dciFormat)
5424 case TFU_DCI_FORMAT_B1:
/* 5GTF format B1 (single TB): HARQ proc id, NDI and RV come from the
 * scheduled TB's control block; remaining fields are fixed defaults. */
5426 pdcch->dci.u.formatB1Info.formatType = 0;
5427 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5428 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5429 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5430 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5431 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5432 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5433 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5434 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5435 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5436 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5437 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5438 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5439 //TODO_SID: Need to update
5440 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5441 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5442 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5443 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5444 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5445 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5446 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5447 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5448 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5449 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5452 case TFU_DCI_FORMAT_B2:
/* Format B2 mirrors B1 but signals 2-layer transmission
 * (AntPorts_numLayers = 4). */
5454 pdcch->dci.u.formatB2Info.formatType = 1;
5455 pdcch->dci.u.formatB2Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5456 pdcch->dci.u.formatB2Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5457 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5458 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5459 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5460 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5461 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5462 pdcch->dci.u.formatB2Info.CSI_BSI_BRI_Req = 0;
5463 pdcch->dci.u.formatB2Info.CSIRS_BRRS_TxTiming = 0;
5464 pdcch->dci.u.formatB2Info.CSIRS_BRRS_SymbIdx = 0;
5465 pdcch->dci.u.formatB2Info.CSIRS_BRRS_ProcInd = 0;
5466 pdcch->dci.u.formatB2Info.xPUCCH_TxTiming = 0;
5467 //TODO_SID: Need to update
5468 pdcch->dci.u.formatB2Info.freqResIdx_xPUCCH = 0;
5469 pdcch->dci.u.formatB2Info.beamSwitch = 0;
5470 pdcch->dci.u.formatB2Info.SRS_Config = 0;
5471 pdcch->dci.u.formatB2Info.SRS_Symbol = 0;
5472 //TODO_SID: Need to check.Currently setting 4(2 layer, ports(8,9) w/o OCC).
5473 pdcch->dci.u.formatB2Info.AntPorts_numLayers = 4;
5474 pdcch->dci.u.formatB2Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5475 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5476 pdcch->dci.u.formatB2Info.tpcCmd = 1; //tpc;
5477 pdcch->dci.u.formatB2Info.DL_PCRS = 0;
5481 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId," 5GTF_ERROR Allocator's icorrect "
5482 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
/* Debug throughput accumulators (bits): combined PCell+SCell total and
 * per-codeword SCell contributions, updated in rgSCHCmnFillHqPPdcch. */
5489 uint32_t totPcellSCell;
5490 uint32_t addedForScell;
5491 uint32_t addedForScell1;
5492 uint32_t addedForScell2;
5494 * @brief This function fills the PDCCH information from dlProc.
5498 * Function: rgSCHCmnFillHqPPdcch
5499 * Purpose: This function fills in the PDCCH information
5500 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5501 * for dedicated service scheduling. It also
5502 * obtains TPC to be filled in from the power module.
5503 * Assign the PDCCH to HQProc.
5505 * Invoked by: Downlink Scheduler
5507 * @param[in] RgSchCellCb* cell
5508 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5509 * @param[in] RgDlHqProc* hqP
5513 Void rgSCHCmnFillHqPPdcch(RgSchCellCb *cell,RgSchDlRbAlloc *rbAllocInfo,RgSchDlHqProcCb *hqP)
5515 RgSchCmnDlCell *cmnCell = RG_SCH_CMN_GET_DL_CELL(cell);
5516 RgSchPdcch *pdcch = rbAllocInfo->pdcch;
5523 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
/* TPC for the PUCCH carrying the HARQ feedback, from the power module. */
5530 tpc = rgSCHPwrPucchTpcForUe(cell, hqP->hqE->ue);
5532 /* Fix: syed moving this to a common function for both scheduled
5533 * and non-scheduled UEs */
5535 pdcch->ue = hqP->hqE->ue;
/* PRB usage accounting: non-CSG members are tracked separately. */
5536 if (hqP->hqE->ue->csgMmbrSta == FALSE)
5538 cmnCell->ncsgPrbCnt += rbAllocInfo->rbsAlloc;
5540 cmnCell->totPrbCnt += rbAllocInfo->rbsAlloc;
/* Per-UE and per-cell scheduler statistics for codeword 0. */
5543 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlPrbUsg +=
5544 rbAllocInfo->rbsAlloc;
5545 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw0iTbs +=
5546 rbAllocInfo->tbInfo[0].iTbs;
5547 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw0iTbs ++;
5548 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
5549 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5552 totPcellSCell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5553 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5555 addedForScell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5556 addedForScell1 += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
5558 printf (" Hqp %d cell %d addedForScell %lu addedForScell1 %lu sfn:sf %d:%d \n",
5560 hqP->hqE->cell->cellId,
5564 cell->crntTime.slot);
5568 hqP->hqE->cell->tenbStats->sch.dlPrbUsage[0] +=
5569 rbAllocInfo->rbsAlloc;
5570 hqP->hqE->cell->tenbStats->sch.dlSumCw0iTbs +=
5571 rbAllocInfo->tbInfo[0].iTbs;
5572 hqP->hqE->cell->tenbStats->sch.dlNumCw0iTbs ++;
5573 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
5574 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
/* Second codeword statistics, only when TB 1 was actually scheduled. */
5575 if (rbAllocInfo->tbInfo[1].schdlngForTb)
5577 hqP->hqE->cell->tenbStats->sch.dlSumCw1iTbs +=
5578 rbAllocInfo->tbInfo[1].iTbs;
5579 hqP->hqE->cell->tenbStats->sch.dlNumCw1iTbs ++;
5580 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw1iTbs +=
5581 rbAllocInfo->tbInfo[1].iTbs;
5582 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw1iTbs ++;
5583 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
5584 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5588 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
5590 addedForScell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5591 addedForScell2 += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5593 printf (" Hqp %d cell %d addedForScell %lu addedForScell2 %lu \n",
5595 hqP->hqE->cell->cellId,
5600 totPcellSCell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5604 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
5605 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
5608 printf ("add DL TPT is %lu sfn:sf %d:%d \n", hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt ,
5610 cell->crntTime.slot);
5616 pdcch->rnti = rbAllocInfo->rnti;
5617 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
5618 /* Update subframe and pdcch info in HqTb control block */
/* Dispatch to the format-specific filler based on the allocator's
 * chosen DCI format. */
5619 switch(rbAllocInfo->dciFormat)
5622 case TFU_DCI_FORMAT_B1:
5623 case TFU_DCI_FORMAT_B2:
5625 // printf(" RG_5GTF:: Pdcch filling with DCI format B1/B2\n");
5626 rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, \
5632 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
5633 "Allocator's incorrect dciForamt Fill for RNTI:%d",rbAllocInfo->rnti);
5640 * @brief This function fills the PDCCH DCI format 1 information from dlProc.
5644 * Function: rgSCHCmnFillHqPPdcchDciFrmt1
5645 * Purpose: This function fills in the PDCCH information
5646 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5647 * for dedicated service scheduling. It also
5648 * obtains TPC to be filled in from the power module.
5649 * Assign the PDCCH to HQProc.
5651 * Invoked by: Downlink Scheduler
5653 * @param[in] RgSchCellCb* cell
5654 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5655 * @param[in] RgDlHqProc* hqP
5656 * @param[out] RgSchPdcch *pdcch
5657 * @param[in] uint8_t tpc
5662 static Void rgSCHCmnFillHqPPdcchDciFrmt1
5665 RgSchDlRbAlloc *rbAllocInfo,
5666 RgSchDlHqProcCb *hqP,
5673 RgSchTddANInfo *anInfo;
5677 /* For activation or reactivation,
5678 * Harq ProcId should be 0 */
5679 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Bind TB 0 to this PDCCH before populating DCI fields. */
5683 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5684 pdcch->dci.u.format1Info.tpcCmd = tpc;
5685 /* Avoiding this check,as we dont support Type1 RA */
5687 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* RA type 0: split the 32-bit RBG bitmask into 4 bytes, MSB first. */
5690 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5691 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5692 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5694 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5695 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5697 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5698 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5700 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5701 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a first transmission signals HARQ
 * proc id 0 (ccpu00119023); otherwise use the real proc id. */
5706 if ((!(hqP->tbInfo[0].txCntr)) &&
5707 (cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5708 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5709 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV)))
5712 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5716 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
5719 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
5722 pdcch->dci.u.format1Info.allocInfo.ndi =
5723 rbAllocInfo->tbInfo[0].tbCb->ndi;
5724 pdcch->dci.u.format1Info.allocInfo.mcs =
5725 rbAllocInfo->tbInfo[0].imcs;
5726 pdcch->dci.u.format1Info.allocInfo.rv =
5727 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
/* TDD: derive the DAI from the UE's stored A/N feedback entry for the
 * TB's feedback time; fall back to the max DAI if no entry exists. */
5729 if(hqP->hqE->ue != NULLP)
5732 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5733 hqP->hqE->cell->cellId,
5736 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5737 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5739 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5740 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5745 pdcch->dci.u.format1Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5749 /* Fixing DAI value - ccpu00109162 */
5750 pdcch->dci.u.format1Info.dai = RG_SCH_MAX_DAI_IDX;
5756 /* always 0 for RACH */
5757 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5759 /* Fixing DAI value - ccpu00109162 */
5760 pdcch->dci.u.format1Info.dai = 1;
5769 * @brief This function fills the PDCCH DCI format 1A information from dlProc.
5773 * Function: rgSCHCmnFillHqPPdcchDciFrmt1A
5774 * Purpose: This function fills in the PDCCH information
5775 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5776 * for dedicated service scheduling. It also
5777 * obtains TPC to be filled in from the power module.
5778 * Assign the PDCCH to HQProc.
5780 * Invoked by: Downlink Scheduler
5782 * @param[in] RgSchCellCb* cell
5783 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5784 * @param[in] RgDlHqProc* hqP
5785 * @param[out] RgSchPdcch *pdcch
5786 * @param[in] uint8_t tpc
5790 static Void rgSCHCmnFillHqPPdcchDciFrmt1A
5793 RgSchDlRbAlloc *rbAllocInfo,
5794 RgSchDlHqProcCb *hqP,
5801 RgSchTddANInfo *anInfo;
5805 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Bind TB 0 to this PDCCH before populating DCI fields. */
5809 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5810 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
5811 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = tpc;
5812 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
5813 rbAllocInfo->tbInfo[0].imcs;
5814 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = TRUE;
/* SPS activation/reactivation on a first transmission signals HARQ
 * proc id 0; otherwise use the real proc id. */
5816 if ((!(hqP->tbInfo[0].txCntr)) &&
5817 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5818 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5819 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
5822 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val = 0;
5826 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val
5830 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val =
5833 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = \
5834 rbAllocInfo->tbInfo[0].tbCb->ndi;
5835 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = \
5836 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5837 /* As of now, we do not support Distributed allocations */
5838 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
5839 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
5840 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
/* Compact allocation: encode start PRB + length as an RIV over the
 * full DL bandwidth (RA type 2). */
5842 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5843 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5844 rbAllocInfo->allocInfo.raType2.rbStart,
5845 rbAllocInfo->allocInfo.raType2.numRb);
/* TDD: derive the DAI from the UE's stored A/N feedback entry; fall
 * back to the max DAI (and log) if no entry exists. */
5847 if(hqP->hqE->ue != NULLP)
5850 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5851 hqP->hqE->cell->cellId,
5853 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5854 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5856 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5857 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5860 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5863 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val =
5864 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5868 /* Fixing DAI value - ccpu00109162 */
5869 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = RG_SCH_MAX_DAI_IDX;
5870 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
5871 "PDCCH is been scheduled without updating anInfo RNTI:%d",
5878 /* always 0 for RACH */
5879 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres
5882 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5883 /* Fixing DAI value - ccpu00109162 */
5884 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5892 * @brief This function fills the PDCCH DCI format 1B information from dlProc.
5896 * Function: rgSCHCmnFillHqPPdcchDciFrmt1B
5897 * Purpose: This function fills in the PDCCH information
5898 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5899 * for dedicated service scheduling. It also
5900 * obtains TPC to be filled in from the power module.
5901 * Assign the PDCCH to HQProc.
5903 * Invoked by: Downlink Scheduler
5905 * @param[in] RgSchCellCb* cell
5906 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5907 * @param[in] RgDlHqProc* hqP
5908 * @param[out] RgSchPdcch *pdcch
5909 * @param[in] uint8_t tpc
5913 static Void rgSCHCmnFillHqPPdcchDciFrmt1B
5916 RgSchDlRbAlloc *rbAllocInfo,
5917 RgSchDlHqProcCb *hqP,
5924 RgSchTddANInfo *anInfo;
5928 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Bind TB 0 to this PDCCH before populating DCI fields. */
5932 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5933 pdcch->dci.u.format1bInfo.tpcCmd = tpc;
5934 pdcch->dci.u.format1bInfo.allocInfo.mcs = \
5935 rbAllocInfo->tbInfo[0].imcs;
/* SPS activation/reactivation on a first transmission signals HARQ
 * proc id 0; otherwise use the real proc id. */
5937 if ((!(hqP->tbInfo[0].txCntr)) &&
5938 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
5939 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
5940 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
5943 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = 0;
5947 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
5950 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
5952 pdcch->dci.u.format1bInfo.allocInfo.ndi = \
5953 rbAllocInfo->tbInfo[0].tbCb->ndi;
5954 pdcch->dci.u.format1bInfo.allocInfo.rv = \
5955 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5956 /* As of now, we do not support Distributed allocations */
5957 pdcch->dci.u.format1bInfo.allocInfo.isLocal = TRUE;
5958 pdcch->dci.u.format1bInfo.allocInfo.nGap2.pres = NOTPRSNT;
5959 pdcch->dci.u.format1bInfo.allocInfo.alloc.type =
/* Compact allocation: encode start PRB + length as an RIV over the
 * full DL bandwidth (RA type 2). */
5961 pdcch->dci.u.format1bInfo.allocInfo.alloc.u.riv =
5962 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5963 rbAllocInfo->allocInfo.raType2.rbStart,
5964 rbAllocInfo->allocInfo.raType2.numRb);
5965 /* Fill precoding Info */
/* precIdxInfo packs PMI confirmation in the high nibble and TPMI in
 * the low nibble. */
5966 pdcch->dci.u.format1bInfo.allocInfo.pmiCfm = \
5967 rbAllocInfo->mimoAllocInfo.precIdxInfo >> 4;
5968 pdcch->dci.u.format1bInfo.allocInfo.tPmi = \
5969 rbAllocInfo->mimoAllocInfo.precIdxInfo & 0x0F;
/* TDD: derive the DAI from the UE's stored A/N feedback entry; fall
 * back to the max DAI (and log) if no entry exists. */
5971 if(hqP->hqE->ue != NULLP)
5974 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5975 hqP->hqE->cell->cellId,
5977 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5978 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
5980 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
5981 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
5986 pdcch->dci.u.format1bInfo.dai =
5987 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
5991 pdcch->dci.u.format1bInfo.dai = RG_SCH_MAX_DAI_IDX;
5992 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
5993 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6004 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
6008 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
6009 * Purpose: This function fills in the PDCCH information
6010 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6011 * for dedicated service scheduling. It also
6012 * obtains TPC to be filled in from the power module.
6013 * Assign the PDCCH to HQProc.
6015 * Invoked by: Downlink Scheduler
6017 * @param[in] RgSchCellCb* cell
6018 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6019 * @param[in] RgDlHqProc* hqP
6020 * @param[out] RgSchPdcch *pdcch
6021 * @param[in] uint8_t tpc
6025 static Void rgSCHCmnFillHqPPdcchDciFrmt2
6028 RgSchDlRbAlloc *rbAllocInfo,
6029 RgSchDlHqProcCb *hqP,
6036 RgSchTddANInfo *anInfo;
6040 /* ccpu00119023-ADD-For activation or reactivation,
6041 * Harq ProcId should be 0 */
6042 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Bind TB 0 (and TB 1 when scheduled or disabled) to this PDCCH. */
6046 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6047 /*ccpu00120365:-ADD-call also if tb is disabled */
6048 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6049 rbAllocInfo->tbInfo[1].isDisabled)
6051 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6053 pdcch->dci.u.format2Info.tpcCmd = tpc;
6054 /* Avoiding this check,as we dont support Type1 RA */
6056 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* RA type 0: split the 32-bit RBG bitmask into 4 bytes, MSB first. */
6059 pdcch->dci.u.format2Info.allocInfo.isAllocType0 = TRUE;
6060 pdcch->dci.u.format2Info.allocInfo.resAllocMap[0] =
6061 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6063 pdcch->dci.u.format2Info.allocInfo.resAllocMap[1] =
6064 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6066 pdcch->dci.u.format2Info.allocInfo.resAllocMap[2] =
6067 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6069 pdcch->dci.u.format2Info.allocInfo.resAllocMap[3] =
6070 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a first transmission signals HARQ
 * proc id 0 (ccpu00119023); otherwise use the real proc id. */
6075 if ((!(hqP->tbInfo[0].txCntr)) &&
6076 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6077 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6078 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6081 pdcch->dci.u.format2Info.allocInfo.harqProcId = 0;
6085 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6088 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6090 /* Initialize the TB info for both the TBs */
/* mcs=0/rv=1 is the "disabled TB" encoding per 36.213 7.1.7.2;
 * scheduled TBs overwrite these defaults below. */
6091 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].mcs = 0;
6092 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].rv = 1;
6093 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].mcs = 0;
6094 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].rv = 1;
6095 /* Fill tbInfo for scheduled TBs */
6096 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6097 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6098 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6099 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6100 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6101 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6102 /* If we reach this function. It is safely assumed that
6103 * rbAllocInfo->tbInfo[0] always has non default valid values.
6104 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6105 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6107 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6108 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6109 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6110 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6111 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6112 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6114 pdcch->dci.u.format2Info.allocInfo.transSwap =
6115 rbAllocInfo->mimoAllocInfo.swpFlg;
6116 pdcch->dci.u.format2Info.allocInfo.precoding =
6117 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD: derive the DAI from the UE's stored A/N feedback entry; fall
 * back to the max DAI (and log) if no entry exists. */
6119 if(hqP->hqE->ue != NULLP)
6123 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6124 hqP->hqE->cell->cellId,
6126 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6127 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6129 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6130 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6135 pdcch->dci.u.format2Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6139 pdcch->dci.u.format2Info.dai = RG_SCH_MAX_DAI_IDX;
6140 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6141 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6151 * @brief This function fills the PDCCH DCI format 2A information from dlProc.
6155 * Function: rgSCHCmnFillHqPPdcchDciFrmt2A
6156 * Purpose: This function fills in the PDCCH information
6157 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6158 * for dedicated service scheduling. It also
6159 * obtains TPC to be filled in from the power module.
6160 * Assign the PDCCH to HQProc.
6162 * Invoked by: Downlink Scheduler
6164 * @param[in] RgSchCellCb* cell
6165 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6166 * @param[in] RgDlHqProc* hqP
6167 * @param[out] RgSchPdcch *pdcch
6168 * @param[in] uint8_t tpc
/* Fills a PDCCH with DCI format 2A content; mirrors
 * rgSCHCmnFillHqPPdcchDciFrmt2 but writes into format2AInfo.
 * NOTE(review): extract is missing interleaved lines (braces,
 * some parameters); comments annotate only visible code. */
6172 static Void rgSCHCmnFillHqPPdcchDciFrmt2A
6175 RgSchDlRbAlloc *rbAllocInfo,
6176 RgSchDlHqProcCb *hqP,
6182 RgSchTddANInfo *anInfo;
6186 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* TB0 always filled; TB1 filled when scheduled OR disabled. */
6190 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6191 /*ccpu00120365:-ADD-call also if tb is disabled */
6192 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6193 rbAllocInfo->tbInfo[1].isDisabled)
6196 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6199 pdcch->dci.u.format2AInfo.tpcCmd = tpc;
6200 /* Avoiding this check,as we dont support Type1 RA */
6202 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
6205 pdcch->dci.u.format2AInfo.allocInfo.isAllocType0 = TRUE;
/* Serialize the 32-bit type-0 allocation bitmask into 4 bytes, MSB first. */
6206 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[0] =
6207 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6209 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[1] =
6210 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6212 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[2] =
6213 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6215 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[3] =
6216 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* First transmission under an SPS (re)activation forces HARQ proc id 0. */
6221 if ((!(hqP->tbInfo[0].txCntr)) &&
6222 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6223 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6224 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6227 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = 0;
6231 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6234 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6236 /* Initialize the TB info for both the TBs */
6237 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].mcs = 0;
6238 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].rv = 1;
6239 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].mcs = 0;
6240 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].rv = 1;
6241 /* Fill tbInfo for scheduled TBs */
6242 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6243 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6244 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6245 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6246 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6247 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6248 /* If we reach this function. It is safely assumed that
6249 * rbAllocInfo->tbInfo[0] always has non default valid values.
6250 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6252 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6254 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6255 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6256 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6257 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6258 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6259 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6262 pdcch->dci.u.format2AInfo.allocInfo.transSwap =
6263 rbAllocInfo->mimoAllocInfo.swpFlg;
6264 pdcch->dci.u.format2AInfo.allocInfo.precoding =
6265 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD DAI lookup; falls back to max DAI index (with error log) when the
 * UE has no ACK/NACK feedback record for TB0's feedback time. */
6267 if(hqP->hqE->ue != NULLP)
6270 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6271 hqP->hqE->cell->cellId,
6273 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6274 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6276 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6277 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6282 pdcch->dci.u.format2AInfo.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6286 pdcch->dci.u.format2AInfo.dai = RG_SCH_MAX_DAI_IDX;
6287 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6288 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6300 * @brief init of Sch vars.
6304 * Function: rgSCHCmnInitVars
6305 Purpose: Initialization of various UL subframe indices
6307 * @param[in] RgSchCellCb *cell
/* Resets every per-cell UL scheduling index to the sentinel
 * RGSCH_INVALID_INFO; the real values are computed each TTI by
 * rgSCHCmnUpdVars(). */
6311 static Void rgSCHCmnInitVars(RgSchCellCb *cell)
6313 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6316 cellUl->idx = RGSCH_INVALID_INFO;
6317 cellUl->schdIdx = RGSCH_INVALID_INFO;
6318 cellUl->schdHqProcIdx = RGSCH_INVALID_INFO;
6319 cellUl->msg3SchdIdx = RGSCH_INVALID_INFO;
6321 cellUl->emtcMsg3SchdIdx = RGSCH_INVALID_INFO;
6323 cellUl->msg3SchdHqProcIdx = RGSCH_INVALID_INFO;
6324 cellUl->rcpReqIdx = RGSCH_INVALID_INFO;
6325 cellUl->hqFdbkIdx[0] = RGSCH_INVALID_INFO;
6326 cellUl->hqFdbkIdx[1] = RGSCH_INVALID_INFO;
6327 cellUl->reTxIdx[0] = RGSCH_INVALID_INFO;
6328 cellUl->reTxIdx[1] = RGSCH_INVALID_INFO;
6329 /* Stack Crash problem for TRACE5 Changes. Added the return below */
6336 * @brief Updation of Sch vars per TTI.
6340 * Function: rgSCHCmnUpdVars
6341 * Purpose: Updation of Sch vars per TTI.
6343 * @param[in] RgSchCellCb *cell
/* Per-TTI update of the cell's UL scheduling indices (current UL
 * subframe index, PUSCH scheduling index, msg3 index, reception-request
 * index, HARQ feedback and retransmission indices), each derived from
 * the current time plus the relevant delta, modulo RG_SCH_CMN_UL_NUM_SF.
 * NOTE(review): the raw printf() calls below are debug residue in a
 * per-TTI hot path — consider removing or guarding them. */
6347 Void rgSCHCmnUpdVars(RgSchCellCb *cell)
6349 CmLteTimingInfo timeInfo;
6350 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Absolute subframe number folded into the UL subframe ring. */
6354 idx = (cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot);
6355 cellUl->idx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6357 printf("idx %d cellUl->idx %d RGSCH_NUM_SUB_FRAMES_5G %d time(%d %d) \n",idx,cellUl->idx ,RGSCH_NUM_SUB_FRAMES_5G,cell->crntTime.sfn,cell->crntTime.slot);
6359 /* Need to scheduler for after SCHED_DELTA */
6360 /* UL allocation has been advanced by 1 subframe
6361 * so that we do not wrap around and send feedback
6362 * before the data is even received by the PHY */
6363 /* Introduced timing delta for UL control */
6364 idx = (cellUl->idx + TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA);
6365 cellUl->schdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6367 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6368 TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA)
6369 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6371 /* ccpu00127193 filling schdTime for logging and enhancement purpose*/
6372 cellUl->schdTime = timeInfo;
6374 /* msg3 scheduling two subframes after general scheduling */
6375 idx = (cellUl->idx + RG_SCH_CMN_DL_DELTA + RGSCH_RARSP_MSG3_DELTA);
6376 cellUl->msg3SchdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6378 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6379 RG_SCH_CMN_DL_DELTA+ RGSCH_RARSP_MSG3_DELTA)
6380 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6382 idx = (cellUl->idx + TFU_RECPREQ_DLDELTA);
6384 cellUl->rcpReqIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6386 /* Downlink harq feedback is sometime after data reception / harq failure */
6387 /* Since feedback happens prior to scheduling being called, we add 1 to */
6388 /* take care of getting the correct subframe for feedback */
/* Subtract the CRC-indication delta; add RG_SCH_CMN_UL_NUM_SF first so
 * the intermediate value cannot go negative before the modulo. */
6389 idx = (cellUl->idx - TFU_CRCIND_ULDELTA + RG_SCH_CMN_UL_NUM_SF);
6391 printf("Finally setting cellUl->hqFdbkIdx[0] = %d TFU_CRCIND_ULDELTA %d RG_SCH_CMN_UL_NUM_SF %d\n",idx,TFU_CRCIND_ULDELTA,RG_SCH_CMN_UL_NUM_SF);
6393 cellUl->hqFdbkIdx[0] = (idx % (RG_SCH_CMN_UL_NUM_SF));
6395 idx = ((cellUl->schdIdx) % (RG_SCH_CMN_UL_NUM_SF));
6397 cellUl->reTxIdx[0] = (uint8_t) idx;
6399 printf("cellUl->hqFdbkIdx[0] %d cellUl->reTxIdx[0] %d \n",cellUl->hqFdbkIdx[0], cellUl->reTxIdx[0] );
6401 /* RACHO: update cmn sched specific RACH variables,
6402 * mainly the prachMaskIndex */
6403 rgSCHCmnUpdRachParam(cell);
6412 * @brief To get uplink subframe index associated with current PHICH
6417 * Function: rgSCHCmnGetPhichUlSfIdx
6418 * Purpose: Gets uplink subframe index associated with current PHICH
6419 * transmission based on SFN and subframe no
6421 * @param[in] CmLteTimingInfo *timeInfo
6422 * @param[in] RgSchCellCb *cell
/* Returns the UL subframe index tied to the current PHICH transmission
 * (TDD): resolves the DL subframe's PHICH offset to an (sfn, subframe)
 * pair and converts it to a cumulative UL subframe count modulo the
 * number of UL subframes. Returns RGSCH_INVALID_INFO when the DL
 * subframe carries no valid PHICH offset. */
6426 uint8_t rgSCHCmnGetPhichUlSfIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6428 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6430 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6437 dlsf = rgSCHUtlSubFrmGet(cell, *timeInfo);
6439 if(dlsf->phichOffInfo.sfnOffset == RGSCH_INVALID_INFO)
6441 return (RGSCH_INVALID_INFO);
6443 subframe = dlsf->phichOffInfo.subframe;
/* Add RGSCH_MAX_SFN before subtracting so the SFN difference cannot
 * go negative across an SFN wrap. */
6445 sfn = (RGSCH_MAX_SFN + timeInfo->sfn -
6446 dlsf->phichOffInfo.sfnOffset) % RGSCH_MAX_SFN;
6448 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
6449 * wrap case such that idx will be proper*/
6450 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6451 numUlSf = ((numUlSf * sfn) + rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subframe]) - 1;
6452 idx = numUlSf % (cellUl->numUlSubfrms);
6458 * @brief To get uplink subframe index.
6463 * Function: rgSCHCmnGetUlSfIdx
6464 * Purpose: Gets uplink subframe index based on SFN and subframe number.
6466 * @param[in] CmLteTimingInfo *timeInfo
6467 * @param[in] uint8_t ulDlCfgIdx
/* Returns the UL subframe index for the given (sfn, subframe) in TDD:
 * cumulative UL subframe count since sfn 0, modulo the cell's number of
 * UL subframes. Same counting scheme as rgSCHCmnGetPhichUlSfIdx. */
6471 uint8_t rgSCHCmnGetUlSfIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6473 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6474 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6479 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
6480 * wrap case such that idx will be proper*/
/* UL subframes per full frame, from the last column of the UL-subframe
 * table for this UL/DL configuration. */
6481 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6482 numUlSf = ((numUlSf * timeInfo->sfn) + \
6483 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->subframe]) - 1;
6484 idx = numUlSf % (cellUl->numUlSubfrms);
6492 * @brief To get uplink hq index.
6497 * Function: rgSCHCmnGetUlHqProcIdx
6498 * Purpose: Gets uplink subframe index based on SFN and subframe number.
6500 * @param[in] CmLteTimingInfo *timeInfo
6501 * @param[in] uint8_t ulDlCfgIdx
/* Returns the UL HARQ process id for the given timing.
 * NOTE(review): the extract has lost the #ifdef markers — the first two
 * lines appear to be the FDD/5G path (absolute subframe mod number of
 * UL HARQ procs) and the remainder the TDD path, which also tracks an
 * SFN wrap-around cycle so process ids stay continuous across wraps. */
6505 uint8_t rgSCHCmnGetUlHqProcIdx(CmLteTimingInfo *timeInfo,RgSchCellCb *cell)
6511 numUlSf = (timeInfo->sfn * RGSCH_NUM_SUB_FRAMES_5G + timeInfo->slot);
6512 procId = numUlSf % RGSCH_NUM_UL_HQ_PROC;
6514 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
6515 /*ccpu00130639 - MOD - To get correct UL HARQ Proc IDs for all UL/DL Configs*/
6516 uint8_t numUlSfInSfn;
6517 S8 sfnCycle = cell->tddHqSfnCycle;
6518 uint8_t numUlHarq = rgSchTddUlNumHarqProcTbl[ulDlCfgIdx]
6520 /* TRACE 5 Changes */
6522 /* Calculate the number of UL SF in one SFN */
6523 numUlSfInSfn = RGSCH_NUM_SUB_FRAMES -
6524 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
6526 /* Check for the SFN wrap around case */
6527 if(cell->crntTime.sfn == 1023 && timeInfo->sfn == 0)
6531 else if(cell->crntTime.sfn == 0 && timeInfo->sfn == 1023)
6533 /* sfnCycle decremented by 1 */
/* Modular decrement keeps sfnCycle in [0, numUlHarq). */
6534 sfnCycle = (sfnCycle + numUlHarq-1) % numUlHarq;
6536 /* Calculate the total number of UL sf */
6537 /* -1 is done since uplink sf are counted from 0 */
6538 numUlSf = numUlSfInSfn * (timeInfo->sfn + (sfnCycle*1024)) +
6539 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->slot] - 1;
6541 procId = numUlSf % numUlHarq;
6547 /* UL_ALLOC_CHANGES */
6548 /***********************************************************
6550 * Func : rgSCHCmnUlFreeAlloc
6552 * Desc : Free an allocation - invokes UHM and releases
6553 * alloc for the scheduler
6554 * Does not need subframe as argument
6562 **********************************************************/
/* Frees an UL allocation: releases the HARQ process via UHM and returns
 * the allocation to the scheduler. A msg3 allocation whose HARQ proc
 * has exhausted retransmissions without a CRC pass additionally tears
 * down the RA control block (releasing the RNTI).
 * NOTE(review): on the RA path the proc is freed BEFORE the alloc is
 * released, on the normal path after — presumably because
 * rgSCHRamDelRaCb invalidates the raCb the proc references; confirm. */
6563 Void rgSCHCmnUlFreeAlloc(RgSchCellCb *cell,RgSchUlAlloc *alloc)
6565 RgSchUlHqProcCb *hqProc;
6569 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
6570 if ((alloc->hqProc->remTx == 0) &&
6571 (alloc->hqProc->rcvdCrcInd == FALSE) &&
6574 RgSchRaCb *raCb = alloc->raCb;
6575 rgSCHUhmFreeProc(alloc->hqProc, cell);
6576 rgSCHUtlUlAllocRelease(alloc);
6577 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: cache the proc pointer before the alloc is released. */
6582 hqProc = alloc->hqProc;
6583 rgSCHUtlUlAllocRelease(alloc);
6584 rgSCHUhmFreeProc(hqProc, cell);
6589 /***********************************************************
6591 * Func : rgSCHCmnUlFreeAllocation
6593 * Desc : Free an allocation - invokes UHM and releases
6594 * alloc for the scheduler
6602 **********************************************************/
/* Variant of rgSCHCmnUlFreeAlloc that releases the allocation back to a
 * specific UL subframe (rgSCHUtlUlAllocRls) rather than via
 * rgSCHUtlUlAllocRelease. Same msg3 max-retx RNTI-release handling. */
6603 Void rgSCHCmnUlFreeAllocation(RgSchCellCb *cell,RgSchUlSf *sf,RgSchUlAlloc *alloc)
6605 RgSchUlHqProcCb *hqProc;
6610 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
6611 if ((alloc->hqProc->remTx == 0) &&
6612 (alloc->hqProc->rcvdCrcInd == FALSE) &&
6615 RgSchRaCb *raCb = alloc->raCb;
6616 rgSCHUhmFreeProc(alloc->hqProc, cell);
6617 rgSCHUtlUlAllocRls(sf, alloc);
6618 rgSCHRamDelRaCb(cell, raCb, TRUE);
6623 hqProc = alloc->hqProc;
6624 rgSCHUhmFreeProc(hqProc, cell);
6626 /* re-setting the PRB count while freeing the allocations */
6629 rgSCHUtlUlAllocRls(sf, alloc);
6635 * @brief This function implements PDCCH allocation for an UE
6636 * in the currently running subframe.
6640 * Function: rgSCHCmnPdcchAllocCrntSf
6641 * Purpose: This function determines current DL subframe
6642 * and UE DL CQI to call the actual pdcch allocator
6644 * Note that this function is called only
6645 * when PDCCH request needs to be made during
6646 * uplink scheduling.
6648 * Invoked by: Scheduler
6650 * @param[in] RgSchCellCb *cell
6651 * @param[in] RgSchUeCb *ue
6652 * @return RgSchPdcch *
6653 * -# NULLP when unsuccessful
/* Allocates a PDCCH for a UE in the DL subframe currently being
 * scheduled (current time advanced by TFU_ULCNTRL_DLDELTA). Uses the
 * common search space while ue->allocCmnUlPdcch is set (e.g. before
 * SCell readiness), otherwise the UE search space with the UE's CW0
 * CQI. Returns NULLP on allocation failure. */
6655 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf(RgSchCellCb *cell,RgSchUeCb *ue)
6657 CmLteTimingInfo frm = cell->crntTime;
6658 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
6660 RgSchPdcch *pdcch = NULLP;
6662 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
6663 sf = rgSCHUtlSubFrmGet(cell, frm);
6666 if (ue->allocCmnUlPdcch)
6668 pdcch = rgSCHCmnCmnPdcchAlloc(cell, sf);
6669 /* Since CRNTI Scrambled */
6672 pdcch->dciNumOfBits = ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
6678 //pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, y, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_0, FALSE);
6679 pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_A1, FALSE);
6684 /***********************************************************
6686 * Func : rgSCHCmnUlAllocFillNdmrs
6688 * Desc : Determines and fills N_dmrs for a UE uplink
6693 * Notes: N_dmrs determination is straightforward, so
6694 * it is configured per subband
6698 **********************************************************/
6699 Void rgSCHCmnUlAllocFillNdmrs(RgSchCmnUlCell *cellUl,RgSchUlAlloc *alloc)
6701 alloc->grnt.nDmrs = cellUl->dmrsArr[alloc->sbStart];
6705 /***********************************************************
6707 * Func : rgSCHCmnUlAllocLnkHqProc
6709 * Desc : Links a new allocation for an UE with the
6710 * appropriate HARQ process of the UE.
6718 **********************************************************/
/* Links a fresh UL allocation to the UE's HARQ process: adaptive
 * retransmission handling when isRetx is set, otherwise registers a new
 * transmission with the UE's configured max HARQ retx count.
 * NOTE(review): the branch structure (#ifdef/else lines) is missing
 * from this extract. */
6719 Void rgSCHCmnUlAllocLnkHqProc(RgSchUeCb *ue,RgSchUlAlloc *alloc,RgSchUlHqProcCb *proc,Bool isRetx)
6724 rgSCHCmnUlAdapRetx(alloc, proc);
6728 #ifdef LTE_L2_MEAS /* L2_COUNTERS */
6731 rgSCHUhmNewTx(proc, (((RgUeUlHqCb*)proc->hqEnt)->maxHqRetx), alloc);
6737 * @brief This function releases a PDCCH in the subframe that is
6738 * currently being allocated for.
6742 * Function: rgSCHCmnPdcchRlsCrntSf
6743 * Purpose: This function determines current DL subframe
6744 * which is considered for PDCCH allocation,
6745 * and then calls the actual function that
6746 * releases a PDCCH in a specific subframe.
6747 * Note that this function is called only
6748 * when PDCCH release needs to be made during
6749 * uplink scheduling.
6751 * Invoked by: Scheduler
6753 * @param[in] RgSchCellCb *cell
6754 * @param[in] RgSchPdcch *pdcch
/* Returns a PDCCH to the pool of the DL subframe currently considered
 * for PDCCH allocation (current time + TFU_ULCNTRL_DLDELTA); used when
 * a PDCCH reserved during UL scheduling must be released. */
6757 Void rgSCHCmnPdcchRlsCrntSf(RgSchCellCb *cell,RgSchPdcch *pdcch)
6759 CmLteTimingInfo frm = cell->crntTime;
6762 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
6763 sf = rgSCHUtlSubFrmGet(cell, frm);
6764 rgSCHUtlPdcchPut(cell, &sf->pdcchInfo, pdcch);
6767 /***********************************************************
6769 * Func : rgSCHCmnUlFillPdcchWithAlloc
6771 * Desc : Fills a PDCCH with format 0 information.
6779 **********************************************************/
/* Fills a PDCCH with a 5GTF UL grant: selects DCI format A1 or A2 from
 * the grant and populates the format-specific fields. Many fields are
 * hard-coded to 0 (see comment retained below); an unexpected format is
 * logged as an error. */
6780 Void rgSCHCmnUlFillPdcchWithAlloc(RgSchPdcch *pdcch,RgSchUlAlloc *alloc,RgSchUeCb *ue)
6784 pdcch->rnti = alloc->rnti;
6785 //pdcch->dci.dciFormat = TFU_DCI_FORMAT_A2;
6786 pdcch->dci.dciFormat = alloc->grnt.dciFrmt;
6788 //Currently hardcoding values here.
6789 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
6790 switch(pdcch->dci.dciFormat)
6792 case TFU_DCI_FORMAT_A1:
/* formatType 0 distinguishes A1 from A2 (which uses 1). */
6794 pdcch->dci.u.formatA1Info.formatType = 0;
6795 pdcch->dci.u.formatA1Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
6796 pdcch->dci.u.formatA1Info.xPUSCH_TxTiming = 0;
6797 pdcch->dci.u.formatA1Info.RBAssign = alloc->grnt.rbAssign;
6798 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
6799 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
6800 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
6801 pdcch->dci.u.formatA1Info.CSI_BSI_BRI_Req = 0;
6802 pdcch->dci.u.formatA1Info.CSIRS_BRRS_TxTiming = 0;
6803 pdcch->dci.u.formatA1Info.CSIRS_BRRS_SymbIdx = 0;
6804 pdcch->dci.u.formatA1Info.CSIRS_BRRS_ProcInd = 0;
6805 pdcch->dci.u.formatA1Info.numBSI_Reports = 0;
6806 pdcch->dci.u.formatA1Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
6807 pdcch->dci.u.formatA1Info.beamSwitch = 0;
6808 pdcch->dci.u.formatA1Info.SRS_Config = 0;
6809 pdcch->dci.u.formatA1Info.SRS_Symbol = 0;
6810 pdcch->dci.u.formatA1Info.REMapIdx_DMRS_PCRS_numLayers = 0;
6811 pdcch->dci.u.formatA1Info.SCID = alloc->grnt.SCID;
6812 pdcch->dci.u.formatA1Info.PMI = alloc->grnt.PMI;
6813 pdcch->dci.u.formatA1Info.UL_PCRS = 0;
6814 pdcch->dci.u.formatA1Info.tpcCmd = alloc->grnt.tpc;
6817 case TFU_DCI_FORMAT_A2:
/* Mirrors the A1 case but writes into formatA2Info with formatType 1. */
6819 pdcch->dci.u.formatA2Info.formatType = 1;
6820 pdcch->dci.u.formatA2Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
6821 pdcch->dci.u.formatA2Info.xPUSCH_TxTiming = 0;
6822 pdcch->dci.u.formatA2Info.RBAssign = alloc->grnt.rbAssign;
6823 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
6824 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
6825 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
6826 pdcch->dci.u.formatA2Info.CSI_BSI_BRI_Req = 0;
6827 pdcch->dci.u.formatA2Info.CSIRS_BRRS_TxTiming = 0;
6828 pdcch->dci.u.formatA2Info.CSIRS_BRRS_SymbIdx = 0;
6829 pdcch->dci.u.formatA2Info.CSIRS_BRRS_ProcInd = 0;
6830 pdcch->dci.u.formatA2Info.numBSI_Reports = 0;
6831 pdcch->dci.u.formatA2Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
6832 pdcch->dci.u.formatA2Info.beamSwitch = 0;
6833 pdcch->dci.u.formatA2Info.SRS_Config = 0;
6834 pdcch->dci.u.formatA2Info.SRS_Symbol = 0;
6835 pdcch->dci.u.formatA2Info.REMapIdx_DMRS_PCRS_numLayers = 0;
6836 pdcch->dci.u.formatA2Info.SCID = alloc->grnt.SCID;
6837 pdcch->dci.u.formatA2Info.PMI = alloc->grnt.PMI;
6838 pdcch->dci.u.formatA2Info.UL_PCRS = 0;
6839 pdcch->dci.u.formatA2Info.tpcCmd = alloc->grnt.tpc;
6843 RLOG1(L_ERROR," 5GTF_ERROR UL Allocator's icorrect "
6844 "dciForamt Fill RNTI:%d",alloc->rnti);
6852 /***********************************************************
6854 * Func : rgSCHCmnUlAllocFillTpc
6856 * Desc : Determines and fills TPC for an UE allocation.
6864 **********************************************************/
6865 Void rgSCHCmnUlAllocFillTpc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchUlAlloc *alloc)
6867 alloc->grnt.tpc = rgSCHPwrPuschTpcForUe(cell, ue);
6872 /***********************************************************
6874 * Func : rgSCHCmnAddUeToRefreshQ
6876 * Desc : Adds a UE to refresh queue, so that the UE is
6877 * periodically triggered to refresh it's GBR and
6886 **********************************************************/
/* Queues the UE on the scheduler's timer queue so a
 * RG_SCH_CMN_EVNT_UE_REFRESH event fires after 'wait' units,
 * periodically refreshing the UE's GBR/AMBR accounting.
 * NOTE(review): the timer-start call and remaining arg fields are
 * missing from this extract. */
6887 static Void rgSCHCmnAddUeToRefreshQ(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t wait)
6889 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
6891 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
6895 memset(&arg, 0, sizeof(arg));
6896 arg.tqCp = &sched->tmrTqCp;
6897 arg.tq = sched->tmrTq;
6898 arg.timers = &ueSchd->tmr;
6902 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
6909 * @brief Perform UE reset procedure.
6913 * Function : rgSCHCmnUlUeReset
6915 * This functions performs BSR resetting and
6916 * triggers UL specific scheduler
6917 * to Perform UE reset procedure.
6919 * @param[in] RgSchCellCb *cell
6920 * @param[in] RgSchUeCb *ue
/* UL part of UE reset: clears BSR/AMBR bookkeeping, restores each LCG's
 * effective GBR/MBR from configured values, drops outstanding UL
 * allocation records, and delegates the rest to the UL scheduler API. */
6923 static Void rgSCHCmnUlUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
6925 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
6926 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
6928 RgSchCmnLcg *lcgCmn;
6930 RgSchCmnAllocRecord *allRcd;
6932 ue->ul.minReqBytes = 0;
6933 ue->ul.totalBsr = 0;
6935 ue->ul.nonGbrLcgBs = 0;
6936 ue->ul.effAmbr = ue->ul.cfgdAmbr;
6938 node = ueUl->ulAllocLst.first;
6941 allRcd = (RgSchCmnAllocRecord *)node->node;
/* Re-arm every LCG with its configured (not consumed) rates. */
6945 for(lcgCnt = 0; lcgCnt < RGSCH_MAX_LCG_PER_UE; lcgCnt++)
6947 lcgCmn = RG_SCH_CMN_GET_UL_LCG(&ue->ul.lcgArr[lcgCnt]);
6949 lcgCmn->reportedBs = 0;
6950 lcgCmn->effGbr = lcgCmn->cfgdGbr;
6951 lcgCmn->effDeltaMbr = lcgCmn->deltaMbr;
6953 rgSCHCmnUlUeDelAllocs(cell, ue);
6955 ue->isSrGrant = FALSE;
6957 cellSchd->apisUl->rgSCHUlUeReset(cell, ue);
6959 /* Stack Crash problem for TRACE5 changes. Added the return below */
6965 * @brief RESET UL CQI and DL CQI&RI to conservative values
6966 * for a reestablishing UE.
6970 * Function : rgSCHCmnResetRiCqi
6972 * RESET UL CQI and DL CQI&RI to conservative values
6973 * for a reestablishing UE
6975 * @param[in] RgSchCellCb *cell
6976 * @param[in] RgSchUeCb *ue
/* Resets UL CQI and DL CQI/RI to conservative defaults for a
 * re-establishing UE: DL CQI falls back to the cell's CCCH CQI, RI to 1,
 * and transmit-diversity is forced for TM3/TM4/TM6 until fresh reports
 * arrive. Also requests an early aperiodic CQI where configured. */
6979 static Void rgSCHCmnResetRiCqi(RgSchCellCb *cell,RgSchUeCb *ue)
6981 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
6982 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
6983 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
6984 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
6987 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
6988 cell->isCpUlExtend);
/* Conservative DL state: CCCH-level CQI on both codewords, rank 1. */
6990 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
6991 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
6992 ueDl->mimoInfo.ri = 1;
6993 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
6994 (ue->mimoInfo.txMode == RGR_UE_TM_6))
6996 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
6998 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7000 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
7003 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
7005 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
7009 /* Request for an early Aper CQI in case of reest */
7010 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
7011 if(acqiCb && acqiCb->aCqiCfg.pres)
7013 acqiCb->aCqiTrigWt = 0;
7021 * @brief Perform UE reset procedure.
7025 * Function : rgSCHCmnDlUeReset
7027 * This functions performs BO resetting and
7028 * triggers DL specific scheduler
7029 * to Perform UE reset procedure.
7031 * @param[in] RgSchCellCb *cell
7032 * @param[in] RgSchUeCb *ue
/* DL part of UE reset: removes the UE from the PDCCH-order queue and
 * the timing-advance list (or stops a running TA timer), then delegates
 * to the DL scheduler API and resets any SCell DL state. */
7035 static Void rgSCHCmnDlUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
7037 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7038 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
7039 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7042 if (ueDl->rachInfo.poLnk.node != NULLP)
7044 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
7047 /* Fix: syed Remove from TA List if this UE is there.
7048 * If TA Timer is running. Stop it */
7049 if (ue->dlTaLnk.node)
7051 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
7052 ue->dlTaLnk.node = (PTR)NULLP;
7054 else if (ue->taTmr.tmrEvnt != TMR_NONE)
7056 rgSCHTmrStopTmr(cell, ue->taTmr.tmrEvnt, ue);
7059 cellSchd->apisDl->rgSCHDlUeReset(cell, ue);
7063 rgSCHSCellDlUeReset(cell,ue);
7069 * @brief Perform UE reset procedure.
7073 * Function : rgSCHCmnUeReset
7075 * This functions triggers specific scheduler
7076 * to Perform UE reset procedure.
7078 * @param[in] RgSchCellCb *cell
7079 * @param[in] RgSchUeCb *ue
/* Full UE reset: clears RACH associations and power-control state,
 * performs UL and DL resets, restores conservative CQI/RI on every
 * configured cell, re-triggers an aperiodic CQI for the PCell, and
 * finally tells MAC to reset the UE's HARQ entity. */
7084 Void rgSCHCmnUeReset(RgSchCellCb *cell,RgSchUeCb *ue)
7088 RgInfResetHqEnt hqEntRstInfo;
7090 /* RACHO: remove UE from pdcch, handover and rapId assoc Qs */
7091 rgSCHCmnDelRachInfo(cell, ue);
7093 rgSCHPwrUeReset(cell, ue);
7095 rgSCHCmnUlUeReset(cell, ue);
7096 rgSCHCmnDlUeReset(cell, ue);
7099 /* Making allocCmnUlPdcch TRUE to allocate DCI0/1A from Common search space.
7100 As because multiple cells are added hence 2 bits CqiReq is there
7101 This flag will be set to FALSE once we will get Scell READY */
7102 ue->allocCmnUlPdcch = TRUE;
7105 /* Fix : syed RESET UL CQI and DL CQI&RI to conservative values
7106 * for a reestablishing UE */
7107 /*Reset Cqi Config for all the configured cells*/
7108 for (idx = 0;idx < CM_LTE_MAX_CELLS; idx++)
7110 if (ue->cellInfo[idx] != NULLP)
7112 rgSCHCmnResetRiCqi(ue->cellInfo[idx]->cell, ue);
7115 /*After Reset Trigger APCQI for Pcell*/
7116 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7117 if(pCellInfo->acqiCb.aCqiCfg.pres)
7119 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
7122 /* sending HqEnt reset to MAC */
7123 hqEntRstInfo.cellId = cell->cellId;
7124 hqEntRstInfo.crnti = ue->ueId;
7126 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
7127 RgSchMacRstHqEnt(&pst,&hqEntRstInfo);
7133 * @brief UE out of MeasGap or AckNackReptn.
7137 * Function : rgSCHCmnActvtUlUe
7139 * This functions triggers specific scheduler
7140 * to start considering it for scheduling.
7142 * @param[in] RgSchCellCb *cell
7143 * @param[in] RgSchUeCb *ue
7148 Void rgSCHCmnActvtUlUe(RgSchCellCb *cell,RgSchUeCb *ue)
7150 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7152 /* : take care of this in UL retransmission */
7153 cellSchd->apisUl->rgSCHUlActvtUe(cell, ue);
7158 * @brief UE out of MeasGap or AckNackReptn.
7162 * Function : rgSCHCmnActvtDlUe
7164 * This functions triggers specific scheduler
7165 * to start considering it for scheduling.
7167 * @param[in] RgSchCellCb *cell
7168 * @param[in] RgSchUeCb *ue
7173 Void rgSCHCmnActvtDlUe(RgSchCellCb *cell,RgSchUeCb *ue)
7175 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7177 cellSchd->apisDl->rgSCHDlActvtUe(cell, ue);
7182 * @brief This API is invoked to indicate scheduler of a CRC indication.
7186 * Function : rgSCHCmnHdlUlTransInd
7187 * This API is invoked to indicate scheduler of a CRC indication.
7189 * @param[in] RgSchCellCb *cell
7190 * @param[in] RgSchUeCb *ue
7191 * @param[in] CmLteTimingInfo timingInfo
/* Records the time of the latest UL data/signalling transmission from
 * the UE and, if the UE had been marked inactive for PDCCH order,
 * clears that inactivity mask in both DL and UL directions. */
7195 Void rgSCHCmnHdlUlTransInd(RgSchCellCb *cell,RgSchUeCb *ue,CmLteTimingInfo timingInfo)
7198 /* Update the latest UL dat/sig transmission time */
7199 RGSCHCPYTIMEINFO(timingInfo, ue->ul.ulTransTime);
7200 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
7202 /* Some UL Transmission from this UE.
7203 * Activate this UE if it was inactive */
7204 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7205 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7213 * @brief Compute the minimum Rank based on Codebook subset
7214 * restriction configuration for 4 Tx Ports and Tx Mode 4.
7218 * Function : rgSCHCmnComp4TxMode4
7220 * Depending on BitMap set at CBSR during Configuration
7221 * - return the least possible Rank
7224 * @param[in] uint32_t *pmiBitMap
7225 * @return RgSchCmnRank
7227 static RgSchCmnRank rgSCHCmnComp4TxMode4(uint32_t *pmiBitMap)
7229 uint32_t bitMap0, bitMap1;
7230 bitMap0 = pmiBitMap[0];
7231 bitMap1 = pmiBitMap[1];
7232 if((bitMap1) & 0xFFFF)
7234 return (RG_SCH_CMN_RANK_1);
7236 else if((bitMap1>>16) & 0xFFFF)
7238 return (RG_SCH_CMN_RANK_2);
7240 else if((bitMap0) & 0xFFFF)
7242 return (RG_SCH_CMN_RANK_3);
7244 else if((bitMap0>>16) & 0xFFFF)
7246 return (RG_SCH_CMN_RANK_4);
7250 return (RG_SCH_CMN_RANK_1);
7256 * @brief Compute the minimum Rank based on Codebook subset
7257 * restriction configuration for 2 Tx Ports and Tx Mode 4.
7261 * Function : rgSCHCmnComp2TxMode4
7263 * Depending on BitMap set at CBSR during Configuration
7264 * - return the least possible Rank
7267 * @param[in] uint32_t *pmiBitMap
7268 * @return RgSchCmnRank
7270 static RgSchCmnRank rgSCHCmnComp2TxMode4(uint32_t *pmiBitMap)
7273 bitMap0 = pmiBitMap[0];
7274 if((bitMap0>>26)& 0x0F)
7276 return (RG_SCH_CMN_RANK_1);
7278 else if((bitMap0>>30) & 3)
7280 return (RG_SCH_CMN_RANK_2);
7284 return (RG_SCH_CMN_RANK_1);
7289 * @brief Compute the minimum Rank based on Codebook subset
7290 * restriction configuration for 4 Tx Ports and Tx Mode 3.
7294 * Function : rgSCHCmnComp4TxMode3
7296 * Depending on BitMap set at CBSR during Configuration
7297 * - return the least possible Rank
7300 * @param[in] uint32_t *pmiBitMap
7301 * @return RgSchCmnRank
7303 static RgSchCmnRank rgSCHCmnComp4TxMode3(uint32_t *pmiBitMap)
7306 bitMap0 = pmiBitMap[0];
7307 if((bitMap0>>28)& 1)
7309 return (RG_SCH_CMN_RANK_1);
7311 else if((bitMap0>>29) &1)
7313 return (RG_SCH_CMN_RANK_2);
7315 else if((bitMap0>>30) &1)
7317 return (RG_SCH_CMN_RANK_3);
7319 else if((bitMap0>>31) &1)
7321 return (RG_SCH_CMN_RANK_4);
7325 return (RG_SCH_CMN_RANK_1);
7330 * @brief Compute the minimum Rank based on Codebook subset
7331 * restriction configuration for 2 Tx Ports and Tx Mode 3.
7335 * Function : rgSCHCmnComp2TxMode3
7337 * Depending on BitMap set at CBSR during Configuration
7338 * - return the least possible Rank
7341 * @param[in] uint32_t *pmiBitMap
7342 * @return RgSchCmnRank
7344 static RgSchCmnRank rgSCHCmnComp2TxMode3(uint32_t *pmiBitMap)
7347 bitMap0 = pmiBitMap[0];
7348 if((bitMap0>>30)& 1)
7350 return (RG_SCH_CMN_RANK_1);
7352 else if((bitMap0>>31) &1)
7354 return (RG_SCH_CMN_RANK_2);
7358 return (RG_SCH_CMN_RANK_1);
7363 * @brief Compute the minimum Rank based on Codebook subset
7364 * restriction configuration.
7368 * Function : rgSCHCmnComputeRank
7370 * Depending on Num Tx Ports and Transmission mode
7371 * - return the least possible Rank
7374 * @param[in] RgrTxMode txMode
7375 * @param[in] uint32_t *pmiBitMap
7376 * @param[in] uint8_t numTxPorts
7377 * @return RgSchCmnRank
7379 static RgSchCmnRank rgSCHCmnComputeRank(RgrTxMode txMode,uint32_t *pmiBitMap,uint8_t numTxPorts)
7382 if (numTxPorts ==2 && txMode == RGR_UE_TM_3)
7384 return (rgSCHCmnComp2TxMode3(pmiBitMap));
7386 else if (numTxPorts ==4 && txMode == RGR_UE_TM_3)
7388 return (rgSCHCmnComp4TxMode3(pmiBitMap));
7390 else if (numTxPorts ==2 && txMode == RGR_UE_TM_4)
7392 return (rgSCHCmnComp2TxMode4(pmiBitMap));
7394 else if (numTxPorts ==4 && txMode == RGR_UE_TM_4)
7396 return (rgSCHCmnComp4TxMode4(pmiBitMap));
7400 return (RG_SCH_CMN_RANK_1);
7407 * @brief Harq Entity Deinitialization for CMN SCH.
7411 * Function : rgSCHCmnDlDeInitHqEnt
7413 * Harq Entity Deinitialization for CMN SCH
7415 * @param[in] RgSchCellCb *cell
7416 * @param[in] RgSchDlHqEnt *hqE
7419 /*KWORK_FIX:Changed function return type to void */
7420 Void rgSCHCmnDlDeInitHqEnt(RgSchCellCb *cell,RgSchDlHqEnt *hqE)
7422 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7423 RgSchDlHqProcCb *hqP;
/* Let the specific DL scheduler tear down its per-HARQ-entity context first */
7427 ret = cellSchd->apisDl->rgSCHDlUeHqEntDeInit(cell, hqE);
7428 /* Free only If the Harq proc are created*/
/* Walk each DL HARQ process and free the common-scheduler block hung off it */
7433 for(cnt = 0; cnt < hqE->numHqPrcs; cnt++)
7435 hqP = &hqE->procs[cnt];
7436 if ((RG_SCH_CMN_GET_DL_HQP(hqP)))
7438 rgSCHUtlFreeSBuf(cell->instIdx,
7439 (Data**)(&(hqP->sch)), (sizeof(RgSchCmnDlHqProc)));
/* NOTE(review): LAA HARQ-proc teardown — presumably guarded by an LAA build flag; confirm */
7443 rgSCHLaaDeInitDlHqProcCb (cell, hqE);
7450 * @brief Harq Entity initialization for CMN SCH.
7454 * Function : rgSCHCmnDlInitHqEnt
7456 * Harq Entity initialization for CMN SCH
7458 * @param[in] RgSchCellCb *cell
7459 * @param[in] RgSchUeCb *ue
7464 S16 rgSCHCmnDlInitHqEnt(RgSchCellCb *cell,RgSchDlHqEnt *hqEnt)
7466 RgSchDlHqProcCb *hqP;
7468 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Allocate a common-scheduler control block for every DL HARQ process */
7470 for(cnt = 0; cnt < hqEnt->numHqPrcs; cnt++)
7472 hqP = &hqEnt->procs[cnt];
7473 if (rgSCHUtlAllocSBuf(cell->instIdx,
7474 (Data**)&(hqP->sch), (sizeof(RgSchCmnDlHqProc))) != ROK)
/* eMTC UEs are initialized through the eMTC-specific DL scheduler API */
7480 if((cell->emtcEnable) &&(hqEnt->ue->isEmtcUe))
7482 if(ROK != cellSchd->apisEmtcDl->rgSCHDlUeHqEntInit(cell, hqEnt))
/* Regular (non-eMTC) path uses the common DL scheduler API */
7491 if(ROK != cellSchd->apisDl->rgSCHDlUeHqEntInit(cell, hqEnt))
7498 } /* rgSCHCmnDlInitHqEnt */
7501 * @brief This function computes distribution of refresh period
7505 * Function: rgSCHCmnGetRefreshDist
7506 * Purpose: This function computes distribution of refresh period
7507 * This is required to align set of UEs refresh
7508 * around the different consecutive subframe.
7510 * Invoked by: rgSCHCmnGetRefreshPerDist
7512 * @param[in] RgSchCellCb *cell
7513 * @param[in] RgSchUeCb *ue
7517 static uint8_t rgSCHCmnGetRefreshDist(RgSchCellCb *cell,RgSchUeCb *ue)
7521 Inst inst = cell->instIdx;
/* Find the first refresh offset that still has room and book the UE there */
7524 for(refOffst = 0; refOffst < RGSCH_MAX_REFRESH_OFFSET; refOffst++)
7526 if(cell->refreshUeCnt[refOffst] < RGSCH_MAX_REFRESH_GRPSZ)
7528 cell->refreshUeCnt[refOffst]++;
7529 ue->refreshOffset = refOffst;
7530 /* printf("UE[%d] refresh offset[%d]. Cell refresh ue count[%d].\n", ue->ueId, refOffst, cell->refreshUeCnt[refOffst]); */
7535 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Allocation of refresh distribution failed\n"));
7536 /* We should not enter here normally, but incase of failure, allocating from last offset*/
/* Fallback: overload the last offset so the UE still gets a refresh slot */
7537 cell->refreshUeCnt[refOffst-1]++;
7538 ue->refreshOffset = refOffst-1;
7540 return (refOffst-1);
7543 * @brief This function computes initial Refresh Wait Period.
7547 * Function: rgSCHCmnGetRefreshPer
7548 * Purpose: This function computes initial Refresh Wait Period.
7549 * This is required to align multiple UEs refresh
7550 * around the same time.
7552 * Invoked by: rgSCHCmnGetRefreshPer
7554 * @param[in] RgSchCellCb *cell
7555 * @param[in] RgSchUeCb *ue
7556 * @param[in] uint32_t *waitPer
7560 static Void rgSCHCmnGetRefreshPer(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t *waitPer)
7562 uint32_t refreshPer;
7563 uint32_t crntSubFrm;
/* Refresh period expressed in subframes */
7566 refreshPer = RG_SCH_CMN_REFRESH_TIME * RG_SCH_CMN_REFRESH_TIMERES;
7567 crntSubFrm = cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot;
7568 /* Fix: syed align multiple UEs to refresh at same time */
/* Wait until the next refresh-period boundary, converted back to timer resolution */
7569 *waitPer = refreshPer - (crntSubFrm % refreshPer);
7570 *waitPer = RGSCH_CEIL(*waitPer, RG_SCH_CMN_REFRESH_TIMERES);
/* Stagger UEs across consecutive offsets so they do not all refresh in one tick */
7571 *waitPer = *waitPer + rgSCHCmnGetRefreshDist(cell, ue);
7579 * @brief UE initialisation for scheduler.
7583 * Function : rgSCHCmnRgrSCellUeCfg
7585 * This functions intialises UE specific scheduler
7586 * information for SCELL
7587 * 0. Perform basic validations
7588 * 1. Allocate common sched UE cntrl blk
7589 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
7591 * 4. Perform DLFS cfg
7593 * @param[in] RgSchCellCb *cell
7594 * @param[in] RgSchUeCb *ue
7595 * @param[out] RgSchErrInfo *err
7600 S16 rgSCHCmnRgrSCellUeCfg(RgSchCellCb *sCell,RgSchUeCb *ue,RgrUeSecCellCfg *sCellInfoCfg,RgSchErrInfo *err)
7605 RgSchCmnAllocRecord *allRcd;
7606 RgSchDlRbAlloc *allocInfo;
7607 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
7609 RgSchCmnUlUe *ueUlPcell;
7610 RgSchCmnUe *pCellUeSchCmn;
7611 RgSchCmnUe *ueSchCmn;
7613 RgSchCmnDlUe *pCellUeDl;
7615 Inst inst = ue->cell->instIdx;
/* Map the SCell id onto the UE's per-cell index table */
7617 uint32_t idx = (uint8_t)((sCell->cellId - rgSchCb[sCell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
/* PCell context is used as the template for the new SCell context */
7619 pCellUeSchCmn = RG_SCH_CMN_GET_UE(ue,ue->cell);
7620 pCellUeDl = &pCellUeSchCmn->dl;
7622 /* 1. Allocate Common sched control block */
7623 if((rgSCHUtlAllocSBuf(sCell->instIdx,
7624 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
7626 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Memory allocation FAILED\n"));
7627 err->errCause = RGSCHERR_SCH_CFG;
7630 ueSchCmn = RG_SCH_CMN_GET_UE(ue,sCell);
7632 /*2. Perform UEs downlink configuration */
7633 ueDl = &ueSchCmn->dl;
/* Inherit MIMO settings from the PCell context */
7636 ueDl->mimoInfo = pCellUeDl->mimoInfo;
/* TM4/TM6 need PMI feedback; force transmit diversity until PMI is known */
7638 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
7639 (ue->mimoInfo.txMode == RGR_UE_TM_6))
7641 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_NO_PMI);
7643 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7645 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_RI_1);
7647 RGSCH_ARRAY_BOUND_CHECK(sCell->instIdx, rgUeCatTbl, pCellUeSchCmn->cmn.ueCat);
7648 ueDl->maxTbBits = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlTbBits;
7651 ri = RGSCH_MIN(ri, sCell->numTxAntPorts);
/* Cat-6/7 UEs with 4 layers get the larger TB-size table entry */
7652 if(((CM_LTE_UE_CAT_6 == pCellUeSchCmn->cmn.ueCat )
7653 ||(CM_LTE_UE_CAT_7 == pCellUeSchCmn->cmn.ueCat))
7656 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[1];
7660 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[0];
7663 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer size split across HARQ processes (TDD vs FDD divisor) */
7665 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
7666 rgSchTddDlNumHarqProcTbl[sCell->ulDlCfgIdx]);
7668 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
7669 RGSCH_NUM_DL_HQ_PROC);
7672 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, ue->isEmtcUe);
7674 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, FALSE);
7678 /* ambrCfgd config moved to ueCb.dl, as it's not needed for per cell wise*/
7680 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, sCell);
7681 allocInfo->rnti = ue->ueId;
7683 /* Initializing the lastCfi value to current cfi value */
7684 ueDl->lastCfi = cellSchd->dl.currCfi;
7686 if ((cellSchd->apisDl->rgSCHRgrSCellDlUeCfg(sCell, ue, err)) != ROK)
7688 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Spec Sched DL UE CFG FAILED\n"));
7692 /* TODO: enhance for DLFS RB Allocation for SCELLs in future dev */
7694 /* DLFS UE Config */
7695 if (cellSchd->dl.isDlFreqSel)
7697 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeCfg(sCell, ue, sCellInfoCfg, err)) != ROK)
7699 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS UE config FAILED\n"));
7704 /* TODO: Do UL SCELL CFG during UL CA dev */
7706 ueUl = RG_SCH_CMN_GET_UL_UE(ue, sCell);
7708 /* TODO_ULCA: SRS for SCELL needs to be handled in the below function call */
7709 rgSCHCmnUpdUeUlCqiInfo(sCell, ue, ueUl, ueSchCmn, cellSchd,
7710 sCell->isCpUlExtend);
/* Initialize UL HARQ entity on the SCell */
7712 ret = rgSCHUhmHqEntInit(sCell, ue);
7715 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL UHM HARQ Ent Init "
7716 "Failed for CRNTI:%d", ue->ueId);
/* Mirror the PCell's max HARQ retx setting onto the SCell UL HARQ entity */
7720 ueUlPcell = RG_SCH_CMN_GET_UL_UE(ue, ue->cell);
7721 /* Initialize uplink HARQ related information for UE */
7722 ueUl->hqEnt.maxHqRetx = ueUlPcell->hqEnt.maxHqRetx;
7723 cmLListInit(&ueUl->hqEnt.free);
7724 cmLListInit(&ueUl->hqEnt.inUse);
/* Put every UL HARQ process into the free list */
7725 for(i=0; i < ueUl->hqEnt.numHqPrcs; i++)
7727 ueUl->hqEnt.hqProcCb[i].hqEnt = (void*)(&ueUl->hqEnt);
7728 ueUl->hqEnt.hqProcCb[i].procId = i;
7729 ueUl->hqEnt.hqProcCb[i].ulSfIdx = RGSCH_INVALID_INFO;
7730 ueUl->hqEnt.hqProcCb[i].alloc = NULLP;
7732 /* ccpu00139513- Initializing SPS flags*/
7733 ueUl->hqEnt.hqProcCb[i].isSpsActvnHqP = FALSE;
7734 ueUl->hqEnt.hqProcCb[i].isSpsOccnHqP = FALSE;
7736 cmLListAdd2Tail(&ueUl->hqEnt.free, &ueUl->hqEnt.hqProcCb[i].lnk);
7737 ueUl->hqEnt.hqProcCb[i].lnk.node = (PTR)&ueUl->hqEnt.hqProcCb[i];
7740 /* Allocate UL BSR allocation tracking List */
7741 cmLListInit(&ueUl->ulAllocLst);
/* Pre-allocate the fixed pool of UL allocation tracking records */
7743 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
7745 if((rgSCHUtlAllocSBuf(sCell->instIdx,
7746 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
7748 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL Memory allocation FAILED"
7749 "for CRNTI:%d",ue->ueId);
7750 err->errCause = RGSCHERR_SCH_CFG;
7753 allRcd->allocTime = sCell->crntTime;
7754 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
7755 allRcd->lnk.node = (PTR)allRcd;
7758 /* After initialising UL part, do power related init */
7759 ret = rgSCHPwrUeSCellCfg(sCell, ue, sCellInfoCfg);
7762 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Could not do "
7763 "power config for UE CRNTI:%d",ue->ueId);
/* eMTC UEs go through the eMTC-specific UL scheduler API */
7768 if(TRUE == ue->isEmtcUe)
7770 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
7772 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
7773 "for CRNTI:%d",ue->ueId);
7780 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
7782 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
7783 "for CRNTI:%d",ue->ueId);
7788 ue->ul.isUlCaEnabled = TRUE;
7792 } /* rgSCHCmnRgrSCellUeCfg */
7796 * @brief UE initialisation for scheduler.
7800 * Function : rgSCHCmnRgrSCellUeDel
7802 * This functions Delete UE specific scheduler
7803 * information for SCELL
7805 * @param[in] RgSchCellCb *cell
7806 * @param[in] RgSchUeCb *ue
7811 S16 rgSCHCmnRgrSCellUeDel(RgSchUeCellInfo *sCellInfo,RgSchUeCb *ue)
7813 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
7814 Inst inst = ue->cell->instIdx;
/* Specific DL scheduler releases its SCell UE context first */
7817 cellSchd->apisDl->rgSCHRgrSCellDlUeDel(sCellInfo, ue);
/* Release all outstanding UL allocations on this SCell */
7820 rgSCHCmnUlUeDelAllocs(sCellInfo->cell, ue);
7823 if(TRUE == ue->isEmtcUe)
7825 cellSchd->apisEmtcUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
7830 cellSchd->apisUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
7833 /* DLFS UE Config */
7834 if (cellSchd->dl.isDlFreqSel)
7836 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeDel(sCellInfo->cell, ue)) != ROK)
7838 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS Scell del FAILED\n"));
/* Finally free the common-scheduler UE control block for this SCell */
7843 rgSCHUtlFreeSBuf(sCellInfo->cell->instIdx,
7844 (Data**)(&(sCellInfo->sch)), (sizeof(RgSchCmnUe)));
7848 } /* rgSCHCmnRgrSCellUeDel */
7854 * @brief Handles 5gtf configuration for a UE
7858 * Function : rgSCHCmn5gtfUeCfg
7864 * @param[in] RgSchCellCb *cell
7865 * @param[in] RgSchUeCb *ue
7866 * @param[in] RgrUeCfg *cfg
7871 S16 rgSCHCmn5gtfUeCfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeCfg *cfg)
7874 RgSchUeGrp *ue5gtfGrp;
/* Copy the 5GTF parameters from the config message into the UE context */
7875 ue->ue5gtfCb.grpId = cfg->ue5gtfCfg.grpId;
7876 ue->ue5gtfCb.BeamId = cfg->ue5gtfCfg.BeamId;
7877 ue->ue5gtfCb.numCC = cfg->ue5gtfCfg.numCC;
7878 ue->ue5gtfCb.mcs = cfg->ue5gtfCfg.mcs;
7879 ue->ue5gtfCb.maxPrb = cfg->ue5gtfCfg.maxPrb;
/* Hard-coded CQI/RI reporting period of 100 */
7881 ue->ue5gtfCb.cqiRiPer = 100;
7882 /* 5gtf TODO: CQIs to start from (10,0)*/
7883 ue->ue5gtfCb.nxtCqiRiOccn.sfn = 10;
7884 ue->ue5gtfCb.nxtCqiRiOccn.slot = 0;
7885 ue->ue5gtfCb.rank = 1;
/* NOTE(review): debug printf to stdout — consider routing through RLOG instead */
7887 printf("\nschd cfg at mac,%u,%u,%u,%u,%u\n",ue->ue5gtfCb.grpId,ue->ue5gtfCb.BeamId,ue->ue5gtfCb.numCC,
7888 ue->ue5gtfCb.mcs,ue->ue5gtfCb.maxPrb);
7890 ue5gtfGrp = &(cell->cell5gtfCb.ueGrp5gConf[ue->ue5gtfCb.BeamId]);
7892 /* TODO_5GTF: Currently handling 1 group only. Need to update when multi group
7893 scheduling comes into picture */
/* Reject duplicate beam registration within the group */
7894 if(ue5gtfGrp->beamBitMask & (1 << ue->ue5gtfCb.BeamId))
7896 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
7897 "5GTF_ERROR Invalid beam id CRNTI:%d",cfg->crnti);
7900 ue5gtfGrp->beamBitMask |= (1 << ue->ue5gtfCb.BeamId);
7907 * @brief UE initialisation for scheduler.
7911 * Function : rgSCHCmnRgrUeCfg
7913 * This functions intialises UE specific scheduler
7915 * 0. Perform basic validations
7916 * 1. Allocate common sched UE cntrl blk
7917 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
7919 * 4. Perform DLFS cfg
7921 * @param[in] RgSchCellCb *cell
7922 * @param[in] RgSchUeCb *ue
7923 * @param[int] RgrUeCfg *ueCfg
7924 * @param[out] RgSchErrInfo *err
7929 S16 rgSCHCmnRgrUeCfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeCfg *ueCfg,RgSchErrInfo *err)
7931 RgSchDlRbAlloc *allocInfo;
7933 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7934 RgSchCmnUe *ueSchCmn;
7938 RgSchCmnAllocRecord *allRcd;
/* Map the cell id onto the UE's per-cell index table */
7940 uint32_t idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
7941 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7944 /* 1. Allocate Common sched control block */
7945 if((rgSCHUtlAllocSBuf(cell->instIdx,
7946 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
7948 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
7949 "Memory allocation FAILED for CRNTI:%d",ueCfg->crnti);
7950 err->errCause = RGSCHERR_SCH_CFG;
7953 ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
7954 ue->dl.ueDlCqiCfg = ueCfg->ueDlCqiCfg;
7955 pCellInfo->acqiCb.aCqiCfg = ueCfg->ueDlCqiCfg.aprdCqiCfg;
/* RGR category enum is 1-based; internal table index is 0-based */
7956 if(ueCfg->ueCatEnum > 0 )
7958 /*KWORK_FIX removed NULL chk for ueSchCmn*/
7959 ueSchCmn->cmn.ueCat = ueCfg->ueCatEnum - 1;
7963 ueSchCmn->cmn.ueCat = 0; /* Assuming enum values correctly set */
7965 cmInitTimers(&ueSchCmn->cmn.tmr, 1);
7967 /*2. Perform UEs downlink configuration */
7968 ueDl = &ueSchCmn->dl;
7969 /* RACHO : store the rapId assigned for HandOver UE.
7970 * Append UE to handover list of cmnCell */
7971 if (ueCfg->dedPreambleId.pres == PRSNT_NODEF)
7973 rgSCHCmnDelDedPreamble(cell, ueCfg->dedPreambleId.val);
7974 ueDl->rachInfo.hoRapId = ueCfg->dedPreambleId.val;
7975 cmLListAdd2Tail(&cellSchd->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
7976 ueDl->rachInfo.hoLnk.node = (PTR)ue;
7979 rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd);
/* TM4/TM6 need PMI feedback; force transmit diversity until PMI is known */
7981 if (ueCfg->txMode.pres == TRUE)
7983 if ((ueCfg->txMode.txModeEnum == RGR_UE_TM_4) ||
7984 (ueCfg->txMode.txModeEnum == RGR_UE_TM_6))
7986 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
7988 if (ueCfg->txMode.txModeEnum == RGR_UE_TM_3)
7990 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
7993 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
7994 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
7997 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
/* Cat-6/7 UEs with 4 layers get the larger TB-size table entry */
7998 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
7999 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8002 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8006 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
8009 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer size split across HARQ processes (TDD vs FDD divisor) */
8011 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8012 rgSchTddDlNumHarqProcTbl[cell->ulDlCfgIdx]);
8014 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8015 RGSCH_NUM_DL_HQ_PROC);
8018 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
8020 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
8022 /* if none of the DL and UL AMBR are configured then fail the configuration
8024 if((ueCfg->ueQosCfg.dlAmbr == 0) && (ueCfg->ueQosCfg.ueBr == 0))
8026 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"UL Ambr and DL Ambr are"
8027 "configured as 0 for CRNTI:%d",ueCfg->crnti);
8028 err->errCause = RGSCHERR_SCH_CFG;
/* Scale AMBR to the scheduler refresh window (per-refresh byte budget) */
8032 ue->dl.ambrCfgd = (ueCfg->ueQosCfg.dlAmbr * RG_SCH_CMN_REFRESH_TIME)/100;
8034 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
8035 allocInfo->rnti = ue->ueId;
8037 /* Initializing the lastCfi value to current cfi value */
8038 ueDl->lastCfi = cellSchd->dl.currCfi;
/* eMTC UEs go through the eMTC-specific DL scheduler API */
8040 if(cell->emtcEnable && ue->isEmtcUe)
8042 if ((cellSchd->apisEmtcDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8044 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8045 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8053 if ((cellSchd->apisDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8055 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8056 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8063 /* 3. Initialize ul part */
8064 ueUl = &ueSchCmn->ul;
8066 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
8067 cell->isCpUlExtend);
/* Per-TTI UL byte cap derived from the UE category's max UL bits */
8069 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8070 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8072 ue->ul.cfgdAmbr = (ueCfg->ueQosCfg.ueBr * RG_SCH_CMN_REFRESH_TIME)/100;
8073 ue->ul.effAmbr = ue->ul.cfgdAmbr;
8074 RGSCHCPYTIMEINFO(cell->crntTime, ue->ul.ulTransTime);
8076 /* Allocate UL BSR allocation tracking List */
8077 cmLListInit(&ueUl->ulAllocLst);
/* Pre-allocate the fixed pool of UL allocation tracking records */
8079 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8081 if((rgSCHUtlAllocSBuf(cell->instIdx,
8082 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8084 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation FAILED"
8085 "for CRNTI:%d",ueCfg->crnti);
8086 err->errCause = RGSCHERR_SCH_CFG;
8089 allRcd->allocTime = cell->crntTime;
8090 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8091 allRcd->lnk.node = (PTR)allRcd;
8093 /* Allocate common sch cntrl blocks for LCGs */
8094 for (cnt=0; cnt<RGSCH_MAX_LCG_PER_UE; cnt++)
8096 ret = rgSCHUtlAllocSBuf(cell->instIdx,
8097 (Data**)&(ue->ul.lcgArr[cnt].sch), (sizeof(RgSchCmnLcg)));
8100 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8101 "SCH struct alloc failed for CRNTI:%d",ueCfg->crnti);
8102 err->errCause = RGSCHERR_SCH_CFG;
8106 /* After initialising UL part, do power related init */
8107 ret = rgSCHPwrUeCfg(cell, ue, ueCfg);
8110 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
8111 "power config for UE CRNTI:%d",ueCfg->crnti);
8115 ret = rgSCHCmnSpsUeCfg(cell, ue, ueCfg, err);
8118 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
8119 "SPS config for CRNTI:%d",ueCfg->crnti);
8122 #endif /* LTEMAC_SPS */
/* eMTC UEs go through the eMTC-specific UL scheduler API */
8125 if(TRUE == ue->isEmtcUe)
8127 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
8129 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
8130 "for CRNTI:%d",ueCfg->crnti);
8137 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
8139 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
8140 "for CRNTI:%d",ueCfg->crnti);
8145 /* DLFS UE Config */
8146 if (cellSchd->dl.isDlFreqSel)
8148 if ((cellSchd->apisDlfs->rgSCHDlfsUeCfg(cell, ue, ueCfg, err)) != ROK)
8150 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "DLFS UE config FAILED"
8151 "for CRNTI:%d",ueCfg->crnti);
8156 /* Fix: syed align multiple UEs to refresh at same time */
8157 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
8158 /* Start UE Qos Refresh Timer */
8159 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
8161 rgSCHCmn5gtfUeCfg(cell, ue, ueCfg);
8165 } /* rgSCHCmnRgrUeCfg */
8168 * @brief UE TX mode reconfiguration handler.
8172 * Function : rgSCHCmnDlHdlTxModeRecfg
8174 * This functions updates UE specific scheduler
8175 * information upon UE reconfiguration.
8177 * @param[in] RgSchUeCb *ue
8178 * @param[in] RgrUeRecfg *ueRecfg
/* NOTE(review): two alternative signatures — presumably selected by #ifdef TFU_UPGRADE; confirm */
8182 static Void rgSCHCmnDlHdlTxModeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg,uint8_t numTxPorts)
8184 static Void rgSCHCmnDlHdlTxModeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg)
8187 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Nothing to do unless a TX-mode change is actually present */
8189 if (ueRecfg->txMode.pres != PRSNT_NODEF)
8193 /* ccpu00140894- Starting Timer for TxMode Transition Completion*/
8194 ue->txModeTransCmplt =FALSE;
8195 rgSCHTmrStartTmr (ue->cell, ue, RG_SCH_TMR_TXMODE_TRNSTN, RG_SCH_TXMODE_TRANS_TIMER);
/* Transition complete: lift the forced-TD mask and re-apply per-TM forcing */
8196 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_CMPLT)
8198 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell,
8199 RG_SCH_CMN_TD_TXMODE_RECFG);
8200 /* MS_WORKAROUND for ccpu00123186 MIMO Fix Start: need to set FORCE TD bitmap based on TX mode */
8201 ueDl->mimoInfo.ri = 1;
8202 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8203 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
8205 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8207 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
8209 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8211 /* MIMO Fix End: need to set FORCE TD bitmap based on TX mode */
/* Transition start: force TD for the duration of the reconfiguration */
8214 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_START)
8216 /* start afresh forceTD masking */
8217 RG_SCH_CMN_INIT_FORCE_TD(ue, cell, 0);
8218 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_TXMODE_RECFG);
8219 /* Intialize MIMO related parameters of UE */
/* Recompute the minimum rank from the reconfigured CBSR bitmap (TM3/TM4 only) */
8222 if(ueRecfg->txMode.pres)
8224 if((ueRecfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
8225 (ueRecfg->txMode.txModeEnum ==RGR_UE_TM_4))
8227 if(ueRecfg->ueCodeBookRstRecfg.pres)
8230 rgSCHCmnComputeRank(ueRecfg->txMode.txModeEnum,
8231 ueRecfg->ueCodeBookRstRecfg.pmiBitMap, numTxPorts)
8235 ueDl->mimoInfo.ri = 1;
8240 ueDl->mimoInfo.ri = 1;
8245 ueDl->mimoInfo.ri = 1;
8248 ueDl->mimoInfo.ri = 1;
8249 #endif /* TFU_UPGRADE */
8250 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8251 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
8253 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8255 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
8257 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8262 /***********************************************************
8264 * Func : rgSCHCmnUpdUeMimoInfo
8266 * Desc : Updates UL and DL Ue Information
8274 **********************************************************/
static Void rgSCHCmnUpdUeMimoInfo(RgrUeCfg *ueCfg,RgSchCmnDlUe *ueDl,RgSchCellCb *cell,RgSchCmnCell *cellSchd)
/* Derive the initial RI from the CBSR bitmap for TM3/TM4; default to 1 otherwise */
8278 if(ueCfg->txMode.pres)
8280 if((ueCfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
8281 (ueCfg->txMode.txModeEnum ==RGR_UE_TM_4))
8283 if(ueCfg->ueCodeBookRstCfg.pres)
8286 rgSCHCmnComputeRank(ueCfg->txMode.txModeEnum,
8287 ueCfg->ueCodeBookRstCfg.pmiBitMap, cell->numTxAntPorts)
8291 ueDl->mimoInfo.ri = 1;
8296 ueDl->mimoInfo.ri = 1;
8301 ueDl->mimoInfo.ri = 1;
8305 ueDl->mimoInfo.ri = 1;
8306 #endif /*TFU_UPGRADE */
/* Seed both codeword CQIs with the cell's CCCH CQI until reports arrive */
8307 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
8308 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
8312 /***********************************************************
8314 * Func : rgSCHCmnUpdUeUlCqiInfo
8316 * Desc : Updates UL and DL Ue Information
8324 **********************************************************/
static Void rgSCHCmnUpdUeUlCqiInfo(RgSchCellCb *cell,RgSchUeCb *ue,RgSchCmnUlUe *ueUl,RgSchCmnUe *ueSchCmn,RgSchCmnCell *cellSchd,Bool isEcp)
/* With SRS configured, seed the UL CQI per selected antenna (if antenna
 * selection is on) from the cell default */
8329 if(ue->srsCb.srsCfg.type == RGR_SCH_SRS_SETUP)
8331 if(ue->ul.ulTxAntSel.pres)
8333 ueUl->crntUlCqi[ue->srsCb.selectedAnt] = cellSchd->ul.dfltUlCqi;
8334 ueUl->validUlCqi = ueUl->crntUlCqi[ue->srsCb.selectedAnt];
8338 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
8339 ueUl->validUlCqi = ueUl->crntUlCqi[0];
8341 ue->validTxAnt = ue->srsCb.selectedAnt;
8345 ueUl->validUlCqi = cellSchd->ul.dfltUlCqi;
/* Seed UL link-adaptation iTbs from the CQI->TBS table (scaled by 100) */
8349 ueUl->ulLaCb.cqiBasediTbs = rgSchCmnUlCqiToTbsTbl[isEcp]
8350 [ueUl->validUlCqi] * 100;
8351 ueUl->ulLaCb.deltaiTbs = 0;
8355 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
8356 #endif /*TFU_UPGRADE */
8357 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
/* Cap the usable UL CQI when the UE category does not support UL 64QAM */
8358 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
8360 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
8364 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
8369 /***********************************************************
8371 * Func : rgSCHCmnUpdUeCatCfg
8373 * Desc : Updates UL and DL Ue Information
8381 **********************************************************/
static Void rgSCHCmnUpdUeCatCfg(RgSchUeCb *ue,RgSchCellCb *cell)
8384 RgSchDlHqEnt *hqE = NULLP;
8385 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
8386 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
8387 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
8388 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Recompute DL limits from the (re)configured UE category */
8391 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
8393 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
8396 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
/* Cat-6/7 UEs with 4 layers get the larger TB-size table entry */
8397 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
8398 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8399 && (RG_SCH_MAX_TX_LYRS_4 == ri))
8401 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8405 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
/* Soft-buffer size split across the number of DL HARQ processes */
8408 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
/* Cap the usable UL CQI when the UE category does not support UL 64QAM */
8410 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
8412 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
8416 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
/* Per-TTI UL byte cap derived from the category's max UL bits */
8418 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8419 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8424 * @brief UE reconfiguration for scheduler.
8428 * Function : rgSChCmnRgrUeRecfg
8430 * This functions updates UE specific scheduler
8431 * information upon UE reconfiguration.
8433 * @param[in] RgSchCellCb *cell
8434 * @param[in] RgSchUeCb *ue
8435 * @param[int] RgrUeRecfg *ueRecfg
8436 * @param[out] RgSchErrInfo *err
8441 S16 rgSCHCmnRgrUeRecfg(RgSchCellCb *cell,RgSchUeCb *ue,RgrUeRecfg *ueRecfg,RgSchErrInfo *err)
8443 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
8446 /* Basic validations */
8447 if (ueRecfg->ueRecfgTypes & RGR_UE_TXMODE_RECFG)
/* NOTE(review): two call forms — presumably selected by #ifdef TFU_UPGRADE; confirm */
8450 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, cell->numTxAntPorts);
8452 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg);
8453 #endif /* TFU_UPGRADE */
8455 if(ueRecfg->ueRecfgTypes & RGR_UE_CSG_PARAM_RECFG)
8457 ue->csgMmbrSta = ueRecfg->csgMmbrSta;
8459 /* Changes for UE Category reconfiguration feature */
8460 if(ueRecfg->ueRecfgTypes & RGR_UE_UECAT_RECFG)
8462 rgSCHCmnUpdUeCatCfg(ue, cell);
8464 if (ueRecfg->ueRecfgTypes & RGR_UE_APRD_DLCQI_RECFG)
8466 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
8467 pCellInfo->acqiCb.aCqiCfg = ueRecfg->aprdDlCqiRecfg;
/* Only periodic CQI modes 1-0 and 2-0 are supported; reject anything else */
8470 if (ueRecfg->ueRecfgTypes & RGR_UE_PRD_DLCQI_RECFG)
8472 if ((ueRecfg->prdDlCqiRecfg.pres == TRUE)
8473 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD10)
8474 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD20))
8476 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unsupported periodic CQI "
8477 "reporting mode %d for old CRNIT:%d",
8478 (int)ueRecfg->prdDlCqiRecfg.prdModeEnum,ueRecfg->oldCrnti);
8479 err->errCause = RGSCHERR_SCH_CFG;
8482 ue->dl.ueDlCqiCfg.prdCqiCfg = ueRecfg->prdDlCqiRecfg;
8486 if (ueRecfg->ueRecfgTypes & RGR_UE_ULPWR_RECFG)
8488 if (rgSCHPwrUeRecfg(cell, ue, ueRecfg) != ROK)
8490 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8491 "Power Reconfiguration Failed for OLD CRNTI:%d",ueRecfg->oldCrnti);
8496 if (ueRecfg->ueRecfgTypes & RGR_UE_QOS_RECFG)
8498 /* Uplink Sched related Initialization */
8499 if ((ueRecfg->ueQosRecfg.dlAmbr == 0) && (ueRecfg->ueQosRecfg.ueBr == 0))
8501 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Ul Ambr and DL Ambr "
8502 "configured as 0 for OLD CRNTI:%d",ueRecfg->oldCrnti);
8503 err->errCause = RGSCHERR_SCH_CFG;
/* Scale AMBR values to the scheduler refresh window */
8506 ue->ul.cfgdAmbr = (ueRecfg->ueQosRecfg.ueBr * \
8507 RG_SCH_CMN_REFRESH_TIME)/100;
8508 /* Downlink Sched related Initialization */
8509 ue->dl.ambrCfgd = (ueRecfg->ueQosRecfg.dlAmbr * \
8510 RG_SCH_CMN_REFRESH_TIME)/100;
8511 /* Fix: syed Update the effAmbr and effUeBR fields w.r.t the
8512 * new QOS configuration */
8513 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
8514 /* Fix: syed align multiple UEs to refresh at same time */
8515 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
8516 rgSCHCmnApplyUeRefresh(cell, ue);
8517 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
/* eMTC UEs are reconfigured through the eMTC-specific scheduler APIs */
8520 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
8522 if ((cellSchCmn->apisEmtcUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8524 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8525 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8528 if ((cellSchCmn->apisEmtcDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8530 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8531 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8538 if ((cellSchCmn->apisUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8540 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8541 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8544 if ((cellSchCmn->apisDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
8546 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8547 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
8551 /* DLFS UE Config */
8552 if (cellSchCmn->dl.isDlFreqSel)
8554 if ((cellSchCmn->apisDlfs->rgSCHDlfsUeRecfg(cell, ue, \
8555 ueRecfg, err)) != ROK)
8557 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8558 "DLFS UE re-config FAILED for CRNTI:%d",ue->ueId);
8564 /* Invoke re-configuration on SPS module */
8565 if (rgSCHCmnSpsUeRecfg(cell, ue, ueRecfg, err) != ROK)
8567 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8568 "DL SPS ReCFG FAILED for UE CRNTI:%d", ue->ueId);
8574 } /* rgSCHCmnRgrUeRecfg*/
8576 /***********************************************************
8578 * Func : rgSCHCmnUlUeDelAllocs
8580 * Desc : Deletion of all UE allocations.
8588 **********************************************************/
static Void rgSCHCmnUlUeDelAllocs(RgSchCellCb *cell,RgSchUeCb *ue)
8591 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
8592 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
8595 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Release the allocation attached to each UL HARQ process */
8598 for (i = 0; i < ueUl->hqEnt.numHqPrcs; ++i)
8600 RgSchUlHqProcCb *proc = rgSCHUhmGetUlHqProc(cell, ue, i);
8603 /* proc can't be NULL here */
8611 /* Added Insure Fixes Of reading Dangling memory.NULLed crntAlloc */
/* Clear the SPS module's reference if it points at this allocation */
8613 if(proc->alloc == ulSpsUe->ulSpsSchdInfo.crntAlloc)
8615 ulSpsUe->ulSpsSchdInfo.crntAlloc = NULLP;
8616 ulSpsUe->ulSpsSchdInfo.crntAllocSf = NULLP;
8620 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
8621 proc->alloc,ue->isEmtcUe);
8623 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
8626 /* PHY probably needn't be intimated since
8627 * whatever intimation it needs happens at the last minute
8630 /* Fix: syed Adaptive Msg3 Retx crash. Remove the harqProc
8631 * from adaptive retx List. */
8632 if (proc->reTxLnk.node)
8635 //TODO_SID: Need to take care
8636 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
8637 proc->reTxLnk.node = (PTR)NULLP;
8645 /***********************************************************
8647 * Func : rgSCHCmnDelUeFrmRefreshQ
8649 * Desc : Adds a UE to refresh queue, so that the UE is
8650 * periodically triggered to refresh it's GBR and
8659 **********************************************************/
static Void rgSCHCmnDelUeFrmRefreshQ(RgSchCellCb *cell,RgSchUeCb *ue)
8662 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
8664 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
8667 #ifdef RGL_SPECIFIC_CHANGES
/* Give back the UE's slot in the refresh-offset distribution */
8668 if(ue->refreshOffset < RGSCH_MAX_REFRESH_GRPSZ)
8670 if(cell->refreshUeCnt[ue->refreshOffset])
8672 cell->refreshUeCnt[ue->refreshOffset]--;
/* Build the timer-queue deinit arguments and remove the refresh event */
8678 memset(&arg, 0, sizeof(arg));
8679 arg.tqCp = &sched->tmrTqCp;
8680 arg.tq = sched->tmrTq;
8681 arg.timers = &ueSchd->tmr;
8685 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
8691 /***********************************************************
8693 * Func : rgSCHCmnUeCcchSduDel
8695 * Desc : Clear CCCH SDU scheduling context.
8703 **********************************************************/
static Void rgSCHCmnUeCcchSduDel(RgSchCellCb *cell,RgSchUeCb *ueCb)
8706 RgSchDlHqEnt *hqE = NULLP;
8707 RgSchDlHqProcCb *ccchSduHqP = NULLP;
8708 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
8711 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
8716 ccchSduHqP = hqE->ccchSduProc;
/* Case 1: CCCH SDU is still waiting in the cell's Tx list — just unlink it */
8717 if(ueCb->ccchSduLnk.node != NULLP)
8719 /* Remove the ccchSduProc if it is in the Tx list */
8720 cmLListDelFrm(&(cell->ccchSduUeLst), &(ueCb->ccchSduLnk));
8721 ueCb->ccchSduLnk.node = NULLP;
/* Case 2: a HARQ process was already assigned — release PDCCH and the TB */
8723 else if(ccchSduHqP != NULLP)
8725 /* Fix for crash due to stale pdcch. Release ccch pdcch*/
8726 if(ccchSduHqP->pdcch)
8728 cmLListDelFrm(&ccchSduHqP->subFrm->pdcchInfo.pdcchs,
8729 &ccchSduHqP->pdcch->lnk);
8730 cmLListAdd2Tail(&cell->pdcchLst, &ccchSduHqP->pdcch->lnk);
8731 ccchSduHqP->pdcch = NULLP;
8733 if(ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node != NULLP)
8735 /* Remove the ccchSduProc if it is in the retx list */
8736 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
8737 &ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk);
8738 /* ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node = NULLP; */
8739 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
/* Otherwise, if it is queued for transmission in a subframe, pull it out first */
8741 else if ((ccchSduHqP->subFrm != NULLP) &&
8742 (ccchSduHqP->hqPSfLnk.node != NULLP))
8744 rgSCHUtlDlHqPTbRmvFrmTx(ccchSduHqP->subFrm,
8745 ccchSduHqP, 0, FALSE);
8746 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
8756 * @brief UE deletion for scheduler.
8760 * Function : rgSCHCmnUeDel
8762 * This function deletes all scheduler information
8763 * pertaining to a UE.
8765 * @param[in] RgSchCellCb *cell
8766 * @param[in] RgSchUeCb *ue
8769 Void rgSCHCmnUeDel(RgSchCellCb *cell,RgSchUeCb *ue)
8771 RgSchDlHqEnt *hqE = NULLP;
8772 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
8774 RgSchCmnAllocRecord *allRcd;
8776 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
8779 if (RG_SCH_CMN_GET_UE(ue,cell) == NULLP)
8781 /* Common scheduler config has not happened yet */
8784 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
8787 /* UE Free can be triggered before MSG4 done when dlHqE is not updated */
8791 rgSCHEmtcCmnUeCcchSduDel(cell, ue);
8796 rgSCHCmnUeCcchSduDel(cell, ue);
8799 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
8801 rgSCHCmnUlUeDelAllocs(cell, ue);
8803 rgSCHCmnDelRachInfo(cell, ue);
8806 if(TRUE == ue->isEmtcUe)
8808 cellSchCmn->apisEmtcUl->rgSCHFreeUlUe(cell, ue);
8813 cellSchCmn->apisUl->rgSCHFreeUlUe(cell, ue);
8818 for(idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
8820 if(ue->cellInfo[idx] != NULLP)
8822 rgSCHSCellDelUeSCell(cell,ue,idx);
8829 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
8831 cellSchCmn->apisEmtcDl->rgSCHFreeDlUe(cell, ue);
8836 cellSchCmn->apisDl->rgSCHFreeDlUe(cell, ue);
8838 rgSCHPwrUeDel(cell, ue);
8841 rgSCHCmnSpsUeDel(cell, ue);
8842 #endif /* LTEMAC_SPS*/
8845 rgSchCmnDlSfHqDel(ue, cell);
8847 /* DLFS UE delete */
8848 if (cellSchCmn->dl.isDlFreqSel)
8850 cellSchCmn->apisDlfs->rgSCHDlfsUeDel(cell, ue);
8852 node = ueUl->ulAllocLst.first;
8854 /* ccpu00117052 - MOD - Passing double pointer in all the places of
8855 rgSCHUtlFreeSBuf function call for proper NULLP assignment*/
8858 allRcd = (RgSchCmnAllocRecord *)node->node;
8860 cmLListDelFrm(&ueUl->ulAllocLst, &allRcd->lnk);
8861 rgSCHUtlFreeSBuf(cell->instIdx,
8862 (Data**)(&allRcd), (sizeof(RgSchCmnAllocRecord)));
8865 for(cnt = 0; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
8867 if (ue->ul.lcgArr[cnt].sch != NULLP)
8869 rgSCHUtlFreeSBuf(cell->instIdx,
8870 (Data**)(&(ue->ul.lcgArr[cnt].sch)), (sizeof(RgSchCmnLcg)));
8874 /* Fix : syed Moved hqEnt deinit to rgSCHCmnDlDeInitHqEnt */
8875 idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId) & (CM_LTE_MAX_CELLS - 1));
8876 rgSCHUtlFreeSBuf(cell->instIdx,
8877 (Data**)(&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch))), (sizeof(RgSchCmnUe)));
8879 } /* rgSCHCmnUeDel */
8883 * @brief This function handles the common code rate configurations
8884 * done as part of RgrCellCfg/RgrCellRecfg.
8888 * Function: rgSCHCmnDlCnsdrCmnRt
8889 * Purpose: This function handles the common code rate configurations
8890 * done as part of RgrCellCfg/RgrCellRecfg.
8892 * Invoked by: Scheduler
8894 * @param[in] RgSchCellCb *cell
8895 * @param[in] RgrDlCmnCodeRateCfg *dlCmnCodeRate
8899 static S16 rgSCHCmnDlCnsdrCmnRt(RgSchCellCb *cell,RgrDlCmnCodeRateCfg *dlCmnCodeRate)
8901 RgSchCmnCell *cellDl = RG_SCH_CMN_GET_CELL(cell);
8903 uint32_t bitsPer2Rb;
8904 uint32_t bitsPer3Rb;
8909 /* code rate is bits per 1024 phy bits, since modl'n scheme is 2. it is
8910 * bits per 1024/2 REs */
8911 if (dlCmnCodeRate->bcchPchRaCodeRate != 0)
8913 bitsPerRb = ((dlCmnCodeRate->bcchPchRaCodeRate * 2) *
8914 cellDl->dl.noResPerRb[3])/1024;
8918 bitsPerRb = ((RG_SCH_CMN_DEF_BCCHPCCH_CODERATE * 2) *
8919 cellDl->dl.noResPerRb[3])/1024;
8921 /* Store bitsPerRb in cellDl->dl to use later to determine
8922 * Number of RBs for UEs with SI-RNTI, P-RNTI and RA-RNTI */
8923 cellDl->dl.bitsPerRb = bitsPerRb;
8924 /* ccpu00115595 end*/
8925 /* calculate the ITbs for 2 RBs. Initialize ITbs to MAX value */
8928 bitsPer2Rb = bitsPerRb * rbNum;
8929 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer2Rb))
8932 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs2Rbs = 0) :
8933 (cellDl->dl.cmnChITbs.iTbs2Rbs = i-1);
8935 /* calculate the ITbs for 3 RBs. Initialize ITbs to MAX value */
8938 bitsPer3Rb = bitsPerRb * rbNum;
8939 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer3Rb))
8942 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs3Rbs = 0) :
8943 (cellDl->dl.cmnChITbs.iTbs3Rbs = i-1);
8946 pdcchBits = 1 + /* Flag for format0/format1a differentiation */
8947 1 + /* Localized/distributed VRB assignment flag */
8950 3 + /* Harq process Id */
8952 4 + /* Harq process Id */
8953 2 + /* UL Index or DAI */
8955 1 + /* New Data Indicator */
8958 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
8959 (cell->bwCfg.dlTotalBw + 1))/2);
8960 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
8961 Since VRB is local */
8962 /* For TDD consider DAI */
8964 /* Convert the pdcchBits to actual pdcchBits required for transmission */
8965 if (dlCmnCodeRate->pdcchCodeRate != 0)
8967 pdcchBits = (pdcchBits * 1024)/dlCmnCodeRate->pdcchCodeRate;
8968 if (pdcchBits <= 288) /* 288 : Num of pdcch bits for aggrLvl=4 */
8970 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
8972 else /* 576 : Num of pdcch bits for aggrLvl=8 */
8974 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL8;
8979 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
8981 if (dlCmnCodeRate->ccchCqi == 0)
8987 cellDl->dl.ccchCqi = dlCmnCodeRate->ccchCqi;
8994 * @brief This function handles the configuration of cell for the first
8995 * time by the scheduler.
8999 * Function: rgSCHCmnDlRgrCellCfg
9000 * Purpose: Configuration received is stored into the data structures
9001 * Also, update the scheduler with the number of frames of
9002 * RACH preamble transmission.
9004 * Invoked by: BO and Scheduler
9006 * @param[in] RgSchCellCb* cell
9007 * @param[in] RgrCellCfg* cfg
9011 static S16 rgSCHCmnDlRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cfg,RgSchErrInfo *err)
9013 RgSchCmnCell *cellSch;
9016 uint8_t numPdcchSym;
9017 uint8_t noSymPerSlot;
9018 uint8_t maxDlSubfrms = cell->numDlSubfrms;
9019 uint8_t splSubfrmIdx = cfg->spclSfCfgIdx;
9020 uint8_t swPtCnt = 0;
9022 RgSchTddSubfrmInfo subfrmInfo = rgSchTddMaxUlSubfrmTbl[cell->ulDlCfgIdx];
9035 cellSch = RG_SCH_CMN_GET_CELL(cell);
9036 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->\
9037 rachCfg.preambleFormat];
9038 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
9039 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
9041 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
9042 3 TTI (MAX L1+L2 processing delay at the UE) */
9043 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
9044 rgSchCmnHarqRtt[cell->ulDlCfgIdx] + 3;
9045 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9046 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9047 if (cfg->maxUePerDlSf == 0)
9049 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9051 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
9057 if (cell->bwCfg.dlTotalBw <= 10)
9067 /* DwPTS Scheduling Changes Start */
9068 cellSch->dl.splSfCfg = splSubfrmIdx;
9070 if (cfg->isCpDlExtend == TRUE)
9072 if((0 == splSubfrmIdx) || (4 == splSubfrmIdx) ||
9073 (7 == splSubfrmIdx) || (8 == splSubfrmIdx)
9076 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
9080 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
9085 /* Refer to 36.213 Section 7.1.7 */
9086 if((0 == splSubfrmIdx) || (5 == splSubfrmIdx))
9088 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
9092 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
9095 /* DwPTS Scheduling Changes End */
9097 splSfCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
9098 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
9100 for (sfCount = 0; sfCount < maxDlSubfrms; sfCount++)
9102 sf = cell->subFrms[sfCount];
9103 /* Sfcount matches the first special subframe occurs at Index 0
9104 * or subsequent special subframes */
9105 if(subfrmInfo.switchPoints == 1)
9107 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
9108 RG_SCH_CMN_10_MS_PRD, &subfrmInfo);
9112 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
9113 RG_SCH_CMN_5_MS_PRD, &subfrmInfo);
9115 if(isSplfrm == TRUE)
9118 /* DwPTS Scheduling Changes Start */
9119 if (cell->splSubfrmCfg.isDlDataAllowed == TRUE)
9121 sf->sfType = RG_SCH_SPL_SF_DATA;
9125 sf->sfType = RG_SCH_SPL_SF_NO_DATA;
9127 /* DwPTS Scheduling Changes End */
9131 /* DwPTS Scheduling Changes Start */
9134 sf->sfType = RG_SCH_DL_SF;
9138 sf->sfType = RG_SCH_DL_SF_0;
9140 /* DwPTS Scheduling Changes End */
9143 /* Calculate the number of CCEs per subframe in the cell */
9144 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][sf->sfNum];
9145 if(cell->dynCfiCb.isDynCfiEnb == TRUE)
9147 /* In case if Dynamic CFI feature is enabled, default CFI
9148 * value 1 is used */
9149 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][1];
9153 if (sf->sfType == RG_SCH_SPL_SF_DATA)
9155 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
9159 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi)];
9164 /* Intialize the RACH response scheduling related infromation */
9165 if(rgSCHCmnDlRachInfoInit(cell) != ROK)
9170 /* Allocate PRACH preamble list */
9171 rgSCHCmnDlCreateRachPrmLst(cell);
9173 /* Initialize PHICH offset information */
9174 rgSCHCmnDlPhichOffsetInit(cell);
9176 /* Update the size of HARQ ACK/NACK feedback table */
9177 /* The array size is increased by 2 to have enough free indices, where other
9178 * indices are busy waiting for HARQ feedback */
9179 cell->ackNackFdbkArrSize = rgSchTddANFdbkMapTbl[cell->ulDlCfgIdx] + 2;
9181 /* Initialize expected HARQ ACK/NACK feedback time */
9182 rgSCHCmnDlANFdbkInit(cell);
9184 /* Initialize UL association set index */
9185 if(cell->ulDlCfgIdx != 0)
9187 rgSCHCmnDlKdashUlAscInit(cell);
9190 if (cfg->isCpDlExtend == TRUE)
9192 cp = RG_SCH_CMN_EXT_CP;
9194 cell->splSubfrmCfg.dwPts =
9195 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlDwPts;
9197 if ( cell->splSubfrmCfg.dwPts == 0 )
9199 cell->isDwPtsCnted = FALSE;
9203 cell->isDwPtsCnted = TRUE;
9206 if(cfg->isCpUlExtend == TRUE)
9208 cell->splSubfrmCfg.upPts =
9209 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlExtUpPts;
9213 cell->splSubfrmCfg.upPts =
9214 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlNorUpPts;
9219 cp = RG_SCH_CMN_NOR_CP;
9221 cell->splSubfrmCfg.dwPts =
9222 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlDwPts;
9223 cell->isDwPtsCnted = TRUE;
9225 if(cfg->isCpUlExtend == TRUE)
9227 cell->splSubfrmCfg.upPts =
9228 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlExtUpPts;
9232 cell->splSubfrmCfg.upPts =
9233 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlNorUpPts;
9237 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
9238 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++,cfiIdx++)
9240 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
9241 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
9242 [cell->numTxAntPorts]][cfiIdx];
9243 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
9244 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
9245 [cell->numTxAntPorts]][cfiIdx];
9248 /* Initializing the values of CFI parameters */
9249 if(cell->dynCfiCb.isDynCfiEnb)
9251 /* If DCFI is enabled, current CFI value will start from 1 */
9252 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
9256 /* If DCFI is disabled, current CFI value is set as default max allowed CFI value */
9257 cellSch->dl.currCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
9258 cellSch->dl.newCfi = cellSch->dl.currCfi;
9261 /* Include CRS REs while calculating Efficiency
9262 * The number of Resource Elements occupied by CRS depends on Number of
9263 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
9264 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
9265 * details of the same. Please note that PDCCH overlap symbols would not
9266 * considered in CRS REs deduction */
9267 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
9269 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
9270 - numPdcchSym) *RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
9273 /* DwPTS Scheduling Changes Start */
9274 antPortIdx = (cell->numTxAntPorts == 1)? 0:
9275 ((cell->numTxAntPorts == 2)? 1: 2);
9277 if (cp == RG_SCH_CMN_NOR_CP)
9279 splSfIdx = (splSubfrmIdx == 4)? 1: 0;
9283 splSfIdx = (splSubfrmIdx == 3)? 1: 0;
9286 numCrs = rgSchCmnDwptsCrs[splSfIdx][antPortIdx];
9288 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI-1; cfi++)
9290 /* If CFI is 2 and Ant Port is 4, don't consider the sym 1 CRS REs */
9291 if (antPortIdx == 2 && cfi == 2)
9295 cellSch->dl.numReDwPts[cfi] = ((cell->splSubfrmCfg.dwPts - cfi)*
9296 RB_SCH_CMN_NUM_SCS_PER_RB) - numCrs;
9298 /* DwPTS Scheduling Changes End */
9300 if (cfg->maxDlBwPerUe == 0)
9302 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
9306 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
9308 if (cfg->maxDlRetxBw == 0)
9310 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
9314 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
9316 /* Fix: MUE_PERTTI_DL*/
9317 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9318 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9319 if (cfg->maxUePerDlSf == 0)
9321 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9323 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
9324 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
9325 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
9327 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
9328 "Invalid configuration !: "
9329 "maxCcchPerDlSf %u > maxUePerDlSf %u",
9330 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
9334 else if (!cfg->maxCcchPerDlSf)
9336 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
9337 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
9338 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
9339 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
9340 * FLE crash in PHY as PHY has limit of 16 max*/
9341 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
9345 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
9347 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
9352 /*ccpu00118273 - ADD - start */
9353 cmLListInit(&cellSch->dl.msg4RetxLst);
9355 cmLListInit(&cellSch->dl.ccchSduRetxLst);
9358 #ifdef RG_PHASE2_SCHED
9359 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
9361 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
9363 if (cfg->dlfsCfg.isDlFreqSel)
9365 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
9371 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
9374 /* Power related configuration */
9375 ret = rgSCHPwrCellCfg(cell, cfg);
9381 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
9382 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
9383 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
9384 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
9385 cellSch->dl.msg4pAVal = cfg->msg4pAVal;
9390 * @brief This function handles the configuration of cell for the first
9391 * time by the scheduler.
9395 * Function: rgSCHCmnDlRgrCellCfg
9396 * Purpose: Configuration received is stored into the data structures
9397 * Also, update the scheduler with the number of frames of
9398 * RACH preamble transmission.
9400 * Invoked by: BO and Scheduler
9402 * @param[in] RgSchCellCb* cell
9403 * @param[in] RgrCellCfg* cfg
9404 * @param[in] RgSchErrInfo* err
9408 static S16 rgSCHCmnDlRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cfg,RgSchErrInfo *err)
9411 RgSchCmnCell *cellSch;
9413 uint8_t numPdcchSym;
9414 uint8_t noSymPerSlot;
9419 cellSch = RG_SCH_CMN_GET_CELL(cell);
9421 /* Initialize the parameters with the ones received in the */
9422 /* configuration. */
9424 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA
9425 * sub-frames from preamble format */
9426 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->rachCfg.preambleFormat];
9428 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
9429 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
9431 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
9432 3 TTI (MAX L1+L2 processing delay at the UE) */
9433 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
9434 rgSchCmnHarqRtt[7] + 3;
9436 if (cell->bwCfg.dlTotalBw <= 10)
9447 if (cell->isCpDlExtend == TRUE)
9449 cp = RG_SCH_CMN_EXT_CP;
9454 cp = RG_SCH_CMN_NOR_CP;
9458 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
9459 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, cfiIdx++)
9461 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
9463 cellSch->dl.emtcCqiToTbsTbl[0][cfi] = rgSchEmtcCmnCqiToTbs[0][cp][cfiIdx];
9465 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
9466 [cell->numTxAntPorts]][cfiIdx];
9467 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
9469 cellSch->dl.emtcCqiToTbsTbl[1][cfi] = rgSchEmtcCmnCqiToTbs[1][cp][cfiIdx];
9471 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
9472 [cell->numTxAntPorts]][cfiIdx];
9475 /* Initializing the values of CFI parameters */
9476 if(cell->dynCfiCb.isDynCfiEnb)
9478 /* If DCFI is enabled, current CFI value will start from 1 */
9479 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
9483 /* If DCFI is disabled, current CFI value is set as default CFI value */
9484 cellSch->dl.currCfi = cellSch->cfiCfg.cfi;
9485 cellSch->dl.newCfi = cellSch->dl.currCfi;
9488 /* Include CRS REs while calculating Efficiency
9489 * The number of Resource Elements occupied by CRS depends on Number of
9490 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
9491 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
9492 * details of the same. Please note that PDCCH overlap symbols would not
9493 * considered in CRS REs deduction */
9494 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
9496 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
9497 - numPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
9500 if (cfg->maxDlBwPerUe == 0)
9502 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
9506 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
9508 if (cfg->maxDlRetxBw == 0)
9510 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
9514 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
9517 /* Fix: MUE_PERTTI_DL*/
9518 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
9519 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
9520 if (cfg->maxUePerDlSf == 0)
9522 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
9524 /* Fix: MUE_PERTTI_DL syed validating Cell Configuration */
9525 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
9527 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
9528 "FAILED MaxUePerDlSf(%u) < MaxDlUeNewTxPerTti(%u)",
9529 cellSch->dl.maxUePerDlSf,
9530 cellSch->dl.maxUeNewTxPerTti);
9533 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
9534 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
9536 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid configuration !: "
9537 "maxCcchPerDlSf %u > maxUePerDlSf %u",
9538 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
9542 else if (!cfg->maxCcchPerDlSf)
9544 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
9545 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
9546 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
9547 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
9548 * FLE crash in PHY as PHY has limit of 16 max*/
9549 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
9553 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
9557 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
9561 cmLListInit(&cellSch->dl.msg4RetxLst);
9563 cmLListInit(&cellSch->dl.ccchSduRetxLst);
9566 #ifdef RG_PHASE2_SCHED
9567 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
9569 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
9571 if (cfg->dlfsCfg.isDlFreqSel)
9573 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
9579 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
9582 /* Power related configuration */
9583 ret = rgSCHPwrCellCfg(cell, cfg);
9589 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
9590 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
9591 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
9592 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
9593 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
9596 #endif /* LTE_TDD */
9598 /***********************************************************
9600 * Func : rgSCHCmnUlCalcReqRbCeil
9602 * Desc : Calculate RB required to satisfy 'bytes' for
9604 * Returns number of RBs such that requirement
9605 * is necessarily satisfied (does a 'ceiling'
9608 * Ret : Required RBs (uint8_t)
9614 **********************************************************/
9615 uint8_t rgSCHCmnUlCalcReqRbCeil(uint32_t bytes,uint8_t cqi,RgSchCmnUlCell *cellUl)
9617 uint32_t numRe = RGSCH_CEIL((bytes * 8) * 1024, rgSchCmnUlCqiTbl[cqi].eff);
9618 return ((uint8_t)RGSCH_CEIL(numRe, RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl)));
9621 /***********************************************************
9623 * Func : rgSCHCmnPrecompMsg3Vars
9625 * Desc : Precomputes the following for msg3 allocation:
9626 * 1. numSb and Imcs for msg size A
9627 * 2. numSb and Imcs otherwise
9631 * Notes: The corresponding vars in cellUl struct is filled
9636 **********************************************************/
9637 static S16 rgSCHCmnPrecompMsg3Vars(RgSchCmnUlCell *cellUl,uint8_t ccchCqi,uint16_t msgSzA,uint8_t sbSize,Bool isEcp)
9644 uint16_t msg3GrntSz = 0;
9647 if (ccchCqi > cellUl->max16qamCqi)
9649 ccchCqi = cellUl->max16qamCqi;
9651 /* #ifndef RG_SCH_CMN_EXP_CP_SUP For ECP Pick the index 1 */
9653 ccchTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
9654 ccchMcs = rgSCHCmnUlGetIMcsFrmITbs(ccchTbs, CM_LTE_UE_CAT_1);
9656 /* MCS should fit in 4 bits in RAR */
9662 /* Limit the ccchMcs to 15 as it
9663 * can be inferred from 36.213, section 6.2 that msg3 imcs
9665 * Since, UE doesn't exist right now, we use CAT_1 for ue
9667 while((ccchMcs = (rgSCHCmnUlGetIMcsFrmITbs(
9668 rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi],CM_LTE_UE_CAT_1))
9670 RG_SCH_CMN_MAX_MSG3_IMCS)
9675 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
9677 if (msgSzA < RGSCH_MIN_MSG3_GRNT_SZ)
9681 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(msgSzA, ccchCqi, cellUl), sbSize);
9683 numRb = numSb * sbSize;
9684 msg3GrntSz = 8 * msgSzA;
9686 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
9689 numRb = numSb * sbSize;
9691 while (rgSchCmnMult235Tbl[numSb].match != numSb)
9695 /* Reversed(Corrected) the assignment for preamble-GrpA
9696 * Refer- TG36.321- section- 5.1.2*/
9697 cellUl->ra.prmblBNumSb = numSb;
9698 cellUl->ra.prmblBIMcs = ccchMcs;
9699 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(RGSCH_MIN_MSG3_GRNT_SZ, \
9703 numRb = numSb * sbSize;
9704 msg3GrntSz = 8 * RGSCH_MIN_MSG3_GRNT_SZ;
9705 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
9708 numRb = numSb * sbSize;
9710 while (rgSchCmnMult235Tbl[numSb].match != numSb)
9714 /* Reversed(Corrected) the assignment for preamble-GrpA
9715 * Refer- TG36.321- section- 5.1.2*/
9716 cellUl->ra.prmblANumSb = numSb;
9717 cellUl->ra.prmblAIMcs = ccchMcs;
9721 uint32_t gPrntPucchDet=0;
9724 /***********************************************************
9726 * Func : rgSCHCmnUlCalcAvailBw
9728 * Desc : Calculates bandwidth available for PUSCH scheduling.
9730 * Ret : S16 (ROK/RFAILED)
9736 **********************************************************/
9737 static S16 rgSCHCmnUlCalcAvailBw(RgSchCellCb *cell,RgrCellCfg *cellCfg,uint8_t cfi,uint8_t *rbStartRef,uint8_t *bwAvailRef)
9740 uint8_t ulBw = cell->bwCfg.ulTotalBw;
9741 uint8_t n2Rb = cell->pucchCfg.resourceSize;
9742 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
9743 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
9744 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
9750 uint8_t exclRb; /* RBs to exclude */
9752 uint8_t puschRbStart;
9753 /* To avoid PUCCH and PUSCH collision issue */
9757 /* Maximum value of M as per Table 10.1-1 */
9758 uint8_t M[RGSCH_MAX_TDD_UL_DL_CFG] = {1, 2, 4, 3, 4, 9, 1};
9761 if (cell->isCpUlExtend)
9766 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
9768 /* Considering the max no. of CCEs for PUSCH BW calculation
9769 * based on min mi value */
9770 if (cell->ulDlCfgIdx == 0 || cell->ulDlCfgIdx == 6)
9779 totalCce = cell->dynCfiCb.cfi2NCceTbl[mi][cfi];
9781 P = rgSCHCmnGetPValFrmCCE(cell, totalCce-1);
9782 n1PlusOne = cell->rgSchTddNpValTbl[P + 1];
9783 n1Max = (M[cell->ulDlCfgIdx] - 1)*n1PlusOne + (totalCce-1) + n1Pucch;
9785 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
9787 n1RbPart = (c*n1Cs)/pucchDeltaShft;
9788 n1Rb = (n1Max - n1RbPart)/ n1PerRb;
9789 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
9791 /* get the total Number of RB's to be excluded for PUSCH */
9793 if(n1Pucch < n1RbPart)
9799 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
9801 puschRbStart = exclRb/2 + 1;
9803 /* Num of PUCCH RBs = puschRbStart*2 */
9804 if (puschRbStart * 2 >= ulBw)
9806 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
9810 *rbStartRef = puschRbStart;
9811 *bwAvailRef = ulBw - puschRbStart * 2;
9813 if(cell->pucchCfg.maxPucchRb !=0 &&
9814 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
9816 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
9823 /***********************************************************
9825 * Func : rgSCHCmnUlCalcAvailBw
9827 * Desc : Calculates bandwidth available for PUSCH scheduling.
9829 * Ret : S16 (ROK/RFAILED)
9835 **********************************************************/
9836 static S16 rgSCHCmnUlCalcAvailBw(RgSchCellCb *cell,RgrCellCfg *cellCfg,uint8_t cfi,uint8_t *rbStartRef,uint8_t *bwAvailRef)
9839 uint8_t ulBw = cell->bwCfg.ulTotalBw;
9840 uint8_t n2Rb = cell->pucchCfg.resourceSize;
9841 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
9842 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
9843 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
9849 uint8_t exclRb; /* RBs to exclude */
9851 uint8_t puschRbStart;
9853 uint16_t numOfN3PucchRb;
9854 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
9858 if (cell->isCpUlExtend)
9863 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
9865 totalCce = cell->dynCfiCb.cfi2NCceTbl[0][cfi];
9867 n1Max = n1Pucch + totalCce-1;
9869 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
9871 n1RbPart = (c*n1Cs)/pucchDeltaShft;
9872 n1Rb = (uint8_t)((n1Max - n1RbPart) / n1PerRb);
9873 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
9875 /* get the total Number of RB's to be excluded for PUSCH */
9877 if(n1Pucch < n1RbPart)
9883 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
9885 /*Support for PUCCH Format 3*/
9887 if (cell->isPucchFormat3Sptd)
9889 numOfN3PucchRb = RGSCH_CEIL(cellSch->dl.maxUePerDlSf,5);
9890 exclRb = exclRb + numOfN3PucchRb;
9893 puschRbStart = exclRb/2 + 1;
9898 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%ld:%d:%d:%d:%d:%d]\n",
9899 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
9901 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%d:%d:%d:%d:%d:%d]\n",
9902 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
9906 if (puschRbStart*2 >= ulBw)
9908 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
9912 *rbStartRef = puschRbStart;
9913 *bwAvailRef = ulBw - puschRbStart * 2;
9915 if(cell->pucchCfg.maxPucchRb !=0 &&
9916 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
9918 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
9927 /***********************************************************
9929 * Func : rgSCHCmnUlCellInit
9931 * Desc : Uplink scheduler initialisation for cell.
9939 **********************************************************/
9940 static S16 rgSCHCmnUlCellInit(RgSchCellCb *cell,RgrCellCfg *cellCfg)
9943 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
9944 uint8_t maxUePerUlSf = cellCfg->maxUePerUlSf;
9946 /* Added configuration for maximum number of MSG3s */
9947 uint8_t maxMsg3PerUlSf = cellCfg->maxMsg3PerUlSf;
9949 uint8_t maxUlBwPerUe = cellCfg->maxUlBwPerUe;
9950 uint8_t sbSize = cellCfg->puschSubBand.size;
9958 uint16_t ulDlCfgIdx = cell->ulDlCfgIdx;
9959 /* [ccpu00127294]-MOD-Change the max Ul subfrms size in TDD */
9960 uint8_t maxSubfrms = 2 * rgSchTddNumUlSf[ulDlCfgIdx];
9961 uint8_t ulToDlMap[12] = {0}; /* maximum 6 Subframes in UL * 2 */
9962 uint8_t maxUlsubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
9963 [RGSCH_NUM_SUB_FRAMES-1];
9967 uint8_t maxSubfrms = RG_SCH_CMN_UL_NUM_SF;
9973 #if (defined(LTE_L2_MEAS) )
9974 Inst inst = cell->instIdx;
9975 #endif /* #if (defined(LTE_L2_MEAS) || defined(DEBUGP) */
9976 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
9979 cellUl->maxUeNewTxPerTti = cellCfg->maxUlUeNewTxPerTti;
9980 if (maxUePerUlSf == 0)
9982 maxUePerUlSf = RG_SCH_CMN_MAX_UE_PER_UL_SF;
9985 if (maxMsg3PerUlSf == 0)
9987 maxMsg3PerUlSf = RG_SCH_CMN_MAX_MSG3_PER_UL_SF;
9989 /* fixed the problem while sending raRsp
9990 * if maxMsg3PerUlSf is greater than
9991 * RGSCH_MAX_RNTI_PER_RARNTI
9993 if(maxMsg3PerUlSf > RGSCH_MAX_RNTI_PER_RARNTI)
9995 maxMsg3PerUlSf = RGSCH_MAX_RNTI_PER_RARNTI;
9998 if(maxMsg3PerUlSf > maxUePerUlSf)
10000 maxMsg3PerUlSf = maxUePerUlSf;
10003 /*cellUl->maxAllocPerUlSf = maxUePerUlSf + maxMsg3PerUlSf;*/
10004 /*Max MSG3 should be a subset of Max UEs*/
10005 cellUl->maxAllocPerUlSf = maxUePerUlSf;
10006 cellUl->maxMsg3PerUlSf = maxMsg3PerUlSf;
10008 cellUl->maxAllocPerUlSf = maxUePerUlSf;
10010 /* Fix: MUE_PERTTI_UL syed validating Cell Configuration */
10011 if (cellUl->maxAllocPerUlSf < cellUl->maxUeNewTxPerTti)
10013 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10014 "FAILED: MaxUePerUlSf(%u) < MaxUlUeNewTxPerTti(%u)",
10015 cellUl->maxAllocPerUlSf,
10016 cellUl->maxUeNewTxPerTti);
10022 for(idx = 0; idx < RGSCH_SF_ALLOC_SIZE; idx++)
10024 for(idx = 0; idx < RGSCH_NUM_SUB_FRAMES; idx++)
10028 ret = rgSCHUtlAllocSBuf(inst, (Data **)&(cell->sfAllocArr[idx].
10029 ulUeInfo.ulAllocInfo), (cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc)));
10032 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation failed ");
10037 if (maxUlBwPerUe == 0)
10039 /* ccpu00139362- Setting to configured UL BW instead of MAX BW(100)*/
10040 maxUlBwPerUe = cell->bwCfg.ulTotalBw;
10042 cellUl->maxUlBwPerUe = maxUlBwPerUe;
10044 /* FOR RG_SCH_CMN_EXT_CP_SUP */
10045 if (!cellCfg->isCpUlExtend)
10047 cellUl->ulNumRePerRb = 12 * (14 - RGSCH_UL_SYM_DMRS_SRS);
10051 cellUl->ulNumRePerRb = 12 * (12 - RGSCH_UL_SYM_DMRS_SRS);
10054 if (sbSize != rgSchCmnMult235Tbl[sbSize].match)
10056 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Invalid subband size %d", sbSize);
10059 //Setting the subband size to 4 which is size of VRBG in 5GTF
10061 sbSize = MAX_5GTF_VRBG_SIZE;
10064 maxSbPerUe = maxUlBwPerUe / sbSize;
10065 if (maxSbPerUe == 0)
10067 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnUlCellInit(): "
10068 "maxUlBwPerUe/sbSize is zero");
10071 cellUl->maxSbPerUe = rgSchCmnMult235Tbl[maxSbPerUe].prvMatch;
10073 /* CQI related updations */
10074 if ((!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->ulCmnCodeRate.ccchCqi))
10075 || (!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->trgUlCqi.trgCqi)))
10077 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnUlCellInit(): "
10081 cellUl->dfltUlCqi = cellCfg->ulCmnCodeRate.ccchCqi;
10083 /* Changed the logic to determine maxUlCqi.
10084 * For a 16qam UE, maxUlCqi is the CQI Index at which
10085 * efficiency is as close as possible to RG_SCH_MAX_CODE_RATE_16QAM
10086 * Refer to 36.213-8.6.1 */
10087 for (i = RG_SCH_CMN_UL_NUM_CQI - 1;i > 0; --i)
10089 RLOG_ARG2(L_INFO,DBG_CELLID,cell->cellId,
10092 rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i]);
10093 #ifdef MAC_SCH_STATS
10094 /* ccpu00128489 ADD Update mcs in hqFailStats here instead of at CRC
10095 * since CQI to MCS mapping does not change. The only exception is for
10096 * ITBS = 19 where the MCS can be 20 or 21 based on the UE cat. We
10097 * choose 20, instead of 21, ie UE_CAT_3 */
10098 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
10099 RG_SCH_CMN_UL_TBS_TO_MCS(iTbs, hqFailStats.ulCqiStat[i - 1].mcs);
10102 for (i = RG_SCH_CMN_UL_NUM_CQI - 1; i != 0; --i)
10104 /* Fix for ccpu00123912*/
10105 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
10106 if (iTbs <= RGSCH_UL_16QAM_MAX_ITBS) /* corresponds to 16QAM */
10108 RLOG_ARG1(L_INFO,DBG_CELLID,cell->cellId,
10109 "16 QAM CQI %u", i);
10110 cellUl->max16qamCqi = i;
10116 /* Precompute useful values for RA msg3 */
10117 ret = rgSCHCmnPrecompEmtcMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
10118 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
10125 /* Precompute useful values for RA msg3 */
10126 ret = rgSCHCmnPrecompMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
10127 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
10133 cellUl->sbSize = sbSize;
10136 cellUl->numUlSubfrms = maxSubfrms;
10138 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->ulSfArr,
10139 cellUl->numUlSubfrms * sizeof(RgSchUlSf));
10143 cellUl->numUlSubfrms = 0;
10147 /* store the DL subframe corresponding to the PUSCH offset
10148 * in their respective UL subframe */
10149 for(i=0; i < RGSCH_NUM_SUB_FRAMES; i++)
10151 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][i] != 0)
10153 subfrm = (i + rgSchTddPuschTxKTbl[ulDlCfgIdx][i]) % \
10154 RGSCH_NUM_SUB_FRAMES;
10155 subfrm = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subfrm]-1;
10156 dlIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][i]-1;
10157 RGSCH_ARRAY_BOUND_CHECK( cell->instIdx, ulToDlMap, subfrm);
10158 ulToDlMap[subfrm] = dlIdx;
10161 /* Copy the information in the remaining UL subframes based
10162 * on number of HARQ processes */
10163 for(i=maxUlsubfrms; i < maxSubfrms; i++)
10165 subfrm = i-maxUlsubfrms;
10166 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, i);
10167 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, subfrm)
10168 ulToDlMap[i] = ulToDlMap[subfrm];
10172 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++)
10175 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
10177 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
10186 cell->ulAvailBw = bwAvail;
10189 numSb = bwAvail/sbSize;
10191 cell->dynCfiCb.bwInfo[cfi].startRb = rbStart;
10192 cell->dynCfiCb.bwInfo[cfi].numSb = numSb;
10195 if(0 == cell->dynCfiCb.maxCfi)
10197 RLOG_ARG3(L_ERROR,DBG_CELLID,cell->cellId,
10198 "Incorrect Default CFI(%u), maxCfi(%u), maxPucchRb(%d)",
10199 cellSch->cfiCfg.cfi, cell->dynCfiCb.maxCfi,
10200 cell->pucchCfg.maxPucchRb);
10206 cellUl->dmrsArrSize = cell->dynCfiCb.bwInfo[1].numSb;
10207 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->dmrsArr,
10208 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10213 for (i = 0; i < cellUl->dmrsArrSize; ++i)
10215 cellUl->dmrsArr[i] = cellCfg->puschSubBand.dmrs[i];
10218 /* Init subframes */
10219 for (i = 0; i < maxSubfrms; ++i)
10221 ret = rgSCHUtlUlSfInit(cell, &cellUl->ulSfArr[i], i,
10222 cellUl->maxAllocPerUlSf);
10225 for (; i != 0; --i)
10227 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[i-1]);
10229 /* ccpu00117052 - MOD - Passing double pointer
10230 for proper NULLP assignment*/
10231 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)(&(cellUl->dmrsArr)),
10232 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10234 /* ccpu00117052 - MOD - Passing double pointer
10235 for proper NULLP assignment*/
10236 rgSCHUtlFreeSBuf(cell->instIdx,
10237 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
10242 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cellUl);
10247 * @brief Scheduler processing on cell configuration.
10251 * Function : rgSCHCmnRgrCellCfg
10253 * This function does requisite initialisation
10254 * and setup for scheduler1 when a cell is
10257 * @param[in] RgSchCellCb *cell
10258 * @param[in] RgrCellCfg *cellCfg
10259 * @param[out] RgSchErrInfo *err
/* Scheduler handling for RGR cell configuration: allocates the common
 * scheduler cell control block (cell->sc.sch), initializes refresh timer
 * queues, RACH dedicated-preamble info, HARQ 'Np' values, UL/DL cell init,
 * DCI format sizing, and finally dispatches to the configured UL/DL (and,
 * when enabled, EMTC) scheduler APIs. Returns ROK on success; on failure
 * sets err->errCause = RGSCHERR_SCH_CFG. */
10264 S16 rgSCHCmnRgrCellCfg(RgSchCellCb *cell,RgrCellCfg *cellCfg,RgSchErrInfo *err)
10267 RgSchCmnCell *cellSch;
10269 /* As part of RGR cell configuration, validate the CRGCellCfg
10270 * There is no trigger for crgCellCfg from SC1 */
10271 /* Removed failure check for Extended CP */
10273 if (((ret = rgSCHUtlAllocSBuf(cell->instIdx,
10274 (Data**)&(cell->sc.sch), (sizeof(RgSchCmnCell)))) != ROK))
10276 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
10277 "Memory allocation FAILED");
10278 err->errCause = RGSCHERR_SCH_CFG;
10281 cellSch = (RgSchCmnCell *)(cell->sc.sch);
10282 cellSch->cfiCfg = cellCfg->cfiCfg;
10283 cellSch->trgUlCqi.trgCqi = cellCfg->trgUlCqi.trgCqi;
10284 /* Initialize the scheduler refresh timer queues */
10285 cellSch->tmrTqCp.nxtEnt = 0;
10286 cellSch->tmrTqCp.tmrLen = RG_SCH_CMN_NUM_REFRESH_Q;
10288 /* RACHO: Initialize the RACH dedicated Preamble Information */
10289 rgSCHCmnCfgRachDedPrm(cell);
10291 /* Initialize 'Np' value for each 'p' used for
10292 * HARQ ACK/NACK reception */
10293 rgSCHCmnDlNpValInit(cell);
/* NOTE(review): rgSCHCmnDlNpValInit appears twice below; these look like
 * alternate branches of an elided #ifdef (e.g. LTE_TDD) — confirm against
 * the full file before assuming double initialization. */
10296 /* Initialize 'Np' value for each 'p' used for
10297 * HARQ ACK/NACK reception */
10299 rgSCHCmnDlNpValInit(cell);
10302 /* Now perform uplink related initializations */
10303 ret = rgSCHCmnUlCellInit(cell, cellCfg);
10306 /* There is no downlink deinit to be performed */
10307 err->errCause = RGSCHERR_SCH_CFG;
10310 ret = rgSCHCmnDlRgrCellCfg(cell, cellCfg, err);
10313 err->errCause = RGSCHERR_SCH_CFG;
10316 /* DL scheduler has no initializations to make */
10317 /* As of now DL scheduler always returns ROK */
10319 rgSCHCmnGetDciFrmtSizes(cell);
10320 rgSCHCmnGetCqiDciFrmt2AggrLvl(cell);
10322 rgSCHCmnGetEmtcDciFrmtSizes(cell);
10323 rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl(cell);
10324 #endif /* EMTC_ENABLE */
/* Bind the UL scheduler API table: EMTC-specific table when the cell has
 * eMTC enabled, otherwise the table selected by the cell's UL sched type. */
10327 if(TRUE == cellCfg->emtcEnable)
10329 cellSch->apisEmtcUl = &rgSchEmtcUlSchdTbl[0];
10330 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
10337 cellSch->apisUl = &rgSchUlSchdTbl[RG_SCH_CMN_GET_UL_SCHED_TYPE(cell)];
10338 ret = cellSch->apisUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
/* Same pattern for the DL scheduler API table. */
10344 if(TRUE == cellCfg->emtcEnable)
10346 cellSch->apisEmtcDl = &rgSchEmtcDlSchdTbl[0];
10347 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
10354 cellSch->apisDl = &rgSchDlSchdTbl[RG_SCH_CMN_GET_DL_SCHED_TYPE(cell)];
10356 /* Perform SPS specific initialization for the cell */
10357 ret = rgSCHCmnSpsCellCfg(cell, cellCfg, err);
10363 ret = cellSch->apisDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
10368 rgSCHCmnInitVars(cell);
10371 } /* rgSCHCmnRgrCellCfg*/
10375 * @brief This function handles the reconfiguration of cell.
10379 * Function: rgSCHCmnRgrCellRecfg
10380 * Purpose: Update the reconfiguration parameters.
10382 * Invoked by: Scheduler
10384 * @param[in] RgSchCellCb* cell
/* Scheduler handling for RGR cell reconfiguration. Each RGR_CELL_*_RECFG
 * bit in recfg->recfgTypes selects an independent sub-reconfiguration:
 * UL common code rate (CCCH CQI + Msg3 precompute), DL common code rate,
 * UL/DL per-scheduler recfg, DLFS, and cell power. On any failure sets
 * err->errCause = RGSCHERR_SCH_CFG. */
10388 S16 rgSCHCmnRgrCellRecfg(RgSchCellCb *cell,RgrCellRecfg *recfg,RgSchErrInfo *err)
10391 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10392 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
10395 if (recfg->recfgTypes & RGR_CELL_UL_CMNRATE_RECFG)
10397 uint8_t oldCqi = cellUl->dfltUlCqi;
10398 if (!RG_SCH_CMN_UL_IS_CQI_VALID(recfg->ulCmnCodeRate.ccchCqi))
10400 err->errCause = RGSCHERR_SCH_CFG;
10401 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnRgrCellRecfg(): "
10405 cellUl->dfltUlCqi = recfg->ulCmnCodeRate.ccchCqi;
10406 ret = rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
10407 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
/* NOTE(review): suspected rollback bug — on precompute failure dfltUlCqi is
 * restored to oldCqi, but the recompute below still passes the NEW
 * recfg->ulCmnCodeRate.ccchCqi instead of oldCqi, leaving the Msg3 vars
 * inconsistent with dfltUlCqi. Confirm and fix in the full file. */
10410 cellUl->dfltUlCqi = oldCqi;
10411 rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
10412 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
10417 if (recfg->recfgTypes & RGR_CELL_DL_CMNRATE_RECFG)
10419 if (rgSCHCmnDlCnsdrCmnRt(cell, &recfg->dlCmnCodeRate) != ROK)
10421 err->errCause = RGSCHERR_SCH_CFG;
/* eMTC cells dispatch to the EMTC scheduler API tables, others to the
 * regular UL/DL tables. */
10427 if(TRUE == cell->emtcEnable)
10429 /* Invoke UL sched for cell Recfg */
10430 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
10436 /* Invoke DL sched for cell Recfg */
10437 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
10446 /* Invoke UL sched for cell Recfg */
10447 ret = cellSch->apisUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
10453 /* Invoke DL sched for cell Recfg */
10454 ret = cellSch->apisDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
10461 if (recfg->recfgTypes & RGR_CELL_DLFS_RECFG)
10463 ret = cellSch->apisDlfs->rgSCHDlfsCellRecfg(cell, recfg, err);
10468 cellSch->dl.isDlFreqSel = recfg->dlfsRecfg.isDlFreqSel;
10471 if (recfg->recfgTypes & RGR_CELL_PWR_RECFG)
10473 ret = rgSCHPwrCellRecfg(cell, recfg);
10483 /***********************************************************
10485 * Func : rgSCHCmnUlCellDeinit
10487 * Desc : Uplink scheduler de-initialisation for cell.
10495 **********************************************************/
/* Uplink scheduler de-initialization for a cell: frees the per-subframe
 * UL allocation info, drains and frees the L2 measurement control blocks,
 * frees the DMRS array, de-initializes each UL subframe, and finally frees
 * the UL subframe array itself. All frees go through rgSCHUtlFreeSBuf with
 * a double pointer so the freed pointer is NULLed (ccpu00117052). */
10496 static Void rgSCHCmnUlCellDeinit(RgSchCellCb *cell)
10498 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
10501 uint8_t maxSubfrms = cellUl->numUlSubfrms;
10504 CmLList *lnk = NULLP;
10505 RgSchL2MeasCb *measCb;
/* NOTE(review): the two loop bounds below (RGSCH_SF_ALLOC_SIZE vs
 * RGSCH_NUM_SUB_FRAMES) look like alternate branches of an elided #ifdef
 * (likely FDD/TDD) — confirm against the guards in the full file. */
10509 for(ulSfIdx = 0; ulSfIdx < RGSCH_SF_ALLOC_SIZE; ulSfIdx++)
10511 for(ulSfIdx = 0; ulSfIdx < RGSCH_NUM_SUB_FRAMES; ulSfIdx++)
10514 if(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo != NULLP)
10516 /* ccpu00117052 - MOD - Passing double pointer
10517 for proper NULLP assignment*/
10518 rgSCHUtlFreeSBuf(cell->instIdx,
10519 (Data **)(&(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo)),
10520 cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc));
10522 /* ccpu00117052 - DEL - removed explicit NULLP assignment
10523 as it is done in above utility function */
10526 /* Free the memory allocated to measCb */
10527 lnk = cell->l2mList.first;
10528 while(lnk != NULLP)
10530 measCb = (RgSchL2MeasCb *)lnk->node;
10531 cmLListDelFrm(&cell->l2mList, lnk);
10533 /* ccpu00117052 - MOD - Passing double pointer
10534 for proper NULLP assignment*/
10535 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&measCb,\
10536 sizeof(RgSchL2MeasCb));
10539 if (cellUl->dmrsArr != NULLP)
10541 /* ccpu00117052 - MOD - Passing double pointer
10542 for proper NULLP assignment*/
10543 rgSCHUtlFreeSBuf(cell->instIdx,(Data **)(&(cellUl->dmrsArr)),
10544 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
10546 /* De-init subframes */
10548 for (ulSfIdx = 0; ulSfIdx < maxSubfrms; ++ulSfIdx)
10550 for (ulSfIdx = 0; ulSfIdx < RG_SCH_CMN_UL_NUM_SF; ++ulSfIdx)
10553 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[ulSfIdx]);
10557 if (cellUl->ulSfArr != NULLP)
10559 /* ccpu00117052 - MOD - Passing double pointer
10560 for proper NULLP assignment*/
10561 rgSCHUtlFreeSBuf(cell->instIdx,
10562 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
10570 * @brief Scheduler processing for cell delete.
10574 * Function : rgSCHCmnCellDel
10576 * This functions de-initialises and frees memory
10577 * taken up by scheduler1 for the entire cell.
10579 * @param[in] RgSchCellCb *cell
/* Scheduler processing for cell deletion: de-initializes the UL scheduler,
 * releases the UL/DL (and EMTC) scheduler-specific cell state through the
 * bound API tables, tears down DLFS, power and SPS state, and finally frees
 * the common scheduler cell control block (cell->sc.sch). No-op if the
 * scheduler cell block was never allocated. */
10582 Void rgSCHCmnCellDel(RgSchCellCb *cell)
10584 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10589 if (cellSch == NULLP)
10593 /* Perform the deinit for the UL scheduler */
10594 rgSCHCmnUlCellDeinit(cell);
10596 if(TRUE == cell->emtcEnable)
10598 if (cellSch->apisEmtcUl)
10600 cellSch->apisEmtcUl->rgSCHFreeUlCell(cell);
10604 if (cellSch->apisUl)
10606 /* api pointer checks added (here and below in
10607 * this function). pl check. - antriksh */
10608 cellSch->apisUl->rgSCHFreeUlCell(cell);
10611 /* Perform the deinit for the DL scheduler */
10612 cmLListInit(&cellSch->dl.taLst);
10613 if (cellSch->apisDl)
10615 cellSch->apisDl->rgSCHFreeDlCell(cell);
10618 if (cellSch->apisEmtcDl)
10620 rgSCHEmtcInitTaLst(&cellSch->dl);
10622 cellSch->apisEmtcDl->rgSCHFreeDlCell(cell);
10626 /* DLFS de-initialization */
10627 if (cellSch->dl.isDlFreqSel && cellSch->apisDlfs)
10629 cellSch->apisDlfs->rgSCHDlfsCellDel(cell);
10632 rgSCHPwrCellDel(cell);
10634 rgSCHCmnSpsCellDel(cell);
10637 /* ccpu00117052 - MOD - Passing double pointer
10638 for proper NULLP assignment*/
10639 rgSCHUtlFreeSBuf(cell->instIdx,
10640 (Data**)(&(cell->sc.sch)), (sizeof(RgSchCmnCell)));
10642 } /* rgSCHCmnCellDel */
10646 * @brief This function validates QOS parameters for DL.
10650 * Function: rgSCHCmnValidateDlQos
10651 * Purpose: This function validates QOS parameters for DL.
10653 * Invoked by: Scheduler
10655 * @param[in] CrgLchQosCfg *dlQos
/* Validates DL QoS parameters for a logical channel: the QCI must lie in
 * [RG_SCH_CMN_MIN_QCI, RG_SCH_CMN_MAX_QCI], and for GBR QCIs the MBR must
 * be non-zero and >= GBR. (Failure returns are in lines elided from this
 * excerpt.) */
10659 static S16 rgSCHCmnValidateDlQos(RgrLchQosCfg *dlQos)
10661 uint8_t qci = dlQos->qci;
10662 if ( qci < RG_SCH_CMN_MIN_QCI || qci > RG_SCH_CMN_MAX_QCI )
10667 if ((qci >= RG_SCH_CMN_GBR_QCI_START) &&
10668 (qci <= RG_SCH_CMN_GBR_QCI_END))
10670 if ((dlQos->mbr == 0) || (dlQos->mbr < dlQos->gbr))
10679 * @brief Scheduler invocation on logical channel addition.
10683 * Function : rgSCHCmnRgrLchCfg
10685 * This functions does required processing when a new
10686 * (dedicated) logical channel is added. Assumes lcg
10687 * pointer in ulLc is set.
10689 * @param[in] RgSchCellCb *cell
10690 * @param[in] RgSchUeCb *ue
10691 * @param[in] RgSchDlLcCb *dlLc
10692 * @param[int] RgrLchCfg *lcCfg
10693 * @param[out] RgSchErrInfo *err
/* Scheduler handling for addition of a dedicated logical channel:
 * allocates the per-LC scheduler block (dlLc->sch), validates DL QoS for
 * non-DCCH channels and derives prio/gbr/mbr from it (DCCH gets the fixed
 * highest priority), then registers the LC with the DL and UL schedulers
 * (EMTC variants for eMTC UEs), the SCell handling, and — when enabled —
 * the DL SPS module. GBR/MBR are scaled to the scheduler refresh period
 * (value * RG_SCH_CMN_REFRESH_TIME / 100). */
10698 S16 rgSCHCmnRgrLchCfg
10709 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10712 ret = rgSCHUtlAllocSBuf(cell->instIdx,
10713 (Data**)&((dlLc)->sch), (sizeof(RgSchCmnDlSvc)));
10716 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRgrLchCfg(): "
10717 "SCH struct alloc failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10718 err->errCause = RGSCHERR_SCH_CFG;
10721 if(lcCfg->lcType != CM_LTE_LCH_DCCH)
10723 ret = rgSCHCmnValidateDlQos(&lcCfg->dlInfo.dlQos);
/* NOTE(review): the log below names "rgSchCmnCrgLcCfg" although this
 * function is rgSCHCmnRgrLchCfg — stale function name in the log string. */
10726 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSchCmnCrgLcCfg(): "
10727 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10728 err->errCause = RGSCHERR_SCH_CFG;
10731 /* Perform DL service activation in the scheduler */
10732 ((RgSchCmnDlSvc *)(dlLc->sch))->qci = lcCfg->dlInfo.dlQos.qci;
10733 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = rgSchCmnDlQciPrio[lcCfg->dlInfo.dlQos.qci - 1];
10734 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcCfg->dlInfo.dlQos.gbr * \
10735 RG_SCH_CMN_REFRESH_TIME)/100;
10736 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcCfg->dlInfo.dlQos.mbr * \
10737 RG_SCH_CMN_REFRESH_TIME)/100;
10741 /*assigning highest priority to DCCH */
10742 ((RgSchCmnDlSvc *)(dlLc->sch))->prio=RG_SCH_CMN_DCCH_PRIO;
10745 dlLc->lcType=lcCfg->lcType;
10748 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
10750 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcCfg(cell, ue,dlLc ,lcCfg, err);
10759 ret = cellSch->apisDl->rgSCHRgrDlLcCfg(cell, ue, dlLc, lcCfg, err);
10767 if(TRUE == ue->isEmtcUe)
10769 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
10778 ret = cellSch->apisUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
10788 rgSCHSCellDlLcCfg(cell, ue, dlLc);
10794 if(lcCfg->dlInfo.dlSpsCfg.isSpsEnabled)
10796 /* Invoke SPS module if SPS is enabled for the service */
10797 ret = rgSCHCmnSpsDlLcCfg(cell, ue, dlLc, lcCfg, err);
10800 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "rgSchCmnRgrLchCfg(): "
10801 "SPS configuration failed for DL LC for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
10802 err->errCause = RGSCHERR_SCH_CFG;
10812 * @brief Scheduler invocation on logical channel addition.
10816 * Function : rgSCHCmnRgrLchRecfg
10818 * This functions does required processing when an existing
10819 * (dedicated) logical channel is reconfigured. Assumes lcg
10820 * pointer in ulLc is set to the old value.
10821 * Independent of whether new LCG is meant to be configured,
10822 * the new LCG scheduler information is accessed and possibly modified.
10824 * @param[in] RgSchCellCb *cell
10825 * @param[in] RgSchUeCb *ue
10826 * @param[in] RgSchDlLcCb *dlLc
10827 * @param[int] RgrLchRecfg *lcRecfg
10828 * @param[out] RgSchErrInfo *err
/* Scheduler handling for reconfiguration of an existing dedicated logical
 * channel. Non-DCCH channels get DL QoS re-validated; a QCI change is
 * rejected (priority change unsupported) and gbr/mbr are re-derived scaled
 * to the refresh period. DCCH keeps the fixed highest priority. The change
 * is then propagated to the DL and UL schedulers (EMTC variants for eMTC
 * UEs) and, when requested, to the DL SPS module. */
10833 S16 rgSCHCmnRgrLchRecfg
10838 RgrLchRecfg *lcRecfg,
10843 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10846 if(dlLc->lcType != CM_LTE_LCH_DCCH)
10848 ret = rgSCHCmnValidateDlQos(&lcRecfg->dlRecfg.dlQos);
10852 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10853 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10854 err->errCause = RGSCHERR_SCH_CFG;
10857 if (((RgSchCmnDlSvc *)(dlLc->sch))->qci != lcRecfg->dlRecfg.dlQos.qci)
10859 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Qci, hence lc Priority change "
10860 "not supported for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10861 err->errCause = RGSCHERR_SCH_CFG;
10864 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcRecfg->dlRecfg.dlQos.gbr * \
10865 RG_SCH_CMN_REFRESH_TIME)/100;
10866 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcRecfg->dlRecfg.dlQos.mbr * \
10867 RG_SCH_CMN_REFRESH_TIME)/100;
10871 /*assigning highest priority to DCCH */
10872 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = RG_SCH_CMN_DCCH_PRIO;
10876 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
10878 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10883 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
10892 ret = cellSch->apisDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10897 ret = cellSch->apisUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
10905 if (lcRecfg->recfgTypes & RGR_DL_LC_SPS_RECFG)
10907 /* Invoke SPS module if SPS is enabled for the service */
10908 if(lcRecfg->dlRecfg.dlSpsRecfg.isSpsEnabled)
10910 ret = rgSCHCmnSpsDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
10913 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"SPS re-configuration not "
10914 "supported for dlLC Ignore this CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
10925 * @brief Scheduler invocation on logical channel addition.
10929 * Function : rgSCHCmnRgrLcgCfg
10931 * This functions does required processing when a new
10932 * (dedicated) logical channel is added. Assumes lcg
10933 * pointer in ulLc is set.
10935 * @param[in] RgSchCellCb *cell,
10936 * @param[in] RgSchUeCb *ue,
10937 * @param[in] RgSchLcgCb *lcg,
10938 * @param[in] RgrLcgCfg *lcgCfg,
10939 * @param[out] RgSchErrInfo *err
/* Scheduler handling for configuration of an uplink logical channel group:
 * derives the LCG's GBR and delta-MBR (mbr - gbr), both scaled to the
 * scheduler refresh period, dispatches to the UL scheduler (EMTC variant
 * for eMTC UEs), and registers GBR LCGs with MAC. */
10944 S16 rgSCHCmnRgrLcgCfg
10954 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10955 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCfg->ulInfo.lcgId].sch));
10958 ulLcg->cfgdGbr = (lcgCfg->ulInfo.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
10959 ulLcg->effGbr = ulLcg->cfgdGbr;
10960 ulLcg->deltaMbr = ((lcgCfg->ulInfo.mbr - lcgCfg->ulInfo.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
10961 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
10964 if(TRUE == ue->isEmtcUe)
10966 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
10975 ret = cellSch->apisUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
10981 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
10983 /* Indicate MAC that this LCG is GBR LCG */
10984 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcgCfg->ulInfo.lcgId, TRUE);
10990 * @brief Scheduler invocation on logical channel addition.
10994 * Function : rgSCHCmnRgrLcgRecfg
10996 * This functions does required processing when a new
10997 * (dedicated) logical channel is added. Assumes lcg
10998 * pointer in ulLc is set.
11000 * @param[in] RgSchCellCb *cell,
11001 * @param[in] RgSchUeCb *ue,
11002 * @param[in] RgSchLcgCb *lcg,
11003 * @param[in] RgrLcgRecfg *reCfg,
11004 * @param[out] RgSchErrInfo *err
/* Scheduler handling for reconfiguration of an uplink logical channel
 * group: re-derives GBR and delta-MBR (scaled to the refresh period),
 * dispatches to the UL scheduler (EMTC variant for eMTC UEs), and then
 * re-registers the LCG with MAC as GBR (TRUE) or non-GBR (FALSE — the
 * RAB-modification case). */
11009 S16 rgSCHCmnRgrLcgRecfg
11014 RgrLcgRecfg *reCfg,
11019 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11020 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[reCfg->ulRecfg.lcgId].sch));
11023 ulLcg->cfgdGbr = (reCfg->ulRecfg.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
11024 ulLcg->effGbr = ulLcg->cfgdGbr;
11025 ulLcg->deltaMbr = ((reCfg->ulRecfg.mbr - reCfg->ulRecfg.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
11026 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
11029 if(TRUE == ue->isEmtcUe)
11031 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
11040 ret = cellSch->apisUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
11046 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
11048 /* Indicate MAC that this LCG is GBR LCG */
11049 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, TRUE);
11053 /* In case of RAB modification */
11054 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, FALSE);
11059 /***********************************************************
11061 * Func : rgSCHCmnRgrLchDel
11063 * Desc : Scheduler handling for a (dedicated)
11064 * uplink logical channel being deleted.
11071 **********************************************************/
/* Scheduler handling for deletion of a dedicated uplink logical channel:
 * forwards the delete to the UL scheduler API (EMTC variant for eMTC UEs). */
11072 S16 rgSCHCmnRgrLchDel(RgSchCellCb *cell,RgSchUeCb *ue,CmLteLcId lcId,uint8_t lcgId)
11074 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11076 if(TRUE == ue->isEmtcUe)
11078 cellSch->apisEmtcUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
11083 cellSch->apisUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
11088 /***********************************************************
11090 * Func : rgSCHCmnLcgDel
11092 * Desc : Scheduler handling for a (dedicated)
11093 * uplink logical channel being deleted.
11101 **********************************************************/
/* Scheduler handling for deletion of an uplink logical channel group:
 * de-registers a GBR LCG from MAC, informs the UL SPS module when UL SPS
 * is enabled for the UE, zeroes the LCG's rate/BSR bookkeeping, and frees
 * scheduler-specific LCG state via the UL scheduler API (EMTC variant for
 * eMTC UEs). The control block itself is freed at UE deletion. */
11102 Void rgSCHCmnLcgDel(RgSchCellCb *cell,RgSchUeCb *ue,RgSchLcgCb *lcg)
11104 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11105 RgSchCmnLcg *lcgCmn = RG_SCH_CMN_GET_UL_LCG(lcg);
11107 if (lcgCmn == NULLP)
11112 if (RGSCH_IS_GBR_BEARER(lcgCmn->cfgdGbr))
11114 /* Indicate MAC that this LCG is GBR LCG */
11115 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcg->lcgId, FALSE);
11119 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
11121 rgSCHCmnSpsUlLcgDel(cell, ue, lcg);
11123 #endif /* LTEMAC_SPS */
11125 lcgCmn->effGbr = 0;
11126 lcgCmn->reportedBs = 0;
11127 lcgCmn->cfgdGbr = 0;
11128 /* set lcg bs to 0. Deletion of control block happens
11129 * at the time of UE deletion. */
11132 if(TRUE == ue->isEmtcUe)
11134 cellSch->apisEmtcUl->rgSCHFreeUlLcg(cell, ue, lcg);
11139 cellSch->apisUl->rgSCHFreeUlLcg(cell, ue, lcg);
11146 * @brief This function deletes a service from scheduler.
11150 * Function: rgSCHCmnFreeDlLc
11151 * Purpose: This function is made available through a FP for
11152 * making scheduler aware of a service being deleted from UE.
11154 * Invoked by: BO and Scheduler
11156 * @param[in] RgSchCellCb* cell
11157 * @param[in] RgSchUeCb* ue
11158 * @param[in] RgSchDlLcCb* svc
/* Deletes a DL service (logical channel) from the scheduler: releases the
 * per-LC scheduler state via the DL scheduler API (EMTC variant for eMTC
 * UEs), notifies SCell and — for SPS-enabled services — the SPS module,
 * frees the per-LC scheduler block (svc->sch), and de-initializes LAA
 * state for the channel. No-op if svc->sch was never allocated. */
11162 Void rgSCHCmnFreeDlLc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlLcCb *svc)
11164 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11165 if (svc->sch == NULLP)
11170 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
11172 cellSch->apisEmtcDl->rgSCHFreeDlLc(cell, ue, svc);
11177 cellSch->apisDl->rgSCHFreeDlLc(cell, ue, svc);
11183 rgSCHSCellDlLcDel(cell, ue, svc);
11188 /* If SPS service, invoke SPS module */
11189 if (svc->dlLcSpsCfg.isSpsEnabled)
11191 rgSCHCmnSpsDlLcDel(cell, ue, svc);
11195 /* ccpu00117052 - MOD - Passing double pointer
11196 for proper NULLP assignment*/
11197 rgSCHUtlFreeSBuf(cell->instIdx,
11198 (Data**)(&(svc->sch)), (sizeof(RgSchCmnDlSvc)));
11201 rgSCHLaaDeInitDlLchCb(cell, svc);
11210 * @brief This function Processes the Final Allocations
11211 * made by the RB Allocator against the requested
11212 * CCCH SDURetx Allocations.
11216 * Function: rgSCHCmnDlCcchSduRetxFnlz
11217 * Purpose: This function Processes the Final Allocations
11218 * made by the RB Allocator against the requested
11219 * CCCH Retx Allocations.
11220 * Scans through the scheduled list of ccchSdu retrans
11221 * fills the corresponding pdcch, adds the hqProc to
11222 * the corresponding SubFrm and removes the hqP from
11225 * Invoked by: Common Scheduler
11227 * @param[in] RgSchCellCb *cell
11228 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for CCCH SDU retransmissions: for each
 * scheduled HARQ process, fills the PDCCH/HARQ info and unlinks it from
 * the cell's ccchSduRetxLst; for each non-scheduled one, only the UE's
 * temporary allocation state is reset. Resetting dlAllocCb on both paths
 * avoids stale ("zombie") allocation info causing rbNum wraparound. */
11232 static Void rgSCHCmnDlCcchSduRetxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11235 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11236 RgSchDlRbAlloc *rbAllocInfo;
11237 RgSchDlHqProcCb *hqP;
11240 /* Traverse through the Scheduled Retx List */
11241 node = allocInfo->ccchSduAlloc.schdCcchSduRetxLst.first;
11244 hqP = (RgSchDlHqProcCb *)(node->node);
11246 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
11248 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11250 /* Remove the HqP from cell's ccchSduRetxLst */
11251 cmLListDelFrm(&cmnCellDl->ccchSduRetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
11252 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
11254 /* Fix: syed dlAllocCb reset should be performed.
11255 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11256 rgSCHCmnDlUeResetTemp(ue, hqP);
11258 /* Fix: syed dlAllocCb reset should be performed.
11259 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11260 node = allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst.first;
11263 hqP = (RgSchDlHqProcCb *)(node->node);
11266 /* reset the UE allocation Information */
11267 rgSCHCmnDlUeResetTemp(ue, hqP);
11273 * @brief This function Processes the Final Allocations
11274 * made by the RB Allocator against the requested
11275 * CCCH Retx Allocations.
11279 * Function: rgSCHCmnDlCcchRetxFnlz
11280 * Purpose: This function Processes the Final Allocations
11281 * made by the RB Allocator against the requested
11282 * CCCH Retx Allocations.
11283 * Scans through the scheduled list of msg4 retrans
11284 * fills the corresponding pdcch, adds the hqProc to
11285 * the corresponding SubFrm and removes the hqP from
11288 * Invoked by: Common Scheduler
11290 * @param[in] RgSchCellCb *cell
11291 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for Msg4 (CCCH) retransmissions: for each
 * scheduled HARQ process, fills the PDCCH/HARQ info from the RA control
 * block's allocation and unlinks it from the cell's msg4RetxLst; for both
 * scheduled and non-scheduled processes the RA allocation info and the
 * HARQ process's temporary state are cleared to prevent stale allocation
 * data (rbNum wraparound crash). */
11295 static Void rgSCHCmnDlCcchRetxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11298 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11299 RgSchDlRbAlloc *rbAllocInfo;
11300 RgSchDlHqProcCb *hqP;
11303 /* Traverse through the Scheduled Retx List */
11304 node = allocInfo->msg4Alloc.schdMsg4RetxLst.first;
11307 hqP = (RgSchDlHqProcCb *)(node->node);
11308 raCb = hqP->hqE->raCb;
11309 rbAllocInfo = &raCb->rbAllocInfo;
11311 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11313 /* Remove the HqP from cell's msg4RetxLst */
11314 cmLListDelFrm(&cmnCellDl->msg4RetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
11315 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
11316 /* Fix: syed dlAllocCb reset should be performed.
11317 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11318 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
11319 rgSCHCmnDlHqPResetTemp(hqP);
11321 /* Fix: syed dlAllocCb reset should be performed.
11322 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11323 node = allocInfo->msg4Alloc.nonSchdMsg4RetxLst.first;
11326 hqP = (RgSchDlHqProcCb *)(node->node);
11327 raCb = hqP->hqE->raCb;
11329 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
11330 rgSCHCmnDlHqPResetTemp(hqP);
11337 * @brief This function Processes the Final Allocations
11338 * made by the RB Allocator against the requested
11339 * CCCH SDU tx Allocations.
11343 * Function: rgSCHCmnDlCcchSduTxFnlz
11344 * Purpose: This function Processes the Final Allocations
11345 * made by the RB Allocator against the requested
11346 * CCCH tx Allocations.
11347 * Scans through the scheduled list of CCCH SDU trans
11348 * fills the corresponding pdcch, adds the hqProc to
11349 * the corresponding SubFrm and removes the hqP from
11352 * Invoked by: Common Scheduler
11354 * @param[in] RgSchCellCb *cell
11355 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for first transmissions of CCCH SDUs:
 * scheduled UEs get their PDCCH/HARQ info filled, are removed from the
 * cell's ccchSduUeLst, have their CCCH BO cleared, and the scheduled byte
 * count (minus the Msg4 header) is reported to DHM. Non-scheduled UEs get
 * TB0 of the HARQ process released (TB1 never exists for CCCH SDU) and
 * their temporary allocation state reset. */
11359 static Void rgSCHCmnDlCcchSduTxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11363 RgSchDlRbAlloc *rbAllocInfo;
11364 RgSchDlHqProcCb *hqP;
11365 RgSchLchAllocInfo lchSchdData;
11367 /* Traverse through the Scheduled Retx List */
11368 node = allocInfo->ccchSduAlloc.schdCcchSduTxLst.first;
11371 hqP = (RgSchDlHqProcCb *)(node->node);
11372 ueCb = hqP->hqE->ue;
11374 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
11376 /* fill the pdcch and HqProc */
11377 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11379 /* Remove the raCb from cell's toBeSchdLst */
11380 cmLListDelFrm(&cell->ccchSduUeLst, &ueCb->ccchSduLnk);
11381 ueCb->ccchSduLnk.node = (PTR)NULLP;
11383 /* Fix : Resetting this required to avoid complication
11384 * in reestablishment case */
11385 ueCb->dlCcchInfo.bo = 0;
11387 /* Indicate DHM of the CCCH LC scheduling */
11388 hqP->tbInfo[0].contResCe = NOTPRSNT;
11389 lchSchdData.lcId = 0;
11390 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
11391 (RGSCH_MSG4_HDRSIZE);
11392 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
11394 /* Fix: syed dlAllocCb reset should be performed.
11395 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11396 rgSCHCmnDlUeResetTemp(ueCb, hqP);
11398 /* Fix: syed dlAllocCb reset should be performed.
11399 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11400 node = allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst.first;
11403 hqP = (RgSchDlHqProcCb *)(node->node);
11404 ueCb = hqP->hqE->ue;
11406 /* Release HqProc */
11407 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
11408 /*Fix: Removing releasing of TB1 as it will not exist for CCCH SDU and hence caused a crash*/
11409 /*rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
11410 /* reset the UE allocation Information */
11411 rgSCHCmnDlUeResetTemp(ueCb, hqP);
11418 * @brief This function Processes the Final Allocations
11419 * made by the RB Allocator against the requested
11420 * CCCH tx Allocations.
11424 * Function: rgSCHCmnDlCcchTxFnlz
11425 * Purpose: This function Processes the Final Allocations
11426 * made by the RB Allocator against the requested
11427 * CCCH tx Allocations.
11428 * Scans through the scheduled list of msg4 trans
11429 * fills the corresponding pdcch, adds the hqProc to
11430 * the corresponding SubFrm and removes the hqP from
11433 * Invoked by: Common Scheduler
11435 * @param[in] RgSchCellCb *cell
11436 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalizes RB-allocator results for first transmissions of Msg4 (CCCH):
 * scheduled RA control blocks get their PDCCH/HARQ info filled, are pulled
 * from the RA scheduling list, and the scheduled bytes (minus Msg4 header
 * and contention-resolution CE size) are reported to DHM — but the LC data
 * is added only when there is actual CCCH BO, not when only the contention
 * resolution CE is sent. Non-scheduled entries release TB0 (TB1 never
 * exists for Msg4) and clear allocation state. */
11440 static Void rgSCHCmnDlCcchTxFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11444 RgSchDlRbAlloc *rbAllocInfo;
11445 RgSchDlHqProcCb *hqP;
11446 RgSchLchAllocInfo lchSchdData;
11448 /* Traverse through the Scheduled Retx List */
11449 node = allocInfo->msg4Alloc.schdMsg4TxLst.first;
11452 hqP = (RgSchDlHqProcCb *)(node->node);
11453 raCb = hqP->hqE->raCb;
11455 rbAllocInfo = &raCb->rbAllocInfo;
11457 /* fill the pdcch and HqProc */
11458 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
11459 /* MSG4 Fix Start */
11461 rgSCHRamRmvFrmRaInfoSchdLst(cell, raCb);
11464 /* Indicate DHM of the CCCH LC scheduling */
11465 lchSchdData.lcId = 0;
11466 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
11467 (RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE);
11468 /* Transmitting presence of cont Res CE across MAC-SCH interface to
11469 * identify CCCH SDU transmissions which need to be done
11471 * contention resolution CE*/
11472 hqP->tbInfo[0].contResCe = PRSNT_NODEF;
11473 /*Dont add lc if only cont res CE is being transmitted*/
11474 if(raCb->dlCcchInfo.bo)
11476 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
11481 /* Fix: syed dlAllocCb reset should be performed.
11482 * zombie info in dlAllocCb leading to crash rbNum wraparound */
11483 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
11484 rgSCHCmnDlHqPResetTemp(hqP);
11486 node = allocInfo->msg4Alloc.nonSchdMsg4TxLst.first;
11489 hqP = (RgSchDlHqProcCb *)(node->node);
11490 raCb = hqP->hqE->raCb;
11492 rbAllocInfo = &raCb->rbAllocInfo;
11493 /* Release HqProc */
11494 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
11495 /*Fix: Removing releasing of TB1 as it will not exist for MSG4 and hence caused a crash*/
11496 /* rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
11497 /* reset the UE allocation Information */
11498 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
11499 rgSCHCmnDlHqPResetTemp(hqP);
11506 * @brief This function calculates the BI Index to be sent in the Bi header
11510 * Function: rgSCHCmnGetBiIndex
11511 * Purpose: This function Processes utilizes the previous BI time value
11512 * calculated and the difference last BI sent time and current time. To
11513 * calculate the latest BI Index. It also considers the how many UE's
11514 * Unserved in this subframe.
11516 * Invoked by: Common Scheduler
11518 * @param[in] RgSchCellCb *cell
11519 * @param[in] uint32_t ueCount
11520 * @return uint8_t - index into rgSchCmnBiTbl (backoff parameter table)
/* NOTE(review): mangled extract - the do-loop header and the 'idx'
 * declaration are missing lines; kept byte-identical, comments only. */
11523 uint8_t rgSCHCmnGetBiIndex(RgSchCellCb *cell,uint32_t ueCount)
11525 S16 prevVal = 0; /* To Store Intermediate Value */
11526 uint16_t newBiVal = 0; /* To store Bi Value in millisecond */
11528 uint16_t timeDiff = 0;
/* Age the previously advertised BI by the subframes elapsed since it was
 * last sent (EMTC cells use the extended-SFN difference macro) */
11531 if (cell->biInfo.prevBiTime != 0)
11534 if(cell->emtcEnable == TRUE)
11536 timeDiff =(RGSCH_CALC_SF_DIFF_EMTC(cell->crntTime, cell->biInfo.biTime));
11541 timeDiff =(RGSCH_CALC_SF_DIFF(cell->crntTime, cell->biInfo.biTime));
11544 prevVal = cell->biInfo.prevBiTime - timeDiff;
/* Scale the residual BI by the number of unserved UEs in this subframe */
11550 newBiVal = RG_SCH_CMN_GET_BI_VAL(prevVal,ueCount);
11551 /* To be used next time when BI is calculated */
11553 if(cell->emtcEnable == TRUE)
11555 RGSCHCPYTIMEINFO_EMTC(cell->crntTime, cell->biInfo.biTime)
11560 RGSCHCPYTIMEINFO(cell->crntTime, cell->biInfo.biTime)
11563 /* Search the actual BI Index from table Backoff Parameters Value and
11564 * return that Index */
11567 if (rgSchCmnBiTbl[idx] > newBiVal)
11572 }while(idx < RG_SCH_CMN_NUM_BI_VAL-1);
11573 cell->biInfo.prevBiTime = rgSchCmnBiTbl[idx];
11574 /* For 16 Entries in Table 7.2.1 36.321.880 - 3 reserved so total 13 Entries */
11575 return (idx); /* Returning reserved value from table UE treats it has 960 ms */
11576 } /* rgSCHCmnGetBiIndex */
11580 * @brief This function Processes the Final Allocations
11581 * made by the RB Allocator against the requested
11582 * RAR allocations. Assumption: The reuqested
11583 * allocations are always satisfied completely.
11584 * Hence no roll back.
11588 * Function: rgSCHCmnDlRaRspFnlz
11589 * Purpose: This function Processes the Final Allocations
11590 * made by the RB Allocator against the requested.
11591 * Takes care of PDCCH filling.
11593 * Invoked by: Common Scheduler
11595 * @param[in] RgSchCellCb *cell
11596 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): mangled extract - branch/brace lines are missing
 * throughout (original file line numbers are fused into the text).
 * Kept byte-identical; comments only. */
11600 static Void rgSCHCmnDlRaRspFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11602 uint32_t rarCnt = 0;
11603 RgSchDlRbAlloc *raRspAlloc;
11604 RgSchDlSf *subFrm = NULLP;
11608 RgSchRaReqInfo *raReq;
11610 RgSchUlAlloc *ulAllocRef=NULLP;
11611 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
11612 uint8_t allocRapidCnt = 0;
11614 uint32_t msg3SchdIdx = 0;
11615 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
11616 uint8_t msg3Subfrm;
/* One common PDCCH (hence one RAR) per entry in raRspAlloc[] */
11620 for (rarCnt=0; rarCnt<RG_SCH_CMN_MAX_CMN_PDCCH; rarCnt++)
11622 raRspAlloc = &allocInfo->raRspAlloc[rarCnt];
11623 /* Having likely condition first for optimization */
11624 if (!raRspAlloc->pdcch)
11630 subFrm = raRspAlloc->dlSf;
11631 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
11632 /* Corrected RACH handling for multiple RAPIDs per RARNTI */
11633 allocRapidCnt = raRspAlloc->numRapids;
/* Consume up to numRapids RA requests from the head of the RA-RNTI list */
11634 while (allocRapidCnt)
11636 raReq = (RgSchRaReqInfo *)(reqLst->first->node);
11637 /* RACHO: If dedicated preamble, then allocate UL Grant
11638 * (consequence of handover/pdcchOrder) and continue */
11639 if (RGSCH_IS_DEDPRM(cell, raReq->raReq.rapId))
11641 rgSCHCmnHdlHoPo(cell, &subFrm->raRsp[rarCnt].contFreeUeLst,
11643 cmLListDelFrm(reqLst, reqLst->first);
11645 /* ccpu00117052 - MOD - Passing double pointer
11646 for proper NULLP assignment*/
11647 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11648 sizeof(RgSchRaReqInfo));
11652 if(cell->overLoadBackOffEnab)
11653 {/* rach Overlaod conrol is triggerd, Skipping this rach */
11654 cmLListDelFrm(reqLst, reqLst->first);
11656 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11657 sizeof(RgSchRaReqInfo));
11660 /* Attempt to include each RA request into the RSP */
11661 /* Any failure in the procedure is considered to */
11662 /* affect futher allocations in the same TTI. When */
11663 /* a failure happens, we break out and complete */
11664 /* the processing for random access */
11665 if (rgSCHRamCreateRaCb(cell, &raCb, &err) != ROK)
11669 /* Msg3 allocation request to USM */
11670 if (raReq->raReq.rapId < cell->rachCfg.sizeRaPreambleGrpA)
11674 /*ccpu00128820 - MOD - Msg3 alloc double delete issue*/
11675 rgSCHCmnMsg3GrntReq(cell, raCb->tmpCrnti, preamGrpA, \
11676 &(raCb->msg3HqProc), &ulAllocRef, &raCb->msg3HqProcId);
11677 if (ulAllocRef == NULLP)
11679 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* CQI from the preamble if reported, else the cell-default CCCH CQI */
11682 if (raReq->raReq.cqiPres)
11684 raCb->ccchCqi = raReq->raReq.cqiIdx;
11688 raCb->ccchCqi = cellDl->ccchCqi;
11690 raCb->rapId = raReq->raReq.rapId;
11691 raCb->ta.pres = TRUE;
11692 raCb->ta.val = raReq->raReq.ta;
11693 raCb->msg3Grnt = ulAllocRef->grnt;
11694 /* Populating the tpc value received */
11695 raCb->msg3Grnt.tpc = raReq->raReq.tpc;
11696 /* PHR handling for MSG3 */
11697 ulAllocRef->raCb = raCb;
11699 /* To the crntTime, add the MIN time at which UE will
11700 * actually send MSG3 i.e DL_DELTA+6 */
11701 raCb->msg3AllocTime = cell->crntTime;
11702 RGSCH_INCR_SUB_FRAME(raCb->msg3AllocTime, RG_SCH_CMN_MIN_MSG3_RECP_INTRVL);
11704 msg3SchdIdx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) %
11705 RGSCH_NUM_SUB_FRAMES;
11706 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
11707 special subframe */
11708 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][msg3SchdIdx] !=
11709 RG_SCH_TDD_UL_SUBFRAME)
11711 RGSCHCMNADDTOCRNTTIME(cell->crntTime,raCb->msg3AllocTime,
11712 RG_SCH_CMN_DL_DELTA)
11713 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][
11714 raCb->msg3AllocTime.slot];
11715 RGSCHCMNADDTOCRNTTIME(raCb->msg3AllocTime, raCb->msg3AllocTime,
11719 cmLListAdd2Tail(&subFrm->raRsp[rarCnt].raRspLst, &raCb->rspLnk);
11720 raCb->rspLnk.node = (PTR)raCb;
11721 cmLListDelFrm(reqLst, reqLst->first);
11723 /* ccpu00117052 - MOD - Passing double pointer
11724 for proper NULLP assignment*/
11725 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
11726 sizeof(RgSchRaReqInfo));
11728 /* SR_RACH_STATS : RAR scheduled */
11733 /* Fill subframe data members */
11734 subFrm->raRsp[rarCnt].raRnti = raRspAlloc->rnti;
11735 subFrm->raRsp[rarCnt].pdcch = raRspAlloc->pdcch;
11736 subFrm->raRsp[rarCnt].tbSz = raRspAlloc->tbInfo[0].bytesAlloc;
11737 /* Fill PDCCH data members */
11738 rgSCHCmnFillPdcch(cell, subFrm->raRsp[rarCnt].pdcch, raRspAlloc);
11741 if(cell->overLoadBackOffEnab)
11742 {/* rach Overlaod conrol is triggerd, Skipping this rach */
11743 subFrm->raRsp[rarCnt].backOffInd.pres = PRSNT_NODEF;
11744 subFrm->raRsp[rarCnt].backOffInd.val = cell->overLoadBackOffval;
11749 subFrm->raRsp[rarCnt].backOffInd.pres = NOTPRSNT;
11752 /*[ccpu00125212] Avoiding sending of empty RAR in case of RAR window
11753 is short and UE is sending unauthorised preamble.*/
11754 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
11755 if ((raRspAlloc->biEstmt) && (reqLst->count))
11757 subFrm->raRsp[0].backOffInd.pres = PRSNT_NODEF;
11758 /* Added as part of Upgrade */
11759 subFrm->raRsp[0].backOffInd.val =
11760 rgSCHCmnGetBiIndex(cell, reqLst->count);
11762 /* SR_RACH_STATS : Back Off Inds */
/* Nothing scheduled on this RAR: hand the PDCCH back to the pool */
11766 else if ((subFrm->raRsp[rarCnt].raRspLst.first == NULLP) &&
11767 (subFrm->raRsp[rarCnt].contFreeUeLst.first == NULLP))
11769 /* Return the grabbed PDCCH */
11770 rgSCHUtlPdcchPut(cell, &subFrm->pdcchInfo, raRspAlloc->pdcch);
11771 subFrm->raRsp[rarCnt].pdcch = NULLP;
11772 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRaRspAlloc(): "
11773 "Not even one RaReq.");
11777 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId,
11778 "RNTI:%d Scheduled RAR @ (%u,%u) ",
11780 cell->crntTime.sfn,
11781 cell->crntTime.slot);
11787 * @brief This function computes rv.
11791 * Function: rgSCHCmnDlCalcRvForBcch
11792 * Purpose: This function computes rv.
11794 * Invoked by: Common Scheduler
11796 * @param[in] RgSchCellCb *cell
11797 * @param[in] Bool si
11798 * @param[in] uint16_t i
/* NOTE(review): mangled extract - the k/rv declarations, the SI branch
 * (which presumably derives rv from 'i' - TODO confirm against the full
 * file) and the return statement are missing lines. Kept byte-identical. */
11802 static uint8_t rgSCHCmnDlCalcRvForBcch(RgSchCellCb *cell,Bool si,uint16_t i)
11805 CmLteTimingInfo frm;
/* Compute over the frame DL_DELTA subframes ahead of the current time */
11807 frm = cell->crntTime;
11808 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
/* SIB1 redundancy version: k = (SFN/2) mod 4, rv = ceil(3k/2) mod 4
 * (see 36.321 RV determination for SIB1) */
11816 k = (frm.sfn/2) % 4;
11818 rv = RGSCH_CEIL(3*k, 2) % 4;
11823 * @brief This function Processes the Final Allocations
11824 * made by the RB Allocator against the requested
11825 * BCCH/PCCH allocations. Assumption: The reuqested
11826 * allocations are always satisfied completely.
11827 * Hence no roll back.
11831 * Function: rgSCHCmnDlBcchPcchFnlz
11832 * Purpose: This function Processes the Final Allocations
11833 * made by the RB Allocator against the requested.
11834 * Takes care of PDCCH filling.
11836 * Invoked by: Common Scheduler
11838 * @param[in] RgSchCellCb *cell
11839 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): mangled extract - the three nextSfIdx declarations below
 * are alternatives of a missing #ifdef/#else ladder (HDFDD vs default),
 * not genuine redeclarations. Branch/brace/#endif lines are missing
 * throughout. Kept byte-identical; comments only. */
11843 static Void rgSCHCmnDlBcchPcchFnlz(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
11845 RgSchDlRbAlloc *rbAllocInfo;
11849 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
11851 #ifdef LTEMAC_HDFDD
11852 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11854 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11858 /* Moving variables to available scope for optimization */
11859 RgSchClcDlLcCb *pcch;
11862 RgSchClcDlLcCb *bcch;
11865 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* ---- PCCH (paging) allocation finalization ---- */
11869 rbAllocInfo = &allocInfo->pcchAlloc;
11870 if (rbAllocInfo->pdcch)
11872 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
11874 /* Added sfIdx calculation for TDD as well */
11876 #ifdef LTEMAC_HDFDD
11877 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11879 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11882 subFrm = rbAllocInfo->dlSf;
11883 pcch = rgSCHDbmGetPcch(cell);
11886 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnDlBcchPcchFnlz( ): "
11887 "No Pcch Present");
11891 /* Added Dl TB count for paging message transmission*/
11893 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Consume the head BO report now that the paging TB is scheduled */
11895 bo = (RgSchClcBoRpt *)pcch->boLst.first->node;
11896 cmLListDelFrm(&pcch->boLst, &bo->boLstEnt);
11897 /* ccpu00117052 - MOD - Passing double pointer
11898 for proper NULLP assignment*/
11899 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
11900 /* Fill subframe data members */
11901 subFrm->pcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
11902 subFrm->pcch.pdcch = rbAllocInfo->pdcch;
11903 /* Fill PDCCH data members */
11904 rgSCHCmnFillPdcch(cell, subFrm->pcch.pdcch, rbAllocInfo);
11905 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, pcch->lcId, TRUE);
11906 /* ccpu00132314-ADD-Update the tx power allocation info
11907 TODO-Need to add a check for max tx power per symbol */
11908 subfrmAlloc->cmnLcInfo.pcchInfo.txPwrOffset = cellDl->pcchTxPwrOffset;
/* ---- BCCH (SIB1 / SI message) allocation finalization ---- */
11912 rbAllocInfo = &allocInfo->bcchAlloc;
11913 if (rbAllocInfo->pdcch)
11915 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
11917 #ifdef LTEMAC_HDFDD
11918 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
11920 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
11923 subFrm = rbAllocInfo->dlSf;
11925 /* Fill subframe data members */
11926 subFrm->bcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
11927 subFrm->bcch.pdcch = rbAllocInfo->pdcch;
11928 /* Fill PDCCH data members */
11929 rgSCHCmnFillPdcch(cell, subFrm->bcch.pdcch, rbAllocInfo);
/* schdFirst distinguishes SIB1 (first BCCH-on-DLSCH) from SI messages */
11931 if(rbAllocInfo->schdFirst)
11934 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
11935 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
11937 /*Copy the SIB1 msg buff into interface buffer */
11938 SCpyMsgMsg(cell->siCb.crntSiInfo.sib1Info.sib1,
11939 rgSchCb[cell->instIdx].rgSchInit.region,
11940 rgSchCb[cell->instIdx].rgSchInit.pool,
11941 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11942 #endif/*RGR_SI_SCH*/
11943 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
11944 rgSCHCmnDlCalcRvForBcch(cell, FALSE, 0);
11952 i = cell->siCb.siCtx.i;
11953 /*Decrement the retransmission count */
11954 cell->siCb.siCtx.retxCntRem--;
11956 /*Copy the SI msg buff into interface buffer */
11957 if(cell->siCb.siCtx.warningSiFlag == FALSE)
11959 SCpyMsgMsg(cell->siCb.siArray[cell->siCb.siCtx.siId-1].si,
11960 rgSchCb[cell->instIdx].rgSchInit.region,
11961 rgSchCb[cell->instIdx].rgSchInit.pool,
11962 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
11966 pdu = rgSCHUtlGetWarningSiPdu(cell);
11967 RGSCH_NULL_CHECK(cell->instIdx, pdu);
11969 rgSchCb[cell->instIdx].rgSchInit.region,
11970 rgSchCb[cell->instIdx].rgSchInit.pool,
11971 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
/* Warning-SI PDU is freed after its final retransmission */
11972 if(cell->siCb.siCtx.retxCntRem == 0)
11974 rgSCHUtlFreeWarningSiPdu(cell);
11975 cell->siCb.siCtx.warningSiFlag = FALSE;
11980 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
11981 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
11983 if(bo->retxCnt != cell->siCfg.retxCnt-1)
11988 #endif/*RGR_SI_SCH*/
11989 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
11990 rgSCHCmnDlCalcRvForBcch(cell, TRUE, i);
11993 /* Added Dl TB count for SIB1 and SI messages transmission.
11994 * This counter will be incremented only for the first transmission
11995 * (with RV 0) of these messages*/
11997 if(subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv == 0)
11999 cell->dlUlTbCnt.tbTransDlTotalCnt++;
12003 if(bo->retxCnt == 0)
12005 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
12006 /* ccpu00117052 - MOD - Passing double pointer
12007 for proper NULLP assignment*/
12008 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
12010 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, bcch->lcId, sendInd);
12012 /*Fill the interface info */
12013 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, NULLD, NULLD);
12015 /* ccpu00132314-ADD-Update the tx power allocation info
12016 TODO-Need to add a check for max tx power per symbol */
12017 subfrmAlloc->cmnLcInfo.bcchInfo.txPwrOffset = cellDl->bcchTxPwrOffset;
12019 /*mBuf has been already copied above */
12020 #endif/*RGR_SI_SCH*/
12033 * Function: rgSCHCmnUlSetAllUnSched
12036 * Invoked by: Common Scheduler
12038 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12042 static Void rgSCHCmnUlSetAllUnSched(RgSchCmnUlRbAllocInfo *allocInfo)
12047 node = allocInfo->contResLst.first;
12050 rgSCHCmnUlMov2NonSchdCntResLst(allocInfo, (RgSchUeCb *)node->node);
12051 node = allocInfo->contResLst.first;
12054 node = allocInfo->retxUeLst.first;
12057 rgSCHCmnUlMov2NonSchdRetxUeLst(allocInfo, (RgSchUeCb *)node->node);
12058 node = allocInfo->retxUeLst.first;
12061 node = allocInfo->ueLst.first;
12064 rgSCHCmnUlMov2NonSchdUeLst(allocInfo, (RgSchUeCb *)node->node);
12065 node = allocInfo->ueLst.first;
12077 * Function: rgSCHCmnUlAdd2CntResLst
12080 * Invoked by: Common Scheduler
12082 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12083 * @param[in] RgSchUeCb *ue
12087 Void rgSCHCmnUlAdd2CntResLst(RgSchCmnUlRbAllocInfo *allocInfo,RgSchUeCb *ue)
12089 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,ue->cell))->alloc);
12090 cmLListAdd2Tail(&allocInfo->contResLst, &ulAllocInfo->reqLnk);
12091 ulAllocInfo->reqLnk.node = (PTR)ue;
12100 * Function: rgSCHCmnUlAdd2UeLst
12103 * Invoked by: Common Scheduler
12105 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
12106 * @param[in] RgSchUeCb *ue
12110 Void rgSCHCmnUlAdd2UeLst(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUeCb *ue)
12112 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,cell))->alloc);
12113 if (ulAllocInfo->reqLnk.node == NULLP)
12115 cmLListAdd2Tail(&allocInfo->ueLst, &ulAllocInfo->reqLnk);
12116 ulAllocInfo->reqLnk.node = (PTR)ue;
12126 * Function: rgSCHCmnAllocUlRb
12127 * Purpose: To do RB allocations for uplink
12129 * Invoked by: Common Scheduler
12131 * @param[in] RgSchCellCb *cell
12132 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12135 Void rgSCHCmnAllocUlRb(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo)
12137 RgSchUlSf *sf = allocInfo->sf;
12139 /* Schedule for new transmissions */
12140 rgSCHCmnUlRbAllocForLst(cell, sf, allocInfo->ueLst.count,
12141 &allocInfo->ueLst, &allocInfo->schdUeLst,
12142 &allocInfo->nonSchdUeLst, (Bool)TRUE);
12146 /***********************************************************
12148 * Func : rgSCHCmnUlRbAllocForLst
12150 * Desc : Allocate for a list in cmn rb alloc information passed
12159 **********************************************************/
/* NOTE(review): mangled extract - part of the parameter list (cell, sf,
 * count, reqLst, isNewTx) and several branch/brace lines are missing.
 * Kept byte-identical; comments only. Walks reqLst, attempting an UL RB
 * allocation per UE; successes go to schdLst (and alloc info is filled),
 * failures go to nonSchdLst; once one UE fails, the trailing loop moves
 * every remaining UE to nonSchdLst without trying. */
12160 static Void rgSCHCmnUlRbAllocForLst
12166 CmLListCp *schdLst,
12167 CmLListCp *nonSchdLst,
12176 CmLteTimingInfo timeInfo;
12180 if(schdLst->count == 0)
12182 cmLListInit(schdLst);
12185 cmLListInit(nonSchdLst);
/* For new Tx, record UE count and the PUSCH timing (TDD uses the k table,
 * else a fixed PDCCH->PUSCH delta) in the current sfAlloc entry */
12187 if(isNewTx == TRUE)
12189 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.numUes = (uint8_t) count;
12191 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime, timeInfo, TFU_ULCNTRL_DLDELTA);
12192 k = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][timeInfo.subframe];
12193 RG_SCH_ADD_TO_CRNT_TIME(timeInfo,
12194 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo, k);
12196 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime,cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo,
12197 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
12202 for (lnk = reqLst->first; count; lnk = lnk->next, --count)
12204 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
12205 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
12210 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
12215 ueUl->subbandShare = ueUl->subbandRequired;
12216 if(isNewTx == TRUE)
12218 maxRb = RGSCH_MIN((ueUl->subbandRequired * MAX_5GTF_VRBG_SIZE), ue->ue5gtfCb.maxPrb);
12220 ret = rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole);
12223 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, schdLst);
12224 rgSCHCmnUlUeFillAllocInfo(cell, ue);
12228 gUl5gtfRbAllocFail++;
12229 #if defined (TENB_STATS) && defined (RG_5GTF)
12230 cell->tenbStats->sch.ul5gtfRbAllocFail++;
12232 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
12233 ue->isMsg4PdcchWithCrnti = FALSE;
12234 ue->isSrGrant = FALSE;
12237 if(isNewTx == TRUE)
12239 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
12240 ulAllocInfo[count - 1].rnti = ue->ueId;
12241 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
12242 ulAllocInfo[count - 1].numPrb = ue->ul.nPrb;
12245 ueUl->subbandShare = 0; /* This reset will take care of
12246 * all scheduler types */
/* Remaining UEs (after the first allocation failure) are not scheduled */
12248 for (; count; lnk = lnk->next, --count)
12250 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
12251 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
12252 ue->isMsg4PdcchWithCrnti = FALSE;
12259 /***********************************************************
12261 * Func : rgSCHCmnUlMdfyGrntForCqi
12263 * Desc : Modify UL Grant to consider presence of
12264 * CQI along with PUSCH Data.
12269 * - Scale down iTbs based on betaOffset and
12270 * size of Acqi Size.
12271 * - Optionally attempt to increase numSb by 1
12272 * if input payload size does not fit in due
12273 * to reduced tbSz as a result of iTbsNew.
12277 **********************************************************/
/* NOTE(review): mangled extract - most of the parameter list (ue, maxRb,
 * numSb/iTbs out-params, hqSz, effTgt, ...), the do-loop header, and
 * success/failure returns are missing lines. Kept byte-identical.
 * Iteratively alternates between adding a subband and stepping iTbs down
 * until the PUSCH data efficiency target is met, or fails (RFAILED is
 * presumably returned - confirm against the full file). */
12278 static S16 rgSCHCmnUlMdfyGrntForCqi
12286 uint32_t stepDownItbs,
12290 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(ue->cell);
12295 uint32_t remREsForPusch;
12296 uint32_t bitsPerRe;
12298 uint32_t betaOffVal = ue->ul.betaOffstVal;
12299 uint32_t cqiRiRptSz = ue->ul.cqiRiSz;
12300 uint32_t betaOffHqVal = rgSchCmnBetaHqOffstTbl[ue->ul.betaHqOffst];
12301 uint32_t resNumSb = *numSb;
12302 uint32_t puschEff = 1000;
12305 Bool mdfyiTbsFlg = FALSE;
12306 uint8_t resiTbs = *iTbs;
/* Modulation order from the candidate iTbs, clamped by UE category
 * (only CAT-5 may exceed QPSK here per the visible branches) */
12312 iMcs = rgSCHCmnUlGetIMcsFrmITbs(resiTbs, RG_SCH_CMN_GET_UE_CTGY(ue));
12313 RG_SCH_UL_MCS_TO_MODODR(iMcs, modOdr);
12314 if (RG_SCH_CMN_GET_UE_CTGY(ue) != CM_LTE_UE_CAT_5)
12316 modOdr = RGSCH_MIN(RGSCH_QM_QPSK, modOdr);
12320 modOdr = RGSCH_MIN(RGSCH_QM_64QAM, modOdr);
12322 nPrb = resNumSb * cellUl->sbSize;
12323 /* Restricting the minumum iTbs requried to modify to 10 */
12324 if ((nPrb >= maxRb) && (resiTbs <= 10))
12326 /* Could not accomodate ACQI */
/* Estimate REs consumed by CQI/RI and HARQ UCI on PUSCH; beta offsets
 * are expressed as parts-per-1000 */
12329 totREs = nPrb * RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
12330 tbSz = rgTbSzTbl[0][resiTbs][nPrb-1];
12331 /* totalREs/tbSz = num of bits perRE. */
12332 cqiRiREs = (totREs * betaOffVal * cqiRiRptSz)/(1000 * tbSz); /* betaOffVal is represented
12333 as parts per 1000 */
12334 hqREs = (totREs * betaOffHqVal * hqSz)/(1000 * tbSz);
12335 if ((cqiRiREs + hqREs) < totREs)
12337 remREsForPusch = totREs - cqiRiREs - hqREs;
12338 bitsPerRe = (tbSz * 1000)/remREsForPusch; /* Multiplying by 1000 for Interger Oper */
12339 puschEff = bitsPerRe/modOdr;
12341 if (puschEff < effTgt)
12343 /* ensure resultant efficiency for PUSCH Data is within 0.93*/
12348 /* Alternate between increasing SB or decreasing iTbs until eff is met */
12349 if (mdfyiTbsFlg == FALSE)
12353 resNumSb = resNumSb + 1;
12355 mdfyiTbsFlg = TRUE;
12361 resiTbs-= stepDownItbs;
12363 mdfyiTbsFlg = FALSE;
12366 }while (1); /* Loop breaks if efficency is met
12367 or returns RFAILED if not able to meet the efficiency */
12376 /***********************************************************
12378 * Func : rgSCHCmnUlRbAllocForUe
12380 * Desc : Do uplink RB allocation for an UE.
12384 * Notes: Note that as of now, for retx, maxRb
12385 * is not considered. Alternatives, such
12386 * as dropping retx if it crosses maxRb
12387 * could be considered.
12391 **********************************************************/
/* NOTE(review): mangled extract - the parameter list, several
 * declarations and branch/brace/#endif lines are missing (original file
 * line numbers are fused into the text). Kept byte-identical.
 * 5GTF path: acquire a HARQ proc and a PDCCH, carve VRBGs for the UE's
 * beam out of the UL subframe, then fill the grant (RIV, MCS, mod order,
 * data size) and book-keep PRB counters. */
12392 static S16 rgSCHCmnUlRbAllocForUe
12401 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
12402 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
12403 RgSchUlAlloc *alloc = NULLP;
12409 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->schdHqProcIdx];
12411 RgSchUlHqProcCb *proc = NULLP;
12415 uint8_t numVrbgTemp;
12417 TfuDciFormat dciFrmt;
12422 rgSCHUhmGetAvlHqProc(cell, ue, &proc);
12425 //printf("UE [%d] HQ Proc unavailable\n", ue->ueId);
/* DCI format follows the UE's configured rank (A2 for rank 2, else A1) */
12430 if (ue->ue5gtfCb.rank == 2)
12432 dciFrmt = TFU_DCI_FORMAT_A2;
12437 dciFrmt = TFU_DCI_FORMAT_A1;
12440 /* 5gtf TODO : To pass dci frmt to this function */
12441 pdcch = rgSCHCmnPdcchAllocCrntSf(cell, ue);
12444 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
12445 "rgSCHCmnUlRbAllocForUe(): Could not get PDCCH for CRNTI:%d",ue->ueId);
12448 gUl5gtfPdcchSchd++;
12449 #if defined (TENB_STATS) && defined (RG_5GTF)
12450 cell->tenbStats->sch.ul5gtfPdcchSchd++;
12453 //TODO_SID using configured prb as of now
12454 nPrb = ue->ue5gtfCb.maxPrb;
12455 reqVrbg = nPrb/MAX_5GTF_VRBG_SIZE;
12456 iMcs = ue->ue5gtfCb.mcs; //gSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Sanity: per-beam VRBG accounting must stay within MAX_5GTF_VRBG */
12460 if((sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart > MAX_5GTF_VRBG)
12461 || (sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated > MAX_5GTF_VRBG))
12463 printf("5GTF_ERROR vrbg > 25 valstart = %d valalloc %d\n", sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart
12464 , sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated);
12469 /*TODO_SID: Workaround for alloc. Currently alloc is ulsf based. To handle multiple beams, we need a different
12470 design. Now alloc are formed based on MAX_5GTF_UE_SCH macro. */
12471 numVrbgTemp = MAX_5GTF_VRBG/MAX_5GTF_UE_SCH;
12474 alloc = rgSCHCmnUlSbAlloc(sf, numVrbgTemp,\
12477 if (alloc == NULLP)
12479 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
12480 "rgSCHCmnUlRbAllocForUe(): Could not get UlAlloc %d CRNTI:%d",numVrbg,ue->ueId);
/* Allocation failed: return the PDCCH grabbed above */
12481 rgSCHCmnPdcchRlsCrntSf(cell, pdcch);
12484 gUl5gtfAllocAllocated++;
12485 #if defined (TENB_STATS) && defined (RG_5GTF)
12486 cell->tenbStats->sch.ul5gtfAllocAllocated++;
12488 alloc->grnt.vrbgStart = sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart;
12489 alloc->grnt.numVrbg = numVrbg;
12490 alloc->grnt.numLyr = numLyr;
12491 alloc->grnt.dciFrmt = dciFrmt;
12493 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart += numVrbg;
12494 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated += numVrbg;
12496 //rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
12498 sf->totPrb += alloc->grnt.numRb;
12499 ue->ul.nPrb = alloc->grnt.numRb;
12501 if (ue->csgMmbrSta != TRUE)
12503 cellUl->ncsgPrbCnt += alloc->grnt.numRb;
12505 cellUl->totPrbCnt += (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
12506 alloc->pdcch = pdcch;
12507 alloc->grnt.iMcs = iMcs;
12508 alloc->grnt.iMcsCrnt = iMcsCrnt;
12509 alloc->grnt.hop = 0;
12510 /* Initial Num RBs support for UCI on PUSCH */
12512 ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
12514 alloc->forMsg3 = FALSE;
12515 //RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTb5gtfSzTbl[0], (iTbs));
12517 //ueUl->alloc.allocdBytes = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
12518 /* TODO_SID Allocating based on configured MCS as of now.
12519 Currently for format A2. When doing multi grp per tti, need to update this. */
12520 ueUl->alloc.allocdBytes = (rgSch5gtfTbSzTbl[iMcs]/8) * ue->ue5gtfCb.rank;
12522 alloc->grnt.datSz = ueUl->alloc.allocdBytes;
12523 //TODO_SID Need to check mod order.
12524 RG_SCH_CMN_TBS_TO_MODODR(iMcs, alloc->grnt.modOdr);
12525 //alloc->grnt.modOdr = 6;
12526 alloc->grnt.isRtx = FALSE;
12528 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
12529 alloc->grnt.SCID = 0;
12530 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
12531 alloc->grnt.PMI = 0;
12532 alloc->grnt.uciOnxPUSCH = 0;
12533 alloc->grnt.hqProcId = proc->procId;
12535 alloc->hqProc = proc;
12536 alloc->hqProc->ulSfIdx = cellUl->schdIdx;
12538 /*commenting to retain the rnti used for transmission SPS/c-rnti */
12539 alloc->rnti = ue->ueId;
12540 ueUl->alloc.alloc = alloc;
12541 /*rntiwari-Adding the debug for generating the graph.*/
12542 /* No grant attr recorded now */
12546 /***********************************************************
12548 * Func : rgSCHCmnUlRbAllocAddUeToLst
12550 * Desc : Add UE to list (scheduled/non-scheduled list)
12551 * for UL RB allocation information.
12559 **********************************************************/
12560 Void rgSCHCmnUlRbAllocAddUeToLst(RgSchCellCb *cell,RgSchUeCb *ue,CmLListCp *lst)
12562 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
12565 gUl5gtfUeRbAllocDone++;
12566 #if defined (TENB_STATS) && defined (RG_5GTF)
12567 cell->tenbStats->sch.ul5gtfUeRbAllocDone++;
12569 cmLListAdd2Tail(lst, &ueUl->alloc.schdLstLnk);
12570 ueUl->alloc.schdLstLnk.node = (PTR)ue;
12575 * @brief This function Processes the Final Allocations
12576 * made by the RB Allocator against the requested.
12580 * Function: rgSCHCmnUlAllocFnlz
12581 * Purpose: This function Processes the Final Allocations
12582 * made by the RB Allocator against the requested.
12584 * Invoked by: Common Scheduler
12586 * @param[in] RgSchCellCb *cell
12587 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12591 static Void rgSCHCmnUlAllocFnlz(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo)
12593 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12595 /* call scheduler specific Finalization */
12596 cellSch->apisUl->rgSCHUlAllocFnlz(cell, allocInfo);
12602 * @brief This function Processes the Final Allocations
12603 * made by the RB Allocator against the requested.
12607 * Function: rgSCHCmnDlAllocFnlz
12608 * Purpose: This function Processes the Final Allocations
12609 * made by the RB Allocator against the requested.
12611 * Invoked by: Common Scheduler
12613 * @param[in] RgSchCellCb *cell
/* NOTE(review): mangled extract - braces and (presumably) #ifdef guards
 * around the CCCH-SDU calls are missing lines; kept byte-identical.
 * Finalizes all common-channel DL allocations for the TTI in order:
 * CCCH retx/tx, CCCH-SDU retx/tx, RAR, then the scheduler-specific
 * finalization hook. */
12617 Void rgSCHCmnDlAllocFnlz(RgSchCellCb *cell)
12619 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12620 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
12623 rgSCHCmnDlCcchRetxFnlz(cell, allocInfo);
12624 rgSCHCmnDlCcchTxFnlz(cell, allocInfo);
12626 /* Added below functions for handling CCCH SDU transmission received
12628 * * guard timer expiry*/
12629 rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo);
12630 rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo);
12632 rgSCHCmnDlRaRspFnlz(cell, allocInfo);
12633 /* call scheduler specific Finalization */
12634 cellSch->apisDl->rgSCHDlAllocFnlz(cell, allocInfo);
12636 /* Stack Crash problem for TRACE5 Changes. Added the return below */
12643 * @brief Update an uplink subframe.
12647 * Function : rgSCHCmnUlUpdSf
12649 * For each allocation
12650 * - if no more tx needed
12651 * - Release allocation
12653 * - Perform retransmission
12655 * @param[in] RgSchUlSf *sf
/* NOTE(review): mangled extract - if/else brace lines are missing; kept
 * byte-identical. Drains every allocation of the UL subframe: allocations
 * whose HARQ is done (CRC received or no remaining Tx) are released,
 * otherwise they are queued for retransmission; finally the subframe is
 * reset for fresh allocations. */
12658 static Void rgSCHCmnUlUpdSf(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUlSf *sf)
12662 while ((lnk = sf->allocs.first))
12664 RgSchUlAlloc *alloc = (RgSchUlAlloc *)lnk->node;
12667 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
12672 /* If need to handle all retx together, run another loop separately */
12673 rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc);
12675 rgSCHCmnUlRlsUlAlloc(cell, sf, alloc);
12678 /* By this time, all allocs would have been cleared and
12679 * SF is reset to be made ready for new allocations. */
12680 rgSCHCmnUlSfReset(cell, sf);
12681 /* In case there are timing problems due to msg3
12682 * allocations being done in advance, (which will
12683 * probably happen with the current FDD code that
12684 * handles 8 subframes) one solution
12685 * could be to hold the (recent) msg3 allocs in a separate
12686 * list, and then possibly add that to the actual
12687 * list later. So at this time while allocations are
12688 * traversed, the recent msg3 ones are not seen. Anytime after
12689 * this (a good time is when the usual allocations
12690 * are made), msg3 allocations could be transferred to the
12691 * normal list. Not doing this now as it is assumed
12692 * that incorporation of TDD shall take care of this.
12700 * @brief Handle uplink allocation for retransmission.
12704 * Function : rgSCHCmnUlHndlAllocRetx
12706 * Processing Steps:
12707 * - Add to queue for retx.
12708 * - Do not release here, release happends as part
12709 * of the loop that calls this function.
12711 * @param[in] RgSchCellCb *cell
12712 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
12713 * @param[in] RgSchUlSf *sf
12714 * @param[in] RgSchUlAlloc *alloc
/* NOTE(review): mangled extract - declarations and the LHS of the
 * 'bytes = rgTbSzTbl[...]' assignment are missing lines; kept
 * byte-identical. UE allocations are queued on the retx UE list;
 * MSG3 allocations get a fresh non-adaptive retx alloc in this SF. */
12717 static Void rgSCHCmnUlHndlAllocRetx(RgSchCellCb *cell,RgSchCmnUlRbAllocInfo *allocInfo,RgSchUlSf *sf,RgSchUlAlloc *alloc)
12720 RgSchCmnUlUe *ueUl;
12722 rgTbSzTbl[0][rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs)]\
12723 [alloc->grnt.numRb-1]/8;
12724 if (!alloc->forMsg3)
/* NOTE(review): RG_SCH_CMN_GET_UL_UE takes (ue, cell) elsewhere in this
 * file but only one argument here - verify against the full file */
12726 ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue);
12727 ueUl->alloc.reqBytes = bytes;
12728 rgSCHUhmRetx(alloc->hqProc);
12729 rgSCHCmnUlAdd2RetxUeLst(allocInfo, alloc->ue);
12733 /* RACHO msg3 retx handling. Part of RACH procedure changes. */
12734 retxAlloc = rgSCHCmnUlGetUlAlloc(cell, sf, alloc->numSb);
12735 if (retxAlloc == NULLP)
12737 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
12738 "rgSCHCmnUlRbAllocForUe():Could not get UlAlloc for msg3Retx RNTI:%d",
/* New MSG3 retx alloc inherits grant/HARQ context from the old alloc;
 * iMcsCrnt encodes the redundancy version via the RV->iMCS table */
12742 retxAlloc->grnt.iMcs = alloc->grnt.iMcs;
12743 retxAlloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl\
12744 [alloc->hqProc->rvIdx];
12745 retxAlloc->grnt.nDmrs = 0;
12746 retxAlloc->grnt.hop = 0;
12747 retxAlloc->grnt.delayBit = 0;
12748 retxAlloc->rnti = alloc->rnti;
12749 retxAlloc->ue = NULLP;
12750 retxAlloc->pdcch = FALSE;
12751 retxAlloc->forMsg3 = TRUE;
12752 retxAlloc->raCb = alloc->raCb;
12753 retxAlloc->hqProc = alloc->hqProc;
12754 rgSCHUhmRetx(retxAlloc->hqProc);
12761 * @brief Uplink Scheduling Handler.
12765 * Function: rgSCHCmnUlAlloc
12766 * Purpose: This function Handles Uplink Scheduling.
12768 * Invoked by: Common Scheduler
12770 * @param[in] RgSchCellCb *cell
/* NOTE(review): mangled extract - numerous #ifdef and brace lines are
 * missing; kept byte-identical. Per-TTI UL scheduling driver: init the
 * alloc info and target UL subframe, re-seed the hole DB from the current
 * CFI, run adaptive retx first, then the specific UL scheduler, the RB
 * allocator, PUSCH group power control, DRX inactivity handling, and
 * finalization. */
12773 /* ccpu00132653- The definition of this function made common for TDD and FDD*/
12774 static Void rgSCHCmnUlAlloc(RgSchCellCb *cell)
12776 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12777 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
12778 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12779 RgSchCmnUlRbAllocInfo allocInfo;
12780 RgSchCmnUlRbAllocInfo *allocInfoRef = &allocInfo;
12787 /* Initializing RgSchCmnUlRbAllocInfo structure */
12788 rgSCHCmnInitUlRbAllocInfo(allocInfoRef);
12790 /* Get Uplink Subframe */
12791 allocInfoRef->sf = &cellUl->ulSfArr[cellUl->schdIdx];
12793 /* initializing the UL PRB count */
12794 allocInfoRef->sf->totPrb = 0;
12798 rgSCHCmnSpsUlTti(cell, allocInfoRef);
12801 if(*allocInfoRef->sf->allocCountRef == 0)
12805 if ((hole = rgSCHUtlUlHoleFirst(allocInfoRef->sf)) != NULLP)
12807 /* Sanity check of holeDb */
12808 if (allocInfoRef->sf->holeDb->count == 1 && hole->start == 0)
12810 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
12811 /* Re-Initialize available subbands because of CFI change*/
12812 allocInfoRef->sf->availSubbands = cell->dynCfiCb.\
12813 bwInfo[cellDl->currCfi].numSb;
12814 /*Currently initializing 5gtf ulsf specific initialization here.
12815 need to do at proper place */
12817 allocInfoRef->sf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
12818 allocInfoRef->sf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
/* Reset per-beam VRBG accounting for the fresh subframe */
12819 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
12821 allocInfoRef->sf->sfBeamInfo[idx].totVrbgAllocated = 0;
12822 allocInfoRef->sf->sfBeamInfo[idx].totVrbgRequired = 0;
12823 allocInfoRef->sf->sfBeamInfo[idx].vrbgStart = 0;
12829 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
12830 "Error! holeDb sanity check failed");
12835 /* Fix: Adaptive re-transmissions prioritised over other transmissions */
12836 /* perform adaptive retransmissions */
12837 rgSCHCmnUlSfReTxAllocs(cell, allocInfoRef->sf);
12841 /* Fix: syed Adaptive Msg3 Retx crash. Release all
12842 Harq processes for which adap Retx failed, to avoid
12843 blocking. This step should be done before New TX
12844 scheduling to make hqProc available. Right now we
12845 dont check if proc is in adap Retx list for considering
12846 it to be available. But now with this release that
12847 functionality would be correct. */
12849 rgSCHCmnUlSfRlsRetxProcs(cell, allocInfoRef->sf);
12852 /* Specific UL scheduler to perform UE scheduling */
12853 cellSch->apisUl->rgSCHUlSched(cell, allocInfoRef);
12855 /* Call UL RB allocator module */
12856 rgSCHCmnAllocUlRb(cell, allocInfoRef);
12858 /* Do group power control for PUSCH */
12859 rgSCHCmnGrpPwrCntrlPusch(cell, allocInfoRef->sf);
12861 cell->sc.apis->rgSCHDrxStrtInActvTmrInUl(cell);
12863 rgSCHCmnUlAllocFnlz(cell, allocInfoRef);
/* Periodic reset of 5GTF debug counters every 5000 TTIs */
12864 if(5000 == g5gtfTtiCnt)
12866 ul5gtfsidDlAlreadyMarkUl = 0;
12867 ul5gtfsidDlSchdPass = 0;
12868 ul5gtfsidUlMarkUl = 0;
12869 ul5gtfTotSchdCnt = 0;
12877 * @brief send Subframe Allocations.
12881 * Function: rgSCHCmnSndCnsldtInfo
12882 * Purpose: Send the scheduled
12883 * allocations to MAC for StaInd generation to Higher layers and
12884 * for MUXing. PST's RgInfSfAlloc to MAC instance.
12886 * Invoked by: Common Scheduler
12888 * @param[in] RgSchCellCb *cell
/* Sends the consolidated per-subframe allocation (RgInfSfAlloc) to the MAC
 * instance via RgSchMacSfAlloc, but only when there is something to send
 * (RAR/UE/common-LC/flow-control entries present). Advances crntSfIdx
 * afterwards so the next TTI consolidates into the next slot. */
12891 Void rgSCHCmnSndCnsldtInfo(RgSchCellCb *cell)
12893 RgInfSfAlloc *subfrmAlloc;
12895 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12898 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
12900 /* Send the allocations to MAC for MUXing */
12901 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
12902 subfrmAlloc->cellId = cell->cellId;
12903 /* Populate the List of UEs needing PDB-based Flow control */
12904 cellSch->apisDl->rgSCHDlFillFlwCtrlInfo(cell, subfrmAlloc);
/* NOTE(review): the same non-empty check appears twice below — presumably
 * alternate branches of conditional compilation (eMTC and/or UL-UE info
 * feature flags) whose #ifdef lines are not visible here; confirm guards. */
12906 if((subfrmAlloc->rarInfo.numRaRntis) ||
12908 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
12909 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
12910 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
12912 (subfrmAlloc->ueInfo.numUes) ||
12913 (subfrmAlloc->cmnLcInfo.bitMask) ||
12914 (subfrmAlloc->ulUeInfo.numUes) ||
12915 (subfrmAlloc->flowCntrlInfo.numUes))
12917 if((subfrmAlloc->rarInfo.numRaRntis) ||
12919 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
12920 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
12921 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
12923 (subfrmAlloc->ueInfo.numUes) ||
12924 (subfrmAlloc->cmnLcInfo.bitMask) ||
12925 (subfrmAlloc->flowCntrlInfo.numUes))
12928 RgSchMacSfAlloc(&pst, subfrmAlloc);
/* NOTE(review): two modulus variants — presumably TDD vs FDD ring sizes
 * under conditional compilation; only one is active in a given build. */
12931 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_NUM_SUB_FRAMES;
12933 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_SF_ALLOC_SIZE;
12939 * @brief Consolidate Subframe Allocations.
12943 * Function: rgSCHCmnCnsldtSfAlloc
12944 * Purpose: Consolidate Subframe Allocations.
12946 * Invoked by: Common Scheduler
12948 * @param[in] RgSchCellCb *cell
/* Consolidates the current subframe's DL allocations into the shared
 * RgInfSfAlloc entry: fills RAR and UE info from the DL subframe at
 * crntTime + RG_SCH_CMN_DL_DELTA, then pushes DRX-driven inactive UE
 * lists to the UL/DL schedulers and (re)starts DRX inactivity timers. */
12951 Void rgSCHCmnCnsldtSfAlloc(RgSchCellCb *cell)
12953 RgInfSfAlloc *subfrmAlloc;
12954 CmLteTimingInfo frm;
12956 CmLListCp dlDrxInactvTmrLst;
12957 CmLListCp dlInActvLst;
12958 CmLListCp ulInActvLst;
12959 RgSchCmnCell *cellSch = NULLP;
/* Local lists collected by rgSCHUtlFillRgInfUeInfo below */
12962 cmLListInit(&dlDrxInactvTmrLst);
12963 cmLListInit(&dlInActvLst);
12964 cmLListInit(&ulInActvLst);
12966 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
12968 /* Get Downlink Subframe */
12969 frm = cell->crntTime;
12970 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
12971 dlSf = rgSCHUtlSubFrmGet(cell, frm);
12973 /* Fill the allocation Info */
12974 rgSCHUtlFillRgInfRarInfo(dlSf, subfrmAlloc, cell);
12977 rgSCHUtlFillRgInfUeInfo(dlSf, cell, &dlDrxInactvTmrLst,
12978 &dlInActvLst, &ulInActvLst);
12979 #ifdef RG_PFS_STATS
12980 cell->totalPrb += dlSf->bwAssigned;
12982 /* Mark the following Ues inactive for UL*/
12983 cellSch = RG_SCH_CMN_GET_CELL(cell);
12985 /* Calling Scheduler specific function with DRX inactive UE list*/
12986 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInActvLst);
12987 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInActvLst);
12990 /*re/start DRX inactivity timer for the UEs*/
12991 (Void)rgSCHDrxStrtInActvTmr(cell,&dlDrxInactvTmrLst,RG_SCH_DRX_DL);
12997 * @brief Initialize the DL Allocation Information Structure.
13001 * Function: rgSCHCmnInitDlRbAllocInfo
13002 * Purpose: Initialize the DL Allocation Information Structure.
13004 * Invoked by: Common Scheduler
13006 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* Resets the DL RB allocation scratch structure for a new TTI: zeroes the
 * PCCH/BCCH/RAR allocations and re-initializes every scheduling list
 * (Msg4, CCCH SDU, dedicated new-TX/retx, and feature-specific SPS/LAA/
 * error-indication lists — some behind compile-time guards). */
13009 static Void rgSCHCmnInitDlRbAllocInfo(RgSchCmnDlRbAllocInfo *allocInfo)
13011 memset(&allocInfo->pcchAlloc, 0, sizeof(RgSchDlRbAlloc));
13012 memset(&allocInfo->bcchAlloc, 0, sizeof(RgSchDlRbAlloc));
13013 memset(allocInfo->raRspAlloc, 0, RG_SCH_CMN_MAX_CMN_PDCCH*sizeof(RgSchDlRbAlloc));
/* Msg4 scheduling lists */
13015 allocInfo->msg4Alloc.msg4DlSf = NULLP;
13016 cmLListInit(&allocInfo->msg4Alloc.msg4TxLst);
13017 cmLListInit(&allocInfo->msg4Alloc.msg4RetxLst);
13018 cmLListInit(&allocInfo->msg4Alloc.schdMsg4TxLst);
13019 cmLListInit(&allocInfo->msg4Alloc.schdMsg4RetxLst);
13020 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4TxLst);
13021 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4RetxLst);
/* CCCH SDU scheduling lists */
13023 allocInfo->ccchSduAlloc.ccchSduDlSf = NULLP;
13024 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduTxLst);
13025 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduRetxLst);
13026 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduTxLst);
13027 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduRetxLst);
13028 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst);
13029 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst);
/* Dedicated (per-UE) HARQ process lists */
13032 allocInfo->dedAlloc.dedDlSf = NULLP;
13033 cmLListInit(&allocInfo->dedAlloc.txHqPLst);
13034 cmLListInit(&allocInfo->dedAlloc.retxHqPLst);
13035 cmLListInit(&allocInfo->dedAlloc.schdTxHqPLst);
13036 cmLListInit(&allocInfo->dedAlloc.schdRetxHqPLst);
13037 cmLListInit(&allocInfo->dedAlloc.nonSchdTxHqPLst);
13038 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxHqPLst);
13040 cmLListInit(&allocInfo->dedAlloc.txRetxHqPLst);
13041 cmLListInit(&allocInfo->dedAlloc.schdTxRetxHqPLst);
13042 cmLListInit(&allocInfo->dedAlloc.nonSchdTxRetxHqPLst);
/* SPS HARQ process lists (LTEMAC_SPS build) */
13044 cmLListInit(&allocInfo->dedAlloc.txSpsHqPLst);
13045 cmLListInit(&allocInfo->dedAlloc.retxSpsHqPLst);
13046 cmLListInit(&allocInfo->dedAlloc.schdTxSpsHqPLst);
13047 cmLListInit(&allocInfo->dedAlloc.schdRetxSpsHqPLst);
13048 cmLListInit(&allocInfo->dedAlloc.nonSchdTxSpsHqPLst);
13049 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxSpsHqPLst);
13053 rgSCHLaaCmnInitDlRbAllocInfo (allocInfo);
13056 cmLListInit(&allocInfo->dedAlloc.errIndTxHqPLst);
13057 cmLListInit(&allocInfo->dedAlloc.schdErrIndTxHqPLst);
13058 cmLListInit(&allocInfo->dedAlloc.nonSchdErrIndTxHqPLst);
13063 * @brief Initialize the UL Allocation Information Structure.
13067 * Function: rgSCHCmnInitUlRbAllocInfo
13068 * Purpose: Initialize the UL Allocation Information Structure.
13070 * Invoked by: Common Scheduler
13072 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
/* Resets the UL RB allocation scratch structure for a new TTI: clears the
 * subframe pointer and re-initializes contention-resolution and UE
 * scheduling lists (pending / scheduled / non-scheduled). */
13075 Void rgSCHCmnInitUlRbAllocInfo(RgSchCmnUlRbAllocInfo *allocInfo)
13077 allocInfo->sf = NULLP;
13078 cmLListInit(&allocInfo->contResLst);
13079 cmLListInit(&allocInfo->schdContResLst);
13080 cmLListInit(&allocInfo->nonSchdContResLst);
13081 cmLListInit(&allocInfo->ueLst);
13082 cmLListInit(&allocInfo->schdUeLst);
13083 cmLListInit(&allocInfo->nonSchdUeLst);
13089 * @brief Scheduling for PUCCH group power control.
13093 * Function: rgSCHCmnGrpPwrCntrlPucch
13094 * Purpose: This function does group power control for PUCCH
13095 * corresponding to the subframe for which DL UE allocations
13098 * Invoked by: Common Scheduler
13100 * @param[in] RgSchCellCb *cell
/* Thin wrapper: delegates PUCCH group power control for the given DL
 * subframe to the power-control module. */
13103 static Void rgSCHCmnGrpPwrCntrlPucch(RgSchCellCb *cell,RgSchDlSf *dlSf)
13105 rgSCHPwrGrpCntrlPucch(cell, dlSf);
13110 * @brief Scheduling for PUSCH group power control.
13114 * Function: rgSCHCmnGrpPwrCntrlPusch
13115 * Purpose: This function does group power control, for
13116 * the subframe for which UL allocation has (just) happened.
13118 * Invoked by: Common Scheduler
13120 * @param[in] RgSchCellCb *cell
13121 * @param[in] RgSchUlSf *ulSf
/* PUSCH group power control for the UL subframe just allocated. The power
 * module needs the corresponding DL subframe, which is derived from the
 * current time plus the DL-control delta (eMTC cells use their own
 * increment macro). */
13124 static Void rgSCHCmnGrpPwrCntrlPusch(RgSchCellCb *cell,RgSchUlSf *ulSf)
13126 /*removed unused variable *cellSch*/
13127 CmLteTimingInfo frm;
13131 /* Got to pass DL SF corresponding to UL SF, so get that first.
13132 * There is no easy way of getting dlSf by having the RgSchUlSf*,
13133 * so use the UL delta from current time to get the DL SF. */
13134 frm = cell->crntTime;
13137 if(cell->emtcEnable == TRUE)
13139 RGSCH_INCR_SUB_FRAME_EMTC(frm, TFU_DLCNTRL_DLDELTA);
13144 RGSCH_INCR_SUB_FRAME(frm, TFU_DLCNTRL_DLDELTA);
13146 /* Del filling of dl.time */
13147 dlSf = rgSCHUtlSubFrmGet(cell, frm);
13149 rgSCHPwrGrpCntrlPusch(cell, dlSf, ulSf);
13154 /* Fix: syed align multiple UEs to refresh at same time */
13155 /***********************************************************
13157 * Func : rgSCHCmnApplyUeRefresh
13159 * Desc : Apply UE refresh in CMN and Specific
13160 * schedulers. Data rates and corresponding
13161 * scratchpad variables are updated.
13169 **********************************************************/
/* Periodic per-UE refresh: restores the UE's effective AMBR and, per LCG,
 * the effective GBR/MBR budgets, then recomputes buffer-status aggregates
 * (GBR LCGs capped at GBR+deltaMBR, non-GBR LCGs capped at effAmbr).
 * Finally notifies the UL and DL specific schedulers of the refresh. */
13170 static S16 rgSCHCmnApplyUeRefresh(RgSchCellCb *cell,RgSchUeCb *ue)
13172 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13173 uint32_t effGbrBsr = 0;
13174 uint32_t effNonGbrBsr = 0;
13178 /* Reset the refresh cycle variableCAP */
13179 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* LCG 0 is skipped here; its BS is added separately into effBsr below */
13181 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
13183 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
13185 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
13187 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
13189 cmnLcg->effGbr = cmnLcg->cfgdGbr;
13190 cmnLcg->effDeltaMbr = cmnLcg->deltaMbr;
13191 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
13192 /* Considering GBR LCG will be prioritised by UE */
13193 effGbrBsr += cmnLcg->bs;
13194 }/* Else no remaing BS so nonLcg0 will be updated when BSR will be received */
13197 effNonGbrBsr += cmnLcg->reportedBs;
13198 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
13202 effNonGbrBsr = RGSCH_MIN(effNonGbrBsr,ue->ul.effAmbr);
13203 ue->ul.nonGbrLcgBs = effNonGbrBsr;
13205 ue->ul.nonLcg0Bs = effGbrBsr + effNonGbrBsr;
/* Total effective BSR = non-LCG0 aggregate + LCG0's own buffer status */
13206 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
13207 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
13210 /* call scheduler specific event handlers
13211 * for refresh timer expiry */
13212 cellSch->apisUl->rgSCHUlUeRefresh(cell, ue);
13213 cellSch->apisDl->rgSCHDlUeRefresh(cell, ue);
13218 /***********************************************************
13220 * Func : rgSCHCmnTmrExpiry
13222 * Desc : Adds an UE to refresh queue, so that the UE is
13223 * periodically triggered to refresh it's GBR and
13232 **********************************************************/
/* Refresh-timer expiry callback (invoked from cmPrcTmr): validates the
 * event under ERRCLS_DEBUG, applies the UE refresh, and re-arms the UE on
 * the refresh queue for the next RG_SCH_CMN_REFRESH_TIME period. */
13233 static S16 rgSCHCmnTmrExpiry
13235 PTR cb, /* Pointer to timer control block */
13236 S16 tmrEvnt /* Timer Event */
13239 RgSchUeCb *ue = (RgSchUeCb *)cb;
13240 RgSchCellCb *cell = ue->cell;
13241 #if (ERRCLASS & ERRCLS_DEBUG)
13245 #if (ERRCLASS & ERRCLS_DEBUG)
/* Only RG_SCH_CMN_EVNT_UE_REFRESH is expected on this callback */
13246 if (tmrEvnt != RG_SCH_CMN_EVNT_UE_REFRESH)
13248 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnTmrExpiry(): Invalid "
13249 "timer event CRNTI:%d",ue->ueId);
13256 rgSCHCmnApplyUeRefresh(cell, ue);
13258 rgSCHCmnAddUeToRefreshQ(cell, ue, RG_SCH_CMN_REFRESH_TIME);
13263 /***********************************************************
13265 * Func : rgSCHCmnTmrProc
13267 * Desc : Timer entry point per cell. Timer
13268 * processing is triggered at every frame boundary
13277 **********************************************************/
/* Per-cell timer tick, effective at frame boundaries (slot % num-subframes
 * == 0): periodically resets HCSG PRB counters, runs an exponentially
 * smoothed (95/5) UL/DL throughput measurement feeding CPU overload
 * control, and drives the common refresh timer queue. */
13278 static S16 rgSCHCmnTmrProc(RgSchCellCb *cell)
13280 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
13281 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
13282 /* Moving the assignment of scheduler pointer
13283 to available scope for optimization */
13285 if ((cell->crntTime.slot % RGSCH_NUM_SUB_FRAMES_5G) == 0)
13287 /* Reset the counters periodically */
13288 if ((cell->crntTime.sfn % RG_SCH_CMN_CSG_REFRESH_TIME) == 0)
13290 RG_SCH_RESET_HCSG_DL_PRB_CNTR(cmnDlCell);
13291 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cmnUlCell);
13293 if ((cell->crntTime.sfn % RG_SCH_CMN_OVRLDCTRL_REFRESH_TIME) == 0)
/* IIR smoothing: 95% previous estimate + 5% bytes seen this interval */
13296 cell->measurements.ulTpt = ((cell->measurements.ulTpt * 95) + ( cell->measurements.ulBytesCnt * 5))/100;
13297 cell->measurements.dlTpt = ((cell->measurements.dlTpt * 95) + ( cell->measurements.dlBytesCnt * 5))/100;
13299 rgSCHUtlCpuOvrLdAdjItbsCap(cell);
13300 /* reset cell level tpt measurements for next cycle */
13301 cell->measurements.ulBytesCnt = 0;
13302 cell->measurements.dlBytesCnt = 0;
13304 /* Comparing with Zero instead of % is being done for efficiency.
13305 * If Timer resolution changes then accordingly update the
13306 * macro RG_SCH_CMN_REFRESH_TIMERES */
13307 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
13308 cmPrcTmr(&sched->tmrTqCp, sched->tmrTq, (PFV)rgSCHCmnTmrExpiry);
13315 /***********************************************************
13317 * Func : rgSchCmnUpdCfiVal
13319 * Desc : Update the CFI value if CFI switch was done
13327 **********************************************************/
/* Applies a pending dynamic-CFI switch at the scheduled subframe: when the
 * DL subframe at crntTime+delta matches dynCfiCb.pdcchSfIdx, commits
 * newCfi as currCfi, recomputes nCce from the CFI->nCCE table, and starts
 * the ACK-everything switch-over window. Contains interleaved TDD and FDD
 * variants (mPhich/special-subframe CFI vs. plain table lookup) —
 * presumably under conditional compilation not visible here. */
13328 static Void rgSchCmnUpdCfiVal(RgSchCellCb *cell,uint8_t delta)
13331 CmLteTimingInfo pdsch;
13332 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
13338 uint8_t splSfCfi = 0;
13342 pdsch = cell->crntTime;
13343 RGSCH_INCR_SUB_FRAME(pdsch, delta);
13344 dlSf = rgSCHUtlSubFrmGet(cell, pdsch);
13345 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
13346 *change happens in that SF then UL PDCCH allocation happens with old CFI
13347 *but CFI in control Req goes updated one since it was stored in the CELL
13349 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* 0xFF marks "no CFI switch pending" for pdcchSfIdx */
13350 if(cell->dynCfiCb.pdcchSfIdx != 0xFF)
13353 dlIdx = rgSCHUtlGetDlSfIdx(cell, &pdsch);
13355 dlIdx = (((pdsch.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (pdsch.slot % RGSCH_NUM_SUB_FRAMES));
13356 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
13358 /* If current downlink subframe index is same as pdcch SF index,
13359 * perform the switching of CFI in this subframe */
13360 if(cell->dynCfiCb.pdcchSfIdx == dlIdx)
13362 cellCmnDl->currCfi = cellCmnDl->newCfi;
13363 cell->dynCfiCb.pdcchSfIdx = 0xFF;
13365 /* Updating the nCce value based on the new CFI */
13367 splSfCfi = cellCmnDl->newCfi;
13368 for(idx = 0; idx < cell->numDlSubfrms; idx++)
13370 tddSf = cell->subFrms[idx];
13372 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][tddSf->sfNum];
13374 if(tddSf->sfType == RG_SCH_SPL_SF_DATA)
/* TDD special subframes may use a clamped CFI depending on DL bandwidth */
13376 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
13378 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
13382 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][cellCmnDl->currCfi];
13385 /* Setting the switch over window length based on config index.
13386 * During switch over period all the UL trnsmissions are Acked
13388 cell->dynCfiCb.switchOvrWinLen =
13389 rgSchCfiSwitchOvrWinLen[cell->ulDlCfgIdx];
13391 cell->nCce = cell->dynCfiCb.cfi2NCceTbl[0][cellCmnDl->currCfi];
13392 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
13393 *change happens in that SF then UL PDCCH allocation happens with old CFI
13394 *but CFI in control Req goes updated one since it was stored in the CELL
13396 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* Index 7 is presumably the FDD entry of the switch-over table — confirm */
13397 cell->dynCfiCb.switchOvrWinLen = rgSchCfiSwitchOvrWinLen[7];
13405 /***********************************************************
13407 * Func : rgSchCmnUpdtPdcchSfIdx
13409 * Desc : Update the switch over window length
13417 **********************************************************/
/* Resets dynamic-CFI monitoring state after a CFI change decision and
 * computes the subframe index (pdcchSfIdx) at which the new CFI will be
 * applied. NOTE(review): two signatures appear back-to-back — presumably
 * the TDD variant (with sfNum) and the FDD variant under #ifdef LTE_TDD /
 * #else; confirm the guards in the full file. */
13419 static Void rgSchCmnUpdtPdcchSfIdx(RgSchCellCb *cell,uint8_t dlIdx,uint8_t sfNum)
13421 static Void rgSchCmnUpdtPdcchSfIdx(RgSchCellCb *cell,uint8_t dlIdx)
13427 /* Resetting the parameters on CFI switching */
13428 cell->dynCfiCb.cceUsed = 0;
13429 cell->dynCfiCb.lowCceCnt = 0;
13431 cell->dynCfiCb.cceFailSum = 0;
13432 cell->dynCfiCb.cceFailCnt = 0;
13433 cell->dynCfiCb.prevCceFailIdx = 0;
13435 cell->dynCfiCb.switchOvrInProgress = TRUE;
13437 for(idx = 0; idx < cell->dynCfiCb.numFailSamples; idx++)
13439 cell->dynCfiCb.cceFailSamples[idx] = 0;
13442 cell->dynCfiCb.ttiCnt = 0;
13444 cell->dynCfiCb.cfiSwitches++;
13445 cfiSwitchCnt = cell->dynCfiCb.cfiSwitches;
/* TDD: offset from per-config PDCCH SF increment table; FDD: fixed delta */
13448 cell->dynCfiCb.pdcchSfIdx = (dlIdx +
13449 rgSchTddPdcchSfIncTbl[cell->ulDlCfgIdx][sfNum]) % cell->numDlSubfrms;
13451 cell->dynCfiCb.pdcchSfIdx = (dlIdx + RG_SCH_CFI_APPLY_DELTA) % \
13452 RGSCH_NUM_DL_slotS;
13456 /***********************************************************
13458 * Func : rgSchCmnUpdCfiDb
13460 * Desc : Update the counters related to dynamic
13461 * CFI feature in cellCb.
13469 **********************************************************/
/* Dynamic-CFI decision engine, run per TTI (outside a switch-over window):
 * - Static CFI mode: steps currCfi up/down toward the configured value.
 * - Dynamic mode: over a monitoring interval (RGSCH_CFI_TTI_MON_INTRVL),
 *   steps CFI up when CCE allocation failures accumulate past
 *   cfiStepUpTtiCnt, and steps down when there were no failures, CCE
 *   usage stayed below the next-lower CFI's capacity threshold
 *   (RGSCH_CFI_CCE_PERCNTG), for cfiStepDownTtiCnt TTIs.
 * When newCfi differs from currCfi it schedules the switch via
 * rgSchCmnUpdtPdcchSfIdx. TDD/FDD variants are interleaved (alternate
 * dlIdx derivations and the two trailing calls) — presumably under
 * conditional compilation not visible in this view. */
13470 Void rgSchCmnUpdCfiDb(RgSchCellCb *cell,uint8_t delta)
13472 CmLteTimingInfo frm;
13478 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13479 uint8_t nCceLowerCfi = 0;
13481 uint8_t cceFailIdx;
13487 /* Get Downlink Subframe */
13488 frm = cell->crntTime;
13489 RGSCH_INCR_SUB_FRAME(frm, delta);
13492 dlIdx = rgSCHUtlGetDlSfIdx(cell, &frm);
13493 dlSf = cell->subFrms[dlIdx];
13494 isHiDci0 = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][dlSf->sfNum];
13496 /* Changing the idexing
13497 so that proper subframe is selected */
13498 dlIdx = (((frm.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (frm.slot % RGSCH_NUM_SUB_FRAMES));
13499 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
13500 dlSf = cell->subFrms[dlIdx];
13503 currCfi = cellSch->dl.currCfi;
/* No decisions while a previous CFI switch-over is still in progress */
13505 if(!cell->dynCfiCb.switchOvrInProgress)
13508 if(!cell->dynCfiCb.isDynCfiEnb)
/* Static CFI: converge toward the RRM-configured CFI one step at a time */
13510 if(currCfi != cellSch->cfiCfg.cfi)
13512 if(currCfi < cellSch->cfiCfg.cfi)
13514 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
13515 cfiIncr = cell->dynCfiCb.cfiIncr;
13519 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
13520 cfiDecr = cell->dynCfiCb.cfiDecr;
13527 /* Setting ttiMod to 0 for ttiCnt > 1000 in case if this
13528 * function was not called in UL subframe*/
13529 if(cell->dynCfiCb.ttiCnt > RGSCH_CFI_TTI_MON_INTRVL)
13536 ttiMod = cell->dynCfiCb.ttiCnt % RGSCH_CFI_TTI_MON_INTRVL;
13539 dlSf->dlUlBothCmplt++;
/* Evaluate only once both DL and UL scheduling completed for this SF
 * (or when no HI/DCI0 is expected in it) */
13541 if((dlSf->dlUlBothCmplt == 2) || (!isHiDci0))
13543 if(dlSf->dlUlBothCmplt == 2)
13546 /********************STEP UP CRITERIA********************/
13547 /* Updating the CCE failure count parameter */
13548 cell->dynCfiCb.cceFailCnt += dlSf->isCceFailure;
13549 cell->dynCfiCb.cceFailSum += dlSf->isCceFailure;
13551 /* Check if cfi step up can be performed */
13552 if(currCfi < cell->dynCfiCb.maxCfi)
13554 if(cell->dynCfiCb.cceFailSum >= cell->dynCfiCb.cfiStepUpTtiCnt)
13556 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
13557 cfiIncr = cell->dynCfiCb.cfiIncr;
13562 /********************STEP DOWN CRITERIA********************/
13564 /* Updating the no. of CCE used in this dl subframe */
13565 cell->dynCfiCb.cceUsed += dlSf->cceCnt;
13567 if(currCfi > RGSCH_MIN_CFI_VAL)
13569 /* calculating the number of CCE for next lower CFI */
13571 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][dlSf->sfNum];
13572 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[mPhich][currCfi-1];
13574 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[0][currCfi-1];
13576 if(dlSf->cceCnt < nCceLowerCfi)
13578 /* Updating the count of TTIs in which no. of CCEs
13579 * used were less than the CCEs of next lower CFI */
13580 cell->dynCfiCb.lowCceCnt++;
13585 totalCce = (nCceLowerCfi * cell->dynCfiCb.cfiStepDownTtiCnt *
13586 RGSCH_CFI_CCE_PERCNTG)/100;
13588 if((!cell->dynCfiCb.cceFailSum) &&
13589 (cell->dynCfiCb.lowCceCnt >=
13590 cell->dynCfiCb.cfiStepDownTtiCnt) &&
13591 (cell->dynCfiCb.cceUsed < totalCce))
13593 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
13594 cfiDecr = cell->dynCfiCb.cfiDecr;
/* Sliding-window bookkeeping of CCE failures per sample period */
13600 cceFailIdx = ttiMod/cell->dynCfiCb.failSamplePrd;
13602 if(cceFailIdx != cell->dynCfiCb.prevCceFailIdx)
13604 /* New sample period has started. Subtract the old count
13605 * from the new sample period */
13606 cell->dynCfiCb.cceFailSum -= cell->dynCfiCb.cceFailSamples[cceFailIdx];
13608 /* Store the previous sample period data */
13609 cell->dynCfiCb.cceFailSamples[cell->dynCfiCb.prevCceFailIdx]
13610 = cell->dynCfiCb.cceFailCnt;
13612 cell->dynCfiCb.prevCceFailIdx = cceFailIdx;
13614 /* Resetting the CCE failure count as zero for next sample period */
13615 cell->dynCfiCb.cceFailCnt = 0;
13620 /* Restting the parametrs after Monitoring Interval expired */
13621 cell->dynCfiCb.cceUsed = 0;
13622 cell->dynCfiCb.lowCceCnt = 0;
13623 cell->dynCfiCb.ttiCnt = 0;
13626 cell->dynCfiCb.ttiCnt++;
/* CFI changed: schedule the actual switch at the computed PDCCH SF index */
13630 if(cellSch->dl.newCfi != cellSch->dl.currCfi)
13633 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, dlSf->sfNum);
13635 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx);
13642 * @brief Dl Scheduler for Broadcast and Common channel scheduling.
13646 * Function: rgSCHCmnDlCommonChSch
13647 * Purpose: This function schedules DL Common channels for LTE.
13648 * Invoked by TTI processing in TOM. Scheduling is done for
13649 * BCCH, PCCH, Msg4, CCCH SDU, RAR in that order
13651 * Invoked by: TOM (TTI processing)
13653 * @param[in] RgSchCellCb *cell
/* Per-TTI DL common-channel scheduling (invoked from TOM): ticks PDB
 * tracking, applies any pending CFI switch, handles DL-inactive UEs,
 * ticks refresh timers, then schedules BCCH/PCCH and CCCH/RAR subject to
 * the cell's DL-data and SI/DL stop flags. */
13656 Void rgSCHCmnDlCommonChSch(RgSchCellCb *cell)
13658 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13661 cellSch->apisDl->rgSCHDlTickForPdbTrkng(cell);
13662 rgSchCmnUpdCfiVal(cell, RG_SCH_CMN_DL_DELTA);
13664 /* handle Inactive UEs for DL */
13665 rgSCHCmnHdlDlInactUes(cell);
13667 /* Send a Tick to Refresh Timer */
13668 rgSCHCmnTmrProc(cell);
13670 if (cell->isDlDataAllwd && (cell->stopSiSch == FALSE))
13672 rgSCHCmnInitRbAlloc(cell);
13673 /* Perform DL scheduling of BCCH, PCCH */
13674 rgSCHCmnDlBcchPcchAlloc(cell);
/* SI scheduling suppressed: still count down the SI window */
13678 if(cell->siCb.inWindow != 0)
13680 cell->siCb.inWindow--;
13683 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
13685 rgSCHCmnDlCcchRarAlloc(cell);
13691 * @brief Scheduler invocation per TTI.
13695 * Function: rgSCHCmnUlSch
13696 * Purpose: This function implements UL scheduler alone. This is to
13697 * be able to perform scheduling with more flexibility.
13699 * Invoked by: TOM (TTI processing)
13701 * @param[in] RgSchCellCb *cell
/* Per-TTI UL scheduling entry (invoked from TOM): skips LAA SCells, runs
 * UL allocation when a valid UL schedule index exists, updates the
 * dynamic-CFI database, and manages the CFI switch-over window countdown
 * (applying any deferred dynamic-CFI reconfiguration when it ends). */
13704 Void rgSCHCmnUlSch(RgSchCellCb *cell)
13706 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* LAA SCells do not run the common UL scheduler */
13711 if(TRUE == rgSCHLaaSCellEnabled(cell))
13717 if(cellSch->ul.schdIdx != RGSCH_INVALID_INFO)
13719 rgSchCmnUpdCfiVal(cell, TFU_ULCNTRL_DLDELTA);
13721 /* Handle Inactive UEs for UL */
13722 rgSCHCmnHdlUlInactUes(cell);
13723 /* Perform UL Scheduling EVERY TTI */
13724 rgSCHCmnUlAlloc(cell);
13726 /* Calling function to update CFI parameters*/
13727 rgSchCmnUpdCfiDb(cell, TFU_ULCNTRL_DLDELTA);
13729 if(cell->dynCfiCb.switchOvrWinLen > 0)
13731 /* Decrementing the switchover window length */
13732 cell->dynCfiCb.switchOvrWinLen--;
13734 if(!cell->dynCfiCb.switchOvrWinLen)
13736 if(cell->dynCfiCb.dynCfiRecfgPend)
13738 /* Toggling the Dynamic CFI enabling */
13739 cell->dynCfiCb.isDynCfiEnb ^= 1;
13740 rgSCHDynCfiReCfg(cell, cell->dynCfiCb.isDynCfiEnb);
13741 cell->dynCfiCb.dynCfiRecfgPend = FALSE;
13743 cell->dynCfiCb.switchOvrInProgress = FALSE;
/* No schedulable UL SF this TTI: still give SPS its TTI tick */
13751 rgSCHCmnSpsUlTti(cell, NULLP);
13761 * @brief This function updates the scheduler with service for an UE.
13765 * Function: rgSCHCmnDlDedBoUpd
13766 * Purpose: This function should be called whenever there is a
13767 * change BO for a service.
13769 * Invoked by: BO and Scheduler
13771 * @param[in] RgSchCellCb* cell
13772 * @param[in] RgSchUeCb* ue
13773 * @param[in] RgSchDlLcCb* svc
/* Buffer-occupancy change notification for a dedicated DL service: may
 * queue a PDCCH order for an idle UE (RACHO), routes SPS-enabled services
 * to the SPS module, otherwise forwards to the eMTC or regular DL
 * specific scheduler, and finally updates SCell BO bookkeeping. */
13777 Void rgSCHCmnDlDedBoUpd(RgSchCellCb *cell,RgSchUeCb *ue,RgSchDlLcCb *svc)
13779 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13781 /* RACHO : if UEs idle time exceeded and a BO update
13782 * is received, then add UE to the pdcch Order Q */
13783 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
13785 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue, cell);
13786 /* If PDCCH order is already triggered and we are waiting for
13787 * RACH from UE then do not add to PdcchOdrQ. */
13788 if (ueDl->rachInfo.rapIdLnk.node == NULLP)
13790 rgSCHCmnDlAdd2PdcchOdrQ(cell, ue);
13796 /* If SPS service, invoke SPS module */
13797 if (svc->dlLcSpsCfg.isSpsEnabled)
13799 rgSCHCmnSpsDlDedBoUpd(cell, ue, svc);
13800 /* Note: Retrun from here, no update needed in other schedulers */
/* eMTC UEs use the eMTC-specific DL scheduler hook */
13805 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
13807 cellSch->apisEmtcDl->rgSCHDlDedBoUpd(cell, ue, svc);
13808 //printf("rgSCHEMTCDlDedBoUpd\n");
13813 cellSch->apisDl->rgSCHDlDedBoUpd(cell, ue, svc);
13818 rgSCHSCellDlDedBoUpd(cell, ue, svc);
13826 * @brief Removes an UE from Cell's TA List.
13830 * Function: rgSCHCmnRmvFrmTaLst
13831 * Purpose: Removes an UE from Cell's TA List.
13833 * Invoked by: Specific Scheduler
13835 * @param[in] RgSchCellCb* cell
13836 * @param[in] RgSchUeCb* ue
/* Removes a UE from the cell's timing-advance (TA) list; eMTC UEs are
 * delegated to the eMTC-specific removal helper. */
13840 Void rgSCHCmnRmvFrmTaLst(RgSchCellCb *cell,RgSchUeCb *ue)
13842 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
13845 if(cell->emtcEnable && ue->isEmtcUe)
13847 rgSCHEmtcRmvFrmTaLst(cellCmnDl,ue);
13852 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
/* Clear the link node so the UE is recognised as de-queued */
13853 ue->dlTaLnk.node = (PTR)NULLP;
13858 /* Fix: syed Remove the msg4Proc from cell
13859 * msg4Retx Queue. I have used CMN scheduler function
13860 * directly. Please define a new API and call this
13861 * function through that. */
13864 * @brief This function removes MSG4 HARQ process from cell RETX Queues.
13868 * Function: rgSCHCmnDlMsg4ProcRmvFrmRetx
13869 * Purpose: This function removes MSG4 HARQ process from cell RETX Queues.
13871 * Invoked by: UE/RACB deletion.
13873 * @param[in] RgSchCellCb* cell
13874 * @param[in] RgSchDlHqProc* hqP
/* Removes a Msg4 (or CCCH SDU) HARQ process from the cell's corresponding
 * retransmission queue; invoked on UE/RACB deletion (see note above). */
13878 Void rgSCHCmnDlMsg4ProcRmvFrmRetx(RgSchCellCb *cell,RgSchDlHqProcCb *hqP)
13880 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Non-NULL retxLnk.node means the process is currently queued for retx */
13882 if (hqP->tbInfo[0].ccchSchdInfo.retxLnk.node)
13884 if (hqP->hqE->msg4Proc == hqP)
13886 cmLListDelFrm(&cellSch->dl.msg4RetxLst, \
13887 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13888 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
13891 else if(hqP->hqE->ccchSduProc == hqP)
13893 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
13894 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13895 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
13904 * @brief This function adds a HARQ process for retx.
13908 * Function: rgSCHCmnDlProcAddToRetx
13909 * Purpose: This function adds a HARQ process to retransmission
13910 * queue. This may be performed when a HARQ ack is
13913 * Invoked by: HARQ feedback processing
13915 * @param[in] RgSchCellCb* cell
13916 * @param[in] RgSchDlHqProc* hqP
/* Queues a DL HARQ process for retransmission after a NACK: Msg4 and CCCH
 * SDU processes go on the cell-level retx lists; SPS processes go to the
 * SPS module (LTEMAC_SPS); dedicated processes go to the eMTC or regular
 * DL specific scheduler's retx handling. */
13920 Void rgSCHCmnDlProcAddToRetx(RgSchCellCb *cell,RgSchDlHqProcCb *hqP)
13922 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
13924 if (hqP->hqE->msg4Proc == hqP) /* indicating msg4 transmission */
13926 cmLListAdd2Tail(&cellSch->dl.msg4RetxLst, \
13927 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13928 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
13931 else if(hqP->hqE->ccchSduProc == hqP)
13933 /*If CCCH SDU being transmitted without cont res CE*/
13934 cmLListAdd2Tail(&cellSch->dl.ccchSduRetxLst,
13935 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
13936 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
13942 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
13944 /* Invoke SPS module for SPS HARQ proc re-transmission handling */
13945 rgSCHCmnSpsDlProcAddToRetx(cell, hqP);
13948 #endif /* LTEMAC_SPS */
13950 if((TRUE == cell->emtcEnable)
13951 && (TRUE == hqP->hqE->ue->isEmtcUe))
13953 cellSch->apisEmtcDl->rgSCHDlProcAddToRetx(cell, hqP);
13958 cellSch->apisDl->rgSCHDlProcAddToRetx(cell, hqP);
13966 * @brief This function performs RI validation and
13967 * updates it to the ueCb.
13971 * Function: rgSCHCmnDlSetUeRi
13972 * Purpose: This function performs RI validation and
13973 * updates it to the ueCb.
13975 * Invoked by: rgSCHCmnDlCqiInd
13977 * @param[in] RgSchCellCb *cell
13978 * @param[in] RgSchUeCb *ue
13979 * @param[in] uint8_t ri
13980 * @param[in] Bool isPeriodic
/* Validates a reported rank indicator and stores it in the UE control
 * block: clamps RI to the cell's antenna ports and the UE category's max
 * TX layers, repairs PMI on a 1->2 rank switch for TM4/2Tx, tracks
 * periodic-vs-aperiodic RI consistency (invalidating CQI on mismatch),
 * and sets/clears forced transmit diversity for rank-1 TM3 UEs. */
13984 static Void rgSCHCmnDlSetUeRi(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t ri,Bool isPer)
13986 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
13987 RgSchCmnUeInfo *ueSchCmn = RG_SCH_CMN_GET_CMN_UE(ue);
13990 RgSchUePCqiCb *cqiCb = RG_SCH_GET_UE_CELL_CQI_CB(ue,cell);
13995 /* FIX for RRC Reconfiguration issue */
13996 /* ccpu00140894- During Tx Mode transition RI report will not entertained for
13997 * specific during which SCH expecting UE can complete TX mode transition*/
13998 if (ue->txModeTransCmplt == FALSE)
14003 /* Restrict the Number of TX layers to cell->numTxAntPorts.
14004 * Protection from invalid RI values. */
14005 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
14007 /* Special case of converting PMI to sane value when
14008 * there is a switch in RI from 1 to 2 and PMI reported
14009 * for RI=1 is invalid for RI=2 */
14010 if ((cell->numTxAntPorts == 2) && (ue->mimoInfo.txMode == RGR_UE_TM_4))
14012 if ((ri == 2) && ( ueDl->mimoInfo.ri == 1))
14014 ueDl->mimoInfo.pmi = (ueDl->mimoInfo.pmi < 2)? 1:2;
14018 /* Restrict the Number of TX layers according to the UE Category */
14019 ueDl->mimoInfo.ri = RGSCH_MIN(ri, rgUeCatTbl[ueSchCmn->ueCat].maxTxLyrs);
/* RI distribution statistics (per-cell-index vs. index-0 variants are
 * presumably alternate builds under conditional compilation) */
14021 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].riCnt[ueDl->mimoInfo.ri-1]++;
14022 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
14026 ue->tenbStats->stats.nonPersistent.sch[0].riCnt[ueDl->mimoInfo.ri-1]++;
14027 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
14033 /* If RI is from Periodic CQI report */
14034 cqiCb->perRiVal = ueDl->mimoInfo.ri;
14035 /* Reset at every Periodic RI Reception */
14036 cqiCb->invalidateCqi = FALSE;
14040 /* If RI is from Aperiodic CQI report */
14041 if (cqiCb->perRiVal != ueDl->mimoInfo.ri)
14043 /* if this aperRI is different from last reported
14044 * perRI then invalidate all CQI reports till next
14046 cqiCb->invalidateCqi = TRUE;
14050 cqiCb->invalidateCqi = FALSE;
/* Rank > 1 lifts the forced-TD restriction; rank-1 TM3 re-imposes it */
14055 if (ueDl->mimoInfo.ri > 1)
14057 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
14059 else if (ue->mimoInfo.txMode == RGR_UE_TM_3) /* ri == 1 */
14061 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
14069 * @brief This function performs PMI validation and
14070 * updates it to the ueCb.
14074 * Function: rgSCHCmnDlSetUePmi
14075 * Purpose: This function performs PMI validation and
14076 * updates it to the ueCb.
14078 * Invoked by: rgSCHCmnDlCqiInd
14080 * @param[in] RgSchCellCb *cell
14081 * @param[in] RgSchUeCb *ue
14082 * @param[in] uint8_t pmi
/* Validates a reported PMI against the cell's antenna configuration and
 * current rank, stores it in the UE's DL MIMO info, and clears the
 * "no PMI" forced-TD flag. Reports during a TX-mode transition are
 * ignored (see ccpu00140894 in rgSCHCmnDlSetUeRi). */
14086 static S16 rgSCHCmnDlSetUePmi(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t pmi)
14088 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14090 if (ue->txModeTransCmplt == FALSE)
14095 if (cell->numTxAntPorts == 2)
14101 if (ueDl->mimoInfo.ri == 2)
14103 /*ccpu00118150 - MOD - changed pmi value validation from 0 to 2*/
14104 /* PMI 2 and 3 are invalid incase of 2 TxAnt and 2 Layered SM */
14105 if (pmi == 2 || pmi == 3)
/* Rank-2 reports are 0-based; stored value is 1-based — see +1 below */
14109 ueDl->mimoInfo.pmi = pmi+1;
14113 ueDl->mimoInfo.pmi = pmi;
14116 else if (cell->numTxAntPorts == 4)
14122 ueDl->mimoInfo.pmi = pmi;
14124 /* Reset the No PMI Flag in forceTD */
14125 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
14130  * @brief This function Updates the DL CQI on PUCCH for the UE.
14134  *     Function: rgSCHCmnDlProcCqiMode10
14136  *     This function updates the DL CQI on PUCCH for the UE.
14138  *     Invoked by: rgSCHCmnDlCqiOnPucchInd
14140  *     Processing Steps:
/* PUCCH reporting mode 1-0: wideband CQI, no PMI. A valid CQI (1..14, i.e.
 * non-zero and < RG_SCH_CMN_MAX_CQI) is copied to both codewords; an RI
 * report is validated and forwarded to rgSCHCmnDlSetUeRi. */
14142  *  @param[in]  RgSchCellCb        *cell
14143  *  @param[in]  RgSchUeCb          *ue
14144  *  @param[in]  TfuDlCqiRpt        *dlCqiRpt
14149 #ifdef RGR_CQI_REPT
14150 static inline Void rgSCHCmnDlProcCqiMode10
14154 TfuDlCqiPucch        *pucchCqi,
/* Non-RGR_CQI_REPT build: same handler without the isCqiAvail out-param. */
14158 static inline Void rgSCHCmnDlProcCqiMode10
14162 TfuDlCqiPucch       *pucchCqi
14166    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14168    if (pucchCqi->u.mode10Info.type == TFU_RPT_CQI)
14170       /*ccpu00109787 - ADD - Check for non-zero CQI*/
14171       /* Checking whether the decoded CQI is a value between 1 and 15*/
14172       if((pucchCqi->u.mode10Info.u.cqi) && (pucchCqi->u.mode10Info.u.cqi
14173                < RG_SCH_CMN_MAX_CQI))
14175          ueDl->cqiFlag = TRUE;
/* Mode 1-0 carries no per-codeword info: mirror CW0's CQI into CW1. */
14176          ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode10Info.u.cqi;
14177          ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14178          /* ccpu00117452 - MOD - Changed macro name from
14179             RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14180 #ifdef RGR_CQI_REPT
14181          *isCqiAvail = TRUE;
14189    else if (pucchCqi->u.mode10Info.type == TFU_RPT_RI)
14191       if ( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode10Info.u.ri) )
14193          rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode10Info.u.ri,
14198          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
14199             pucchCqi->u.mode10Info.u.ri,ue->ueId);
14206  * @brief This function Updates the DL CQI on PUCCH for the UE.
14210  *     Function: rgSCHCmnDlProcCqiMode11
14212  *     This function updates the DL CQI on PUCCH for the UE.
14214  *     Invoked by: rgSCHCmnDlCqiOnPucchInd
14216  *     Processing Steps:
14217  *      Process CQI MODE 11
/* PUCCH reporting mode 1-1: wideband CQI plus PMI. CW0 gets the reported
 * CQI; if a wideband differential CQI is present, CW1 is derived from it via
 * RG_SCH_UPDT_CW2_CQI. The PMI is validated/stored via rgSCHCmnDlSetUePmi.
 * RI reports are validated and forwarded to rgSCHCmnDlSetUeRi. */
14218  *  @param[in]  RgSchCellCb        *cell
14219  *  @param[in]  RgSchUeCb          *ue
14220  *  @param[in]  TfuDlCqiRpt        *dlCqiRpt
14225 #ifdef RGR_CQI_REPT
14226 static inline Void rgSCHCmnDlProcCqiMode11
14230 TfuDlCqiPucch       *pucchCqi,
/* Out-flag set when CW1's CQI was derived (MIMO second codeword). */
14232 Bool                *is2ndCwCqiAvail
14235 static inline Void rgSCHCmnDlProcCqiMode11
14239 TfuDlCqiPucch        *pucchCqi
14243    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14245    if (pucchCqi->u.mode11Info.type == TFU_RPT_CQI)
/* A fresh PUCCH report supersedes any PUSCH-based PMI feedback. */
14247       ue->mimoInfo.puschFdbkVld = FALSE;
14248       /*ccpu00109787 - ADD - Check for non-zero CQI*/
14249       if((pucchCqi->u.mode11Info.u.cqi.cqi) &&
14250          (pucchCqi->u.mode11Info.u.cqi.cqi < RG_SCH_CMN_MAX_CQI))
14252          ueDl->cqiFlag = TRUE;
14253          /* ccpu00117452 - MOD - Changed macro name from
14254             RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14255 #ifdef RGR_CQI_REPT
14256          *isCqiAvail = TRUE;
14258          ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode11Info.u.cqi.cqi;
14259          if (pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.pres)
14261             RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
14262                                 ueDl->mimoInfo.cwInfo[1].cqi, \
14263                                 pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.val);
14264 #ifdef RGR_CQI_REPT
14265             /* ccpu00117259 - ADD - Considering second codeword CQI info
14266                incase of MIMO for CQI Reporting */
14267             *is2ndCwCqiAvail = TRUE;
14275       rgSCHCmnDlSetUePmi(cell, ue, \
14276             pucchCqi->u.mode11Info.u.cqi.pmi);
14278    else if (pucchCqi->u.mode11Info.type == TFU_RPT_RI)
14280       if( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode11Info.u.ri))
14282          rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode11Info.u.ri,
14287          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
14288             pucchCqi->u.mode11Info.u.ri,ue->ueId);
14295  * @brief This function Updates the DL CQI on PUCCH for the UE.
14299  *     Function: rgSCHCmnDlProcCqiMode20
14301  *     This function updates the DL CQI on PUCCH for the UE.
14303  *     Invoked by: rgSCHCmnDlCqiOnPucchInd
14305  *     Processing Steps:
14306  *      Process CQI MODE 20
/* PUCCH reporting mode 2-0: UE-selected subband, no PMI. Only the wideband
 * part of the report is consumed here (isWideband check); the subband path's
 * lines are elided in this extract. Valid wideband CQI is mirrored to both
 * codewords; RI reports are forwarded to rgSCHCmnDlSetUeRi. */
14307  *  @param[in]  RgSchCellCb        *cell
14308  *  @param[in]  RgSchUeCb          *ue
14309  *  @param[in]  TfuDlCqiRpt        *dlCqiRpt
14314 #ifdef RGR_CQI_REPT
14315 static inline Void rgSCHCmnDlProcCqiMode20
14319 TfuDlCqiPucch       *pucchCqi,
14323 static inline Void rgSCHCmnDlProcCqiMode20
14327 TfuDlCqiPucch       *pucchCqi
14331    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14333    if (pucchCqi->u.mode20Info.type == TFU_RPT_CQI)
14335       if (pucchCqi->u.mode20Info.u.cqi.isWideband)
14337          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14338          if((pucchCqi->u.mode20Info.u.cqi.u.wideCqi) &&
14339             (pucchCqi->u.mode20Info.u.cqi.u.wideCqi < RG_SCH_CMN_MAX_CQI))
14341             ueDl->cqiFlag = TRUE;
14342             ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode20Info.u.cqi.\
/* No per-codeword info in mode 2-0: mirror CW0 into CW1. */
14344             ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14345             /* ccpu00117452 - MOD - Changed macro name from
14346                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14347 #ifdef RGR_CQI_REPT
14348             *isCqiAvail = TRUE;
14357    else if (pucchCqi->u.mode20Info.type == TFU_RPT_RI)
14359       if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode20Info.u.ri))
14361          rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode20Info.u.ri,
14366          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
14367             pucchCqi->u.mode20Info.u.ri,ue->ueId);
14375  * @brief This function Updates the DL CQI on PUCCH for the UE.
14379  *     Function: rgSCHCmnDlProcCqiMode21
14381  *     This function updates the DL CQI on PUCCH for the UE.
14383  *     Invoked by: rgSCHCmnDlCqiOnPucchInd
14385  *     Processing Steps:
14386  *      Process CQI MODE 21
/* PUCCH reporting mode 2-1: UE-selected subband with PMI. Wideband part:
 * CW0 gets the reported CQI, CW1 is derived from the differential CQI when
 * present (RG_SCH_UPDT_CW2_CQI), and the PMI is stored via
 * rgSCHCmnDlSetUePmi. RI reports go to rgSCHCmnDlSetUeRi. */
14387  *  @param[in]  RgSchCellCb        *cell
14388  *  @param[in]  RgSchUeCb          *ue
14389  *  @param[in]  TfuDlCqiRpt        *dlCqiRpt
14394 #ifdef RGR_CQI_REPT
14395 static inline Void rgSCHCmnDlProcCqiMode21
14399 TfuDlCqiPucch       *pucchCqi,
14401 Bool                *is2ndCwCqiAvail
14404 static inline Void rgSCHCmnDlProcCqiMode21
14408 TfuDlCqiPucch        *pucchCqi
14412    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14414    if (pucchCqi->u.mode21Info.type == TFU_RPT_CQI)
/* Fresh PUCCH report invalidates earlier PUSCH-based feedback. */
14416       ue->mimoInfo.puschFdbkVld = FALSE;
14417       if (pucchCqi->u.mode21Info.u.cqi.isWideband)
14419          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14420          if((pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi) &&
14421             (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi < RG_SCH_CMN_MAX_CQI))
14423             ueDl->cqiFlag = TRUE;
14424             ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode21Info.u.cqi.\
14426             if (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.pres)
14428                RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
14429                                    ueDl->mimoInfo.cwInfo[1].cqi, \
14430                                    pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.val);
14431 #ifdef RGR_CQI_REPT
14432                /* ccpu00117259 - ADD - Considering second codeword CQI info
14433                   incase of MIMO for CQI Reporting */
14434                *is2ndCwCqiAvail = TRUE;
14437             /* ccpu00117452 - MOD - Changed macro name from
14438                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14439 #ifdef RGR_CQI_REPT
14440             *isCqiAvail = TRUE;
14447             rgSCHCmnDlSetUePmi(cell, ue, \
14448                   pucchCqi->u.mode21Info.u.cqi.u.wideCqi.pmi);
14451    else if (pucchCqi->u.mode21Info.type == TFU_RPT_RI)
14453       if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode21Info.u.ri))
14455          rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode21Info.u.ri,
14460          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
14461             pucchCqi->u.mode21Info.u.ri,ue->ueId);
14469  * @brief This function Updates the DL CQI on PUCCH for the UE.
14473  *     Function: rgSCHCmnDlCqiOnPucchInd
14475  *     This function updates the DL CQI on PUCCH for the UE.
14477  *     Invoked by: rgSCHCmnDlCqiInd
14479  *     Processing Steps:
14480  *     - Depending on the reporting mode of the PUCCH, the CQI/PMI/RI values
14481  *       are updated and stored for each UE
/* Dispatcher for the four PUCCH CQI reporting modes (1-0/1-1/2-0/2-1).
 * Under RGR_CQI_REPT the mode is recorded in ueCqiRept and the per-mode
 * handlers fill isCqiAvail/is2ndCwCqiAvail for the RRM report. */
14483  *  @param[in]  RgSchCellCb        *cell
14484  *  @param[in]  RgSchUeCb          *ue
14485  *  @param[in]  TfuDlCqiRpt        *dlCqiRpt
14490 #ifdef RGR_CQI_REPT
14491 static Void rgSCHCmnDlCqiOnPucchInd
14495 TfuDlCqiPucch       *pucchCqi,
14496 RgrUeCqiRept        *ueCqiRept,
14498 Bool                *is2ndCwCqiAvail
14501 static Void rgSCHCmnDlCqiOnPucchInd
14505 TfuDlCqiPucch       *pucchCqi
14509    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14511    /* ccpu00117452 - MOD - Changed
14512       RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14513 #ifdef RGR_CQI_REPT
14514    /* Save CQI mode information in the report */
14515    ueCqiRept->cqiMode = pucchCqi->mode;
14518    switch(pucchCqi->mode)
14520       case TFU_PUCCH_CQI_MODE10:
14521 #ifdef RGR_CQI_REPT
14522          rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail);
14524          rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi);
14526          ueDl->cqiFlag = TRUE;
14528       case TFU_PUCCH_CQI_MODE11:
14529 #ifdef RGR_CQI_REPT
14530          rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail,
14533          rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi);
14535          ueDl->cqiFlag = TRUE;
14537       case TFU_PUCCH_CQI_MODE20:
14538 #ifdef RGR_CQI_REPT
14539          rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail);
14541          rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi);
14543          ueDl->cqiFlag = TRUE;
14545       case TFU_PUCCH_CQI_MODE21:
14546 #ifdef RGR_CQI_REPT
14547          rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail,
14550          rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi);
14552          ueDl->cqiFlag = TRUE;
/* NOTE(review): format string has one conversion ("%d") but two variadic
 * args (mode, ueId) are passed to RLOG_ARG2 -- compare the PUSCH dispatcher,
 * which uses "Unknown CQI Mode %d CRNTI:%d". Likely a missing "CRNTI:%d". */
14556          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unknown CQI Mode %d",
14557             pucchCqi->mode,ue->ueId);
14558          /* ccpu00117452 - MOD - Changed macro name from
14559             RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14560 #ifdef RGR_CQI_REPT
14561          *isCqiAvail = FALSE;
14568 }  /* rgSCHCmnDlCqiOnPucchInd */
14572  * @brief This function Updates the DL CQI on PUSCH for the UE.
14576  *     Function: rgSCHCmnDlCqiOnPuschInd
14578  *     This function updates the DL CQI on PUSCH for the UE.
14580  *     Invoked by: rgSCHCmnDlCqiInd
14582  *     Processing Steps:
14583  *     - Depending on the reporting mode of the PUSCH, the CQI/PMI/RI values
14584  *       are updated and stored for each UE
/* Handles aperiodic (PUSCH) CQI modes 1-2/2-0/2-2/3-0/3-1. RI, when present,
 * is applied first via rgSCHCmnDlSetUeRi; for TM4 the previous RI is saved in
 * prevRiVal so it can be restored if the subsequent PMI update fails or the
 * CQI mode is unknown (keeps rank and PMI consistent). */
14586  *  @param[in]  RgSchCellCb        *cell
14587  *  @param[in]  RgSchUeCb          *ue
14588  *  @param[in]  TfuDlCqiRpt        *dlCqiRpt
14593 #ifdef RGR_CQI_REPT
14594 static Void rgSCHCmnDlCqiOnPuschInd
14598 TfuDlCqiPusch       *puschCqi,
14599 RgrUeCqiRept        *ueCqiRept,
14601 Bool                *is2ndCwCqiAvail
14604 static Void rgSCHCmnDlCqiOnPuschInd
14608 TfuDlCqiPusch        *puschCqi
14612    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14613    uint32_t prevRiVal = 0;
14614    if (puschCqi->ri.pres == PRSNT_NODEF)
14616       if (RG_SCH_CMN_IS_RI_VALID(puschCqi->ri.val))
14618          /* Saving the previous ri value to revert back
14619             in case PMI update failed */
14620          if (RGR_UE_TM_4 == ue->mimoInfo.txMode ) /* Cheking for TM4. TM8 check later */
14622             prevRiVal = ueDl->mimoInfo.ri;
14624          rgSCHCmnDlSetUeRi(cell, ue, puschCqi->ri.val, FALSE);
14628          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
14629             puschCqi->ri.val,ue->ueId);
/* Default: assume no valid PUSCH PMI feedback until a mode that carries PMI
 * (1-2/2-2/3-1) marks it valid below. */
14633    ue->mimoInfo.puschFdbkVld = FALSE;
14634    /* ccpu00117452 - MOD - Changed macro name from
14635       RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14636 #ifdef RGR_CQI_REPT
14637    /* Save CQI mode information in the report */
14638    ueCqiRept->cqiMode = puschCqi->mode;
14639    /* ccpu00117259 - DEL - removed default setting of isCqiAvail to TRUE */
14642    switch(puschCqi->mode)
14644       case TFU_PUSCH_CQI_MODE_20:
14645          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14646          /* Checking whether the decoded CQI is a value between 1 and 15*/
14647          if((puschCqi->u.mode20Info.wideBandCqi) &&
14648             (puschCqi->u.mode20Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
/* Mode 2-0 carries no per-codeword CQI: mirror CW0 into CW1. */
14650             ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode20Info.wideBandCqi;
14651             ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14652             /* ccpu00117452 - MOD - Changed macro name from
14653                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14654 #ifdef RGR_CQI_REPT
14655             *isCqiAvail = TRUE;
14663       case TFU_PUSCH_CQI_MODE_30:
14664          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14665          if((puschCqi->u.mode30Info.wideBandCqi) &&
14666             (puschCqi->u.mode30Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
14668             ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode30Info.wideBandCqi;
14669             ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
14670             /* ccpu00117452 - MOD - Changed macro name from
14671                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14672 #ifdef RGR_CQI_REPT
14673             *isCqiAvail = TRUE;
/* Declaration inside a conditional region whose guard is elided here;
 * presumably part of an aperiodic-CQI statistics counter block. */
14677                uint32_t gACqiRcvdCount;
14688       case TFU_PUSCH_CQI_MODE_12:
14689          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14690          if((puschCqi->u.mode12Info.cqiIdx[0]) &&
14691             (puschCqi->u.mode12Info.cqiIdx[0] < RG_SCH_CMN_MAX_CQI))
14693             ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode12Info.cqiIdx[0];
14694             /* ccpu00117452 - MOD - Changed macro name from
14695                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14696 #ifdef RGR_CQI_REPT
14697             *isCqiAvail = TRUE;
/* Second-codeword CQI is validated and stored independently of CW0. */
14704          if((puschCqi->u.mode12Info.cqiIdx[1]) &&
14705             (puschCqi->u.mode12Info.cqiIdx[1] < RG_SCH_CMN_MAX_CQI))
14707             ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode12Info.cqiIdx[1];
14708             /* ccpu00117452 - MOD - Changed macro name from
14709                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14710 #ifdef RGR_CQI_REPT
14711             /* ccpu00117259 - ADD - Considering second codeword CQI info
14712                incase of MIMO for CQI Reporting */
14713             *is2ndCwCqiAvail = TRUE;
14720          ue->mimoInfo.puschFdbkVld  = TRUE;
14721          ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_12;
14722          ue->mimoInfo.puschPmiInfo.u.mode12Info = puschCqi->u.mode12Info;
14723          /* : resetting this is time based. Make use of CQI reporting
14724           * periodicity, DELTA's in determining the exact time at which this
14725           * need to be reset. */
14727       case TFU_PUSCH_CQI_MODE_22:
14728          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14729          if((puschCqi->u.mode22Info.wideBandCqi[0]) &&
14730             (puschCqi->u.mode22Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
14732             ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode22Info.wideBandCqi[0];
14733             /* ccpu00117452 - MOD - Changed macro name from
14734                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14735 #ifdef RGR_CQI_REPT
14736             *isCqiAvail = TRUE;
14743          if((puschCqi->u.mode22Info.wideBandCqi[1]) &&
14744             (puschCqi->u.mode22Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
14746             ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode22Info.wideBandCqi[1];
14747             /* ccpu00117452 - MOD - Changed macro name from
14748                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14749 #ifdef RGR_CQI_REPT
14750             /* ccpu00117259 - ADD - Considering second codeword CQI info
14751                incase of MIMO for CQI Reporting */
14752             *is2ndCwCqiAvail = TRUE;
/* Mode 2-2 carries a wideband PMI; store it and latch the PUSCH feedback. */
14759          rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode22Info.wideBandPmi);
14760          ue->mimoInfo.puschFdbkVld  = TRUE;
14761          ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_22;
14762          ue->mimoInfo.puschPmiInfo.u.mode22Info = puschCqi->u.mode22Info;
14764       case TFU_PUSCH_CQI_MODE_31:
14765          /*ccpu00109787 - ADD - Check for non-zero CQI*/
14766          if((puschCqi->u.mode31Info.wideBandCqi[0]) &&
14767             (puschCqi->u.mode31Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
14769             ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode31Info.wideBandCqi[0];
14770             /* ccpu00117452 - MOD - Changed macro name from
14771                RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14772 #ifdef RGR_CQI_REPT
14773             *isCqiAvail = TRUE;
/* CW1's CQI is meaningful only when rank > 1. */
14776          if (ueDl->mimoInfo.ri > 1)
14778             if((puschCqi->u.mode31Info.wideBandCqi[1]) &&
14779                (puschCqi->u.mode31Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
14781                ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode31Info.wideBandCqi[1];
14782                /* ccpu00117452 - MOD - Changed macro name from
14783                   RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14784 #ifdef RGR_CQI_REPT
14785                /* ccpu00117259 - ADD - Considering second codeword CQI info
14786                   incase of MIMO for CQI Reporting */
14787                *is2ndCwCqiAvail = TRUE;
/* If the PMI update is rejected, restore the RI saved above so rank and PMI
 * stay consistent (TM4 only at present). */
14791          if (rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode31Info.pmi) != ROK)
14793             /* To avoid Rank and PMI inconsistency */
14794             if ((puschCqi->ri.pres == PRSNT_NODEF) &&
14795                 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
14797                ueDl->mimoInfo.ri = prevRiVal;
14800          ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_31;
14801          ue->mimoInfo.puschPmiInfo.u.mode31Info = puschCqi->u.mode31Info;
14805          RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Unknown CQI Mode %d CRNTI:%d",
14806             puschCqi->mode,ue->ueId);
14807          /* CQI decoding failed revert the RI to previous value */
14808          if ((puschCqi->ri.pres == PRSNT_NODEF) &&
14809              (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
14811             ueDl->mimoInfo.ri = prevRiVal;
14813          /* ccpu00117452 - MOD - Changed macro name from
14814             RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14815 #ifdef RGR_CQI_REPT
14816          *isCqiAvail = FALSE;
14817          /* ccpu00117259 - ADD - Considering second codeword CQI info
14818             incase of MIMO for CQI Reporting */
14819          *is2ndCwCqiAvail = FALSE;
14826 }  /* rgSCHCmnDlCqiOnPuschInd */
14830  * @brief This function Updates the DL CQI for the UE.
14834  *     Function: rgSCHCmnDlCqiInd
14835  *     Purpose:  Updates the DL CQI for the UE
/* Top-level DL CQI entry point: routes the report to the PUCCH or PUSCH
 * handler, optionally smooths/clamps the stored CQI (CQI_CONFBITMASK_DROP),
 * forwards a consolidated RgrUeCqiRept to the RRM power-control collector
 * when push-N CQI reporting is enabled, refreshes the UE's DL allocation
 * limits, and finally notifies DLFS, SPS and the specific scheduler. */
14839  *  @param[in]  RgSchCellCb        *cell
14840  *  @param[in]  RgSchUeCb          *ue
14841  *  @param[in]  TfuDlCqiRpt        *dlCqi
14845 Void rgSCHCmnDlCqiInd
14851 CmLteTimingInfo      timingInfo
14854    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14855    /* ccpu00117452 - MOD - Changed macro name from
14856       RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14857 #ifdef RGR_CQI_REPT
14858    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
14859    RgrUeCqiRept  ueCqiRept = {{0}};
14860    Bool isCqiAvail = FALSE;
14861    /* ccpu00117259 - ADD - Considering second codeword CQI info
14862       incase of MIMO for CQI Reporting */
14863    Bool is2ndCwCqiAvail = FALSE;
14867 #ifdef RGR_CQI_REPT
/* isPucchInfo (parameter, declaration elided here) selects the source. */
14870       rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
14874       rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
14879       rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi);
14883       rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi);
14887 #ifdef CQI_CONFBITMASK_DROP
/* Optional CQI smoothing: out-of-range (>15) reports fall back to the last
 * accepted CQI; downward steps are limited to 3 and never below 6 (per the
 * visible clamp below) -- TODO confirm intent against full source. */
14888    if(!ue->cqiConfBitMask)
14890       if (ueDl->mimoInfo.cwInfo[0].cqi >15)
14892          ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
14893          ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
14895       else if ( ueDl->mimoInfo.cwInfo[0].cqi >= ue->prevCqi)
14897          ue->prevCqi = ueDl->mimoInfo.cwInfo[0].cqi;
14901          uint8_t dlCqiDeltaPrev = 0;
14902          dlCqiDeltaPrev = ue->prevCqi - ueDl->mimoInfo.cwInfo[0].cqi;
14903          if (dlCqiDeltaPrev > 3)
14904             dlCqiDeltaPrev = 3;
14905          if ((ue->prevCqi - dlCqiDeltaPrev) < 6)
14911             ue->prevCqi = ue->prevCqi - dlCqiDeltaPrev;
14913          ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
14914          ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
14920    /* ccpu00117452 - MOD - Changed macro name from
14921       RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
14922 #ifdef RGR_CQI_REPT
14923    /* ccpu00117259 - ADD - Considering second codeword CQI info
14924       incase of MIMO for CQI Reporting - added is2ndCwCqiAvail\
14925       in 'if' condition*/
14926    if (RG_SCH_CQIR_IS_PUSHNCQI_ENBLE(ue) && (isCqiAvail || is2ndCwCqiAvail))
14928       ueCqiRept.cqi[0] = ueDl->mimoInfo.cwInfo[0].cqi;
14930       /* ccpu00117259 - ADD - Considering second codeword CQI info
14931          incase of MIMO for CQI Reporting - added is2ndCwCqiAvail
14932          in 'if' condition*/
14933       ueCqiRept.cqi[1] = 0;
14934       if(is2ndCwCqiAvail)
14936          ueCqiRept.cqi[1] = ueDl->mimoInfo.cwInfo[1].cqi;
14938       rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, &ueCqiRept);
/* Re-derive the UE's DL scheduling limits from the updated CQI/RI/PMI. */
14943    rgSCHCmnDlSetUeAllocLmtLa(cell, ue);
14944    rgSCHCheckAndSetTxScheme(cell, ue);
14947    rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), ue->isEmtcUe);
14949    rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), FALSE);
14953    if (cellSch->dl.isDlFreqSel)
14955       cellSch->apisDlfs->rgSCHDlfsDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo);
14958    /* Call SPS module to update CQI indication */
14959    rgSCHCmnSpsDlCqiIndHndlr(cell, ue, timingInfo);
14961    /* Call Specific scheduler to process on dlCqiInd */
14963    if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
14965       cellSch->apisEmtcDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
14970       cellSch->apisDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
14973 #ifdef RG_PFS_STATS
14974    ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].avgCqi +=
14975       ueDl->mimoInfo.cwInfo[0].cqi;
14976    ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].totalCqiOcc++;
14980    ueDl->avgCqi += ueDl->mimoInfo.cwInfo[0].cqi;
14981    ueDl->numCqiOccns++;
14982    if (ueDl->mimoInfo.ri == 1)
14993    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
14994    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
14995    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw0Cqi ++;
14996    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw1Cqi ++;
14997    cell->tenbStats->sch.dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
14998    cell->tenbStats->sch.dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
14999    cell->tenbStats->sch.dlNumCw0Cqi ++;
15000    cell->tenbStats->sch.dlNumCw1Cqi ++;
15007  * @brief This function calculates the wideband CQI from SNR
15008  *        reported for each RB.
15012  *     Function: rgSCHCmnCalcWcqiFrmSnr
15013  *     Purpose:  Wideband CQI calculation from SNR
15015  *     Invoked by: RG SCH
15017  *  @param[in]  RgSchCellCb        *cell
15018  *  @param[in]  TfuSrsRpt        *srsRpt,
15019  *  @return  Wideband CQI
/* Maps srsRpt->snr[0] into a wideband CQI through fixed 50-unit buckets
 * (units of the SNR field not established here -- TODO confirm scaling).
 * The per-bucket wideCqi assignments are elided in this extract; only the
 * range checks are visible. */
15022 static uint8_t rgSCHCmnCalcWcqiFrmSnr(RgSchCellCb *cell, TfuSrsRpt *srsRpt)
15024    uint8_t wideCqi=1; /*Calculated value from SNR*/
15025    /*Need to map a certain SNR with a WideCQI value.
15026     * The CQI calculation is still primitive. Further, need to
15027     * use a improvized method for calculating WideCQI from SNR*/
15028    if (srsRpt->snr[0] <=50)
15032    else if (srsRpt->snr[0]>=51 && srsRpt->snr[0] <=100)
15036    else if (srsRpt->snr[0]>=101 && srsRpt->snr[0] <=150)
15040    else if (srsRpt->snr[0]>=151 && srsRpt->snr[0] <=200)
15044    else if (srsRpt->snr[0]>=201 && srsRpt->snr[0] <=250)
15053 }/*rgSCHCmnCalcWcqiFrmSnr*/
15057  * @brief This function Updates the SRS for the UE.
15061  *     Function: rgSCHCmnSrsInd
15062  *     Purpose:  Updates the UL SRS for the UE
/* On each SRS indication: derives the UL TX antenna selection from the
 * reception time and the UE's SRS periodicity, then obtains a wideband CQI
 * (reported directly, or computed from SNR) and feeds it to the antenna
 * selection logic. */
15066  *  @param[in]  RgSchCellCb        *cell
15067  *  @param[in]  RgSchUeCb          *ue
15068  *  @param[in]  TfuSrsRpt        *srsRpt,
15072 Void rgSCHCmnSrsInd(RgSchCellCb *cell,RgSchUeCb *ue,TfuSrsRpt *srsRpt,CmLteTimingInfo timingInfo)
15074    uint8_t wideCqi; /*Calculated value from SNR*/
15075    uint32_t recReqTime; /*Received Time in TTI*/
/* Absolute TTI count; alternates selectedAnt (0/1) every SRS period. */
15077    recReqTime = (timingInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) + timingInfo.slot;
15078    ue->srsCb.selectedAnt = (recReqTime/ue->srsCb.peri)%2;
15079    if(srsRpt->wideCqiPres)
15081       wideCqi = srsRpt->wideCqi;
15085       wideCqi = rgSCHCmnCalcWcqiFrmSnr(cell, srsRpt);
15087    rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi);
15089 }/*rgSCHCmnSrsInd*/
15094  * @brief This function is a handler for TA report for an UE.
15098  *     Function: rgSCHCmnDlTARpt
15099  *     Purpose: Determine based on UE_IDLE_TIME threshold,
15100  *     whether UE needs to be Linked to the scheduler's TA list OR
15101  *     if it needs a PDCCH Order.
15106  *  @param[in]  RgSchCellCb        *cell
15107  *  @param[in]  RgSchUeCb          *ue
15111 Void rgSCHCmnDlTARpt(RgSchCellCb *cell,RgSchUeCb *ue)
15113    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15114    RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
15115    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Single-entry list built on the stack to notify the specific scheduler of
 * this UE's PDCCH-order inactivity. */
15116    CmLListCp poInactvLst;
15119    /* RACHO: If UE idle time is more than threshold, then
15120     * set its poInactv pdcch order inactivity */
15121    /* Fix : syed Ignore if TaTmr is not configured */
15122    if ((ue->dl.taCb.cfgTaTmr) && (rgSCHCmnUeIdleExdThrsld(cell, ue) == ROK))
15124       uint32_t prevDlMsk = ue->dl.dlInactvMask;
15125       uint32_t prevUlMsk = ue->ul.ulInactvMask;
15126       ue->dl.dlInactvMask |= RG_PDCCHODR_INACTIVE;
15127       ue->ul.ulInactvMask |= RG_PDCCHODR_INACTIVE;
15128       /* Indicate Specific scheduler for this UEs inactivity */
15129       cmLListInit(&poInactvLst);
15130       cmLListAdd2Tail(&poInactvLst, &ueDl->rachInfo.inActUeLnk);
15131       ueDl->rachInfo.inActUeLnk.node = (PTR)ue;
15132       /* Send inactivate ind only if not already sent */
15133       if (prevDlMsk == 0)
15135          cellSch->apisDl->rgSCHDlInactvtUes(cell, &poInactvLst);
15137       if (prevUlMsk == 0)
15139          cellSch->apisUl->rgSCHUlInactvtUes(cell, &poInactvLst);
15144       /* Fix: ccpu00124009 Fix for loop in the linked list "cellDl->taLst" */
/* Guard against double-insertion: dlTaLnk.node is non-NULL while queued. */
15145       if (!ue->dlTaLnk.node)
15148          if(cell->emtcEnable)
15152             rgSCHEmtcAddToTaLst(cellDl,ue);
15159             cmLListAdd2Tail(&cellDl->taLst, &ue->dlTaLnk);
15160             ue->dlTaLnk.node = (PTR)ue;
15165          RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
15166             "<TA>TA duplicate entry attempt failed: UEID:%u",
15175  * @brief Indication of UL CQI.
15179  *     Function : rgSCHCmnFindUlCqiUlTxAnt
15181  *     - Finds the Best Tx Antenna amongst the CQIs received
15182  *       from Two Tx Antennas.
15184  *  @param[in]  RgSchCellCb          *cell
15185  *  @param[in]  RgSchUeCb            *ue
15186  *  @param[in]  uint8_t              wideCqi
/* NOTE(review): currently a stub -- wideCqi is unused and antenna 1 is
 * unconditionally marked valid; the comparison described in the header is
 * not implemented in the visible body. */
15189 static Void rgSCHCmnFindUlCqiUlTxAnt(RgSchCellCb *cell,RgSchUeCb *ue,uint8_t wideCqi)
15191    ue->validTxAnt = 1;
15193 }  /* rgSCHCmnFindUlCqiUlTxAnt */
15197  * @brief Indication of UL CQI.
15201  *     Function : rgSCHCmnUlCqiInd
15203  *      - Updates uplink CQI information for the UE. Computes and
15204  *        stores the lowest CQI of CQIs reported in all subbands.
/* Also runs the UL link-adaptation filter: converts the reported wideband
 * CQI to an iTBS, discards outliers (difference > 5 from the iTBS in use,
 * with a "last ignored" latch that later resets the filter), otherwise
 * blends 20% new / 80% old into cqiBasediTbs. Finally notifies power
 * control, SPS (if enabled) and the specific UL scheduler. */
15206  *  @param[in]  RgSchCellCb          *cell
15207  *  @param[in]  RgSchUeCb            *ue
15208  *  @param[in]  TfuUlCqiRpt          *ulCqiInfo
15211 Void rgSCHCmnUlCqiInd(RgSchCellCb *cell,RgSchUeCb *ue,TfuUlCqiRpt *ulCqiInfo)
15213    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15214    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15219 #if (defined(SCH_STATS) || defined(TENB_STATS))
15220       CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
15223    /* consider inputs from SRS handlers about SRS occassions
15224     * in determining the UL TX Antenna selection */
15225    ueUl->crntUlCqi[0] = ulCqiInfo->wideCqi;
15227    ueUl->validUlCqi = ueUl->crntUlCqi[0];
15228    ue->validTxAnt = 0;
/* iTbsNew/previTbs declarations are elided in this extract. cqiBasediTbs is
 * kept scaled by 100 for fixed-point filtering. */
15230    iTbsNew = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][ueUl->validUlCqi];
15231    previTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
15233    if (RG_ITBS_DIFF(iTbsNew, previTbs) > 5)
15235       /* Ignore this iTBS report and mark that last iTBS report was */
15236       /* ignored so that subsequently we reset the LA algorithm */
15237       ueUl->ulLaCb.lastiTbsIgnored = TRUE;
15241       if (ueUl->ulLaCb.lastiTbsIgnored != TRUE)
15243          ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
15244                                       (80 * ueUl->ulLaCb.cqiBasediTbs))/100;
15248          /* Reset the LA as iTbs in use caught up with the value   */
15249          /* reported by UE.                                        */
15250          ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
15251                                       (80 * previTbs * 100))/100;
15252          ueUl->ulLaCb.deltaiTbs = 0;
15253          ueUl->ulLaCb.lastiTbsIgnored = FALSE;
15258    rgSCHPwrUlCqiInd(cell, ue);
15260    if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
15262       rgSCHCmnSpsUlCqiInd(cell, ue);
15265    /* Applicable to only some schedulers */
15267    if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
15269       cellSch->apisEmtcUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
15274       cellSch->apisUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
15278    ueUl->numCqiOccns++;
15279    ueUl->avgCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15284    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15285    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulNumCqi ++;
15286    cell->tenbStats->sch.ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
15287    cell->tenbStats->sch.ulNumCqi ++;
15292 }  /* rgSCHCmnUlCqiInd */
15295  * @brief Returns HARQ proc for which data expected now.
15299  *     Function: rgSCHCmnUlHqProcForUe
15300  *     Purpose:  This function returns the harq process for
15301  *               which data is expected in the current subframe.
15302  *               It does not validate that the HARQ process
15303  *               has an allocation.
/* Two lookup strategies are visible (by derived process id, and by time);
 * the #ifdef/#else selecting between them is elided in this extract. */
15307  *  @param[in]  RgSchCellCb        *cell
15308  *  @param[in]  CmLteTimingInfo    frm
15309  *  @param[in]  RgSchUeCb          *ue
15310  *  @param[out] RgSchUlHqProcCb    **procRef
15313 Void rgSCHCmnUlHqProcForUe
15316 CmLteTimingInfo frm,
15318 RgSchUlHqProcCb **procRef
15322    uint8_t procId = rgSCHCmnGetUlHqProcIdx(&frm, cell);
15325    *procRef = rgSCHUhmGetUlHqProc(cell, ue, procId);
15327    *procRef = rgSCHUhmGetUlProcByTime(cell, ue, frm);
15334  * @brief Update harq process for allocation.
15338  *     Function : rgSCHCmnUpdUlHqProc
15340  *     This function is invoked when harq process
15341  *     control block is now in a new memory location
15342  *     thus requiring a pointer/reference update.
/* Re-points the allocation's back-reference (alloc->hqProc) at the HARQ
 * process control block's new address. Under debug error classes, a NULL
 * allocation is detected first (failure body elided here). */
15344  *  @param[in] RgSchCellCb      *cell
15345  *  @param[in] RgSchUlHqProcCb  *curProc
15346  *  @param[in] RgSchUlHqProcCb  *oldProc
15351 S16 rgSCHCmnUpdUlHqProc
15354 RgSchUlHqProcCb *curProc,
15355 RgSchUlHqProcCb *oldProc
15361 #if (ERRCLASS & ERRCLS_DEBUG)
15362    if (curProc->alloc == NULLP)
15367    curProc->alloc->hqProc = curProc;
15369 }  /* rgSCHCmnUpdUlHqProc */
15372 /*MS_WORKAROUND for CR FIXME */
15374  * @brief Handles BSR timer expiry
15378  *     Function : rgSCHCmnBsrTmrExpry
15380  *     This function is invoked when periodic BSR timer expires for a UE.
/* Treats the expiry as an implicit scheduling request: flags the UE as
 * SR-granted and forwards to the (eMTC or default) UL scheduler's SR
 * handler. */
15382  *  @param[in]  RgSchUeCb        *ue
15387 S16 rgSCHCmnBsrTmrExpry(RgSchUeCb  *ueCb)
15389    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ueCb->cell);
15392    ueCb->isSrGrant = TRUE;
/* eMTC BSR-timer statistics counter (guard #ifdef elided in this extract). */
15395    emtcStatsUlBsrTmrTxp++;
15399    if(ueCb->cell->emtcEnable)
15403       cellSch->apisEmtcUl->rgSCHSrRcvd(ueCb->cell, ueCb);
15410    cellSch->apisUl->rgSCHSrRcvd(ueCb->cell, ueCb);
15417  * @brief Short BSR update.
15421  *     Function : rgSCHCmnUpdBsrShort
15423  *     This functions does requisite updates to handle short BSR reporting.
/* A short BSR carries one LCG's buffer status. All other LCGs' buffer state
 * is zeroed (their active-LC QCI counters decremented), the reported LCG's
 * buffer size is looked up from the (extended) BSR table and capped by GBR
 * or AMBR as appropriate, the zero-BSR timer is stopped, SPS is informed,
 * and the specific UL scheduler(s) -- including active SCells under UL CA --
 * are notified. */
15425  *  @param[in]  RgSchCellCb  *cell
15426  *  @param[in]  RgSchUeCb    *ue
15427  *  @param[in]  RgSchLcgCb   *ulLcg
15428  *  @param[in]  uint8_t      bsr
15429  *  @param[out] RgSchErrInfo *err
15434 S16 rgSCHCmnUpdBsrShort
15445    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15447    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15448    RgSchCmnLcg *cmnLcg = NULLP;
15454    if (!RGSCH_LCG_ISCFGD(ulLcg))
15456       err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
15459    for (lcgCnt=0; lcgCnt<4; lcgCnt++)
15462       /* Set BS of all other LCGs to Zero.
15463          If Zero BSR is reported in Short BSR include this LCG too */
15464       if ((lcgCnt != ulLcg->lcgId) ||
15465             (!bsr && !ueUl->hqEnt.numBusyHqProcs))
15467          /* If old BO is zero do nothing */
15468          if(((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs != 0)
15470             for(idx = 0; idx < ue->ul.lcgArr[lcgCnt].numLch; idx++)
/* Drop the per-QCI active-UE accounting for each LC going inactive. */
15472                if((ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount) &&
15473                      (ue->ulActiveLCs & (1 <<
15474                                          (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1))))
15477                   ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount--;
15478                   ue->ulActiveLCs &= ~(1 <<
15479                         (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1));
15485       if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgCnt]))
15487          ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs = 0;
15488          ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->reportedBs = 0;
/* Non-zero BSR on a previously-empty non-LCG0 group: mark its LCs active. */
15493    if(ulLcg->lcgId && bsr && (((RgSchCmnLcg *)(ulLcg->sch))->bs == 0))
15495       for(idx = 0; idx < ulLcg->numLch; idx++)
15498          if (!(ue->ulActiveLCs & (1 << (ulLcg->lcArray[idx]->qciCb->qci -1))))
15500             ulLcg->lcArray[idx]->qciCb->ulUeCount++;
15501             ue->ulActiveLCs |= (1 << (ulLcg->lcArray[idx]->qciCb->qci -1));
15506    /* Resetting the nonGbrLcgBs info here */
15507    ue->ul.nonGbrLcgBs = 0;
15508    ue->ul.nonLcg0Bs = 0;
15510    cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
15512    if (TRUE == ue->ul.useExtBSRSizes)
15514       cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
15518       cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
15520    if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15522       /* TBD check for effGbr != 0 */
15523       cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15525    else if (0 == ulLcg->lcgId)
15527       /* This is added for handling LCG0 */
15528       cmnLcg->bs = cmnLcg->reportedBs;
15532       /* Update non GBR LCG's BS*/
15533       ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
15534       cmnLcg->bs = ue->ul.nonGbrLcgBs;
15536    ue->ul.totalBsr = cmnLcg->bs;
/* A zero BSR cancels the pending zero-BSR retransmission timer. */
15539    if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (bsr == 0))
15541       rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
15545    if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
15547       rgSCHCmnSpsBsrRpt(cell, ue, ulLcg);
15550    rgSCHCmnUpdUlCompEffBsr(ue);
15553    if(cell->emtcEnable)
15557       cellSch->apisEmtcUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
15564    cellSch->apisUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
15568    if (ue->ul.isUlCaEnabled && ue->numSCells)
15570       for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
15572 #ifndef PAL_ENABLE_UL_CA
15573          if((ue->cellInfo[sCellIdx] != NULLP) &&
15574             (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
15576          if(ue->cellInfo[sCellIdx] != NULLP)
15579             cellSch->apisUl->rgSCHUpdBsrShort(ue->cellInfo[sCellIdx]->cell,
15590 * @brief Truncated BSR update.
15594 * Function : rgSCHCmnUpdBsrTrunc
15596 * This functions does required updates to handle truncated BSR report.
15599 * @param[in] RgSchCellCb *cell
15600 * @param[in] RgSchUeCb *ue
15601 * @param[in] RgSchLcgCb *ulLcg
15602 * @param[in] uint8_t bsr
15603 * @param[out] RgSchErrInfo *err
15608 S16 rgSCHCmnUpdBsrTrunc
15617 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15618 RgSchCmnLcg *cmnLcg = NULLP;
/* Reject the report up-front if the target LCG was never configured. */
15625 if (!RGSCH_LCG_ISCFGD(ulLcg))
15627 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
15630 /* set all higher prio lcgs bs to 0 and update this lcgs bs and
15631 total bsr= sumofall lcgs bs */
/* Clear buffer status of every higher-priority LCG (lower lcgId) that had
 * pending BO, dropping the per-QCI active-UE count and active-LC bit for
 * each of its logical channels. */
15634 for (cnt = ulLcg->lcgId-1; cnt >= 0; cnt--)
15637 /* If Existing BO is zero the don't do anything */
15638 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs != 0)
15640 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
15643 if((ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount) &&
15644 (ue->ulActiveLCs & (1 <<
15645 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
15647 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount--;
15648 ue->ulActiveLCs &= ~(1 <<
15649 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
15654 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs = 0;
15655 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->reportedBs = 0;
/* For this LCG and lower-priority ones whose BO was zero, mark their
 * logical channels active (set active-LC bit, bump per-QCI UE count). */
15660 for (cnt = ulLcg->lcgId; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
15662 if (ulLcg->lcgId == 0)
15666 /* If Existing BO is zero the don't do anything */
15667 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs == 0)
15669 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
15672 if (!(ue->ulActiveLCs & (1 <<
15673 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
15675 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount++;
15676 ue->ulActiveLCs |= (1 <<
15677 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
15683 ue->ul.nonGbrLcgBs = 0;
15684 ue->ul.nonLcg0Bs = 0;
15685 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Translate the BSR index into bytes via the (extended) BSR size table. */
15686 if (TRUE == ue->ul.useExtBSRSizes)
15688 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
15692 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
/* GBR bearers are capped at effGbr + effDeltaMbr; LCG0 is taken as-is;
 * other (non-GBR) LCGs are capped by the UE's effective AMBR. */
15694 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15696 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15698 else if(ulLcg->lcgId == 0)
15700 /* This is for handeling LCG0 */
15701 cmnLcg->bs = cmnLcg->reportedBs;
15705 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
15706 cmnLcg->bs = ue->ul.nonGbrLcgBs;
15708 ue->ul.totalBsr = cmnLcg->bs;
/* Lower-priority LCGs keep their previous bs; summed into totalBsr. */
15710 for (cnt = ulLcg->lcgId+1; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
15712 /* TODO: The bs for the other LCGs may be stale because some or all of
15713 * the part of bs may have been already scheduled/data received. Please
15714 * consider this when truncated BSR is tested/implemented */
15715 ue->ul.totalBsr += ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs;
15718 rgSCHCmnUpdUlCompEffBsr(ue);
/* Dispatch to the eMTC- or legacy-specific UL scheduler hook. */
15721 if(cell->emtcEnable)
15725 cellSch->apisEmtcUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
15732 cellSch->apisUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
/* With UL CA enabled, propagate the report to each active SCell. */
15736 if (ue->ul.isUlCaEnabled && ue->numSCells)
15738 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
15740 #ifndef PAL_ENABLE_UL_CA
15741 if((ue->cellInfo[sCellIdx] != NULLP) &&
15742 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
15744 if(ue->cellInfo[sCellIdx] != NULLP)
15747 cellSch->apisUl->rgSCHUpdBsrTrunc(ue->cellInfo[sCellIdx]->cell, ue, ulLcg, bsr);
15757 * @brief Long BSR update.
15761 * Function : rgSCHCmnUpdBsrLong
15763 * - Update BSRs for all configured LCGs.
15764 * - Update priority of LCGs if needed.
15765 * - Update UE's position within/across uplink scheduling queues.
15768 * @param[in] RgSchCellCb *cell
15769 * @param[in] RgSchUeCb *ue
15770 * @param[in] uint8_t bsArr[]
15771 * @param[out] RgSchErrInfo *err
15776 S16 rgSCHCmnUpdBsrLong
15784 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15785 uint32_t tmpBsArr[4] = {0, 0, 0, 0};
15786 uint32_t nonGbrBs = 0;
/* For every LCG (except LCG0) whose previous BO was zero, mark its
 * logical channels active: set the active-LC bit and bump the per-QCI
 * UL UE count. */
15794 for(idx1 = 1; idx1 < RGSCH_MAX_LCG_PER_UE; idx1++)
15796 /* If Old BO is non zero then do nothing */
15797 if ((((RgSchCmnLcg *)(ue->ul.lcgArr[idx1].sch))->bs == 0)
15800 for(idx2 = 0; idx2 < ue->ul.lcgArr[idx1].numLch; idx2++)
15803 if (!(ue->ulActiveLCs & (1 <<
15804 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1))))
15806 ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->ulUeCount++;
15807 ue->ulActiveLCs |= (1 <<
15808 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1));
15814 ue->ul.nonGbrLcgBs = 0;
15815 ue->ul.nonLcg0Bs = 0;
/* LCG0's BSR index is converted to bytes and taken uncapped. */
15817 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[0]))
15819 if (TRUE == ue->ul.useExtBSRSizes)
15821 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnExtBsrTbl[bsArr[0]];
15822 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnExtBsrTbl[bsArr[0]];
15823 tmpBsArr[0] = rgSchCmnExtBsrTbl[bsArr[0]];
15827 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnBsrTbl[bsArr[0]];
15828 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnBsrTbl[bsArr[0]];
15829 tmpBsArr[0] = rgSchCmnBsrTbl[bsArr[0]];
/* LCG1..3: GBR bearers capped at effGbr + effDeltaMbr; non-GBR LCGs
 * capped individually (and in aggregate, below) by the effective AMBR. */
15832 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
15834 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
15836 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
15838 if (TRUE == ue->ul.useExtBSRSizes)
15840 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsArr[lcgId]];
15844 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsArr[lcgId]];
15846 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
15848 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
15849 tmpBsArr[lcgId] = cmnLcg->bs;
15853 nonGbrBs += cmnLcg->reportedBs;
15854 tmpBsArr[lcgId] = cmnLcg->reportedBs;
15855 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
15859 ue->ul.nonGbrLcgBs = RGSCH_MIN(nonGbrBs,ue->ul.effAmbr);
15861 ue->ul.totalBsr = tmpBsArr[0] + tmpBsArr[1] + tmpBsArr[2] + tmpBsArr[3];
/* An all-zero long BSR stops a running BSR timer. */
15863 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (ue->ul.totalBsr == 0))
15865 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
15870 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE) /* SPS_FIX */
15872 if(ue->ul.totalBsr - tmpBsArr[1] == 0)
15873 {/* Updaing the BSR to SPS only if LCG1 BS is present in sps active state */
15874 rgSCHCmnSpsBsrRpt(cell, ue, &ue->ul.lcgArr[1]);
15878 rgSCHCmnUpdUlCompEffBsr(ue);
/* Dispatch to the eMTC- or legacy-specific UL scheduler hook. */
15881 if(cell->emtcEnable)
15885 cellSch->apisEmtcUl->rgSCHUpdBsrLong(cell, ue, bsArr);
15892 cellSch->apisUl->rgSCHUpdBsrLong(cell, ue, bsArr);
/* With UL CA enabled, propagate the report to each active SCell. */
15896 if (ue->ul.isUlCaEnabled && ue->numSCells)
15898 for(uint8_t idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
15900 #ifndef PAL_ENABLE_UL_CA
15901 if((ue->cellInfo[idx] != NULLP) &&
15902 (ue->cellInfo[idx]->sCellState == RG_SCH_SCELL_ACTIVE))
15904 if(ue->cellInfo[idx] != NULLP)
15907 cellSch->apisUl->rgSCHUpdBsrLong(ue->cellInfo[idx]->cell, ue, bsArr);
15917 * @brief PHR update.
15921 * Function : rgSCHCmnUpdExtPhr
15923 * Updates extended power headroom information for an UE.
15925 * @param[in] RgSchCellCb *cell
15926 * @param[in] RgSchUeCb *ue
15927 * @param[in] uint8_t phr
15928 * @param[out] RgSchErrInfo *err
15933 S16 rgSCHCmnUpdExtPhr
15937 RgInfExtPhrCEInfo *extPhr,
15941 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
15942 RgSchCmnAllocRecord *allRcd;
15943 CmLList *node = ueUl->ulAllocLst.last;
15946 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Walk the UE's recorded UL allocations (newest first) for the one whose
 * allocation time matches the MAC CE report time, and feed that record
 * into the power module's extended-PHR update. */
15953 allRcd = (RgSchCmnAllocRecord *)node->node;
15955 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
15957 rgSCHPwrUpdExtPhr(cell, ue, extPhr, allRcd);
/* If UL SPS is active for this UE, notify the SPS module of the PHR. */
15962 if(ulSpsUe->isUlSpsActv)
15964 rgSCHCmnSpsPhrInd(cell,ue);
15969 } /* rgSCHCmnUpdExtPhr */
15975 * @brief PHR update.
15979 * Function : rgSCHCmnUpdPhr
15981 * Updates power headroom information for an UE.
15983 * @param[in] RgSchCellCb *cell
15984 * @param[in] RgSchUeCb *ue
15985 * @param[in] uint8_t phr
15986 * @param[out] RgSchErrInfo *err
15999 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16000 RgSchCmnAllocRecord *allRcd;
16001 CmLList *node = ueUl->ulAllocLst.last;
16004 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Find the recorded UL allocation whose time matches the MAC CE report
 * time and update PHR against it, using the configured max power. */
16011 allRcd = (RgSchCmnAllocRecord *)node->node;
16013 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
16015 rgSCHPwrUpdPhr(cell, ue, phr, allRcd, RG_SCH_CMN_PWR_USE_CFG_MAX_PWR);
/* If UL SPS is active for this UE, notify the SPS module of the PHR. */
16020 if(ulSpsUe->isUlSpsActv)
16022 rgSCHCmnSpsPhrInd(cell,ue);
16027 } /* rgSCHCmnUpdPhr */
16030 * @brief UL grant for contention resolution.
16034 * Function : rgSCHCmnContResUlGrant
16036 * Add UE to another queue specifically for CRNTI based contention
16040 * @param[in] RgSchUeCb *ue
16041 * @param[out] RgSchErrInfo *err
16046 S16 rgSCHCmnContResUlGrant
16053 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Thin dispatcher: forward to the eMTC or legacy UL scheduler hook. */
16056 if(cell->emtcEnable)
16060 cellSch->apisEmtcUl->rgSCHContResUlGrant(cell, ue);
16067 cellSch->apisUl->rgSCHContResUlGrant(cell, ue);
16073 * @brief SR reception handling.
16077 * Function : rgSCHCmnSrRcvd
16079 * - Update UE's position within/across uplink scheduling queues
16080 * - Update priority of LCGs if needed.
16082 * @param[in] RgSchCellCb *cell
16083 * @param[in] RgSchUeCb *ue
16084 * @param[in] CmLteTimingInfo frm
16085 * @param[out] RgSchErrInfo *err
16094 CmLteTimingInfo frm,
16098 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16099 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16100 CmLList *node = ueUl->ulAllocLst.last;
16104 emtcStatsUlTomSrInd++;
16107 RGSCH_INCR_SUB_FRAME(frm, 1); /* 1 TTI after the time SR was sent */
/* Check whether an allocation already exists for the adjusted SR time;
 * the recorded alloc time is compared against frm+1. */
16110 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)node->node;
16111 if (RGSCH_TIMEINFO_SAME(frm, allRcd->allocTime))
16117 //TODO_SID Need to check when it is getting triggered
16118 ue->isSrGrant = TRUE;
/* Dispatch to the eMTC- or legacy-specific SR handler. */
16120 if(cell->emtcEnable)
16124 cellSch->apisEmtcUl->rgSCHSrRcvd(cell, ue);
16131 cellSch->apisUl->rgSCHSrRcvd(cell, ue);
16137 * @brief Returns first uplink allocation to send reception
16142 * Function: rgSCHCmnFirstRcptnReq(cell)
16143 * Purpose: This function returns the first uplink allocation
16144 * (or NULLP if there is none) in the subframe
16145 * in which is expected to prepare and send reception
16150 * @param[in] RgSchCellCb *cell
16151 * @return RgSchUlAlloc*
16153 RgSchUlAlloc *rgSCHCmnFirstRcptnReq(RgSchCellCb *cell)
16155 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16157 RgSchUlAlloc* alloc = NULLP;
16160 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
16162 RgSchUlSf* sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
16163 alloc = rgSCHUtlUlAllocFirst(sf);
/* NOTE(review): skips a single hqProc==NULLP alloc with 'if', whereas
 * rgSCHCmnFirstHqFdbkAlloc uses 'while' for the same skip — confirm
 * whether more than one NULLP-hqProc alloc can appear here. */
16165 if (alloc && alloc->hqProc == NULLP)
16167 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16175 * @brief Returns first uplink allocation to send reception
16180 * Function: rgSCHCmnNextRcptnReq(cell)
16181 * Purpose: This function returns the next uplink allocation
16182 * (or NULLP if there is none) in the subframe
16183 * in which is expected to prepare and send reception
16188 * @param[in] RgSchCellCb *cell
16189 * @return RgSchUlAlloc*
16191 RgSchUlAlloc *rgSCHCmnNextRcptnReq(RgSchCellCb *cell,RgSchUlAlloc *alloc)
16193 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16195 //RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
16198 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
16200 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
/* Advance past the given alloc; a single hqProc==NULLP entry is skipped
 * (see NOTE in rgSCHCmnNextHqFdbkAlloc, which loops instead). */
16202 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16203 if (alloc && alloc->hqProc == NULLP)
16205 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16216 * @brief Collates DRX enabled UE's scheduled in this SF
16220 * Function: rgSCHCmnDrxStrtInActvTmrInUl(cell)
16221 * Purpose: This function collates the link
16222 * of UE's scheduled in this SF who
16223 * have drx enabled. It then calls
16224 * DRX specific function to start/restart
16225 * inactivity timer in Ul
16229 * @param[in] RgSchCellCb *cell
16232 Void rgSCHCmnDrxStrtInActvTmrInUl(RgSchCellCb *cell)
16234 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16235 RgSchUlSf *sf = &(cellUl->ulSfArr[cellUl->schdIdx]);
16236 RgSchUlAlloc *alloc = rgSCHUtlUlAllocFirst(sf);
16241 cmLListInit(&ulUeLst);
/* Collect DRX-enabled UEs with a fresh (non-retx, non-SR, non-SPS-occasion)
 * grant in this subframe into ulUeLst. */
16249 if (!(alloc->grnt.isRtx) && ueCb->isDrxEnabled && !(ueCb->isSrGrant)
16251 /* ccpu00139513- DRX inactivity timer should not be started for
16252 * UL SPS occasions */
16253 && (alloc->hqProc->isSpsOccnHqP == FALSE)
16257 cmLListAdd2Tail(&ulUeLst,&(ueCb->ulDrxInactvTmrLnk));
16258 ueCb->ulDrxInactvTmrLnk.node = (PTR)ueCb;
16262 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* Start/restart the DRX inactivity timer for every collected UE. */
16265 (Void)rgSCHDrxStrtInActvTmr(cell,&ulUeLst,RG_SCH_DRX_UL);
16272 * @brief Returns first uplink allocation to send HARQ feedback
16277 * Function: rgSCHCmnFirstHqFdbkAlloc
16278 * Purpose: This function returns the first uplink allocation
16279 * (or NULLP if there is none) in the subframe
16280 * for which it is expected to prepare and send HARQ
16285 * @param[in] RgSchCellCb *cell
16286 * @param[in] uint8_t idx
16287 * @return RgSchUlAlloc*
16289 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc(RgSchCellCb *cell,uint8_t idx)
16291 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16293 RgSchUlAlloc *alloc = NULLP;
16296 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
16298 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
16299 alloc = rgSCHUtlUlAllocFirst(sf);
/* Skip every alloc without a HARQ process — only those need feedback. */
16301 while (alloc && (alloc->hqProc == NULLP))
16303 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16311 * @brief Returns next allocation to send HARQ feedback for.
16315 * Function: rgSCHCmnNextHqFdbkAlloc(cell)
16316 * Purpose: This function returns the next uplink allocation
16317 * (or NULLP if there is none) in the subframe
16318 * for which HARQ feedback needs to be sent.
16322 * @param[in] RgSchCellCb *cell
16323 * @return RgSchUlAlloc*
16325 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc(RgSchCellCb *cell,RgSchUlAlloc *alloc,uint8_t idx)
16327 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16329 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
16331 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
/* Advance past the given alloc, skipping any without a HARQ process. */
16333 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16334 while (alloc && (alloc->hqProc == NULLP))
16336 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
16346 /***********************************************************
16348 * Func : rgSCHCmnUlGetITbsFrmIMcs
16350 * Desc : Returns the Itbs that is mapped to an Imcs
16351 * for the case of uplink.
16359 **********************************************************/
16360 uint8_t rgSCHCmnUlGetITbsFrmIMcs(uint8_t iMcs)
/* Pure table lookup; no bounds check — caller must pass a valid UL iMcs
 * index for rgUlIMcsTbl (TODO confirm the valid range at call sites). */
16362 return (rgUlIMcsTbl[iMcs].iTbs);
16365 /***********************************************************
16367 * Func : rgSCHCmnUlGetIMcsFrmITbs
16369 * Desc : Returns the Imcs that is mapped to an Itbs
16370 * for the case of uplink.
16374 * Notes: For iTbs 19, iMcs is dependant on modulation order.
16375 * Refer to 36.213, Table 8.6.1-1 and 36.306 Table 4.1-2
16376 * for UE capability information
16380 **********************************************************/
16381 uint8_t rgSCHCmnUlGetIMcsFrmITbs(uint8_t iTbs,CmLteUeCategory ueCtg)
16389 /*a higher layer can force a 64QAM UE to transmit at 16QAM.
16390 * We currently do not support this. Once the support for such
16391 * is added, ueCtg should be replaced by current transmit
16392 * modulation configuration.Refer to 36.213 -8.6.1
/* iTbs below 19 maps directly; iTbs 19 needs the UE-category check
 * because only CAT-5 (64QAM-capable) UEs use the higher iMcs mapping. */
16394 else if ( iTbs < 19 )
16398 else if ((iTbs == 19) && (ueCtg != CM_LTE_UE_CAT_5))
16408 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
16409 was seen when IMCS exceeds 20 on T2k TDD*/
16419 /***********************************************************
16421 * Func : rgSCHCmnUlMinTbBitsForITbs
16423 * Desc : Returns the minimum number of bits that can
16424 * be given as grant for a specific CQI.
16432 **********************************************************/
16433 uint32_t rgSCHCmnUlMinTbBitsForITbs(RgSchCmnUlCell *cellUl,uint8_t iTbs)
/* Bounds-check iTbs, then return the TB size for a single subband
 * (index cellUl->sbSize-1) at that iTbs from the 1-layer TBS table. */
16436 RGSCH_ARRAY_BOUND_CHECK(0, rgTbSzTbl[0], iTbs);
16438 return (rgTbSzTbl[0][iTbs][cellUl->sbSize-1]);
16441 /***********************************************************
16443 * Func : rgSCHCmnUlSbAlloc
16445 * Desc : Given a required 'number of subbands' and a hole,
16446 * returns a suitable alloc such that the subband
16447 * allocation size is valid
16451 * Notes: Does not assume either passed numSb or hole size
16452 * to be valid for allocation, and hence arrives at
16453 * an acceptable value.
16456 **********************************************************/
16457 RgSchUlAlloc *rgSCHCmnUlSbAlloc
16464 uint8_t holeSz; /* valid hole size */
16465 RgSchUlAlloc *alloc;
/* Case 1: the hole size itself is a valid 2^a*3^b*5^c allocation size.
 * Round numSb up to the next valid size; take the complete hole if the
 * rounded request covers it, else carve a partial hole. */
16467 if ((holeSz = rgSchCmnMult235Tbl[hole->num].prvMatch) == hole->num)
16469 numSb = rgSchCmnMult235Tbl[numSb].match;
16470 if (numSb >= holeSz)
16472 alloc = rgSCHUtlUlAllocGetCompHole(sf, hole);
16476 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
/* Case 2: hole size is not itself valid; pick the nearest valid size
 * (round up when it fits, otherwise round down) and allocate partially. */
16481 if (numSb < holeSz)
16483 numSb = rgSchCmnMult235Tbl[numSb].match;
16487 numSb = rgSchCmnMult235Tbl[numSb].prvMatch;
16490 if ( numSb >= holeSz )
16494 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
16500 * @brief To fill the RgSchCmnUeUlAlloc structure of UeCb.
16504 * Function: rgSCHCmnUlUeFillAllocInfo
16505 * Purpose: Specific scheduler to call this API to fill the alloc
16508 * Invoked by: Scheduler
16510 * @param[in] RgSchCellCb *cell
16511 * @param[out] RgSchUeCb *ue
16514 Void rgSCHCmnUlUeFillAllocInfo(RgSchCellCb *cell,RgSchUeCb *ue)
16516 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16517 RgSchCmnUeUlAlloc *ulAllocInfo;
16518 RgSchCmnUlUe *ueUl;
16521 ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16522 ulAllocInfo = &ueUl->alloc;
16524 /* Fill alloc structure */
/* Populate TPC and N-DMRS in the grant, then link the HARQ process
 * (tx vs retx taken from the process itself) and fill the PDCCH. */
16525 rgSCHCmnUlAllocFillTpc(cell, ue, ulAllocInfo->alloc);
16526 rgSCHCmnUlAllocFillNdmrs(cellUl, ulAllocInfo->alloc);
16527 rgSCHCmnUlAllocLnkHqProc(ue, ulAllocInfo->alloc, ulAllocInfo->alloc->hqProc,
16528 ulAllocInfo->alloc->hqProc->isRetx);
16530 rgSCHCmnUlFillPdcchWithAlloc(ulAllocInfo->alloc->pdcch,
16531 ulAllocInfo->alloc, ue);
16532 /* Recording information about this allocation */
16533 rgSCHCmnUlRecordUeAlloc(cell, ue);
16535 /* Update the UE's outstanding allocation */
/* Only fresh transmissions consume new buffer-status bytes. */
16536 if (!ulAllocInfo->alloc->hqProc->isRetx)
16538 rgSCHCmnUlUpdOutStndAlloc(cell, ue, ulAllocInfo->allocdBytes);
16545 * @brief Update the UEs outstanding alloc based on the BSR report's timing.
16550 * Function: rgSCHCmnUpdUlCompEffBsr
16551 * Purpose: Clear off all the allocations from outstanding allocation that
16552 * are later than or equal to BSR timing information (stored in UEs datIndTime).
16554 * Invoked by: Scheduler
16556 * @param[in] RgSchUeCb *ue
16559 static Void rgSCHCmnUpdUlCompEffBsr(RgSchUeCb *ue)
16561 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,ue->cell);
16562 CmLList *node = ueUl->ulAllocLst.last;
16563 RgSchCmnAllocRecord *allRcd;
16564 uint32_t outStndAlloc=0;
16565 uint32_t nonLcg0OutStndAllocBs=0;
16566 uint32_t nonLcg0Bsr=0;
16568 RgSchCmnLcg *cmnLcg = NULLP;
/* Walk back from the newest record to the one matching the MAC CE
 * report time; records from there onward are "outstanding". */
16572 allRcd = (RgSchCmnAllocRecord *)node->node;
16573 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
/* Sum the outstanding allocation bytes. */
16582 allRcd = (RgSchCmnAllocRecord *)node->node;
16584 outStndAlloc += allRcd->alloc;
16587 cmnLcg = (RgSchCmnLcg *)(ue->ul.lcgArr[0].sch);
16588 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
16589 if (cmnLcg->bs > outStndAlloc)
16591 cmnLcg->bs -= outStndAlloc;
16592 ue->ul.minReqBytes = cmnLcg->bs;
/* LCG0 fully consumed; the remainder counts against the other LCGs. */
16597 nonLcg0OutStndAllocBs = outStndAlloc - cmnLcg->bs;
/* Accumulate GBR LCG buffer status; non-GBR LCGs are covered by the
 * AMBR-capped aggregate added afterwards. */
16601 for(lcgId = 1;lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
16603 if(RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
16605 cmnLcg = ((RgSchCmnLcg *) (ue->ul.lcgArr[lcgId].sch));
16606 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
16608 nonLcg0Bsr += cmnLcg->bs;
16612 nonLcg0Bsr += ue->ul.nonGbrLcgBs;
16613 if (nonLcg0OutStndAllocBs > nonLcg0Bsr)
16619 nonLcg0Bsr -= nonLcg0OutStndAllocBs;
16621 ue->ul.nonLcg0Bs = nonLcg0Bsr;
16622 /* Cap effBsr with nonLcg0Bsr and append lcg0 bs.
16623 * nonLcg0Bsr limit applies only to lcg1,2,3 */
16624 /* better be handled in individual scheduler */
16625 ue->ul.effBsr = nonLcg0Bsr +\
16626 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
16631 * @brief Records information about the current allocation.
16635 * Function: rgSCHCmnUlRecordUeAlloc
16636 * Purpose: Records information about the curent allocation.
16637 * This includes the allocated bytes, as well
16638 * as some power information.
16640 * Invoked by: Scheduler
16642 * @param[in] RgSchCellCb *cell
16643 * @param[in] RgSchUeCb *ue
16646 Void rgSCHCmnUlRecordUeAlloc(RgSchCellCb *cell,RgSchUeCb *ue)
16649 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
16651 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16652 CmLListCp *lst = &ueUl->ulAllocLst;
16653 CmLList *node = ueUl->ulAllocLst.first;
16654 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
16655 RgSchCmnUeUlAlloc *ulAllocInfo = &ueUl->alloc;
16656 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* Fixed-size history: recycle the oldest record (list head) and re-add
 * it at the tail with the new allocation's data. */
16658 cmLListDelFrm(lst, &allRcd->lnk);
16660 /* To the crntTime, add the MIN time at which UE will
16661 * actually send the BSR i.e DELTA+4 */
16662 allRcd->allocTime = cell->crntTime;
16663 /*ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
16665 if(ue->isEmtcUe == TRUE)
16667 RGSCH_INCR_SUB_FRAME_EMTC(allRcd->allocTime,
16668 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
16673 RGSCH_INCR_SUB_FRAME(allRcd->allocTime,
16674 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
16677 allRcd->allocTime = cellUl->schdTime;
16679 cmLListAdd2Tail(lst, &allRcd->lnk);
16681 /* Filling in the parameters to be recorded */
16682 allRcd->alloc = ulAllocInfo->allocdBytes;
16683 //allRcd->numRb = ulAllocInfo->alloc->grnt.numRb;
16684 allRcd->numRb = (ulAllocInfo->alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
16685 /*Recording the UL CQI derived from the maxUlCqi */
16686 allRcd->cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
16687 allRcd->tpc = ulAllocInfo->alloc->grnt.tpc;
/* Feed the RB count to the power-control module and update cell stats. */
16689 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
16691 cell->measurements.ulBytesCnt += ulAllocInfo->allocdBytes;
16696 /** PHR handling for MSG3
16697 * @brief Records allocation information of msg3 in the the UE.
16701 * Function: rgSCHCmnUlRecMsg3Alloc
16702 * Purpose: Records information about msg3 allocation.
16703 * This includes the allocated bytes, as well
16704 * as some power information.
16706 * Invoked by: Scheduler
16708 * @param[in] RgSchCellCb *cell
16709 * @param[in] RgSchUeCb *ue
16710 * @param[in] RgSchRaCb *raCb
16713 Void rgSCHCmnUlRecMsg3Alloc(RgSchCellCb *cell,RgSchUeCb *ue,RgSchRaCb *raCb)
16715 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16716 CmLListCp *lst = &ueUl->ulAllocLst;
16717 CmLList *node = ueUl->ulAllocLst.first;
16718 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
16720 /* Stack Crash problem for TRACE5 changes */
/* Recycle the oldest record with the msg3 grant's time, size, RBs,
 * CCCH CQI and TPC taken from the RA control block. */
16722 cmLListDelFrm(lst, node);
16723 allRcd->allocTime = raCb->msg3AllocTime;
16724 cmLListAdd2Tail(lst, node);
16726 /* Filling in the parameters to be recorded */
16727 allRcd->alloc = raCb->msg3Grnt.datSz;
16728 allRcd->numRb = raCb->msg3Grnt.numRb;
16729 allRcd->cqi = raCb->ccchCqi;
16730 allRcd->tpc = raCb->msg3Grnt.tpc;
16732 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
16737 * @brief Keeps track of the most recent RG_SCH_CMN_MAX_ALLOC_TRACK
16738 * allocations to track. Adds this allocation to the ueUl's ulAllocLst.
16743 * Function: rgSCHCmnUlUpdOutStndAlloc
16744 * Purpose: Recent Allocation shall be at First Pos'n.
16745 * Remove the last node, update the fields
16746 * with the new allocation and add at front.
16748 * Invoked by: Scheduler
16750 * @param[in] RgSchCellCb *cell
16751 * @param[in] RgSchUeCb *ue
16752 * @param[in] uint32_t alloc
16755 Void rgSCHCmnUlUpdOutStndAlloc(RgSchCellCb *cell,RgSchUeCb *ue,uint32_t alloc)
16757 uint32_t nonLcg0Alloc=0;
16759 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
/* Drain LCG0 first; any remainder of the grant is charged to the
 * non-LCG0 buffer status. */
16760 if (((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs > alloc)
16762 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs -= alloc;
16766 nonLcg0Alloc = alloc - ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
16767 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = 0;
16770 if (nonLcg0Alloc >= ue->ul.nonLcg0Bs)
16772 ue->ul.nonLcg0Bs = 0;
16776 ue->ul.nonLcg0Bs -= nonLcg0Alloc;
16778 /* Cap effBsr with effAmbr and append lcg0 bs.
16779 * effAmbr limit applies only to lcg1,2,3 non GBR LCG's*/
16780 /* better be handled in individual scheduler */
16781 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
16782 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
/* Once the pending BSR is fully served: stop the BSR timer and, unless
 * this was an SR grant, (re)start the periodic BSR timer if configured. */
16784 if (ue->ul.effBsr == 0)
16786 if (ue->bsrTmr.tmrEvnt != TMR_NONE)
16788 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
16791 if (FALSE == ue->isSrGrant)
16793 if (ue->ul.bsrTmrCfg.isPrdBsrTmrPres)
16796 rgSCHTmrStartTmr(cell, ue, RG_SCH_TMR_BSR,
16797 ue->ul.bsrTmrCfg.prdBsrTmr);
16803 /* Resetting UEs lower Cap */
16804 ue->ul.minReqBytes = 0;
16811 * @brief Returns the "Itbs" for a given UE.
16815 * Function: rgSCHCmnUlGetITbs
16816 * Purpose: This function returns the "Itbs" for a given UE.
16818 * Invoked by: Scheduler
16820 * @param[in] RgSchUeCb *ue
16823 uint8_t rgSCHCmnUlGetITbs
16830 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
16831 /* CQI will be capped to maxUlCqi for 16qam UEs */
16832 CmLteUeCategory ueCtgy = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* maxiTbs: upper bound derived from the UE's max UL CQI, indexed by
 * cyclic-prefix type (isEcp selects the extended-CP row). */
16836 uint8_t maxiTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ueUl->maxUlCqi];
16840 /* #ifdef RG_SCH_CMN_EXT_CP_SUP For ECP pick index 1 */
/* Non-CAT-5 UEs are capped at their configured max UL CQI. */
16842 if ( (ueCtgy != CM_LTE_UE_CAT_5) &&
16843 (ueUl->validUlCqi > ueUl->maxUlCqi)
16846 cqi = ueUl->maxUlCqi;
16850 cqi = ueUl->validUlCqi;
/* Link-adaptation path: CQI-based iTbs plus delta, scaled by 100. */
16854 iTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
16856 RG_SCH_CHK_ITBS_RANGE(iTbs, maxiTbs);
16858 iTbs = RGSCH_MIN(iTbs, ue->cell->thresholds.maxUlItbs);
16861 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
16862 was seen when IMCS exceeds 20 on T2k TDD */
/* Fallback path (no link adaptation): map the current UL CQI directly. */
16871 if ( (ueCtgy != CM_LTE_UE_CAT_5) && (ueUl->crntUlCqi[0] > ueUl->maxUlCqi ))
16873 cqi = ueUl->maxUlCqi;
16877 cqi = ueUl->crntUlCqi[0];
16880 return (rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][cqi]);
16884 * @brief This function adds the UE to DLRbAllocInfo TX lst.
16888 * Function: rgSCHCmnDlRbInfoAddUeTx
16889 * Purpose: This function adds the UE to DLRbAllocInfo TX lst.
16891 * Invoked by: Common Scheduler
16893 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16894 * @param[in] RgSchUeCb *ue
16895 * @param[in] RgSchDlHqProcCb *hqP
16899 static Void rgSCHCmnDlRbInfoAddUeTx
16902 RgSchCmnDlRbAllocInfo *allocInfo,
16904 RgSchDlHqProcCb *hqP
16907 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* reqLnk.node == NULLP guards against adding the same HARQ proc twice. */
16910 if (hqP->reqLnk.node == NULLP)
/* Frequency-selective cells delegate ordering to the DLFS module;
 * otherwise append to the dedicated TX list directly. */
16912 if (cellSch->dl.isDlFreqSel)
16914 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
16915 &allocInfo->dedAlloc.txHqPLst, hqP);
16920 cmLListAdd2Tail(&allocInfo->dedAlloc.txHqPLst, &hqP->reqLnk);
16922 hqP->reqLnk.node = (PTR)hqP;
16929 * @brief This function adds the UE to DLRbAllocInfo RETX lst.
16933 * Function: rgSCHCmnDlRbInfoAddUeRetx
16934 * Purpose: This function adds the UE to DLRbAllocInfo RETX lst.
16936 * Invoked by: Common Scheduler
16938 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16939 * @param[in] RgSchUeCb *ue
16940 * @param[in] RgSchDlHqProcCb *hqP
16944 static Void rgSCHCmnDlRbInfoAddUeRetx
16947 RgSchCmnDlRbAllocInfo *allocInfo,
16949 RgSchDlHqProcCb *hqP
16952 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
/* Same pattern as the TX variant, but no duplicate-guard: retx lists
 * are built once per scheduling round (see inline comment below). */
16955 if (cellSch->dl.isDlFreqSel)
16957 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
16958 &allocInfo->dedAlloc.retxHqPLst, hqP);
16962 /* checking UE's presence in this lst is unnecessary */
16963 cmLListAdd2Tail(&allocInfo->dedAlloc.retxHqPLst, &hqP->reqLnk);
16964 hqP->reqLnk.node = (PTR)hqP;
16970 * @brief This function adds the UE to DLRbAllocInfo TX-RETX lst.
16974 * Function: rgSCHCmnDlRbInfoAddUeRetxTx
16975 * Purpose: This adds the UE to DLRbAllocInfo TX-RETX lst.
16977 * Invoked by: Common Scheduler
16979 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
16980 * @param[in] RgSchUeCb *ue
16981 * @param[in] RgSchDlHqProcCb *hqP
16985 static Void rgSCHCmnDlRbInfoAddUeRetxTx
16988 RgSchCmnDlRbAllocInfo *allocInfo,
16990 RgSchDlHqProcCb *hqP
16993 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
/* Same dispatch as TX/RETX variants, targeting the combined TX-RETX list. */
16996 if (cellSch->dl.isDlFreqSel)
16998 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
16999 &allocInfo->dedAlloc.txRetxHqPLst, hqP);
17003 cmLListAdd2Tail(&allocInfo->dedAlloc.txRetxHqPLst, &hqP->reqLnk);
17004 hqP->reqLnk.node = (PTR)hqP;
17010 * @brief This function adds the UE to DLRbAllocInfo NonSchdRetxLst.
17014 * Function: rgSCHCmnDlAdd2NonSchdRetxLst
17015 * Purpose: During RB estimation for RETX, if allocation fails
17016 * then appending it to NonSchdRetxLst, the further
17017 * action is taken as part of Finalization in
17018 * respective schedulers.
17020 * Invoked by: Common Scheduler
17022 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
17023 * @param[in] RgSchUeCb *ue
17024 * @param[in] RgSchDlHqProcCb *hqP
17028 static Void rgSCHCmnDlAdd2NonSchdRetxLst
17030 RgSchCmnDlRbAllocInfo *allocInfo,
17032 RgSchDlHqProcCb *hqP
17035 CmLList *schdLnkNode;
/* SPS HARQ processes are excluded from the non-scheduled retx list. */
17039 if ( (hqP->sch != (RgSchCmnDlHqProc *)NULLP) &&
17040 (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP)))
/* Initialise the schedule-list link and queue the HARQ proc for the
 * scheduler's finalisation pass. */
17046 schdLnkNode = &hqP->schdLstLnk;
17047 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
17048 cmLListAdd2Tail(&allocInfo->dedAlloc.nonSchdRetxHqPLst, schdLnkNode);
17056 * @brief This function adds the UE to DLRbAllocInfo NonSchdTxRetxLst.
17060 * Function: rgSCHCmnDlAdd2NonSchdTxRetxLst
17061 * Purpose: During RB estimation for TXRETX, if allocation fails
17062 * then appending it to NonSchdTxRetxLst, the further
17063 * action is taken as part of Finalization in
17064 * respective schedulers.
17066 * Invoked by: Common Scheduler
17068 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
17069 * @param[in] RgSchUeCb *ue
17070 * @param[in] RgSchDlHqProcCb *hqP
17076 * @brief This function handles the initialisation of DL HARQ/ACK feedback
17077 * timing information for eaach DL subframe.
17081 * Function: rgSCHCmnDlANFdbkInit
17082 * Purpose: Each DL subframe stores the sfn and subframe
17083 * information of UL subframe in which it expects
17084 * HARQ ACK/NACK feedback for this subframe.It
17085 * generates the information based on Downlink
17086 * Association Set Index table.
17088 * Invoked by: Scheduler
17090 * @param[in] RgSchCellCb* cell
17094 static S16 rgSCHCmnDlANFdbkInit(RgSchCellCb *cell)
17097 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17098 uint8_t maxDlSubfrms = cell->numDlSubfrms;
17102 uint8_t calcSfnOffset;
17104 uint8_t ulSfCnt =0;
17105 RgSchTddSubfrmInfo ulSubfrmInfo;
17106 uint8_t maxUlSubfrms;
17109 ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
17110 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17112 /* Generate HARQ ACK/NACK feedback information for each DL sf in a radio frame
17113 * Calculate this information based on DL Association set Index table */
/* For each UL subframe of the TDD config, derive from the DL association
 * set table the DL subframes whose ACK/NACK arrives in it. */
17114 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
17116 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
17117 RG_SCH_TDD_UL_SUBFRAME)
17119 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17123 for(idx=0; idx < rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
17124 numFdbkSubfrms; idx++)
/* Subtract K to locate the DL subframe; a negative result wraps into
 * a previous radio frame, captured in calcSfnOffset. */
17126 calcSfNum = sfNum - rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
17130 calcSfnOffset = RGSCH_CEIL(-calcSfNum, RGSCH_NUM_SUB_FRAMES);
17137 calcSfNum = ((RGSCH_NUM_SUB_FRAMES * calcSfnOffset) + calcSfNum)\
17138 % RGSCH_NUM_SUB_FRAMES;
/* Map the subframe number to the DL subframe array index, adjusting
 * for the UL subframes that precede it (1 or 2 switch points). */
17140 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
17144 else if((ulSubfrmInfo.switchPoints == 2) && (calcSfNum <= \
17145 RG_SCH_CMN_SPL_SUBFRM_6))
17147 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
17151 dlIdx = calcSfNum - maxUlSubfrms;
17154 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = sfNum;
17155 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = calcSfnOffset;
17156 cell->subFrms[dlIdx]->dlFdbkInfo.m = idx;
17158 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17161 /* DL subframes in the subsequent radio frames are initialized
17162 * with the previous radio frames */
17163 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;\
17166 sfNum = dlIdx - rgSchTddNumDlSubfrmTbl[ulDlCfgIdx]\
17167 [RGSCH_NUM_SUB_FRAMES-1];
17168 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = \
17169 cell->subFrms[sfNum]->dlFdbkInfo.subframe;
17170 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = \
17171 cell->subFrms[sfNum]->dlFdbkInfo.sfnOffset;
17172 cell->subFrms[dlIdx]->dlFdbkInfo.m = cell->subFrms[sfNum]->dlFdbkInfo.m;
17178 * @brief This function handles the initialization of uplink association
17179 * set information for each DL subframe.
17184 * Function: rgSCHCmnDlKdashUlAscInit
17185 * Purpose: Each DL sf stores the sfn and sf information of UL sf
17186 * in which it expects HQ ACK/NACK trans. It generates the information
17187 * based on k` in UL association set index table.
17189 * Invoked by: Scheduler
17191 * @param[in] RgSchCellCb* cell
/* Initialise, per DL subframe, the UL subframe (subframe + sfnOffset) in which
 * HARQ ACK/NACK for that DL subframe is expected, using the k' column of the
 * UL association set index table. DL indices never touched by the loop are
 * marked RGSCH_INVALID_INFO via the dlPres bitmap. (Elided listing — code
 * kept byte-identical; see chunk-level note.) */
17195 static S16 rgSCHCmnDlKdashUlAscInit(RgSchCellCb *cell)
17198 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17199 uint8_t maxDlSubfrms = cell->numDlSubfrms;
17204 uint8_t ulSfCnt =0;
17205 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
17206 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17207 uint8_t dlPres = 0;
17210 /* Generate ACK/NACK offset information for each DL subframe in a radio frame
17211 * Calculate this information based on K` in UL Association Set table */
17212 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
17214 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
17215 RG_SCH_TDD_UL_SUBFRAME)
17217 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* NOTE(review): table is indexed with ulDlCfgIdx-1 — presumably because
 * the k' table has no row for config 0; confirm this function is only
 * reached for ulDlCfgIdx >= 1. */
17221 calcSfNum = (sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum] + \
17222 RGSCH_NUM_SUB_FRAMES) % RGSCH_NUM_SUB_FRAMES;
17223 calcSfnOffset = sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum];
17224 if(calcSfnOffset < 0)
17226 calcSfnOffset = RGSCH_CEIL(-calcSfnOffset, RGSCH_NUM_SUB_FRAMES);
/* Translate the DL subframe number into a cell->subFrms[] index,
 * same special-subframe mapping as rgSCHCmnDlANFdbkInit. */
17233 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
17237 else if((ulSubfrmInfo.switchPoints == 2) &&
17238 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
17240 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
17244 dlIdx = calcSfNum - maxUlSubfrms;
17247 cell->subFrms[dlIdx]->ulAscInfo.subframe = sfNum;
17248 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset = calcSfnOffset;
17250 /* set dlIdx for which ulAscInfo is updated */
17251 dlPres = dlPres | (1 << dlIdx);
17252 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17255 /* Set Invalid information for which ulAscInfo is not present */
17257 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17260 /* If dlPres is 0, ulAscInfo is not present in that DL index */
17261 if(! ((dlPres >> sfCount)&0x01))
17263 cell->subFrms[sfCount]->ulAscInfo.sfnOffset =
17264 RGSCH_INVALID_INFO;
17265 cell->subFrms[sfCount]->ulAscInfo.subframe =
17266 RGSCH_INVALID_INFO;
17270 /* DL subframes in the subsequent radio frames are initialized
17271 * with the previous radio frames */
17272 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;
17276 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17277 cell->subFrms[dlIdx]->ulAscInfo.subframe =
17278 cell->subFrms[sfNum]->ulAscInfo.subframe;
17279 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset =
17280 cell->subFrms[sfNum]->ulAscInfo.sfnOffset;
17287 * @brief This function initialises the 'Np' value for 'p'
17291 * Function: rgSCHCmnDlNpValInit
17292 * Purpose: To initialise the 'Np' value for each 'p'. It is used
17293 * to find the mapping between nCCE and 'p' and used in
17294 * HARQ ACK/NACK reception.
17296 * Invoked by: Scheduler
17298 * @param[in] RgSchCellCb* cell
/* Fill cell->rgSchTddNpValTbl[]: Np values used to map nCCE to 'p' for TDD
 * HARQ ACK/NACK reception. Np[0] is fixed at 0; for p >= 1,
 * Np = floor(dlTotalBw * (p * RG_SCH_CMN_NUM_SUBCAR - 4) / 36).
 * (Elided listing — code kept byte-identical.) */
17302 static S16 rgSCHCmnDlNpValInit(RgSchCellCb *cell)
17307 /* Always Np is 0 for p=0 */
17308 cell->rgSchTddNpValTbl[0] = 0;
17310 for(idx=1; idx < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; idx++)
17312 np = cell->bwCfg.dlTotalBw * (idx * RG_SCH_CMN_NUM_SUBCAR - 4);
17313 cell->rgSchTddNpValTbl[idx] = (uint8_t) (np/36);
17320 * @brief This function handles the creation of RACH preamble
17321 * list to queue the preambles and process at the scheduled
17326 * Function: rgSCHCmnDlCreateRachPrmLst
17327 * Purpose: To create RACH preamble list based on RA window size.
17328 * It is used to queue the preambles and process it at the
17331 * Invoked by: Scheduler
17333 * @param[in] RgSchCellCb* cell
/* Allocate the RACH preamble (RA request) list, sized by the RA response
 * window: raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES
 * CmLListCp entries. The allocation's return code handling is elided from
 * this view; cell->raInfo takes ownership of the buffer. */
17337 static S16 rgSCHCmnDlCreateRachPrmLst(RgSchCellCb *cell)
17343 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
17345 lstSize = raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES;
17347 cell->raInfo.maxRaSize = raArrSz;
17348 ret = rgSCHUtlAllocSBuf(cell->instIdx,
17349 (Data **)(&cell->raInfo.raReqLst), (Size)(lstSize * sizeof(CmLListCp)));
17355 cell->raInfo.lstSize = lstSize;
17362 * @brief This function handles the initialization of RACH Response
17363 * information at each DL subframe.
17367 * Function: rgSCHCmnDlRachInfoInit
17368 * Purpose: Each DL subframe stores the sfn and subframe information of
17369 * possible RACH response allowed for UL subframes. It generates
17370 * the information based on PRACH configuration.
17372 * Invoked by: Scheduler
17374 * @param[in] RgSchCellCb* cell
/* For each UL (and special) subframe, compute the window of DL subframes in
 * which a RACH response for a preamble received there may be transmitted
 * (RA response window), record the candidates in a local rachRspLst[][]
 * matrix, note which entries to delete once the window expires, and finally
 * copy the result into the cell via rgSCHCmnDlCpyRachInfo().
 * (Elided listing — code kept byte-identical; some braces/declarations are
 * not visible here.) */
17378 static S16 rgSCHCmnDlRachInfoInit(RgSchCellCb *cell)
17381 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17383 uint8_t ulSfCnt =0;
17384 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
17385 [RGSCH_NUM_SUB_FRAMES-1];
17387 RgSchTddRachRspLst rachRspLst[3][RGSCH_NUM_SUB_FRAMES];
17392 uint8_t endSubfrmIdx;
17393 uint8_t startSubfrmIdx;
17395 RgSchTddRachDelInfo *delInfo;
17397 uint8_t numSubfrms;
17400 memset(rachRspLst, 0, sizeof(rachRspLst));
17402 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
17404 /* Include Special subframes */
17405 maxUlSubfrms = maxUlSubfrms + \
17406 rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx].switchPoints;
17407 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance to the next non-DL (UL or special) subframe. */
17409 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] ==
17410 RG_SCH_TDD_DL_SUBFRAME)
17412 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* RA response window: starts after the RAR wait period plus the
 * scheduler's RAR lead time, spans raWinSize subframes. */
17416 startWin = (sfNum + RG_SCH_CMN_RARSP_WAIT_PRD + \
17417 ((RgSchCmnCell *)cell->sc.sch)->dl.numRaSubFrms);
17418 endWin = (startWin + cell->rachCfg.raWinSize - 1);
17420 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][startWin%RGSCH_NUM_SUB_FRAMES];
17421 /* Find the next DL subframe starting from Subframe 0 */
17422 if((startSubfrmIdx % RGSCH_NUM_SUB_FRAMES) == 0)
17424 startWin = RGSCH_CEIL(startWin, RGSCH_NUM_SUB_FRAMES);
17425 startWin = startWin * RGSCH_NUM_SUB_FRAMES;
17429 rgSchTddLowDlSubfrmIdxTbl[ulDlCfgIdx][endWin%RGSCH_NUM_SUB_FRAMES];
17430 endWin = (endWin/RGSCH_NUM_SUB_FRAMES) * RGSCH_NUM_SUB_FRAMES \
17432 if(startWin > endWin)
17436 /* Find all the possible RACH Response transmission
17437 * time within the RA window size */
17438 startSubfrmIdx = startWin%RGSCH_NUM_SUB_FRAMES;
17439 for(sfnIdx = startWin/RGSCH_NUM_SUB_FRAMES;
17440 sfnIdx <= endWin/RGSCH_NUM_SUB_FRAMES; sfnIdx++)
17442 if(sfnIdx == endWin/RGSCH_NUM_SUB_FRAMES)
17444 endSubfrmIdx = endWin%RGSCH_NUM_SUB_FRAMES;
17448 endSubfrmIdx = RGSCH_NUM_SUB_FRAMES-1;
17451 /* Find all the possible RACH Response transmission
17452 * time within radio frame */
17453 for(subfrmIdx = startSubfrmIdx;
17454 subfrmIdx <= endSubfrmIdx; subfrmIdx++)
17456 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][subfrmIdx] ==
17457 RG_SCH_TDD_UL_SUBFRAME)
17461 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
17462 /* Find the next DL subframe starting from Subframe 0 */
17463 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
17467 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx], subfrmIdx);
17469 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
17470 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset = sfnIdx;
17471 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].subframe[numSubfrms]
17473 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms++;
17475 startSubfrmIdx = RG_SCH_CMN_SUBFRM_0;
17477 /* Update the subframes to be deleted at this subframe */
17478 /* Get the subframe after the end of RA window size */
17481 sfnOffset = endWin/RGSCH_NUM_SUB_FRAMES;
17484 sfnOffset += raArrSz;
17486 sfnIdx = (endWin/RGSCH_NUM_SUB_FRAMES) % raArrSz;
17488 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx],endSubfrmIdx-1);
17489 if((endSubfrmIdx == RGSCH_NUM_SUB_FRAMES) ||
17490 (rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx] ==
17491 RGSCH_NUM_SUB_FRAMES))
17494 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][RG_SCH_CMN_SUBFRM_0];
17498 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx];
/* Record, at the first DL subframe past the window, which RA request
 * subframe(s) become stale and must be purged. */
17501 delInfo = &rachRspLst[sfnIdx][subfrmIdx].delInfo;
17502 delInfo->sfnOffset = sfnOffset;
17503 delInfo->subframe[delInfo->numSubfrms] = sfNum;
17504 delInfo->numSubfrms++;
17506 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17509 ret = rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz);
17519 * @brief This function handles the initialization of PHICH information
17520 * for each DL subframe based on PHICH table.
17524 * Function: rgSCHCmnDlPhichOffsetInit
17525 * Purpose: Each DL subf stores the sfn and subf information of UL subframe
17526 * for which it trnsmts PHICH in this subframe. It generates the information
17527 * based on PHICH table.
17529 * Invoked by: Scheduler
17531 * @param[in] RgSchCellCb* cell
/* Initialise, per DL subframe, which UL subframe's PUSCH the PHICH carried in
 * that DL subframe acknowledges (subframe + sfnOffset), from the TDD K-PHICH
 * table. DL indices never reached get RGSCH_INVALID_INFO via the dlPres
 * bitmap; later radio frames copy the first frame's values.
 * (Elided listing — code kept byte-identical.) */
17535 static S16 rgSCHCmnDlPhichOffsetInit(RgSchCellCb *cell)
17538 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17539 uint8_t maxDlSubfrms = cell->numDlSubfrms;
17542 uint8_t dlPres = 0;
17543 uint8_t calcSfnOffset;
17545 uint8_t ulSfCnt =0;
17546 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
17547 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
17548 [RGSCH_NUM_SUB_FRAMES-1];
17551 /* Generate PHICH offset information for each DL subframe in a radio frame
17552 * Calculate this information based on K in PHICH table */
17553 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
17555 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
17556 RG_SCH_TDD_UL_SUBFRAME)
17558 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* DL subframe carrying the PHICH = (UL subframe + K) mod 10; the
 * division gives how many radio frames ahead it lands. */
17562 calcSfNum = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) % \
17563 RGSCH_NUM_SUB_FRAMES;
17564 calcSfnOffset = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) / \
17565 RGSCH_NUM_SUB_FRAMES;
/* Map DL subframe number to cell->subFrms[] index (same special-
 * subframe handling as the other per-subframe init routines). */
17567 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
17571 else if((ulSubfrmInfo.switchPoints == 2) &&
17572 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
17574 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
17578 dlIdx = calcSfNum - maxUlSubfrms;
17581 cell->subFrms[dlIdx]->phichOffInfo.subframe = sfNum;
17582 cell->subFrms[dlIdx]->phichOffInfo.numSubfrms = 1;
17584 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset = calcSfnOffset;
17586 /* set dlIdx for which phich offset is updated */
17587 dlPres = dlPres | (1 << dlIdx);
17588 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
17591 /* Set Invalid information for which phich offset is not present */
17593 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17596 /* If dlPres is 0, phich offset is not present in that DL index */
17597 if(! ((dlPres >> sfCount)&0x01))
17599 cell->subFrms[sfCount]->phichOffInfo.sfnOffset =
17600 RGSCH_INVALID_INFO;
17601 cell->subFrms[sfCount]->phichOffInfo.subframe =
17602 RGSCH_INVALID_INFO;
17603 cell->subFrms[sfCount]->phichOffInfo.numSubfrms = 0;
17607 /* DL subframes in the subsequent radio frames are
17608 * initialized with the previous radio frames */
17609 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms;
17610 dlIdx < maxDlSubfrms; dlIdx++)
17613 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
17615 cell->subFrms[dlIdx]->phichOffInfo.subframe =
17616 cell->subFrms[sfNum]->phichOffInfo.subframe;
17618 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset =
17619 cell->subFrms[sfNum]->phichOffInfo.sfnOffset;
17626 * @brief Update of scheduler variables per TTI.
17630 * Function: rgSCHCmnUpdVars
17631 * Purpose: Update of scheduler variables per TTI.
17633 * @param[in] RgSchCellCb *cell
/* Per-TTI refresh of the common UL scheduler indices for a TDD cell:
 * PUSCH scheduling index/HARQ proc, PHICH feedback indices (with the
 * config-0 retx special case), reception-request index, MSG3 index and
 * SPS UL reservation index; finally updates RACH (PDCCH-order) params.
 * (Elided listing — code kept byte-identical.) */
17637 Void rgSCHCmnUpdVars(RgSchCellCb *cell)
17639 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
17640 CmLteTimingInfo timeInfo;
17642 uint8_t ulSubframe;
17643 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
17644 uint8_t msg3Subfrm;
17647 /* ccpu00132654-ADD- Initializing all the indices in every subframe*/
17648 rgSCHCmnInitVars(cell);
17650 idx = (cell->crntTime.slot + TFU_ULCNTRL_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
17651 /* Calculate the UL scheduling subframe idx based on the
17653 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][idx] != 0)
17655 /* PUSCH transmission is based on offset from DL
17656 * PDCCH scheduling */
17657 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
17658 ulSubframe = rgSchTddPuschTxKTbl[ulDlCfgIdx][timeInfo.subframe];
17659 /* Add the DCI-0 to PUSCH time to get the time of UL subframe */
17660 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, ulSubframe);
17662 cellUl->schdTti = timeInfo.sfn * 10 + timeInfo.subframe;
17664 /* Fetch the corresponding UL subframe Idx in UL sf array */
17665 cellUl->schdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17666 /* Fetch the corresponding UL Harq Proc ID */
17667 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
17668 cellUl->schdTime = timeInfo;
/* Mval > 0 means PHICH feedback is due this TTI for some UL subframe(s). */
17670 Mval = rgSchTddPhichMValTbl[ulDlCfgIdx][idx];
17673 /* Fetch the tx time for DL HIDCI-0 */
17674 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
17675 /* Fetch the corresponding n-k tx time of PUSCH */
17676 cellUl->hqFdbkIdx[0] = rgSCHCmnGetPhichUlSfIdx(&timeInfo, cell);
17677 /* Retx will happen according to the Pusch k table */
17678 cellUl->reTxIdx[0] = cellUl->schdIdx;
17680 if(ulDlCfgIdx == 0)
17682 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[0] */
17683 cellUl->reTxIdx[0] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
17684 cellUl->hqFdbkIdx[0]);
17687 /* At Idx 1 store the UL SF adjacent(left) to the UL SF
17689 cellUl->hqFdbkIdx[1] = (cellUl->hqFdbkIdx[0]-1 +
17690 cellUl->numUlSubfrms) % cellUl->numUlSubfrms;
17691 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[1] */
17692 cellUl->reTxIdx[1] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
17693 cellUl->hqFdbkIdx[1]);
/* Reception request: only when the delta-adjusted subframe is UL. */
17698 idx = (cell->crntTime.slot + TFU_RECPREQ_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
17699 if (rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] == RG_SCH_TDD_UL_SUBFRAME)
17701 RGSCHCMNADDTOCRNTTIME(cell->crntTime, timeInfo, TFU_RECPREQ_DLDELTA)
17702 cellUl->rcpReqIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17704 idx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) % RGSCH_NUM_SUB_FRAMES;
17706 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
17707 special subframe */
17708 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] != RG_SCH_TDD_UL_SUBFRAME)
17710 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
17711 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
17712 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, msg3Subfrm);
17713 cellUl->msg3SchdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17714 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* SPS UL reservation: table entry 0 means no reservation this TTI. */
17717 if(!rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][idx])
17719 cellUl->spsUlRsrvIdx = RGSCH_INVALID_INFO;
17723 /* introduce some reuse with above code? */
17725 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
17726 //offst = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
17727 offst = rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][timeInfo.subframe];
17728 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, offst);
17729 cellUl->spsUlRsrvIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
17730 /* The harq proc continues to be accessed and used the same delta before
17731 * actual data occurance, and hence use the same idx */
17732 cellUl->spsUlRsrvHqProcIdx = cellUl->schdHqProcIdx;
17736 /* RACHO: update cmn sched specific RACH variables,
17737 * mainly the prachMaskIndex */
17738 rgSCHCmnUpdRachParam(cell);
17744 * @brief To get 'p' value from nCCE.
17748 * Function: rgSCHCmnGetPValFrmCCE
17749 * Purpose: Gets 'p' value for HARQ ACK/NACK reception from CCE.
17751 * @param[in] RgSchCellCb *cell
17752 * @param[in] uint8_t cce
/* Map a CCE index to its 'p' value for HARQ ACK/NACK reception by scanning
 * the precomputed Np table: the first i with cce < Np[i] identifies the
 * bucket. The return statements are elided from this view — presumably
 * i-1 inside the loop and a fallback after it; TODO confirm against the
 * full source. */
17756 uint8_t rgSCHCmnGetPValFrmCCE(RgSchCellCb *cell,uint8_t cce)
17760 for(i=1; i < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; i++)
17762 if(cce < cell->rgSchTddNpValTbl[i])
17771 /***********************************************************
17773 * Func : rgSCHCmnUlAdapRetx
17775 * Desc : Adaptive retransmission for an allocation.
17783 **********************************************************/
/* Adaptive UL retransmission for an allocation: advance the HARQ process
 * via rgSCHUhmRetx, then pick the current IMCS — RV-derived IMCS when the
 * redundancy version is non-zero, otherwise the original IMCS. (The 'else'
 * between the two assignments is elided from this listing.) */
17784 static Void rgSCHCmnUlAdapRetx(RgSchUlAlloc *alloc,RgSchUlHqProcCb *proc)
17787 rgSCHUhmRetx(proc, alloc);
17789 if (proc->rvIdx != 0)
17791 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[proc->rvIdx];
17796 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
17802 * @brief Scheduler invocation per TTI.
17806 * Function: rgSCHCmnHdlUlInactUes
17809 * Invoked by: Common Scheduler
17811 * @param[in] RgSchCellCb *cell
/* Collect UEs made UL-inactive by measurement-gap / ACK-NACK-repetition
 * handling and hand the list to the cell's specific UL scheduler so it can
 * deactivate them. Called per TTI by the common scheduler. */
17814 static Void rgSCHCmnHdlUlInactUes(RgSchCellCb *cell)
17816 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17817 CmLListCp ulInactvLst;
17818 /* Get a List of Inactv UEs for UL*/
17819 cmLListInit(&ulInactvLst);
17821 /* Trigger Spfc Schedulers with Inactive UEs */
17822 rgSCHMeasGapANRepGetUlInactvUe (cell, &ulInactvLst);
17823 /* take care of this in UL retransmission */
17824 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInactvLst);
17830 * @brief Scheduler invocation per TTI.
17834 * Function: rgSCHCmnHdlDlInactUes
17837 * Invoked by: Common Scheduler
17839 * @param[in] RgSchCellCb *cell
/* DL counterpart of rgSCHCmnHdlUlInactUes: gather DL-inactive UEs from the
 * measurement-gap / ACK-NACK-repetition module and pass them to the specific
 * DL scheduler. Called per TTI by the common scheduler. */
17842 static Void rgSCHCmnHdlDlInactUes(RgSchCellCb *cell)
17844 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17845 CmLListCp dlInactvLst;
17846 /* Get a List of Inactv UEs for DL */
17847 cmLListInit(&dlInactvLst);
17849 /* Trigger Spfc Schedulers with Inactive UEs */
17850 rgSCHMeasGapANRepGetDlInactvUe (cell, &dlInactvLst);
17852 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInactvLst);
17856 /* RACHO: Rach handover functions start here */
17857 /***********************************************************
17859 * Func : rgSCHCmnUeIdleExdThrsld
17861 * Desc : RETURN ROK if UE has been idle more
17870 **********************************************************/
/* Return ROK when the UE's idle time (subframes since its last UL
 * transmission) exceeds the per-UE idle threshold; otherwise presumably
 * RFAILED — the return lines are elided from this listing. */
17871 static S16 rgSCHCmnUeIdleExdThrsld(RgSchCellCb *cell,RgSchUeCb *ue)
17873 /* Time difference in subframes */
17874 uint32_t sfDiff = RGSCH_CALC_SF_DIFF(cell->crntTime, ue->ul.ulTransTime);
17876 if (sfDiff > (uint32_t)RG_SCH_CMN_UE_IDLE_THRSLD(ue))
17888 * @brief Scheduler processing for Ded Preambles on cell configuration.
17892 * Function : rgSCHCmnCfgRachDedPrm
17894 * This function does requisite initialisation
17895 * for RACH Ded Preambles.
17898 * @param[in] RgSchCellCb *cell
/* Scheduler-side initialisation for dedicated RACH preambles at cell config:
 * copies the preamble set, initialises the handover / PDCCH-order lists and
 * the rapId->UE map, then positions applFrm (next applicable PRACH
 * opportunity) at or after the current time honouring the configured SFN
 * parity (even/odd/any), and finally advances it until it is at least
 * RG_SCH_CMN_MIN_PRACH_OPPR_GAP subframes ahead. No-op when no preamble set
 * is configured. (Elided listing — code kept byte-identical.) */
17901 static Void rgSCHCmnCfgRachDedPrm(RgSchCellCb *cell)
17903 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
17904 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
17908 if (cell->macPreambleSet.pres == NOTPRSNT)
17912 cellSch->rachCfg.numDedPrm = cell->macPreambleSet.size;
17913 cellSch->rachCfg.dedPrmStart = cell->macPreambleSet.start;
17914 /* Initialize handover List */
17915 cmLListInit(&cellSch->rachCfg.hoUeLst);
17916 /* Initialize pdcch Order List */
17917 cmLListInit(&cellSch->rachCfg.pdcchOdrLst);
17919 /* Intialize the rapId to UE mapping structure */
17920 for (cnt = 0; cnt<cellSch->rachCfg.numDedPrm; cnt++)
17922 cellSch->rachCfg.rapIdMap[cnt].rapId = cellSch->rachCfg.dedPrmStart + \
17924 cmLListInit(&cellSch->rachCfg.rapIdMap[cnt].assgndUes);
17926 /* Perform Prach Mask Idx, remDedPrm, applFrm initializations */
17927 /* Set remDedPrm as numDedPrm */
17928 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
17929 /* Initialize applFrm */
17930 cellSch->rachCfg.prachMskIndx = 0;
17931 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_EVEN)
/* Even-SFN case: round current SFN up to the next even frame. */
17933 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + \
17934 (cell->crntTime.sfn % 2)) % RGSCH_MAX_SFN;
17937 else if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ODD)
17939 if((cell->crntTime.sfn%2) == 0)
17941 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + 1)\
17948 cellSch->rachCfg.applFrm.sfn = cell->crntTime.sfn;
17950 /* Initialize cellSch->rachCfg.applFrm as >= crntTime.
17951 * This is because of RGSCH_CALC_SF_DIFF logic */
17952 if (cellSch->rachCfg.applFrm.sfn == cell->crntTime.sfn)
/* Walk the configured PRACH subframe list until we find an occasion
 * strictly after the current subframe within this frame. */
17954 while (cellSch->rachCfg.prachMskIndx < cell->rachCfg.raOccasion.size)
17956 if (cell->crntTime.slot <\
17957 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx])
17961 cellSch->rachCfg.prachMskIndx++;
17963 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size)
/* No occasion left this frame: jump 1 frame (SFN_ANY) or 2 frames
 * (fixed parity) ahead and restart at mask index 0. */
17965 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
17967 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) %\
17972 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) %\
17975 cellSch->rachCfg.prachMskIndx = 0;
17977 cellSch->rachCfg.applFrm.slot = \
17978 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
17982 cellSch->rachCfg.applFrm.slot = \
17983 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
17986 /* Note first param to this macro should always be the latest in time */
17987 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
17988 while (sfDiff <= gap)
17990 rgSCHCmnUpdNxtPrchMskIdx(cell);
17991 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
17998 * @brief Updates the PRACH MASK INDEX.
18002 * Function: rgSCHCmnUpdNxtPrchMskIdx
18003 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
18004 * CFG is always >= "n"+"DELTA", where "n" is the crntTime
18005 * of the cell. If not, applFrm is updated to the next avl
18006 * PRACH opportunity as per the PRACH Cfg Index configuration.
18009 * Invoked by: Common Scheduler
18011 * @param[in] RgSchCellCb *cell
/* Advance applFrm/prachMskIndx to the next PRACH opportunity: when the last
 * occasion of the current frame is consumed, wrap the mask index to 0 and
 * step the SFN by 1 (SFN_ANY) or 2 (fixed even/odd parity); otherwise just
 * move to the next configured occasion subframe in the same frame. */
18014 static Void rgSCHCmnUpdNxtPrchMskIdx(RgSchCellCb *cell)
18016 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18018 /* Determine the next prach mask Index */
18019 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size - 1)
18021 /* PRACH within applFrm.sfn are done, go to next AVL sfn */
18022 cellSch->rachCfg.prachMskIndx = 0;
18023 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
18025 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) % \
18028 else/* RGR_SFN_EVEN or RGR_SFN_ODD */
18030 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) % \
18033 cellSch->rachCfg.applFrm.slot = cell->rachCfg.raOccasion.\
18036 else /* applFrm.sfn is still valid */
18038 cellSch->rachCfg.prachMskIndx += 1;
18039 if ( cellSch->rachCfg.prachMskIndx < RGR_MAX_SUBFRAME_NUM )
18041 cellSch->rachCfg.applFrm.slot = \
18042 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
18049 * @brief Updates the Ded preamble RACH parameters
18054 * Function: rgSCHCmnUpdRachParam
18055 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
18056 * CFG is always >= "n"+"6"+"DELTA", where "n" is the crntTime
18057 * of the cell. If not, applFrm is updated to the next avl
18058 * PRACH opportunity as per the PRACH Cfg Index configuration,
18059 * accordingly the "remDedPrm" is reset to "numDedPrm" and
18060 * "prachMskIdx" field is updated as per "applFrm".
18063 * Invoked by: Common Scheduler
18065 * @param[in] RgSchCellCb *cell
/* Per-TTI upkeep of dedicated-preamble RACH state: if the stored applFrm is
 * no longer far enough ahead of the current time (<= min PRACH gap), move it
 * to the next opportunity and replenish remDedPrm to numDedPrm. No-op when
 * no dedicated preamble set is configured. (Elided listing — the comparison
 * between sfDiff and the gap is among the missing lines.) */
18068 static Void rgSCHCmnUpdRachParam(RgSchCellCb *cell)
18071 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18072 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
18075 if (cell->macPreambleSet.pres == NOTPRSNT)
18079 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, \
18083 /* applFrm is still a valid next Prach Oppurtunity */
18086 rgSCHCmnUpdNxtPrchMskIdx(cell);
18087 /* Reset remDedPrm as numDedPrm */
18088 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
18094 * @brief Dedicated Preamble allocation function.
18098 * Function: rgSCHCmnAllocPOParam
18099 * Purpose: Allocate pdcch, rapId and PrachMskIdx.
18100 * Set mapping of UE with the allocated rapId.
18102 * Invoked by: Common Scheduler
18104 * @param[in] RgSchCellCb *cell
18105 * @param[in] RgSchDlSf *dlSf
18106 * @param[in] RgSchUeCb *ue
18107 * @param[out] RgSchPdcch **pdcch
18108 * @param[out] uint8_t *rapId
18109 * @param[out] uint8_t *prachMskIdx
/* Allocate the resources needed to send a PDCCH order to 'ue' in 'dlSf':
 * a format-1A PDCCH plus, when dedicated preambles are configured, a rapId
 * and PRACH mask index; the UE is then linked into the rapId->UE map.
 * Without dedicated preambles only the PDCCH is allocated (caller uses
 * rapId=0/prachMskIdx=0). Failure paths (no remaining preamble / no PDCCH)
 * are elided from this listing; presumably they return RFAILED. */
18112 static S16 rgSCHCmnAllocPOParam
18117 RgSchPdcch **pdcch,
18119 uint8_t *prachMskIdx
18123 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18124 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18127 if (cell->macPreambleSet.pres == PRSNT_NODEF)
18129 if (cellSch->rachCfg.remDedPrm == 0)
18133 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
18134 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
18138 /* The stored prachMskIdx is the index of PRACH Oppurtunities in
18139 * raOccasions.subframes[].
18140 * Converting the same to the actual PRACHMskIdx to be transmitted. */
18141 *prachMskIdx = cellSch->rachCfg.prachMskIndx + 1;
18142 /* Distribution starts from dedPrmStart till dedPrmStart + numDedPrm */
18143 *rapId = cellSch->rachCfg.dedPrmStart +
18144 cellSch->rachCfg.numDedPrm - cellSch->rachCfg.remDedPrm;
18145 cellSch->rachCfg.remDedPrm--;
18146 /* Map UE with the allocated RapId */
18147 ueDl->rachInfo.asgnOppr = cellSch->rachCfg.applFrm;
18148 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, cellSch->rachCfg.rapIdMap, (*rapId - cellSch->rachCfg.dedPrmStart));
18149 cmLListAdd2Tail(&cellSch->rachCfg.rapIdMap[*rapId - cellSch->rachCfg.dedPrmStart].assgndUes,
18150 &ueDl->rachInfo.rapIdLnk);
18151 ueDl->rachInfo.rapIdLnk.node = (PTR)ue;
18152 ueDl->rachInfo.poRapId = *rapId;
18154 else /* if dedicated preambles not configured */
18156 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
18157 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
18169 * @brief Downlink Scheduling Handler.
18173 * Function: rgSCHCmnGenPdcchOrder
18174 * Purpose: For each UE in PO Q, grab a PDCCH,
18175 * get an available ded RapId and fill PDCCH
18176 * with PO information.
18178 * Invoked by: Common Scheduler
18180 * @param[in] RgSchCellCb *cell
18181 * @param[in] RgSchDlSf *dlSf
/* For each UE queued for a PDCCH order: skip UEs inactive due to measurement
 * gap or DRX; otherwise allocate PDCCH + rapId + prachMskIdx, fill the PDCCH
 * order into the subframe, dequeue the UE and reset its power state. The
 * loop stops entirely (break) when allocation fails — remaining UEs retry
 * in a later TTI once the PRACH mask index advances. (Elided listing —
 * loop header/advance lines are among the missing ones.) */
18184 static Void rgSCHCmnGenPdcchOrder(RgSchCellCb *cell,RgSchDlSf *dlSf)
18186 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18187 CmLList *node = cellSch->rachCfg.pdcchOdrLst.first;
18190 uint8_t prachMskIdx;
18191 RgSchPdcch *pdcch = NULLP;
18195 ue = (RgSchUeCb *)node->node;
18197 /* Skip sending for this subframe is Measuring or inActive in UL due
18198 * to MeasGap or inactie due to DRX
18200 if ((ue->measGapCb.isMeasuring == TRUE) ||
18201 (ue->ul.ulInactvMask & RG_MEASGAP_INACTIVE) ||
18202 (ue->isDrxEnabled &&
18203 ue->dl.dlInactvMask & RG_DRX_INACTIVE)
18208 if (rgSCHCmnAllocPOParam(cell, dlSf, ue, &pdcch, &rapId,\
18209 &prachMskIdx) != ROK)
18211 /* No More rapIds left for the valid next avl Oppurtunity.
18212 * Unsatisfied UEs here would be given a chance, when the
18213 * prach Mask Index changes as per rachUpd every TTI */
18215 /* PDDCH can also be ordered with rapId=0, prachMskIdx=0
18216 * so that UE triggers a RACH procedure with non-dedicated preamble.
18217 * But the implementation here does not do this. Instead, the "break"
18218 * here implies, that PDCCH Odr always given with valid rapId!=0,
18219 * prachMskIdx!=0 if dedicated preambles are configured.
18220 * If not configured, then trigger a PO with rapId=0,prchMskIdx=0*/
18223 /* Fill pdcch with pdcch odr information */
18224 rgSCHCmnFillPdcchOdr2Sf(cell, ue, pdcch, rapId, prachMskIdx);
18225 /* Remove this UE from the PDCCH ORDER QUEUE */
18226 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
18227 /* Reset UE's power state */
18228 rgSCHPwrUeReset(cell, ue);
18235 * @brief This function add UE to PdcchOdr Q if not already present.
18239 * Function: rgSCHCmnDlAdd2PdcchOdrQ
18242 * Invoked by: CMN Scheduler
18244 * @param[in] RgSchCellCb* cell
18245 * @param[in] RgSchUeCb* ue
/* Append 'ue' to the cell's PDCCH-order queue, guarded so a UE already
 * queued (poLnk.node set) is not added twice. */
18249 static Void rgSCHCmnDlAdd2PdcchOdrQ(RgSchCellCb *cell,RgSchUeCb *ue)
18251 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18252 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18255 if (ueDl->rachInfo.poLnk.node == NULLP)
18257 cmLListAdd2Tail(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
18258 ueDl->rachInfo.poLnk.node = (PTR)ue;
18265 * @brief This function rmvs UE to PdcchOdr Q if not already present.
18269 * Function: rgSCHCmnDlRmvFrmPdcchOdrQ
18272 * Invoked by: CMN Scheduler
18274 * @param[in] RgSchCellCb* cell
18275 * @param[in] RgSchUeCb* ue
/* Remove 'ue' from the cell's PDCCH-order queue and clear its link node so
 * rgSCHCmnDlAdd2PdcchOdrQ can re-queue it later. Caller must ensure the UE
 * is currently queued. */
18279 static Void rgSCHCmnDlRmvFrmPdcchOdrQ(RgSchCellCb *cell,RgSchUeCb *ue)
18281 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18282 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18284 cmLListDelFrm(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
18285 ueDl->rachInfo.poLnk.node = NULLP;
18290 * @brief Fill pdcch with PDCCH order information.
18294 * Function: rgSCHCmnFillPdcchOdr2Sf
18295 * Purpose: Fill PDCCH with PDCCH order information,
18297 * Invoked by: Common Scheduler
18299 * @param[in] RgSchUeCb *ue
18300 * @param[in] RgSchPdcch *pdcch
18301 * @param[in] uint8_t rapId
18302 * @param[in] uint8_t prachMskIdx
/* Populate an already-allocated PDCCH with DCI format-1A PDCCH-order fields
 * (UE's RNTI, preamble index = rapId, PRACH mask index), and — when aperiodic
 * CQI is configured — arm an immediate aperiodic CQI request on the serving
 * cell (CR ccpu00144525). */
18305 static Void rgSCHCmnFillPdcchOdr2Sf
18311 uint8_t prachMskIdx
18314 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
18317 pdcch->rnti = ue->ueId;
18318 pdcch->dci.dciFormat = TFU_DCI_FORMAT_1A;
18319 pdcch->dci.u.format1aInfo.isPdcchOrder = TRUE;
18320 pdcch->dci.u.format1aInfo.t.pdcchOrder.preambleIdx = rapId;
18321 pdcch->dci.u.format1aInfo.t.pdcchOrder.prachMaskIdx = prachMskIdx;
18323 /* Request for APer CQI immediately after PDCCH Order */
18324 /* CR ccpu00144525 */
18326 if(ue->dl.ueDlCqiCfg.aprdCqiCfg.pres)
18328 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
18329 acqiCb->aCqiTrigWt = 0;
18338 * @brief UE deletion for scheduler.
18342 * Function : rgSCHCmnDelRachInfo
18344 * This functions deletes all scheduler information
18345 * pertaining to an UE.
18347 * @param[in] RgSchCellCb *cell
18348 * @param[in] RgSchUeCb *ue
/* Detach a UE from all common-scheduler RACH bookkeeping on UE deletion:
 * the PDCCH-order queue, the handover list, and the rapId->UE assignment
 * list (each only if the corresponding link node is set). */
18351 static Void rgSCHCmnDelRachInfo(RgSchCellCb *cell,RgSchUeCb *ue)
18353 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18354 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18358 if (ueDl->rachInfo.poLnk.node)
18360 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
18362 if (ueDl->rachInfo.hoLnk.node)
18364 cmLListDelFrm(&cellSch->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
18365 ueDl->rachInfo.hoLnk.node = NULLP;
18367 if (ueDl->rachInfo.rapIdLnk.node)
18369 rapIdIdx = ueDl->rachInfo.poRapId - cellSch->rachCfg.dedPrmStart;
18370 cmLListDelFrm(&cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes,
18371 &ueDl->rachInfo.rapIdLnk);
18372 ueDl->rachInfo.rapIdLnk.node = NULLP;
18378 * @brief This function retrieves the ue which has sent this raReq
18379 * and it allocates grant for UEs undergoing (for which RAR
18380 * is being generated) HandOver/PdcchOrder.
18385 * Function: rgSCHCmnHdlHoPo
18386 * Purpose: This function retrieves the ue which has sent this raReq
18387 * and it allocates grant for UEs undergoing (for which RAR
18388 * is being generated) HandOver/PdcchOrder.
18390 * Invoked by: Common Scheduler
18392 * @param[in] RgSchCellCb *cell
18393 * @param[out] CmLListCp *raRspLst
18394 * @param[in] RgSchRaReqInfo *raReq
/* RAR generation path for a UE identified from a dedicated-preamble RA
 * request (handover or PDCCH order): notify DRX of the dedicated RA when
 * DRX is enabled, then allocate the HO/PO grant. */
18398 static Void rgSCHCmnHdlHoPo
18401 CmLListCp *raRspLst,
18402 RgSchRaReqInfo *raReq
18405 RgSchUeCb *ue = raReq->ue;
18407 if ( ue->isDrxEnabled )
18409 rgSCHDrxDedRa(cell,ue);
18411 rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq);
18416 * @brief This function retrieves the UE which has sent this raReq
18417 * for handover case.
18422 * Function: rgSCHCmnGetHoUe
18423 * Purpose: This function retrieves the UE which has sent this raReq
18424 * for handover case.
18426 * Invoked by: Common Scheduler
18428 * @param[in] RgSchCellCb *cell
18429 * @param[in] RgSchRaReqInfo *raReq
18430 * @return RgSchUeCb*
/* Linear search of the handover UE list for the UE whose stored hoRapId
 * matches 'rapId'; the matching return and the NULLP fall-through are
 * elided from this listing. */
18433 RgSchUeCb* rgSCHCmnGetHoUe(RgSchCellCb *cell,uint16_t rapId)
18435 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18439 RgSchCmnDlUe *ueDl;
18441 ueLst = &cellSch->rachCfg.hoUeLst;
18442 node = ueLst->first;
18445 ue = (RgSchUeCb *)node->node;
18447 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18448 if (ueDl->rachInfo.hoRapId == rapId)
/* Walk the handover UE list and unlink any UE whose hoRapId equals the
 * given dedicated preamble id, clearing its hoLnk node. (Loop-advance
 * lines are elided from this listing.) */
18456 static Void rgSCHCmnDelDedPreamble(RgSchCellCb *cell,uint8_t preambleId)
18458 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18462 RgSchCmnDlUe *ueDl;
18464 ueLst = &cellSch->rachCfg.hoUeLst;
18465 node = ueLst->first;
18468 ue = (RgSchUeCb *)node->node;
18470 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18471 if (ueDl->rachInfo.hoRapId == preambleId)
18473 cmLListDelFrm(ueLst, &ueDl->rachInfo.hoLnk);
18474 ueDl->rachInfo.hoLnk.node = (PTR)NULLP;
18480 * @brief This function retrieves the UE which has sent this raReq
18481 * for PDCCh Order case.
18486 * Function: rgSCHCmnGetPoUe
18487 * Purpose: This function retrieves the UE which has sent this raReq
18488 * for PDCCH Order case.
18490 * Invoked by: Common Scheduler
18492 * @param[in] RgSchCellCb *cell
18493 * @param[in] RgSchRaReqInfo *raReq
18494 * @return RgSchUeCb*
18497 RgSchUeCb* rgSCHCmnGetPoUe
18501 CmLteTimingInfo timingInfo
18504 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
18508 RgSchCmnDlUe *ueDl;
/* Dedicated preambles are numbered from dedPrmStart; translate the
 * received RAPID into an index of the per-preamble assigned-UE list. */
18511 rapIdIdx = rapId -cellSch->rachCfg.dedPrmStart;
18512 ueLst = &cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes;
18513 node = ueLst->first;
18516 ue = (RgSchUeCb *)node->node;
18518 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
18519 /* Remove UEs irrespective.
18520 * Old UE associations are removed.*/
18521 cmLListDelFrm(ueLst, &ueDl->rachInfo.rapIdLnk);
18522 ueDl->rachInfo.rapIdLnk.node = (PTR)NULLP;
/* Only the UE whose PDCCH-order assignment opportunity matches the RA
 * request's timing actually sent this preamble. */
18523 if (RGSCH_TIMEINFO_SAME(ueDl->rachInfo.asgnOppr, timingInfo))
18534 * @brief This function returns the valid UL cqi for a given UE.
18538 * Function: rgSCHCmnUlGetCqi
18539 * Purpose: This function returns the "valid UL cqi" for a given UE
18540 * based on UE category
18542 * Invoked by: Scheduler
18544 * @param[in] RgSchUeCb *ue
18545 * @param[in] uint8_t ueCtgy
18548 uint8_t rgSCHCmnUlGetCqi
18552 CmLteUeCategory ueCtgy
18555 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Start from the category-derived ceiling; the reported CQI is used
 * only if it does not exceed this ceiling (CAT-5 is never clipped). */
18559 cqi = ueUl->maxUlCqi;
/* NOTE(review): the validUlCqi and crntUlCqi[0] branches below look like
 * alternate compile-time variants of the same clipping rule — confirm
 * against the full (un-elided) source. */
18561 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
18562 (ueUl->validUlCqi > ueUl->maxUlCqi)))
18564 cqi = ueUl->validUlCqi;
18567 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
18568 (ueUl->crntUlCqi[0] > ueUl->maxUlCqi )))
18570 cqi = ueUl->crntUlCqi[0];
18574 }/* End of rgSCHCmnUlGetCqi */
18576 /***********************************************************
18578 * Func : rgSCHCmnUlRbAllocForPoHoUe
18580 * Desc : Do uplink RB allocation for a HO/PO UE.
18584 * Notes: Note that as of now, for retx, maxRb
18585 * is not considered. Alternatives, such
18586 * as dropping retx if it crosses maxRb
18587 * could be considered.
18591 **********************************************************/
18592 static S16 rgSCHCmnUlRbAllocForPoHoUe
18600 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18601 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18602 uint8_t sbSize = cellUl->sbSize;
18603 uint32_t maxBits = ue->ul.maxBytesPerUePerTti*8;
18605 RgSchUlAlloc *alloc;
/* The msg3 HARQ process reserved for this scheduling occasion */
18615 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->msg3SchdHqProcIdx];
18616 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* No free hole in the UL subframe => no bandwidth to allocate */
18618 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
18622 /*MS_WORKAROUND for HO ccpu00121116*/
/* Derive iTbs/iMcs from the category-clipped CQI; table choice depends
 * on normal vs extended UL cyclic prefix. */
18623 cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
18624 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend], cqi);
18625 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
18626 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Msg3 MCS is capped; loop re-derives iTbs/iMcs (presumably after
 * lowering cqi in the elided loop body — TODO confirm) until it fits. */
18627 while(iMcs > RG_SCH_CMN_MAX_MSG3_IMCS)
18630 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
18631 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg);
18633 /* Filling the modorder in the grant structure*/
18634 RG_SCH_UL_MCS_TO_MODODR(iMcs,modOdr);
/* Spectral-efficiency lookup differs for normal vs extended CP */
18635 if (!cell->isCpUlExtend)
18637 eff = rgSchCmnNorUlEff[0][iTbs];
18641 eff = rgSchCmnExtUlEff[0][iTbs];
18644 bits = ueUl->alloc.reqBytes * 8;
18646 #if (ERRCLASS & ERRCLS_DEBUG)
/* Request smaller than the minimum TB for this iTbs: allocate the
 * minimum number of subbands instead of deriving from bits/eff. */
18653 if (bits < rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs))
18656 nPrb = numSb * sbSize;
/* Clamp the demand to the per-UE per-TTI byte budget */
18660 if (bits > maxBits)
18663 nPrb = bits * 1024 / eff / RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
18668 numSb = nPrb / sbSize;
18672 /*ccpu00128775:MOD-Change to get upper threshold nPrb*/
18673 nPrb = RGSCH_CEIL((RGSCH_CEIL(bits * 1024, eff)),
18674 RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl));
18679 numSb = RGSCH_DIV_ROUND(nPrb, sbSize);
/* Carve the subbands out of the subframe, bounded by maxSbPerUe */
18684 alloc = rgSCHCmnUlSbAlloc(sf, (uint8_t)RGSCH_MIN(numSb, cellUl->maxSbPerUe),\
18686 if (alloc == NULLP)
18688 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
18689 "rgSCHCmnUlRbAllocForPoHoUe(): Could not get UlAlloc");
18692 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
18694 /* Filling the modorder in the grant structure start*/
18695 alloc->grnt.modOdr = (TfuModScheme) modOdr;
18696 alloc->grnt.iMcs = iMcs;
18697 alloc->grnt.iMcsCrnt = iMcsCrnt;
18698 alloc->grnt.hop = 0;
18699 /* Fix for ccpu00123915*/
/* This grant is treated as a msg3 grant and tied to the msg3 HARQ proc */
18700 alloc->forMsg3 = TRUE;
18701 alloc->hqProc = proc;
18702 alloc->hqProc->ulSfIdx = cellUl->msg3SchdIdx;
18704 alloc->rnti = ue->ueId;
18705 /* updating initNumRbs in case of HO */
18707 ue->initNumRbs = alloc->grnt.numRb;
18709 ueUl->alloc.alloc = alloc;
/* Transport block size from the standard TBS table for the final RB count */
18710 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
18711 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
18712 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
18713 /* MS_WORKAROUND for HO ccpu00121124*/
18714 /*[Adi temp change] Need to fil modOdr */
18715 RG_SCH_UL_MCS_TO_MODODR(alloc->grnt.iMcsCrnt,alloc->grnt.modOdr);
/* Register a fresh transmission on the msg3 HARQ process */
18716 rgSCHUhmNewTx(proc, ueUl->hqEnt.maxHqRetx, alloc);
18717 /* No grant attr recorded now */
18722 * @brief This function allocates grant for UEs undergoing (for which RAR
18723 * is being generated) HandOver/PdcchOrder.
18728 * Function: rgSCHCmnAllocPoHoGrnt
18729 * Purpose: This function allocates grant for UEs undergoing (for which RAR
18730 * is being generated) HandOver/PdcchOrder.
18732 * Invoked by: Common Scheduler
18734 * @param[in] RgSchCellCb *cell
18735 * @param[out] CmLListCp *raRspLst,
18736 * @param[in] RgSchUeCb *ue
18737 * @param[in] RgSchRaReqInfo *raReq
18741 static Void rgSCHCmnAllocPoHoGrnt
18744 CmLListCp *raRspLst,
18746 RgSchRaReqInfo *raReq
18749 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18750 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* The UL subframe in which msg3 (here: the HO/PO grant) is scheduled */
18752 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
18755 /* Clearing previous allocs if any*/
18756 rgSCHCmnUlUeDelAllocs(cell, ue);
18757 /* Fix : syed allocs are limited */
/* Bail out when the target subframe already holds its maximum allocs */
18758 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Minimum grant size defined for HO/PDCCH-order RAR grants */
18762 ueUl->alloc.reqBytes = RG_SCH_MIN_GRNT_HOPO;
18763 if (rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, RGSCH_MAX_UL_RB) != ROK)
18768 /* Fill grant information */
18769 grnt = &ueUl->alloc.alloc->grnt;
18774 RLOG_ARG1(L_ERROR,DBG_INSTID,cell->instIdx, "Failed to get"
18775 "the grant for HO/PDCCH Order. CRNTI:%d",ue->ueId);
/* Copy the allocated grant into the UE's RAR grant record; this is the
 * payload the RAR message will carry (rapId, hopping, RBs, TPC, MCS, TA). */
18778 ue->ul.rarGrnt.rapId = raReq->raReq.rapId;
18779 ue->ul.rarGrnt.hop = grnt->hop;
18780 ue->ul.rarGrnt.rbStart = grnt->rbStart;
18781 ue->ul.rarGrnt.numRb = grnt->numRb;
18782 ue->ul.rarGrnt.tpc = grnt->tpc;
18783 ue->ul.rarGrnt.iMcsCrnt = grnt->iMcsCrnt;
18784 ue->ul.rarGrnt.ta.pres = TRUE;
18785 ue->ul.rarGrnt.ta.val = raReq->raReq.ta;
18786 ue->ul.rarGrnt.datSz = grnt->datSz;
/* Aperiodic CQI request piggybacked on the RAR grant, rate-limited per
 * UL subframe by RG_SCH_MAX_ACQI_PER_ULSF */
18787 if((sf->numACqiCount < RG_SCH_MAX_ACQI_PER_ULSF) && (RG_SCH_APCQI_NO != ue->dl.reqForCqi))
18791 /* Send two bits cqireq field if more than one cells are configured else one*/
18792 for (idx = 1;idx < CM_LTE_MAX_CELLS;idx++)
18794 if (ue->cellInfo[idx] != NULLP)
18796 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
/* idx reaching CM_LTE_MAX_CELLS means no SCell configured: single-cell case */
18800 if (idx == CM_LTE_MAX_CELLS)
18803 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
/* Request consumed: reset the pending flag and count this ACQI */
18805 ue->dl.reqForCqi = RG_SCH_APCQI_NO;
18806 sf->numACqiCount++;
18810 ue->ul.rarGrnt.cqiReqBit = 0;
18812 /* Attach Ho/Po allocation to RAR Rsp cont free Lst */
18813 cmLListAdd2Tail(raRspLst, &ue->ul.rarGrnt.raRspLnk);
18814 ue->ul.rarGrnt.raRspLnk.node = (PTR)ue;
18820 * @brief This is a utility function to set the fields in
18821 * an UL harq proc which is identified for non-adaptive retx
18825 * Function: rgSCHCmnUlNonadapRetx
18826 * Purpose: Sets the fields in UL Harq proc for non-adaptive retx
18828 * @param[in] RgSchCmnUlCell *cellUl
18829 * @param[out] RgSchUlAlloc *alloc
18830 * @param[in] uint8_t idx
18835 static Void rgSCHCmnUlNonadapRetx
18837 RgSchCmnUlCell *cellUl,
18838 RgSchUlAlloc *alloc,
/* Advance HARQ state (retx count, RV) for this retransmission */
18842 rgSCHUhmRetx(alloc->hqProc, alloc);
18844 /* Update alloc to retx */
18845 alloc->hqProc->isRetx = TRUE;
18846 alloc->hqProc->ulSfIdx = cellUl->reTxIdx[idx];
/* For a non-zero redundancy version the current MCS index is taken from
 * the RV->iMcs table (the special MCS values signalling RV implicitly);
 * RV 0 re-uses the original MCS. */
18848 if (alloc->hqProc->rvIdx != 0)
18850 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[alloc->hqProc->rvIdx];
18854 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
18856 alloc->grnt.isRtx = TRUE;
/* Non-adaptive retx: same resources as before, so no PDCCH is sent */
18857 alloc->pdcch = NULLP;
18861 * @brief Check if 2 allocs overlap
18865 * Function : rgSCHCmnUlAllocsOvrLap
18867 * - Return TRUE if alloc1 and alloc2 overlap.
18869 * @param[in] RgSchUlAlloc *alloc1
18870 * @param[in] RgSchUlAlloc *alloc2
18873 static Bool rgSCHCmnUlAllocsOvrLap(RgSchUlAlloc *alloc1,RgSchUlAlloc *alloc2)
18876 if (((alloc1->sbStart >= alloc2->sbStart) &&
18877 (alloc1->sbStart <= alloc2->sbStart + alloc2->numSb-1)) ||
18878 ((alloc2->sbStart >= alloc1->sbStart) &&
18879 (alloc2->sbStart <= alloc1->sbStart + alloc1->numSb-1)))
18886 * @brief Copy allocation Info from src to dst.
18890 * Function : rgSCHCmnUlCpyAllocInfo
18892 * - Copy allocation Info from src to dst.
18894 * @param[in] RgSchUlAlloc *srcAlloc
18895 * @param[in] RgSchUlAlloc *dstAlloc
18898 static Void rgSCHCmnUlCpyAllocInfo(RgSchCellCb *cell,RgSchUlAlloc *srcAlloc,RgSchUlAlloc *dstAlloc)
18900 RgSchCmnUlUe *ueUl;
18902 dstAlloc->grnt = srcAlloc->grnt;
18903 dstAlloc->hqProc = srcAlloc->hqProc;
18904 /* Fix : syed During UE context release, hqProc->alloc
18905 * was pointing to srcAlloc instead of dstAlloc and
18906 * freeing from incorrect sf->allocDb was
18907 * corrupting the list. */
18908 /* In case of SPS Occasion Allocation is done in advance and
18909 at a later time Hq Proc is linked. Hence HqProc
18910 pointer in alloc shall be NULL */
/* Re-point the HARQ proc's back-reference to the destination alloc;
 * hqProc may legitimately be NULL for an SPS occasion (see note above). */
18912 if (dstAlloc->hqProc)
18915 dstAlloc->hqProc->alloc = dstAlloc;
18917 dstAlloc->ue = srcAlloc->ue;
18918 dstAlloc->rnti = srcAlloc->rnti;
18919 dstAlloc->forMsg3 = srcAlloc->forMsg3;
18920 dstAlloc->raCb = srcAlloc->raCb;
18921 dstAlloc->pdcch = srcAlloc->pdcch;
18922 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* Keep the UE's cached current-alloc pointer consistent with the copy */
18925 ueUl = RG_SCH_CMN_GET_UL_UE(dstAlloc->ue,cell);
18926 ueUl->alloc.alloc = dstAlloc;
/* If UL SPS tracked srcAlloc as its current allocation, move that
 * tracking to dstAlloc as well. */
18928 if (dstAlloc->ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
18930 if((dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc != NULLP)
18931 && (dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc == srcAlloc))
18933 dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc = dstAlloc;
18942 * @brief Update TX and RETX subframe's allocation
18947 * Function : rgSCHCmnUlInsAllocFrmNewSf2OldSf
18949 * - Release all preassigned allocations of newSf and merge
18951 * - If alloc of newSf collide with one or more allocs of oldSf
18952 * - mark all such allocs of oldSf for Adaptive Retx.
18953 * - Swap the alloc and hole DB references of oldSf and newSf.
18955 * @param[in] RgSchCellCb *cell
18956 * @param[in] RgSchUlSf *newSf
18957 * @param[in] RgSchUlSf *oldSf
18958 * @param[in] RgSchUlAlloc *srcAlloc
18961 static Void rgSCHCmnUlInsAllocFrmNewSf2OldSf
18966 RgSchUlAlloc *srcAlloc
18969 RgSchUlAlloc *alloc, *dstAlloc, *nxtAlloc;
18971 /* MS_WORKAROUND ccpu00120827 */
18972 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
/* Pass 1: evict every oldSf alloc that collides with srcAlloc's
 * subband span; the evicted ones are queued for adaptive retx. */
18975 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
18979 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
18980 /* If there is an overlap between alloc and srcAlloc
18981 * then alloc is marked for Adaptive retx and it is released
18983 if (rgSCHCmnUlAllocsOvrLap(alloc, srcAlloc) == TRUE)
18985 rgSCHCmnUlUpdAllocRetx(cell, alloc);
18986 rgSCHUtlUlAllocRls(oldSf, alloc);
18988 /* No further allocs spanning the srcAlloc subbands */
/* Allocs are ordered by subband, so once past srcAlloc's end we can stop */
18989 if (srcAlloc->sbStart + srcAlloc->numSb - 1 <= alloc->sbStart)
18993 } while ((alloc = nxtAlloc) != NULLP);
18996 /* After freeing all the colliding allocs, request for an allocation
18997 * specifying the start and numSb with in txSf. This function should
18998 * always return positively with a nonNULL dstAlloc */
18999 /* MS_WORKAROUND ccpu00120827 */
19000 remAllocs = schCmnCell->ul.maxAllocPerUlSf - *oldSf->allocCountRef;
19003 /* Fix : If oldSf already has max Allocs then release the
19004 * old RETX alloc to make space for new alloc of newSf.
19005 * newSf allocs(i.e new Msg3s) are given higher priority
19006 * over retx allocs. */
/* Pass 2 (only when oldSf is full): free a retx alloc to make room;
 * mrgdNewTxAlloc marks allocs already merged from newSf, which are kept. */
19007 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
19011 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
19012 if (!alloc->mrgdNewTxAlloc)
19014 /* If alloc is for RETX */
19015 /* TODO: Incase of this ad also in case of choosing
19016 * and alloc for ADAP RETX, we need to send ACK for
19017 * the corresponding alloc in PHICH */
19018 #ifndef EMTC_ENABLE
19019 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc);
19021 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc,FALSE);
19025 }while((alloc = nxtAlloc) != NULLP);
/* Pass 3: place srcAlloc's exact subband span into oldSf */
19028 dstAlloc = rgSCHUtlUlGetSpfcAlloc(oldSf, srcAlloc->sbStart, srcAlloc->numSb);
19030 /* This should never happen */
19031 if (dstAlloc == NULLP)
19033 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"CRNTI:%d "
19034 "rgSCHUtlUlGetSpfcAlloc failed in rgSCHCmnUlInsAllocFrmNewSf2OldSf",
19039 /* Copy the srcAlloc's state information in to dstAlloc */
19040 rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc);
19041 /* Set new Tx merged Alloc Flag to TRUE, indicating that this
19042 * alloc shall not be processed for non-adaptive retransmission */
19043 dstAlloc->mrgdNewTxAlloc = TRUE;
19047 * @brief Merge all allocations of newSf to oldSf.
19051 * Function : rgSCHCmnUlMergeSfAllocs
19053 * - Merge all allocations of newSf to oldSf.
19054 * - If newSf's alloc collides with oldSf's alloc
19055 * then oldSf's alloc is marked for adaptive Retx
19056 * and is released from oldSf to create space for
19059 * @param[in] RgSchCellCb *cell
19060 * @param[in] RgSchUlSf *oldSf
19061 * @param[in] RgSchUlSf *newSf
19064 static Void rgSCHCmnUlMergeSfAllocs(RgSchCellCb *cell,RgSchUlSf *oldSf,RgSchUlSf *newSf)
19066 RgSchUlAlloc *alloc, *nxtAlloc;
19069 /* Merge each alloc of newSf in to oldSf
19070 * and release it from newSf */
/* Iterate newSf's alloc list; fetch the next node before the current
 * alloc is released so the traversal survives the removal. */
19071 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
19075 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
19076 rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, alloc);
19077 rgSCHUtlUlAllocRls(newSf, alloc);
19078 } while((alloc = nxtAlloc) != NULLP);
19083 * @brief Swap Hole/Alloc DB context of newSf and oldSf.
19087 * Function : rgSCHCmnUlSwapSfAllocs
19089 * - Swap Hole/Alloc DB context of newSf and oldSf.
19091 * @param[in] RgSchCellCb *cell
19092 * @param[in] RgSchUlSf *oldSf
19093 * @param[in] RgSchUlSf *newSf
19096 static Void rgSCHCmnUlSwapSfAllocs(RgSchCellCb *cell,RgSchUlSf *oldSf,RgSchUlSf *newSf)
/* Classic three-step swap of the two subframes' resource databases */
19098 RgSchUlAllocDb *tempAllocDb = newSf->allocDb;
19099 RgSchUlHoleDb *tempHoleDb = newSf->holeDb;
19100 uint8_t tempAvailSbs = newSf->availSubbands;
19104 newSf->allocDb = oldSf->allocDb;
19105 newSf->holeDb = oldSf->holeDb;
19106 newSf->availSubbands = oldSf->availSubbands;
19108 oldSf->allocDb = tempAllocDb;
19109 oldSf->holeDb = tempHoleDb;
19110 oldSf->availSubbands = tempAvailSbs;
19112 /* Fix ccpu00120610*/
/* allocCountRef must track each subframe's own (now swapped) allocDb,
 * otherwise the cached count pointer would reference the other subframe. */
19113 newSf->allocCountRef = &newSf->allocDb->count;
19114 oldSf->allocCountRef = &oldSf->allocDb->count;
19118 * @brief Perform non-adaptive RETX for non-colliding allocs.
19122 * Function : rgSCHCmnUlPrcNonAdptRetx
19124 * - Perform non-adaptive RETX for non-colliding allocs.
19126 * @param[in] RgSchCellCb *cell
19127 * @param[in] RgSchUlSf *newSf
19128 * @param[in] uint8_t idx
19131 static Void rgSCHCmnUlPrcNonAdptRetx(RgSchCellCb *cell,RgSchUlSf *newSf,uint8_t idx)
19133 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19134 RgSchUlAlloc *alloc, *nxtAlloc;
19136 /* perform non-adaptive retx allocation(adjustment) */
19137 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
19141 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
19142 /* A merged new TX alloc, reset the state and skip */
/* Allocs merged from the new-TX subframe must not be retransmitted;
 * clear the one-shot flag and move on. */
19143 if (alloc->mrgdNewTxAlloc)
19145 alloc->mrgdNewTxAlloc = FALSE;
/* Remaining allocs kept their original resources: non-adaptive retx */
19150 rgSCHCmnUlNonadapRetx(cellUl, alloc, idx);
19152 } while((alloc = nxtAlloc) != NULLP);
19158 * @brief Update TX and RETX subframe's allocation
19163 * Function : rgSCHCmnUlPrfmSfMerge
19165 * - Release all preassigned allocations of newSf and merge
19167 * - If alloc of newSf collide with one or more allocs of oldSf
19168 * - mark all such allocs of oldSf for Adaptive Retx.
19169 * - Swap the alloc and hole DB references of oldSf and newSf.
19170 * - The allocs which did not collide with pre-assigned msg3
19171 * allocs are marked for non-adaptive RETX.
19173 * @param[in] RgSchCellCb *cell
19174 * @param[in] RgSchUlSf *oldSf
19175 * @param[in] RgSchUlSf *newSf
19176 * @param[in] uint8_t idx
19179 static Void rgSCHCmnUlPrfmSfMerge
19187 /* Preassigned resources for msg3 in newSf.
19188 * Hence do adaptive retx for all NACKED TXs */
/* Step 1: fold newSf's (msg3) allocs into oldSf, evicting colliders */
19189 rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf);
19190 /* swap alloc and hole DBs of oldSf and newSf. */
/* Step 2: after the swap, newSf owns the merged resource picture */
19191 rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf);
19192 /* Here newSf has the resultant merged allocs context */
19193 /* Perform non-adaptive RETX for non-colliding allocs */
/* Step 3: survivors (no collision) retransmit on their old resources */
19194 rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx);
19200 * @brief Update TX and RETX subframe's allocation
19205 * Function : rgSCHCmnUlRmvCmpltdAllocs
19207 * - Free all Transmission which are ACKED
19208 * OR for which MAX retransmission have
19212 * @param[in] RgSchCellCb *cell,
19213 * @param[in] RgSchUlSf *sf
19216 static Void rgSCHCmnUlRmvCmpltdAllocs(RgSchCellCb *cell,RgSchUlSf *sf)
19218 RgSchUlAlloc *alloc, *nxtAlloc;
19220 if ((alloc = rgSCHUtlUlAllocFirst(sf)) == NULLP)
19226 nxtAlloc = rgSCHUtlUlAllocNxt(sf, alloc);
19228 printf("rgSCHCmnUlRmvCmpltdAllocs:time(%d %d) alloc->hqProc->remTx %d hqProcId(%d) \n",cell->crntTime.sfn,cell->crntTime.slot,alloc->hqProc->remTx, alloc->grnt.hqProcId);
/* NOTE(review): forcing rcvdCrcInd TRUE here makes the very next check
 * always take the "completed" branch; presumably this line sits inside a
 * build-flag-specific section in the full source — confirm before relying
 * on the max-retx path below. */
19230 alloc->hqProc->rcvdCrcInd = TRUE;
/* Completed = CRC received (ACK) or retransmission budget exhausted */
19231 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
19234 /* SR_RACH_STATS : MSG 3 MAX RETX FAIL*/
19235 if ((alloc->forMsg3 == TRUE) && (alloc->hqProc->remTx == 0))
19237 rgNumMsg3FailMaxRetx++;
19239 cell->tenbStats->sch.msg3Fail++;
19243 #ifdef MAC_SCH_STATS
19244 if(alloc->ue != NULLP)
19246 /* access from ulHarqProc*/
19247 RgSchUeCb *ueCb = alloc->ue;
19248 RgSchCmnUe *cmnUe = (RgSchCmnUe*)ueCb->sch;
19249 RgSchCmnUlUe *ulUe = &(cmnUe->ul);
19250 uint8_t cqi = ulUe->crntUlCqi[0];
19251 uint16_t numUlRetx = ueCb->ul.hqEnt.maxHqRetx - alloc->hqProc->remTx;
19253 hqRetxStats.ulCqiStat[(cqi - 1)].mcs = alloc->grnt.iMcs;
/* Histogram of how many HARQ attempts each completed TX needed,
 * bucketed per reported CQI (1..4 retx counters). */
19258 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1++;
19261 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2++;
19264 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3++;
19267 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4++;
19270 hqRetxStats.ulCqiStat[(cqi - 1)].totalTx = \
19271 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1 + \
19272 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2 * 2) + \
19273 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3 * 3) + \
19274 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4 * 4);
19277 #endif /*MAC_SCH_STATS*/
19278 rgSCHCmnUlFreeAllocation(cell, sf, alloc);
19280 /*ccpu00106104 MOD added check for AckNackRep */
19281 /*added check for acknack so that adaptive retx considers ue
19282 inactivity due to ack nack repetition*/
/* Not completed: queue a dedicated UE's TX for adaptive retransmission
 * (msg3 retx is handled elsewhere, hence the forMsg3 exclusion). */
19283 else if((alloc->ue != NULLP) && (TRUE != alloc->forMsg3))
19285 rgSCHCmnUlUpdAllocRetx(cell, alloc);
19286 rgSCHUtlUlAllocRls(sf, alloc);
19288 } while ((alloc = nxtAlloc) != NULLP);
19294 * @brief Update an uplink subframe.
19298 * Function : rgSCHCmnRlsUlSf
19300 * For each allocation
19301 * - if no more tx needed
19302 * - Release allocation
19304 * - Perform retransmission
19306 * @param[in] RgSchUlSf *sf
19307 * @param[in] uint8_t idx
19310 Void rgSCHCmnRlsUlSf(RgSchCellCb *cell,uint8_t idx)
19313 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Only act when a HARQ-feedback subframe index is valid for this idx */
19314 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
19316 RgSchUlSf *oldSf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
19318 /* Initialize the reTxLst of UL HqProcs for RETX subframe */
/* An empty alloc list means nothing to release or retransmit */
19319 if (rgSCHUtlUlAllocFirst(oldSf) == NULLP)
19323 /* Release all completed TX allocs from sf */
19324 rgSCHCmnUlRmvCmpltdAllocs(cell, oldSf);
/* Reset the per-subframe aperiodic CQI budget for its next use */
19326 oldSf->numACqiCount = 0;
19332 * @brief Handle uplink allocation for retransmission.
19336 * Function : rgSCHCmnUlUpdAllocRetx
19338 * - Perform adaptive retransmission
19340 * @param[in] RgSchUlSf *sf
19341 * @param[in] RgSchUlAlloc *alloc
19344 static Void rgSCHCmnUlUpdAllocRetx(RgSchCellCb *cell,RgSchUlAlloc *alloc)
19346 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
/* Snapshot the grant parameters into the HARQ proc's reTxAlloc so the
 * adaptive retransmission can be rebuilt after this alloc is freed. */
19348 alloc->hqProc->reTxAlloc.rnti = alloc->rnti;
19349 alloc->hqProc->reTxAlloc.numSb = alloc->numSb;
19350 alloc->hqProc->reTxAlloc.iMcs = alloc->grnt.iMcs;
19352 alloc->hqProc->reTxAlloc.dciFrmt = alloc->grnt.dciFrmt;
19353 alloc->hqProc->reTxAlloc.numLyr = alloc->grnt.numLyr;
19354 alloc->hqProc->reTxAlloc.vrbgStart = alloc->grnt.vrbgStart;
19355 alloc->hqProc->reTxAlloc.numVrbg = alloc->grnt.numVrbg;
19356 alloc->hqProc->reTxAlloc.modOdr = alloc->grnt.modOdr;
19358 //iTbs = rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs);
19359 //iTbs = alloc->grnt.iMcs;
19360 //RGSCH_ARRAY_BOUND_CHECK( 0, rgTbSzTbl[0], iTbs);
/* TB size is carried over unchanged: a retx must use the original TBS */
19361 alloc->hqProc->reTxAlloc.tbSz = alloc->grnt.datSz;
19362 //rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1]/8;
19363 alloc->hqProc->reTxAlloc.ue = alloc->ue;
19364 alloc->hqProc->reTxAlloc.forMsg3 = alloc->forMsg3;
19365 alloc->hqProc->reTxAlloc.raCb = alloc->raCb;
19367 /* Set as retransmission is pending */
/* Detach the proc from the released alloc and mark retx pending */
19368 alloc->hqProc->isRetx = TRUE;
19369 alloc->hqProc->alloc = NULLP;
19370 alloc->hqProc->ulSfIdx = RGSCH_INVALID_INFO;
19372 printf("Adding Harq Proc Id in the retx list hqProcId %d \n",alloc->grnt.hqProcId);
/* Queue the proc on the cell-wide adaptive-retx list for rescheduling */
19374 cmLListAdd2Tail(&cmnUlCell->reTxLst, &alloc->hqProc->reTxLnk);
19375 alloc->hqProc->reTxLnk.node = (PTR)alloc->hqProc;
19380 * @brief Attempts allocation for msg3s for which ADAP retransmissions
19385 * Function : rgSCHCmnUlAdapRetxAlloc
19387 * Attempts allocation for msg3s for which ADAP retransmissions
19390 * @param[in] RgSchCellCb *cell
19391 * @param[in] RgSchUlSf *sf
19392 * @param[in] RgSchUlHqProcCb *proc;
19393 * @param[in] RgSchUlHole *hole;
19396 static Bool rgSCHCmnUlAdapRetxAlloc
19400 RgSchUlHqProcCb *proc,
19404 uint8_t numSb = proc->reTxAlloc.numSb;
19405 uint8_t iMcs = proc->reTxAlloc.iMcs;
19406 CmLteTimingInfo frm = cell->crntTime;
19407 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19410 RgSchUlAlloc *alloc;
19412 /* Fetch PDCCH for msg3 */
19413 /* ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
19414 /* Introduced timing delta for UL control */
/* Adaptive retx needs a DCI 0: reserve a common-space PDCCH in the DL
 * subframe that corresponds (by the UL control delta) to this UL sf. */
19415 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
19416 dlSf = rgSCHUtlSubFrmGet(cell, frm);
19417 pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
19418 if (pdcch == NULLP)
19423 /* Fetch UL Alloc for msg3 */
/* The retx needs the same number of subbands as the original TX */
19424 if (numSb <= hole->num)
19426 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
/* UL allocation failed: return the reserved PDCCH before bailing out */
19431 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
19432 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
19433 "UL Alloc fail for msg3 retx for rnti: %d\n",
19434 proc->reTxAlloc.rnti);
19438 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
/* Rebuild the grant from the parameters snapshotted at NACK time */
19439 alloc->grnt.iMcs = iMcs;
19440 alloc->grnt.datSz = proc->reTxAlloc.tbSz;
19443 //RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
19445 /* Fill UL Alloc for msg3 */
19446 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
19447 alloc->grnt.nDmrs = 0;
19448 alloc->grnt.hop = 0;
19449 alloc->grnt.delayBit = 0;
19450 alloc->grnt.isRtx = TRUE;
19451 proc->ulSfIdx = cellUl->schdIdx;
19453 proc->schdTime = cellUl->schdTime;
19454 alloc->grnt.hqProcId = proc->procId;
19455 alloc->grnt.dciFrmt = proc->reTxAlloc.dciFrmt;
19456 alloc->grnt.numLyr = proc->reTxAlloc.numLyr;
19457 alloc->grnt.vrbgStart = proc->reTxAlloc.vrbgStart;
19458 alloc->grnt.numVrbg = proc->reTxAlloc.numVrbg;
19459 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
19460 alloc->grnt.modOdr = proc->reTxAlloc.modOdr;
19462 /* TODO : Hardcoding these as of now */
19463 alloc->grnt.hop = 0;
19464 alloc->grnt.SCID = 0;
19465 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
19466 alloc->grnt.PMI = 0;
19467 alloc->grnt.uciOnxPUSCH = 0;
19469 alloc->rnti = proc->reTxAlloc.rnti;
19470 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
19471 alloc->ue = proc->reTxAlloc.ue;
19472 alloc->pdcch = pdcch;
19473 alloc->forMsg3 = proc->reTxAlloc.forMsg3;
19474 alloc->raCb = proc->reTxAlloc.raCb;
19475 alloc->hqProc = proc;
/* Adaptive retx: resources differ from the original, PDCCH is required */
19476 alloc->isAdaptive = TRUE;
19478 sf->totPrb += alloc->grnt.numRb;
19480 /* FIX : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* RACH-context (raCb) path: record the msg3 grant and the time at which
 * the UE will actually transmit msg3. */
19483 alloc->raCb->msg3Grnt= alloc->grnt;
19485 /* To the crntTime, add the time at which UE will
19486 * actually send MSG3 */
19487 alloc->raCb->msg3AllocTime = cell->crntTime;
19488 RGSCH_INCR_SUB_FRAME(alloc->raCb->msg3AllocTime, RG_SCH_CMN_MIN_RETXMSG3_RECP_INTRVL);
19490 alloc->raCb->msg3AllocTime = cellUl->schdTime;
19492 rgSCHCmnUlAdapRetx(alloc, proc);
19493 /* Fill PDCCH with alloc info */
/* Encode the rebuilt grant into DCI format 0 on the reserved PDCCH */
19494 pdcch->rnti = alloc->rnti;
19495 pdcch->dci.dciFormat = TFU_DCI_FORMAT_0;
19496 pdcch->dci.u.format0Info.hoppingEnbld = alloc->grnt.hop;
19497 pdcch->dci.u.format0Info.rbStart = alloc->grnt.rbStart;
19498 pdcch->dci.u.format0Info.numRb = alloc->grnt.numRb;
19499 pdcch->dci.u.format0Info.mcs = alloc->grnt.iMcsCrnt;
19500 pdcch->dci.u.format0Info.ndi = alloc->hqProc->ndi;
19501 pdcch->dci.u.format0Info.nDmrs = alloc->grnt.nDmrs;
19502 pdcch->dci.u.format0Info.tpcCmd = alloc->grnt.tpc;
19506 /* ulIdx setting for cfg 0 shall be appropriately fixed thru ccpu00109015 */
19507 pdcch->dci.u.format0Info.ulIdx = RG_SCH_ULIDX_MSB;
19508 pdcch->dci.u.format0Info.dai = RG_SCH_MAX_DAI_IDX;
19511 pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_0];
/* Dedicated-UE path: update the UE's UL context instead of raCb */
19515 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue,cell);
19517 alloc->ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
19520 ue->ul.nPrb = alloc->grnt.numRb;
19522 ueUl->alloc.alloc = alloc;
19523 /* FIx: Removed the call to rgSCHCmnUlAdapRetx */
19524 rgSCHCmnUlUeFillAllocInfo(cell, alloc->ue);
19525 /* Setting csireq as false for Adaptive Retx*/
19526 ueUl->alloc.alloc->pdcch->dci.u.format0Info.cqiReq = RG_SCH_APCQI_NO;
19527 pdcch->dciNumOfBits = alloc->ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
19529 /* Reset as retransmission is done */
19530 proc->isRetx = FALSE;
19532 else /* Intg fix */
/* Not enough contiguous subbands: give back the PDCCH and report failure */
19534 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
19535 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
19536 "Num SB not suffiecient for adap retx for rnti: %d",
19537 proc->reTxAlloc.rnti);
19543 /* Fix: syed Adaptive Msg3 Retx crash. */
19545 * @brief Releases all Adaptive Retx HqProcs which failed for
19546 * allocations in this scheduling occassion.
19550 * Function : rgSCHCmnUlSfRlsRetxProcs
19553 * @param[in] RgSchCellCb *cell
19554 * @param[in] RgSchUlSf *sf
19558 static Void rgSCHCmnUlSfRlsRetxProcs(RgSchCellCb *cell,RgSchUlSf *sf)
19562 RgSchUlHqProcCb *proc;
19563 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Drain the cell-wide adaptive-retx list: every proc still linked here
 * failed to get resources this occasion and is dequeued. */
19565 cp = &(cellUl->reTxLst);
19569 proc = (RgSchUlHqProcCb *)node->node;
19571 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
19572 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
/* Clear the link's back-pointer to guard against stale-node reuse */
19573 proc->reTxLnk.node = (PTR)NULLP;
19580 * @brief Attempts allocation for UEs for which retransmissions
19585 * Function : rgSCHCmnUlSfReTxAllocs
19587 * Attempts allocation for UEs for which retransmissions
19590 * @param[in] RgSchCellCb *cell
19591 * @param[in] RgSchUlSf *sf
19594 static Void rgSCHCmnUlSfReTxAllocs(RgSchCellCb *cell,RgSchUlSf *sf)
19598 RgSchUlHqProcCb *proc;
19601 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
19602 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19604 cp = &(cellUl->reTxLst);
19608 proc = (RgSchUlHqProcCb *)node->node;
19609 ue = proc->reTxAlloc.ue;
19611 /*ccpu00106104 MOD added check for AckNackRep */
19612 /*added check for acknack so that adaptive retx considers ue
19613 inactivity due to ack nack repetition*/
/* Skip UEs that are temporarily unschedulable (measurement gap or
 * ACK/NACK repetition); their procs stay queued for a later occasion. */
19614 if((ue != NULLP) &&
19615 ((ue->measGapCb.isMeasuring == TRUE)||
19616 (ue->ackNakRepCb.isAckNakRep == TRUE)))
19620 /* Fix for ccpu00123917: Check if maximum allocs per UL sf have been exhausted */
/* Stop entirely when the subframe has no hole left or its alloc budget
 * is exhausted — no later proc can be served either. */
19621 if (((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
19622 || (sf->allocDb->count == schCmnCell->ul.maxAllocPerUlSf))
19624 /* No more UL BW then return */
19627 /* perform adaptive retx for UE's */
19628 if (rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole) == FALSE)
/* Success: dequeue the proc from the adaptive-retx list */
19632 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
19633 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
19634 /* Fix: syed Adaptive Msg3 Retx crash. */
19635 proc->reTxLnk.node = (PTR)NULLP;
19641 * @brief Handles RB allocation for downlink.
19645 * Function : rgSCHCmnDlRbAlloc
19647 * Invoking Module Processing:
19648 * - This function is invoked for DL RB allocation
19650 * Processing Steps:
19651 * - If cell is frequency selecive,
19652 * - Call rgSCHDlfsAllocRb().
19654 * - Call rgSCHCmnNonDlfsRbAlloc().
19656 * @param[in] RgSchCellCb *cell
19657 * @param[in] RgSchDlRbAllocInfo *allocInfo
19661 static Void rgSCHCmnDlRbAlloc(RgSchCellCb *cell,RgSchCmnDlRbAllocInfo *allocInfo)
19663 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
19665 if (cellSch->dl.isDlFreqSel)
19667 printf("5GTF_ERROR DLFS SCH Enabled\n");
19668 cellSch->apisDlfs->rgSCHDlfsAllocRb(cell, allocInfo);
19672 rgSCHCmnNonDlfsRbAlloc(cell, allocInfo);
19680 * @brief Determines number of RBGs and RBG subset sizes for the given DL
19681 * bandwidth and rbgSize
19684 * Function : rgSCHCmnDlGetRbgInfo
19687 * Processing Steps:
19688 * - Fill-up rbgInfo data structure for given DL bandwidth and rbgSize
19690 * @param[in] uint8_t dlTotalBw
19691 * @param[in] uint8_t dlSubsetBw
19692 * @param[in] uint8_t maxRaType1SubsetBw
19693 * @param[in] uint8_t rbgSize
19694 * @param[out] RgSchBwRbgInfo *rbgInfo
19697 Void rgSCHCmnDlGetRbgInfo
19700 uint8_t dlSubsetBw,
19701 uint8_t maxRaType1SubsetBw,
19703 RgSchBwRbgInfo *rbgInfo
19706 #ifdef RGSCH_SPS_UNUSED
19708 uint8_t lastRbgIdx = ((dlTotalBw + rbgSize - 1)/rbgSize) - 1;
19709 uint8_t currRbgSize = rbgSize;
19710 uint8_t subsetSizeIdx = 0;
19711 uint8_t subsetSize[RG_SCH_NUM_RATYPE1_SUBSETS] = {0};
/* lastRbgSize here = rbgSize - (dlTotalBw mod rbgSize); used only by the
 * RA type 1 subset computation under RGSCH_SPS_UNUSED. */
19712 uint8_t lastRbgSize = rbgSize - (dlTotalBw - ((dlTotalBw/rbgSize) * rbgSize));
19713 uint8_t numRaType1Rbgs = (maxRaType1SubsetBw + rbgSize - 1)/rbgSize;
19716 /* Compute maximum number of SPS RBGs for the cell */
/* Ceiling division: number of RBGs covering dlSubsetBw */
19717 rbgInfo->numRbgs = ((dlSubsetBw + rbgSize - 1)/rbgSize);
19719 #ifdef RGSCH_SPS_UNUSED
19720 /* Distribute RBGs across subsets except last RBG */
/* Round-robin the RA type 1 RBGs over the subsets (modulo rbgSize) */
19721 for (;idx < numRaType1Rbgs - 1; ++idx)
19723 subsetSize[subsetSizeIdx] += currRbgSize;
19724 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
19727 /* Computation for last RBG */
/* The final RBG may be shorter when the bandwidth is not a multiple
 * of rbgSize. */
19728 if (idx == lastRbgIdx)
19730 currRbgSize = lastRbgSize;
19732 subsetSize[subsetSizeIdx] += currRbgSize;
19733 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
19736 /* Update the computed sizes */
19737 #ifdef RGSCH_SPS_UNUSED
19738 rbgInfo->lastRbgSize = currRbgSize;
/* rbgSize - (dlSubsetBw mod rbgSize); equals rbgSize for an exact fit */
19740 rbgInfo->lastRbgSize = rbgSize -
19741 (dlSubsetBw - ((dlSubsetBw/rbgSize) * rbgSize));
19742 #ifdef RGSCH_SPS_UNUSED
/* NOTE(review): literal 4 presumably equals RG_SCH_NUM_RATYPE1_SUBSETS —
 * confirm; using the macro would be safer. */
19743 memcpy(rbgInfo->rbgSubsetSize, subsetSize, 4 * sizeof(uint8_t));
/* Cap numRbs at dlTotalBw so a short last RBG is not over-counted */
19745 rbgInfo->numRbs = (rbgInfo->numRbgs * rbgSize > dlTotalBw) ?
19746 dlTotalBw:(rbgInfo->numRbgs * rbgSize);
19747 rbgInfo->rbgSize = rbgSize;
/**
 * @brief Handles RB allocation for Resource allocation type 0.
 *
 * Function : rgSCHCmnDlRaType0Alloc
 *
 * Invoking Module Processing:
 * - This function is invoked for DL RB allocation for resource allocation
 *   type 0.
 *
 * Processing Steps:
 * - Determine the available positions in the rbgMask.
 * - Allocate RBGs in the available positions.
 * - Update RA Type 0, RA Type 1 and RA type 2 masks.
 *
 * @param[in]  RgSchDlSfAllocInfo *allocedInfo   Allocations already made in this subframe.
 * @param[in]  uint8_t            rbsReq         Number of RBs requested.
 * @param[in]  RgSchBwRbgInfo     *rbgInfo       RBG layout (numRbgs, rbgSize, lastRbgSize).
 * @param[out] uint8_t            *numAllocRbs   Number of RBs allocated by this call.
 * @param[out] RgSchDlSfAllocInfo *resAllocInfo  Masks updated with this allocation.
 * @param[in]  Bool               isPartialAlloc TRUE: best-effort partial allocation allowed.
 * @return uint8_t Number of RBGs allocated (0 if nothing could be allocated).
 */
uint8_t rgSCHCmnDlRaType0Alloc
(
RgSchDlSfAllocInfo   *allocedInfo,
uint8_t              rbsReq,
RgSchBwRbgInfo       *rbgInfo,
uint8_t              *numAllocRbs,
RgSchDlSfAllocInfo   *resAllocInfo,
Bool                 isPartialAlloc
)
{
   /* Note: This function atttempts allocation only full allocation */
   uint32_t remNumRbs, rbgPosInRbgMask, ueRaType2Mask;
   uint8_t  type2MaskIdx, cnt, rbIdx;
   uint8_t  maskSize, rbg;
   uint8_t  bestNumAvailRbs = 0;
   uint8_t  usedRbs = 0;
   uint8_t  numAllocRbgs = 0;
   uint8_t  rbgSize = rbgInfo->rbgSize;
   uint32_t *rbgMask = &(resAllocInfo->raType0Mask);
#ifdef RGSCH_SPS_UNUSED
   uint8_t  rbgSubset;
   uint32_t ueRaType1Mask;
   uint32_t *raType1Mask = resAllocInfo->raType1Mask;
   uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
#endif
   uint32_t *raType2Mask = resAllocInfo->raType2Mask;

   uint32_t allocedMask = allocedInfo->raType0Mask;

   maskSize = rbgInfo->numRbgs;

   RG_SCH_CMN_DL_COUNT_ONES(allocedMask, maskSize, &usedRbs);
   if (maskSize == usedRbs)
   {
      /* All RBGs are allocated, including the last one */
      return (numAllocRbgs);
   }

   /* Free RBs among the full-size RBGs; the last RBG is excluded here
    * because it may be smaller (lastRbgSize) and is added separately below. */
   remNumRbs = (maskSize - usedRbs - 1) * rbgSize;

   /* If last RBG is available, add last RBG size */
   if (!(allocedMask & (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(maskSize - 1))))
   {
      remNumRbs += rbgInfo->lastRbgSize;
   }

   /* If complete allocation is needed, check if total requested RBs are
    * available; else take the best available RBs (capped at rbsReq). */
   if (!isPartialAlloc)
   {
      if (remNumRbs >= rbsReq)
      {
         bestNumAvailRbs = rbsReq;
      }
   }
   else
   {
      bestNumAvailRbs = remNumRbs > rbsReq ? rbsReq : remNumRbs;
   }

   /* Allocate for bestNumAvailRbs */
   if (bestNumAvailRbs)
   {
      /* Walk all full-size RBGs (all but the last) and grab free ones. */
      for (rbg = 0; rbg < maskSize - 1; ++rbg)
      {
         rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
         if (!(allocedMask & rbgPosInRbgMask))
         {
            /* Update RBG mask */
            *rbgMask |= rbgPosInRbgMask;

            /* Compute RB index of the first RB of the RBG allocated */
            rbIdx = rbg * rbgSize;

            for (cnt = 0; cnt < rbgSize; ++cnt)
            {
#ifdef RGSCH_SPS_UNUSED
               ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
#endif
               ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
#ifdef RGSCH_SPS_UNUSED
               /* Update RBG mask for RA type 1 */
               raType1Mask[rbgSubset] |= ueRaType1Mask;
               raType1UsedRbs[rbgSubset]++;
#endif
               /* Update RA type 2 mask */
               raType2Mask[type2MaskIdx] |= ueRaType2Mask;
               rbIdx++;
            }
            *numAllocRbs += rbgSize;
            remNumRbs -= rbgSize;
            ++numAllocRbgs;
            if (*numAllocRbs >= bestNumAvailRbs)
            {
               break;
            }
         }
      }

      /* If last RBG is available and allocation is not yet complete,
       * allocate the (possibly smaller) last RBG as well. */
      if (*numAllocRbs < bestNumAvailRbs)
      {
         rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
         *rbgMask |= rbgPosInRbgMask;
         *numAllocRbs += rbgInfo->lastRbgSize;

         /* Compute RB index of the first RB of the last RBG */
         rbIdx = ((rbgInfo->numRbgs - 1) * rbgSize);

         for (cnt = 0; cnt < rbgInfo->lastRbgSize; ++cnt)
         {
#ifdef RGSCH_SPS_UNUSED
            ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
#endif
            ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
#ifdef RGSCH_SPS_UNUSED
            /* Update RBG mask for RA type 1 */
            raType1Mask[rbgSubset] |= ueRaType1Mask;
            raType1UsedRbs[rbgSubset]++;
#endif
            /* Update RA type 2 mask */
            raType2Mask[type2MaskIdx] |= ueRaType2Mask;
            rbIdx++;
         }
         remNumRbs -= rbgInfo->lastRbgSize;
         ++numAllocRbgs;
         /* Note: this should complete the allocation; not checking for the
          * same */
      }
   }

   return (numAllocRbgs);
}
#ifdef RGSCH_SPS_UNUSED
/**
 * @brief Handles RB allocation for Resource allocation type 1.
 *
 * Function : rgSCHCmnDlRaType1Alloc
 *
 * Invoking Module Processing:
 * - This function is invoked for DL RB allocation for resource allocation
 *   type 1.
 *
 * Processing Steps:
 * - Determine the available positions in the subsets.
 * - Allocate RB in the available subset.
 * - Update RA Type1, RA type 0 and RA type 2 masks.
 *
 * @param[in]  RgSchDlSfAllocInfo *allocedInfo    Existing allocations in the SF.
 * @param[in]  uint8_t            rbsReq          Number of RBs requested.
 * @param[in]  RgSchBwRbgInfo     *rbgInfo        RBG layout and subset sizes.
 * @param[in]  uint8_t            startRbgSubset  Subset at which the search starts.
 * @param[out] uint8_t            *allocRbgSubset Subset in which allocation was made.
 * @param[out] RgSchDlSfAllocInfo *resAllocInfo   Masks updated with this allocation.
 * @param[in]  Bool               isPartialAlloc  TRUE: best-effort partial allocation allowed.
 * @return uint8_t Number of allocated RBs.
 */
uint8_t rgSCHCmnDlRaType1Alloc
(
RgSchDlSfAllocInfo   *allocedInfo,
uint8_t              rbsReq,
RgSchBwRbgInfo       *rbgInfo,
uint8_t              startRbgSubset,
uint8_t              *allocRbgSubset,
RgSchDlSfAllocInfo   *resAllocInfo,
Bool                 isPartialAlloc
)
{
   /* Note: This function atttempts only full allocation */
   uint8_t  *rbgSubsetSzArr;
   uint8_t  type2MaskIdx, subsetIdx, rbIdx, rbInSubset, rbgInSubset;
   uint8_t  offset, rbg, maskSize;
   uint8_t  bestSubsetIdx = 0;   /* initialized: only read when bestNumAvailRbs != 0 */
   uint8_t  startPos = 0;
   uint8_t  bestNumAvailRbs = 0;
   uint8_t  numAllocRbs = 0;
   uint32_t ueRaType2Mask, ueRaType0Mask, rbPosInSubset;
   uint32_t remNumRbs, allocedMask;
   uint8_t  usedRbs = 0;
   uint8_t  rbgSize = rbgInfo->rbgSize;
   uint8_t  rbgSubset = startRbgSubset;
   uint32_t *rbgMask = &resAllocInfo->raType0Mask;
   uint32_t *raType1Mask = resAllocInfo->raType1Mask;
   uint32_t *raType2Mask = resAllocInfo->raType2Mask;
   uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
   uint32_t *allocMask = allocedInfo->raType1Mask;

   /* Initialize the subset size Array */
   rbgSubsetSzArr = rbgInfo->rbgSubsetSize;

   /* Scan the subsets starting at startRbgSubset (wrapping around): take the
    * first subset with a full fit, else remember the best partial fit. */
   for (subsetIdx = 0; subsetIdx < rbgSize; ++subsetIdx)
   {
      allocedMask = allocMask[rbgSubset];
      maskSize = rbgSubsetSzArr[rbgSubset];

      /* Determine number of available RBs in the subset being examined.
       * FIX: read the used-RB count for rbgSubset — the subset whose mask and
       * size were just fetched — not for the loop counter subsetIdx. The two
       * indices differ whenever startRbgSubset != 0, which made remNumRbs
       * reflect the wrong subset's occupancy. */
      usedRbs = allocedInfo->raType1UsedRbs[rbgSubset];
      remNumRbs = maskSize - usedRbs;

      if (remNumRbs >= rbsReq)
      {
         bestNumAvailRbs = rbsReq;
         bestSubsetIdx = rbgSubset;
         break;
      }
      else if (isPartialAlloc && (remNumRbs > bestNumAvailRbs))
      {
         bestNumAvailRbs = remNumRbs;
         bestSubsetIdx = rbgSubset;
      }
      rbgSubset = (rbgSubset + 1) % rbgSize;
   } /* End of for (each rbgsubset) */

   if (bestNumAvailRbs)
   {
      /* Initialize alloced mask and subsetSize depending on the RBG
       * subset of allocation */
      uint8_t startIdx = 0;
      maskSize = rbgSubsetSzArr[bestSubsetIdx];
      allocedMask = allocMask[bestSubsetIdx];
      RG_SCH_CMN_DL_GET_START_POS(allocedMask, maskSize,
            &startPos);
      for (; startIdx < rbgSize; ++startIdx, ++startPos)
      {
         for (rbInSubset = startPos; rbInSubset < maskSize;
               rbInSubset = rbInSubset + rbgSize)
         {
            rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
            if (!(allocedMask & rbPosInSubset))
            {
               raType1Mask[bestSubsetIdx] |= rbPosInSubset;
               raType1UsedRbs[bestSubsetIdx]++;

               /* Compute RB index value for the RB being allocated */
               rbgInSubset = rbInSubset / rbgSize;
               offset = rbInSubset % rbgSize;
               rbg = (rbgInSubset * rbgSize) + bestSubsetIdx;
               rbIdx = (rbg * rbgSize) + offset;

               /* Update RBG mask for RA type 0 allocation */
               ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
               *rbgMask |= ueRaType0Mask;

               /* Update RA type 2 mask */
               ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
               raType2Mask[type2MaskIdx] |= ueRaType2Mask;

               /* Update the counters */
               ++numAllocRbs;
               if (numAllocRbs == bestNumAvailRbs)
               {
                  break;
               }
            }
         } /* End of for (each position in the subset mask) */
         if (numAllocRbs == bestNumAvailRbs)
         {
            break;
         }
      } /* End of for startIdx = 0 to rbgSize */
      *allocRbgSubset = bestSubsetIdx;
   } /* End of if (bestNumAvailRbs) */

   return (numAllocRbs);
}
#endif /* RGSCH_SPS_UNUSED */
/**
 * @brief Handles RB allocation for Resource allocation type 2.
 *
 * Function : rgSCHCmnDlRaType2Alloc
 *
 * Invoking Module Processing:
 * - This function is invoked for DL RB allocation for resource allocation
 *   type 2.
 *
 * Processing Steps:
 * - Determine the available positions in the mask.
 * - Allocate best fit cosecutive RBs.
 * - Update RA Type2, RA type 1 and RA type 0 masks.
 *
 * @param[in]  RgSchDlSfAllocInfo *allocedInfo   Existing allocations in the SF.
 * @param[in]  uint8_t            rbsReq         Number of contiguous RBs requested.
 * @param[in]  RgSchBwRbgInfo     *rbgInfo       RBG layout of the bandwidth.
 * @param[out] uint8_t            *rbStart       First RB of the allocated run.
 * @param[out] RgSchDlSfAllocInfo *resAllocInfo  Masks updated with this allocation.
 * @param[in]  Bool               isPartialAlloc TRUE: best-effort partial allocation allowed.
 * @return uint8_t Number of allocated RBs.
 */
uint8_t rgSCHCmnDlRaType2Alloc
(
RgSchDlSfAllocInfo   *allocedInfo,
uint8_t              rbsReq,
RgSchBwRbgInfo       *rbgInfo,
uint8_t              *rbStart,
RgSchDlSfAllocInfo   *resAllocInfo,
Bool                 isPartialAlloc
)
{
   uint8_t  numAllocRbs = 0;
   uint8_t  rbIdx;
   uint8_t  rbgSize = rbgInfo->rbgSize;
   uint32_t *rbgMask = &resAllocInfo->raType0Mask;
#ifdef RGSCH_SPS_UNUSED
   uint8_t  rbgSubset;
   uint32_t *raType1Mask = resAllocInfo->raType1Mask;
#endif
   uint32_t *raType2Mask = resAllocInfo->raType2Mask;
#ifdef RGSCH_SPS_UNUSED
   uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
#endif
   uint32_t *allocedMask = allocedInfo->raType2Mask;

   /* Note: This function atttempts only full allocation */
   /* Find a best-fit run of free RBs and mark it in raType2Mask;
    * numAllocRbs is 0 if nothing could be allocated. */
   rgSCHCmnDlGetBestFitHole(allocedMask, rbgInfo->numRbs,
         raType2Mask, rbsReq, rbStart, &numAllocRbs, isPartialAlloc);
   if (numAllocRbs)
   {
      /* Mirror the allocation into the RA type 0 (and RA type 1) masks so
       * that later type 0/1 allocators see these RBs as occupied. */
      uint8_t rbCnt = numAllocRbs;
#ifdef RGSCH_SPS_UNUSED
      uint32_t ueRaType1Mask;
#endif
      uint32_t ueRaType0Mask;
      rbIdx = *rbStart;

      while (rbCnt)
      {
         /* Update RBG mask for RA type 0 allocation */
         ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
         *rbgMask |= ueRaType0Mask;

#ifdef RGSCH_SPS_UNUSED
         /* Update RBG mask for RA type 1 */
         ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
         raType1Mask[rbgSubset] |= ueRaType1Mask;
         raType1UsedRbs[rbgSubset]++;
#endif
         /* Update the counters */
         --rbCnt;
         rbIdx++;
      }
   }

   return (numAllocRbs);
}
20140 * @brief Determines RA type 0 mask from given RB index.
20144 * Function : rgSCHCmnGetRaType0Mask
20147 * Processing Steps:
20148 * - Determine RA Type 0 mask for given rbIdex and rbg size.
20150 * @param[in] uint8_t rbIdx
20151 * @param[in] uint8_t rbgSize
20152 * @return uint32_t RA type 0 mask
20154 static uint32_t rgSCHCmnGetRaType0Mask(uint8_t rbIdx,uint8_t rbgSize)
20157 uint32_t rbgPosInRbgMask = 0;
20159 rbg = rbIdx/rbgSize;
20160 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
20162 return (rbgPosInRbgMask);
#ifdef RGSCH_SPS_UNUSED
/**
 * @brief Determines RA type 1 mask from given RB index.
 *
 * Function : rgSCHCmnGetRaType1Mask
 *
 * Processing Steps:
 * - Derive the RA type 1 subset owning the RB and the RB's bit position
 *   within that subset's mask.
 *
 * @param[in]  uint8_t rbIdx       Index of the RB within the bandwidth.
 * @param[in]  uint8_t rbgSize     Number of RBs per RBG.
 * @param[out] uint8_t *type1Subset Subset that the RB belongs to.
 * @return uint32_t RA type 1 mask with the RB's bit set.
 */
static uint32_t rgSCHCmnGetRaType1Mask(uint8_t rbIdx,uint8_t rbgSize,uint8_t *type1Subset)
{
   uint8_t rbgIdx = rbIdx / rbgSize;      /* owning RBG */
   uint8_t posInRbg = rbIdx % rbgSize;    /* offset of the RB inside its RBG */

   /* Subsets interleave the RBGs: subset = rbg % rbgSize, and consecutive
    * RBGs of one subset are packed back-to-back in the subset mask. */
   uint8_t rbWithinSubset = (rbgIdx / rbgSize) * rbgSize + posInRbg;

   *type1Subset = rbgIdx % rbgSize;
   return (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbWithinSubset));
}
#endif /* RGSCH_SPS_UNUSED */
20199 * @brief Determines RA type 2 mask from given RB index.
20203 * Function : rgSCHCmnGetRaType2Mask
20206 * Processing Steps:
20207 * - Determine RA Type 2 mask for given rbIdx and rbg size.
20209 * @param[in] uint8_t rbIdx
20210 * @param[out] uint8_t *maskIdx
20211 * @return uint32_t RA type 2 mask
20213 static uint32_t rgSCHCmnGetRaType2Mask(uint8_t rbIdx,uint8_t *maskIdx)
20215 uint32_t rbPosInType2;
20217 *maskIdx = rbIdx / 32;
20218 rbPosInType2 = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbIdx % 32);
20220 return (rbPosInType2);
/**
 * @brief Performs resource allocation for a non-SPS UE in SPS bandwidth.
 *
 * Function : rgSCHCmnAllocUeInSpsBw
 *
 * Processing Steps:
 * - Determine allocation for the UE.
 * - Use resource allocation type 0, 1 and 2 for allocation
 *   within maximum SPS bandwidth.
 *
 * @param[in] RgSchDlSf      *dlSf          Downlink subframe being scheduled.
 * @param[in] RgSchCellCb    *cell          Cell control block.
 * @param[in] RgSchUeCb      *ue            UE control block.
 * @param[in] RgSchDlRbAlloc *rbAllocInfo   RB allocation request/result.
 * @param[in] Bool           isPartialAlloc TRUE: partial allocation acceptable.
 * @return Bool TRUE  - scheduling of further UEs may continue;
 *              FALSE - SPS bandwidth exhausted or nothing to schedule.
 */
Bool rgSCHCmnAllocUeInSpsBw
(
RgSchDlSf            *dlSf,
RgSchCellCb          *cell,
RgSchUeCb            *ue,
RgSchDlRbAlloc       *rbAllocInfo,
Bool                 isPartialAlloc
)
{
   uint8_t            rbgSize = cell->rbgSize;
   uint8_t            numAllocRbs = 0;
   uint8_t            numAllocRbgs = 0;
   uint8_t            rbStart = 0;
   uint8_t            idx, noLyr, iTbs;
   RgSchCmnDlUe       *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
   RgSchDlSfAllocInfo *dlSfAlloc = &rbAllocInfo->dlSf->dlSfAllocInfo;
   RgSchBwRbgInfo     *spsRbgInfo = &cell->spsBwRbgInfo;

   /* SPS_FIX : Check if this Hq proc is scheduled */
   if ((0 == rbAllocInfo->tbInfo[0].schdlngForTb) &&
         (0 == rbAllocInfo->tbInfo[1].schdlngForTb))
   {
      return (FALSE);
   }

   /* Check if the requirement can be accomodated in SPS BW */
   if (dlSf->spsAllocdBw == spsRbgInfo->numRbs)
   {
      /* SPS Bandwidth has been exhausted: no further allocations possible */
      return (FALSE);
   }
   if (!isPartialAlloc)
   {
      if((dlSf->spsAllocdBw + rbAllocInfo->rbsReq) > spsRbgInfo->numRbs)
      {
         /* Full request cannot fit; allow other UEs to be tried */
         return (TRUE);
      }
   }

   /* Perform allocation for RA type 0 if rbsReq is multiple of RBG size (also
    * if RBG size = 1) */
   if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
   {
      /* Round the request up to a whole number of RBGs.
       * NOTE(review): when rbsReq is already a multiple of rbgSize this adds
       * a full extra RBG — verify against upstream intent. */
      rbAllocInfo->rbsReq += (rbgSize - rbAllocInfo->rbsReq % rbgSize);
      numAllocRbgs = rgSCHCmnDlRaType0Alloc(dlSfAlloc,
            rbAllocInfo->rbsReq, spsRbgInfo, &numAllocRbs,
            &rbAllocInfo->resAllocInfo, isPartialAlloc);
   }
#ifdef RGSCH_SPS_UNUSED
   else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
   {
      /* If no RBS could be allocated, attempt RA TYPE 1 */
      numAllocRbs = rgSCHCmnDlRaType1Alloc(dlSfAlloc,
            rbAllocInfo->rbsReq, spsRbgInfo, (uint8_t)dlSfAlloc->nxtRbgSubset,
            &rbAllocInfo->allocInfo.raType1.rbgSubset,
            &rbAllocInfo->resAllocInfo, isPartialAlloc);

      if (numAllocRbs)
      {
         /* Rotate the subset start so subsets are used round-robin */
         dlSfAlloc->nxtRbgSubset =
            (rbAllocInfo->allocInfo.raType1.rbgSubset + 1 ) % rbgSize;
      }
   }
#endif
   else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
   {
      numAllocRbs = rgSCHCmnDlRaType2Alloc(dlSfAlloc,
            rbAllocInfo->rbsReq, spsRbgInfo,
            &rbStart, &rbAllocInfo->resAllocInfo, isPartialAlloc);
   }

   /* Nothing could be carved out of the SPS bandwidth for this UE */
   if (!numAllocRbs)
   {
      return (TRUE);
   }

   if (!(rbAllocInfo->pdcch =
            rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi,\
               rbAllocInfo->dciFormat, FALSE)))
   {
      /* Note: Returning TRUE since PDCCH might be available for another UE */
      return (TRUE);
   }

   /* Update Tb info for each scheduled TB */
   iTbs = rbAllocInfo->tbInfo[0].iTbs;
   noLyr = rbAllocInfo->tbInfo[0].noLyr;
   rbAllocInfo->tbInfo[0].bytesAlloc =
      rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;

   if (rbAllocInfo->tbInfo[1].schdlngForTb)
   {
      iTbs = rbAllocInfo->tbInfo[1].iTbs;
      noLyr = rbAllocInfo->tbInfo[1].noLyr;
      rbAllocInfo->tbInfo[1].bytesAlloc =
         rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
   }

   /* Update rbAllocInfo with the allocation information */
   if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
   {
      rbAllocInfo->allocInfo.raType0.dlAllocBitMask =
         rbAllocInfo->resAllocInfo.raType0Mask;
      rbAllocInfo->allocInfo.raType0.numDlAlloc = numAllocRbgs;
   }
#ifdef RGSCH_SPS_UNUSED
   else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
   {
      rbAllocInfo->allocInfo.raType1.dlAllocBitMask =
         rbAllocInfo->resAllocInfo.raType1Mask[rbAllocInfo->allocInfo.raType1.rbgSubset];
      rbAllocInfo->allocInfo.raType1.numDlAlloc = numAllocRbs;
      rbAllocInfo->allocInfo.raType1.shift = 0;
   }
#endif
   else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
   {
      rbAllocInfo->allocInfo.raType2.isLocal = TRUE;
      rbAllocInfo->allocInfo.raType2.rbStart = rbStart;
      rbAllocInfo->allocInfo.raType2.numRb = numAllocRbs;
   }

   rbAllocInfo->rbsAlloc = numAllocRbs;
   rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;

   /* Update allocation masks for RA types 0, 1 and 2 in DL SF */

   /* Update type 0 allocation mask */
   dlSfAlloc->raType0Mask |= rbAllocInfo->resAllocInfo.raType0Mask;
#ifdef RGSCH_SPS_UNUSED
   /* Update type 1 allocation masks */
   for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
   {
      dlSfAlloc->raType1Mask[idx] |= rbAllocInfo->resAllocInfo.raType1Mask[idx];
      dlSfAlloc->raType1UsedRbs[idx] +=
         rbAllocInfo->resAllocInfo.raType1UsedRbs[idx];
   }
#endif
   /* Update type 2 allocation masks */
   for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
   {
      dlSfAlloc->raType2Mask[idx] |= rbAllocInfo->resAllocInfo.raType2Mask[idx];
   }

   /* Account the RBs consumed from the SPS bandwidth */
   dlSf->spsAllocdBw += numAllocRbs;
   return (TRUE);
}
/***********************************************************
 *
 *     Func : rgSCHCmnDlGetBestFitHole
 *
 *     Desc : Scans the RA type 2 allocation bitmask for the best-fit run
 *            ("hole") of free RBs, converts the chosen hole into an
 *            allocation in crntAllocMask and reports its start and length.
 *            Bits are kept MSB-first within each 32-bit word.
 *
 **********************************************************/
static Void rgSCHCmnDlGetBestFitHole
(
uint32_t       *allocMask,      /* existing RA type 2 allocation bitmask   */
uint8_t        numMaskRbs,      /* number of RBs covered by the mask       */
uint32_t       *crntAllocMask,  /* out: mask of the RBs allocated here     */
uint8_t        rbsReq,          /* requested run length in RBs             */
uint8_t        *allocStart,     /* out: first RB index of the allocation   */
uint8_t        *allocNumRbs,    /* out: number of RBs actually allocated   */
Bool           isPartialAlloc   /* TRUE: fall back to best partial hole    */
)
{
   uint8_t maskSz = (numMaskRbs + 31)/32;
   uint8_t maxMaskPos = (numMaskRbs % 32);
   uint8_t maskIdx, maskPos;
   uint8_t numAvailRbs = 0;
   uint8_t bestAvailNumRbs = 0;
   S8 bestStartPos = -1;
   S8 startPos = -1;
   uint32_t tmpMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
   uint32_t bestMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};

   *allocNumRbs = numAvailRbs;

   for (maskIdx = 0; maskIdx < maskSz; ++maskIdx)
   {
      /* NOTE(review): bit position 31 of each word is excluded by the
       * "< maxMaskPos" loop bound below — confirm against upstream. */
      maxMaskPos = 31;
      if (maskIdx == (maskSz - 1))
      {
         /* Last word may be only partially populated */
         if (numMaskRbs % 32)
         {
            maxMaskPos = numMaskRbs % 32;
         }
      }
      for (maskPos = 0; maskPos < maxMaskPos; ++maskPos)
      {
         if (!(allocMask[maskIdx] & (1 << (31 - maskPos))))
         {
            /* Free RB: extend the current hole */
            tmpMask[maskIdx] |= (1 << (31 - maskPos));
            if (startPos == -1)
            {
               startPos = maskIdx * 32 + maskPos;
            }
            ++numAvailRbs;
            if (numAvailRbs == rbsReq)
            {
               /* Exact fit found */
               *allocStart = (uint8_t)startPos;
               *allocNumRbs = rbsReq;
               break;
            }
         }
         else
         {
            /* Allocated RB ends the current hole: keep it if it is the
             * best partial candidate so far, then restart tracking. */
            if (numAvailRbs > bestAvailNumRbs)
            {
               bestAvailNumRbs = numAvailRbs;
               bestStartPos = startPos;
               /* NOTE(review): copies a fixed 4 words rather than
                * RG_SCH_NUM_RATYPE2_32BIT_MASK — verify the two agree. */
               memcpy(bestMask, tmpMask, 4 * sizeof(uint32_t));
            }
            numAvailRbs = 0;
            startPos = -1;
            memset(tmpMask, 0, 4 * sizeof(uint32_t));
         }
      }
      if (*allocNumRbs == rbsReq)
      {
         break;
      }
   }

   if (*allocNumRbs == rbsReq)
   {
      /* Convert the hole into allocation */
      memcpy(crntAllocMask, tmpMask, 4 * sizeof(uint32_t));
      return;
   }

   if (bestAvailNumRbs && isPartialAlloc)
   {
      /* Partial allocation could have been done */
      *allocStart = (uint8_t)bestStartPos;
      *allocNumRbs = bestAvailNumRbs;
      /* Convert the hole into allocation */
      memcpy(crntAllocMask, bestMask, 4 * sizeof(uint32_t));
   }
   return;
}
20499 #endif /* LTEMAC_SPS */
20501 /***************************************************************************
20503 * NON-DLFS Allocation functions
20505 * *************************************************************************/
20509 * @brief Function to find out code rate
20513 * Function : rgSCHCmnFindCodeRate
20515 * Processing Steps:
20517 * @param[in] RgSchCellCb *cell
20518 * @param[in] RgSchDlSf *dlSf
20519 * @param[in,out] RgSchDlRbAlloc *allocInfo
20523 static Void rgSCHCmnFindCodeRate
20527 RgSchDlRbAlloc *allocInfo,
20536 /* Adjust the Imcs and bytes allocated also with respect to the adjusted
20537 RBs - Here we will find out the Imcs by identifying first Highest
20538 number of bits compared to the original bytes allocated. */
/**
 * @brief Adjust IMCS according to tbSize and ITBS.
 *
 * Function : rgSCHCmnNonDlfsPbchTbImcsAdj
 *
 * Processing Steps:
 * - Adjust Imcs according to tbSize and ITBS: find the highest iTbs whose
 *   TB size for the adjusted RB count does not exceed the bytes originally
 *   granted for rbsReq RBs, then re-derive the MCS from it.
 *
 * @param[in]     RgSchCellCb    *cell      Cell control block.
 * @param[in,out] RgSchDlRbAlloc *allocInfo Allocation whose TB info is adjusted.
 * @param[in]     uint8_t        idx        TB index (0 or 1).
 * @param[in]     uint8_t        rbsReq     Original (pre-adjustment) RB request.
 */
static Void rgSCHCmnNonDlfsPbchTbImcsAdj
(
RgSchCellCb    *cell,
RgSchDlRbAlloc *allocInfo,
uint8_t        idx,
uint8_t        rbsReq
)
{
   uint8_t   noLyrs = 0;
   uint8_t   tbs = 0;
   uint32_t  origBytesReq;
   uint8_t   noRbgs = 0;
   uint8_t   noRbs = 0;
   RgSchDlSf *dlSf = allocInfo->dlSf;

   RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
   noLyrs = allocInfo->tbInfo[idx].noLyr;

   if((allocInfo->raType == RG_SCH_CMN_RA_TYPE0))
   {
      /* RA type 0 grants whole RBGs; account for the last-RBG deficit */
      noRbgs = RGSCH_CEIL((allocInfo->rbsReq + dlSf->lstRbgDfct), cell->rbgSize);
      noRbs = (noRbgs * cell->rbgSize) - dlSf->lstRbgDfct;
   }
   else
   {
      noRbs = allocInfo->rbsReq;
   }

   /* This line will help in case if tbs is zero and reduction in MCS is not possible */
   if (allocInfo->rbsReq == 0 )
   {
      return;
   }
   origBytesReq = rgTbSzTbl[noLyrs - 1][tbs][rbsReq - 1]/8;

   /* Find out the ITbs & Imcs by identifying first Highest
      number of bits compared to the original bytes allocated.*/
   if(((rgTbSzTbl[noLyrs - 1][0][noRbs - 1])/8) < origBytesReq)
   {
      RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyrs - 1], tbs);
      /* Step iTbs down until the TB size no longer exceeds the original grant */
      while(((rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1])/8) > origBytesReq)
      {
         tbs--;
      }
   }
   else
   {
      /* Even the smallest iTbs meets the original grant */
      tbs = 0;
   }
   allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1]/8;
   allocInfo->tbInfo[idx].iTbs = tbs;
   RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
   return;
}
/* Added funcion to adjust TBSize*/
/**
 * @brief Function to adjust the tbsize in case of subframe 0 & 5 when
 *        we were not able to do RB alloc adjustment by adding extra required Rbs.
 *
 * Function : rgSCHCmnNonDlfsPbchTbSizeAdj
 *
 * Processing Steps:
 * - Deduct the bytes lost to PBCH/PSS/SSS symbols in the overlapping RBs
 *   from the granted TB size, then find the highest iTbs whose TB size
 *   fits within the reduced byte budget.
 *
 * @param[in,out] RgSchDlRbAlloc *allocInfo       Allocation whose TB info is adjusted.
 * @param[in]     uint8_t        numOvrlapgPbchRb RBs overlapping the PBCH region.
 * @param[in]     uint8_t        pbchSsRsSym      Symbols lost to PBCH/PSS/SSS/RS per RB.
 * @param[in]     uint8_t        idx              TB index (0 or 1).
 * @param[in]     uint32_t       bytesReq         Bytes granted before adjustment.
 */
static Void rgSCHCmnNonDlfsPbchTbSizeAdj
(
RgSchDlRbAlloc   *allocInfo,
uint8_t          numOvrlapgPbchRb,
uint8_t          pbchSsRsSym,
uint8_t          idx,
uint32_t         bytesReq
)
{
   uint8_t  tbs = 0;
   uint32_t reducedTbs = 0;
   uint8_t  noLyrs = 0;

   noLyrs = allocInfo->tbInfo[idx].noLyr;

   RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);

   /* Bytes remaining after the REs reserved in the overlapping RBs
    * (6 subcarriers' worth per lost symbol, converted bits->bytes) */
   reducedTbs = bytesReq - (((uint32_t)numOvrlapgPbchRb * (uint32_t)pbchSsRsSym * 6)/8);

   /* find out the ITbs & Imcs by identifying first Highest
      number of bits compared with reduced bits considering the bits that are
      reserved for PBCH/PSS/SSS */
   if(((rgTbSzTbl[noLyrs - 1][0][allocInfo->rbsReq - 1])/8) < reducedTbs)
   {
      /* Step iTbs down until the TB size fits the reduced budget */
      while(((rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1])/8) > reducedTbs)
      {
         tbs--;
      }
   }
   else
   {
      /* Even the smallest iTbs fits */
      tbs = 0;
   }
   allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1]/8;
   allocInfo->tbInfo[idx].iTbs = tbs;
   RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
   return;
}
20668 /* Added this function to find num of ovrlapping PBCH rb*/
20670 * @brief Function to find out how many additional rbs are available
20671 * in the entire bw which can be allocated to a UE
20674 * Function : rgSCHCmnFindNumAddtlRbsAvl
20676 * Processing Steps:
20677 * - Calculates number of additinal rbs available
20679 * @param[in] RgSchCellCb *cell
20680 * @param[in] RgSchDlSf *dlSf
20681 * @param[in,out] RgSchDlRbAlloc *allocInfo
20682 * @param[out] uint8_t addtlRbsAvl
20685 static uint8_t rgSCHCmnFindNumAddtlRbsAvl(RgSchCellCb *cell,RgSchDlSf *dlSf,RgSchDlRbAlloc *allocInfo)
20687 uint8_t addtlRbsAvl = 0;
20688 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
20690 addtlRbsAvl = (((dlSf->type0End - dlSf->type2End + 1)*\
20691 cell->rbgSize) - dlSf->lstRbgDfct) - allocInfo->rbsReq;
20693 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
20695 addtlRbsAvl = (dlSf->bw - dlSf->bwAlloced) - allocInfo->rbsReq;
20698 return (addtlRbsAvl);
/* Added this function to find num of ovrlapping PBCH rb*/
/**
 * @brief Function to find out how many of the requested RBs are
 *        falling in the center 6 RBs of the downlink bandwidth.
 *
 * Function : rgSCHCmnFindNumPbchOvrlapRbs
 *
 * Processing Steps:
 * - Calculates number of overlapping rbs, assuming RBs are consumed
 *   contiguously from dlSf->bwAlloced upward.
 *
 * @param[in]  RgSchCellCb    *cell             Cell control block (pbchRbStart/pbchRbEnd).
 * @param[in]  RgSchDlSf      *dlSf             Downlink subframe (bwAlloced).
 * @param[in]  RgSchDlRbAlloc *allocInfo        Current allocation request (rbsReq).
 * @param[out] uint8_t        *numOvrlapgPbchRb RBs of the request inside the PBCH region.
 */
static Void rgSCHCmnFindNumPbchOvrlapRbs
(
RgSchCellCb    *cell,
RgSchDlSf      *dlSf,
RgSchDlRbAlloc *allocInfo,
uint8_t        *numOvrlapgPbchRb
)
{
   *numOvrlapgPbchRb = 0;
   /*Find if we have already crossed the start boundary for PBCH 6 RBs,
    * if yes then lets find the number of RBs which are getting overlapped
    * with this allocation.*/
   if(dlSf->bwAlloced <= (cell->pbchRbStart))
   {
      /*We have not crossed the start boundary of PBCH RBs. Now we need
       * to know that if take this allocation then how much PBCH RBs
       * are overlapping with this allocation.*/
      /* Find out the overlapping RBs in the centre 6 RBs */
      if((dlSf->bwAlloced + allocInfo->rbsReq) > cell->pbchRbStart)
      {
         *numOvrlapgPbchRb = (dlSf->bwAlloced + allocInfo->rbsReq) - (cell->pbchRbStart);
         /* The PBCH region is at most 6 RBs wide; clamp the overlap */
         if(*numOvrlapgPbchRb > 6)
            *numOvrlapgPbchRb = 6;
      }
   }
   else if ((dlSf->bwAlloced > (cell->pbchRbStart)) &&
         (dlSf->bwAlloced < (cell->pbchRbEnd)))
   {
      /*We have already crossed the start boundary of PBCH RBs.We need to
       * find that if we take this allocation then how much of the RBs for
       * this allocation will overlap with PBCH RBs.*/
      /* Find out the overlapping RBs in the centre 6 RBs */
      if(dlSf->bwAlloced + allocInfo->rbsReq < (cell->pbchRbEnd))
      {
         /*If we take this allocation then also we are not crossing the
          * end boundary of PBCH 6 RBs.*/
         *numOvrlapgPbchRb = allocInfo->rbsReq;
      }
      else
      {
         /*If we take this allocation then we are crossing the
          * end boundary of PBCH 6 RBs.*/
         *numOvrlapgPbchRb = (cell->pbchRbEnd) - dlSf->bwAlloced;
      }
   }
   return;
}
/**
 * @brief Performs RB allocation adjustment if the requested RBs are
 *        falling in the center 6 RBs of the downlink bandwidth.
 *
 * Function : rgSCHCmnNonDlfsPbchRbAllocAdj
 *
 * Processing Steps:
 * - Compute how many requested RBs overlap the PBCH/PSS/SSS region and add
 *   extra RBs to compensate for the symbols lost there; if extra RBs cannot
 *   be added, compensate by reducing the TB size or the IMCS instead.
 *
 * @param[in]     RgSchCellCb    *cell       Cell control block.
 * @param[in,out] RgSchDlRbAlloc *allocInfo  Allocation being adjusted.
 * @param[in]     uint8_t        pbchSsRsSym Symbols lost to PBCH/PSS/SSS/RS per RB.
 * @param[in]     Bool           isBcchPcch  TRUE for BCCH/PCCH: only RBs adjusted, no TB/MCS change.
 */
static Void rgSCHCmnNonDlfsPbchRbAllocAdj
(
RgSchCellCb    *cell,
RgSchDlRbAlloc *allocInfo,
uint8_t        pbchSsRsSym,
Bool           isBcchPcch
)
{
   RgSchDlSf *dlSf = allocInfo->dlSf;
   uint8_t   numOvrlapgPbchRb = 0;
   uint8_t   numOvrlapgAdtlPbchRb = 0;
   uint8_t   totSym;
   uint8_t   addtlRbsReq = 0;
   uint8_t   moreAddtlRbsReq = 0;
   uint8_t   addtlRbsAdd = 0;
   uint8_t   moreAddtlRbsAdd = 0;
   uint8_t   tbs;
   uint8_t   origRbsReq = 0;
   uint8_t   divResult;
   uint8_t   noLyr;
   uint32_t  bytesReq;

   origRbsReq = allocInfo->rbsReq;
   rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);

   totSym = (cell->isCpDlExtend) ? RGSCH_TOT_NUM_SYM_EXTCP : RGSCH_TOT_NUM_SYM_NORCP;

   /* Additional RBs are allocated by considering the loss due to
      the reserved symbols for CFICH, PBCH, PSS, SSS and cell specific RS */
   divResult = (numOvrlapgPbchRb * pbchSsRsSym)/totSym;
   if((numOvrlapgPbchRb * pbchSsRsSym) % totSym)
   {
      /* Round the compensation up */
      divResult++;
   }
   addtlRbsReq = divResult;

   RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, addtlRbsReq, addtlRbsAdd)

   /*Now RBs requires is original requested RBs + these additional RBs to make
    * up for PSS/SSS/BCCH.*/
   allocInfo->rbsReq = allocInfo->rbsReq + addtlRbsAdd;

   /*Check if with these additional RBs we have taken up, these are also falling
    * under PBCH RBs range, if yes then we would need to account for
    * PSS/BSS/BCCH for these additional RBs too.*/
   if(addtlRbsAdd && ((dlSf->bwAlloced + allocInfo->rbsReq - addtlRbsAdd) < (cell->pbchRbEnd)))
   {
      if((dlSf->bwAlloced + allocInfo->rbsReq) <= (cell->pbchRbEnd))
      {
         /*With additional RBs taken into account, we are not crossing the
          * PBCH RB end boundary.Thus here we need to account just for
          * overlapping PBCH RBs for these additonal RBs.*/
         divResult = (addtlRbsAdd * pbchSsRsSym)/totSym;
         if((addtlRbsAdd * pbchSsRsSym) % totSym)
         {
            divResult++;
         }
         moreAddtlRbsReq = divResult;

         RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)

         allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
      }
      else
      {
         /*Here we have crossed the PBCH RB end boundary, thus we need to take
          * into account the overlapping RBs for additional RBs which will be
          * subset of addtlRbs.*/
         numOvrlapgAdtlPbchRb = (cell->pbchRbEnd) - ((dlSf->bwAlloced + allocInfo->rbsReq) - addtlRbsAdd);

         divResult = (numOvrlapgAdtlPbchRb * pbchSsRsSym)/totSym;
         if((numOvrlapgAdtlPbchRb * pbchSsRsSym) % totSym)
         {
            divResult++;
         }
         moreAddtlRbsReq = divResult;

         RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)

         allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
      }
   }

   /* For BCCH/PCCH only the RB count is compensated; TB/MCS stay fixed */
   if (isBcchPcch == TRUE)
   {
      return;
   }

   RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
   if(!tbs)
   {
      /* This case might be for Imcs value 6 and NPrb = 1 case - Not
         Adjusting either RBs or Imcs or Bytes Allocated */
      allocInfo->rbsReq = allocInfo->rbsReq - addtlRbsAdd - moreAddtlRbsAdd;
   }
   else if(tbs && ((0 == addtlRbsAdd) && (moreAddtlRbsAdd == 0)))
   {
      /*In case of a situation where we the entire bandwidth is already occupied
       * and we dont have room to add additional Rbs then in order to decrease the
       * code rate we reduce the tbsize such that we reduce the present calculated
       * tbsize by number of bytes that would be occupied by PBCH/PSS/SSS in overlapping
       * rbs and find the nearest tbsize which would be less than this deduced value*/
      rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);

      noLyr = allocInfo->tbInfo[0].noLyr;
      RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyr - 1], tbs);
      bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;

      rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,bytesReq);

      if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
      {
         noLyr = allocInfo->tbInfo[1].noLyr;
         bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
         rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,bytesReq);
      }
   }
   else if(tbs && ((addtlRbsAdd != addtlRbsReq) ||
            (addtlRbsAdd && (moreAddtlRbsReq != moreAddtlRbsAdd))))
   {
      /*In case of a situation where we were not able to add required number of
       * additional RBs then we adjust the Imcs based on original RBs requested.
       * Doing this would comensate for the few extra Rbs we have added but inorder
       * to comensate for number of RBS we couldnt add we again do the TBSize adjustment*/
      rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);

      if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
      {
         rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
      }

      rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
      /* Only the uncompensated overlap still needs TB-size reduction */
      numOvrlapgPbchRb = numOvrlapgPbchRb - (addtlRbsAdd + moreAddtlRbsAdd);

      rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,allocInfo->tbInfo[0].bytesReq);

      if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
      {
         rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,allocInfo->tbInfo[1].bytesReq);
      }
   }
   else
   {
      /*We hit this code when we were able to add the required additional RBS
       * hence we should adjust the IMcs based on orignals RBs requested*/
      rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);

      if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
      {
         rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
      }
   }
   return;
} /* end of rgSCHCmnNonDlfsPbchRbAllocAdj */
20950 * @brief Performs RB allocation for frequency non-selective cell.
20954 * Function : rgSCHCmnNonDlfsCmnRbAlloc
20956 * Processing Steps:
20957 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
20959 * @param[in] RgSchCellCb *cell
20960 * @param[in, out] RgSchDlRbAlloc *allocInfo
20965 static S16 rgSCHCmnNonDlfsCmnRbAlloc(RgSchCellCb *cell,RgSchDlRbAlloc *allocInfo)
20970 uint8_t pbchSsRsSym = 0;
20971 uint8_t pbchFrame = 0;
20973 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
20975 RgSchDlSf *dlSf = allocInfo->dlSf;
20977 uint8_t rbStart = 0;
20978 uint8_t spsRbsAlloc = 0;
20979 RgSchDlSfAllocInfo *dlSfAlloc = &allocInfo->dlSf->dlSfAllocInfo;
20982 allocInfo->tbInfo[0].noLyr = 1;
20985 /* Note: Initialize the masks to 0, this might not be needed since alloInfo
20986 * is initialized to 0 at the beginning of allcoation */
20987 allocInfo->resAllocInfo.raType0Mask = 0;
20988 memset(allocInfo->resAllocInfo.raType1Mask, 0,
20989 RG_SCH_NUM_RATYPE1_32BIT_MASK * sizeof (uint32_t));
20990 memset(allocInfo->resAllocInfo.raType2Mask, 0,
20991 RG_SCH_NUM_RATYPE2_32BIT_MASK * sizeof (uint32_t));
20993 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
20994 (dlSf->bwAlloced == dlSf->bw))
20996 if(dlSf->bwAlloced == dlSf->bw)
21002 if (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced))
21005 if ((allocInfo->tbInfo[0].imcs < 29) && (dlSf->bwAlloced < dlSf->bw))
21007 if(allocInfo->tbInfo[0].imcs < 29)
21010 /* set the remaining RBs for the requested UE */
21011 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
21012 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21013 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[0][tbs][allocInfo->rbsReq - 1]/8;
21018 /* Attempt RA Type 2 allocation in SPS Bandwidth */
21019 if (dlSf->spsAllocdBw < cell->spsBwRbgInfo.numRbs)
21022 rgSCHCmnDlRaType2Alloc(dlSfAlloc,
21023 allocInfo->rbsReq, &cell->spsBwRbgInfo, &rbStart,
21024 &allocInfo->resAllocInfo, FALSE);
21025 /* rbsAlloc assignment moved from line 16671 to here to avoid
21026 * compilation error. Recheck */
21027 dlSf->spsAllocdBw += spsRbsAlloc;
21030 #endif /* LTEMAC_SPS */
21038 /* Update allocation information */
21039 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
21040 if (allocInfo->pdcch == NULLP)
21044 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
21045 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
21046 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
21047 allocInfo->allocInfo.raType2.isLocal = TRUE;
21051 allocInfo->allocInfo.raType2.rbStart = rbStart;
21052 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21053 allocInfo->rbsAlloc = allocInfo->rbsReq;
21064 if(!(dlSf->sfNum == 5))
21066 /* case for subframes 1 to 9 except 5 */
21068 allocInfo->allocInfo.raType2.rbStart = rbStart;
21070 /*Fix for ccpu00123918*/
21071 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21076 pbchFrame = 1; /* case for subframe 5 */
21077 /* In subframe 5, symbols are reserved for PSS and SSS and CFICH
21078 and Cell Specific Reference Signals */
21079 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PSS_SSS_SYM) *
21080 RGSCH_NUM_SC_IN_RB + cell->numCellRSPerSf);
21086 /* In subframe 0, symbols are reserved for PSS, SSS, PBCH, CFICH and
21087 and Cell Specific Reference signals */
21088 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PBCH_SYM +
21089 RGSCH_NUM_PSS_SSS_SYM) * RGSCH_NUM_SC_IN_RB +
21090 cell->numCellRSPerSf);
21091 } /* end of outer else */
21094 (((dlSf->bwAlloced + allocInfo->rbsReq) - cell->pbchRbStart) > 0)&&
21095 (dlSf->bwAlloced < cell->pbchRbEnd))
21097 if(allocInfo->tbInfo[0].imcs < 29)
21099 rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo, pbchSsRsSym, TRUE);
21111 /*Fix for ccpu00123918*/
21112 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21113 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21114 allocInfo->rbsAlloc = allocInfo->rbsReq;
21116 /* LTE_ADV_FLAG_REMOVED_START */
21118 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
21120 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
21121 allocInfo->allocInfo.raType2.rbStart, \
21122 allocInfo->allocInfo.raType2.numRb);
21127 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
21128 allocInfo->allocInfo.raType2.rbStart, \
21129 allocInfo->allocInfo.raType2.numRb);
21135 /* LTE_ADV_FLAG_REMOVED_END */
21136 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21143 /* Update type 0, 1 and 2 masks */
21144 dlSfAlloc->raType0Mask |= allocInfo->resAllocInfo.raType0Mask;
21145 #ifdef RGSCH_SPS_UNUSED
21146 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
21148 dlSfAlloc->raType1Mask[idx] |=
21149 allocInfo->resAllocInfo.raType1Mask[idx];
21150 dlSfAlloc->raType1UsedRbs[idx] +=
21151 allocInfo->resAllocInfo.raType1UsedRbs[idx];
21154 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
21156 dlSfAlloc->raType2Mask[idx] |=
21157 allocInfo->resAllocInfo.raType2Mask[idx];
21167 * @brief Performs RB allocation for frequency non-selective cell.
21171 * Function : rgSCHCmnNonDlfsCmnRbAllocRar
21173 * Processing Steps:
21174 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
21176 * @param[in] RgSchCellCb *cell
21177 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* RB allocation for RAR on a frequency non-selective cell.
 * Legacy LTE path: localized RA type 2 grant with DCI format 1A.
 * 5GTF path (elided #ifdef): VRBG-based grant with DCI format B1.
 * @param[in]     cell      Cell control block.
 * @param[in,out] allocInfo Allocation request; grant fields filled on success.
 * @return S16 (ROK/RFAILED by file convention; failure paths elided here). */
21182 static S16 rgSCHCmnNonDlfsCmnRbAllocRar(RgSchCellCb *cell,RgSchDlRbAlloc *allocInfo)
21184 RgSchDlSf *dlSf = allocInfo->dlSf;
/* Nothing left in this subframe: fail early */
21186 if(dlSf->bwAlloced == dlSf->bw)
21191 allocInfo->tbInfo[0].noLyr = 1;
21193 /* Update allocation information */
21194 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
21195 if (allocInfo->pdcch == NULLP)
21199 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
21200 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
21201 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
21202 allocInfo->allocInfo.raType2.isLocal = TRUE;
21204 /*Fix for ccpu00123918*/
/* Grant starts at the current type-2 pivot of the subframe */
21205 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
21206 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
21207 allocInfo->rbsAlloc = allocInfo->rbsReq;
21209 /* LTE_ADV_FLAG_REMOVED_END */
21210 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* ---- 5GTF variant below: beam 0 VRBG allocation with DCI B1 ---- */
21213 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, NULLP, dlSf, 13, TFU_DCI_FORMAT_B1, FALSE);
21214 if (allocInfo->pdcch == NULLP)
21218 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
/* All VRBGs of the beam already consumed */
21219 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
21221 printf("5GTF_ERROR vrbg allocated > 25\n");
21225 allocInfo->tbInfo[0].cmnGrnt.vrbgStart = beamInfo->vrbgStart;
21226 allocInfo->tbInfo[0].cmnGrnt.numVrbg = allocInfo->vrbgReq;
21228 /* Update allocation information */
21229 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
21231 allocInfo->tbInfo[0].cmnGrnt.xPDSCHRange = 1;
/* Encode start/length as an RIV over MAX_5GTF_VRBG groups */
21232 allocInfo->tbInfo[0].cmnGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
21233 allocInfo->tbInfo[0].cmnGrnt.vrbgStart, allocInfo->tbInfo[0].cmnGrnt.numVrbg);
21235 allocInfo->tbInfo[0].cmnGrnt.rbStrt = (allocInfo->tbInfo[0].cmnGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
21236 allocInfo->tbInfo[0].cmnGrnt.numRb = (allocInfo->tbInfo[0].cmnGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG cursor past this grant */
21238 beamInfo->vrbgStart += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
21239 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
21240 allocInfo->tbInfo[0].cmnGrnt.rv = 0;
21241 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
21244 printf("\n[%s],allocInfo->tbInfo[0].bytesAlloc:%u,vrbgReq:%u\n",
21245 __func__,allocInfo->tbInfo[0].bytesAlloc,allocInfo->vrbgReq);
21251 /* LTE_ADV_FLAG_REMOVED_START */
21254 * @brief To check if DL BW available for non-DLFS allocation.
21258 * Function : rgSCHCmnNonDlfsSFRBwAvlbl
21260 * Processing Steps:
21261 * - Determine availability based on RA Type.
21263 * @param[in] RgSchCellCb *cell
21264 * @param[in] RgSchDlSf *dlSf
21265 * @param[in] RgSchDlRbAlloc *allocInfo
/* Checks whether enough DL bandwidth is available in the SFR (soft
 * frequency reuse) power pools for a non-DLFS allocation, and selects the
 * pool to allocate from.
 * @param[out]    sfrpoolInfo Selected pool on success.
 * @param[in,out] allocInfo   Request; rbsReq/bytesReq may be shrunk to fit
 *                            the best available pool.
 * Cell-edge UEs may only use the CE pool; cell-centre UEs fall back from
 * the CC pool to the CE pool when the CC pool is exhausted (not vice versa).
 * @return Bool (TRUE when a pool can satisfy the - possibly reduced -
 *         request; FALSE otherwise; return statements elided in this view). */
21272 static Bool rgSCHCmnNonDlfsSFRBwAvlbl
21275 RgSchSFRPoolInfo **sfrpoolInfo,
21277 RgSchDlRbAlloc *allocInfo,
21285 RgSchSFRPoolInfo *sfrPool;
21286 RgSchSFRPoolInfo *sfrCEPool;
21290 RgSchSFRPoolInfo *poolWithMaxAvlblBw = NULLP;
21291 uint32_t bwAvlbl = 0;
21292 uint32_t addtnlPRBs = 0;
/* Quick rejections: whole subframe, CC pool, or (for CE UE) CE pool full */
21294 if (dlSf->bw <= dlSf->bwAlloced)
21296 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
21297 "BW is fully allocated for subframe (%d) CRNTI:%d", dlSf->sfNum,allocInfo->rnti);
21301 if (dlSf->sfrTotalPoolInfo.ccBwFull == TRUE)
21303 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
21304 "BW is fully allocated for CC Pool CRNTI:%d",allocInfo->rnti);
21308 if ((dlSf->sfrTotalPoolInfo.ceBwFull == TRUE) && (isUeCellEdge))
21310 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
21311 "BW is fully allocated for CE Pool CRNTI:%d",allocInfo->rnti);
21315 /* We first check if the UE scheduled is a cell edge or cell centre and accordingly check the available
21316 bandwidth in their pool. If the cell centre UE doesn't have BW available in its pool, then it will check
21317 BW availability in the cell edge pool, but the other way around is NOT possible. */
21320 l = &dlSf->sfrTotalPoolInfo.cePool;
21324 l = &dlSf->sfrTotalPoolInfo.ccPool;
21327 n = cmLListFirst(l);
/* ---- RA type 0 (RBG-based) availability check, per pool ---- */
21331 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
21333 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21335 /* MS_FIX for ccpu00123919 : Number of RBs in case of RETX should be same as that of initial transmission. */
21336 if(allocInfo->tbInfo[0].tbCb->txCntr)
21338 /* If RB assignment is being done for RETX. Then if reqRbs are a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
21339 * not a multiple of rbgSize then check if lstRbgDfct exists */
21340 if (allocInfo->rbsReq % cell->rbgSize == 0)
21342 if ((sfrPool->type2End == dlSf->type2End) && dlSf->lstRbgDfct)
21344 /* In this scenario we are wasting the last RBG for this dlSf */
21345 sfrPool->type0End--;
21346 sfrPool->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21348 dlSf->lstRbgDfct = 0;
21350 /*ABHINAV To check if these variables need to be taken care of*/
21352 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21357 if (dlSf->lstRbgDfct)
21359 /* Check if type0 allocation can cater to this RETX requirement */
21360 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
21366 if (sfrPool->type2End != dlSf->type2End) /*Search again for some pool which has the END RBG of the BandWidth*/
21374 /* cannot allocate same number of required RBs */
21380 /*rg002.301 ccpu00120391 MOD condition is modified appropriately to find if rbsReq is less than available RBs*/
21381 if(allocInfo->rbsReq <= (((sfrPool->type0End - sfrPool->type2End + 1)*\
21382 cell->rbgSize) - dlSf->lstRbgDfct))
21384 *sfrpoolInfo = sfrPool;
/* Pool (nearly) full: advance to the next pool, falling back CC->CE */
21389 if (sfrPool->bw <= sfrPool->bwAlloced + cell->rbgSize)
21391 n = cmLListNext(l);
21392 /* If the UE is cell centre then it will simply check the BW available in the next pool.
21393 But if there are no more pools available, then a cell centre UE will try to look for BW in the cell edge pool */
21395 if((!isUeCellEdge) && (!n->node))
21397 l = &dlSf->sfrTotalPoolInfo.cePool;
21398 n = cmLListFirst(l);
21404 /* MS_FIX: Number of RBs in case of RETX should be same as that of initial transmission */
21405 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21407 /*rg002.301 ccpu00120391 MOD setting the remaining RBs for the requested UE*/
/* New TX: shrink the request to what this pool can still serve and
 * recompute the TB size for the reduced RB count */
21408 allocInfo->rbsReq = (((sfrPool->type0End - sfrPool->type2End + 1)*\
21409 cell->rbgSize) - dlSf->lstRbgDfct);
21410 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21411 noLyrs = allocInfo->tbInfo[0].noLyr;
21412 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21413 *sfrpoolInfo = sfrPool;
21418 n = cmLListNext(l);
21420 /* If the UE is cell centre then it will simply check the BW available in the next pool.
21421 But if there are no more pools available, then a cell centre UE will try to look for BW in the cell edge pool */
21422 if((!isUeCellEdge) && (!n->node))
21424 l = &dlSf->sfrTotalPoolInfo.cePool;
21425 n = cmLListFirst(l);
/* ---- RA type 2 (contiguous) availability check, per pool ---- */
21434 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
21436 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21437 /* This is a case where a UE was CC and had more RBs allocated than present in the CE pool.
21438 In case this UE when become CE with retx going on, then BW is not sufficient for the Retx */
21439 if ((isUeCellEdge) &&
21440 (allocInfo->tbInfo[0].tbCb->txCntr != 0))
21442 if(allocInfo->rbsReq > (sfrPool->bw - sfrPool->bwAlloced))
21444 /* Adjust CE BW such that Retx alloc is successful */
21445 /* Check if merging CE with adjacent CC pool will be sufficient to process Retx */
21447 /* If no Type 0 allocations are made from this pool */
21448 if (sfrPool->type0End == (((sfrPool->poolendRB + 1) / cell->rbgSize) - 1))
/* Borrow addtnlPRBs from the adjacent CC pool into the CE pool */
21450 if (sfrPool->adjCCPool &&
21451 (sfrPool->adjCCPool->type2Start == sfrPool->poolendRB + 1) &&
21452 (allocInfo->rbsReq <= ((sfrPool->bw - sfrPool->bwAlloced) +
21453 ((sfrPool->adjCCPool->bw - sfrPool->adjCCPool->bwAlloced)))))
21455 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
21457 /* Adjusting CE Pool Info */
21458 sfrPool->bw += addtnlPRBs;
21459 sfrPool->type0End = ((sfrPool->poolendRB + addtnlPRBs + 1) /
21460 cell->rbgSize) - 1;
21462 /* Adjusting CC Pool Info */
21463 sfrPool->adjCCPool->type2Start += addtnlPRBs;
21464 sfrPool->adjCCPool->type2End = RGSCH_CEIL(sfrPool->adjCCPool->type2Start,
21466 sfrPool->adjCCPool->bw -= addtnlPRBs;
21467 *sfrpoolInfo = sfrPool;
21474 /* Check if CC pool is one of the following:
21475 * 1. |CE| + |CC "CCPool2Exists" = TRUE|
21476 * 2. |CC "CCPool2Exists" = FALSE| + |CE| + |CC "CCPool2Exists" = TRUE|
21478 if(TRUE == sfrPool->CCPool2Exists)
21480 l1 = &dlSf->sfrTotalPoolInfo.cePool;
21481 n1 = cmLListFirst(l1);
21482 sfrCEPool = (RgSchSFRPoolInfo*)(n1->node);
/* Prefer whichever single pool can hold the whole request */
21483 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced))
21485 *sfrpoolInfo = sfrCEPool;
21488 else if(allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
21490 *sfrpoolInfo = sfrPool;
21493 /* Check if CE and CC boundary has unallocated prbs */
21494 else if ((sfrPool->poolstartRB == sfrPool->type2Start) &&
21495 (sfrCEPool->type0End == ((sfrCEPool->poolendRB + 1) / cell->rbgSize) - 1))
21497 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced) +
21498 (sfrPool->bw - sfrPool->bwAlloced))
21500 /* Checking if BW can be allocated partly from CE pool and partly
21503 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
21504 /* Updating CE and CC type2 parameters based on the RBs allocated
21505 * from these pools*/
21506 sfrPool->type2Start -= addtnlPRBs;
21507 sfrPool->type2End = RGSCH_CEIL(sfrPool->type2Start, cell->rbgSize);
21508 sfrPool->bw += addtnlPRBs;
/* CE pool drained completely by the borrow: mark it full */
21509 if (addtnlPRBs == (sfrCEPool->bw - sfrCEPool->bwAlloced))
21511 sfrCEPool->bwAlloced = sfrCEPool->bw;
21512 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21516 sfrCEPool->bw -= addtnlPRBs;
21517 sfrCEPool->type0End = ((sfrCEPool->poolendRB + 1 - addtnlPRBs) / cell->rbgSize) - 1;
21519 *sfrpoolInfo = sfrPool;
21522 else if ( bwAvlbl <
21523 ((sfrCEPool->bw - sfrCEPool->bwAlloced) +
21524 (sfrPool->bw - sfrPool->bwAlloced)))
21526 /* All the Prbs from CE BW shall be allocated */
21527 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21529 sfrPool->type2Start = sfrCEPool->type2Start;
21530 sfrPool->bw += sfrCEPool->bw - sfrCEPool->bwAlloced;
21531 sfrCEPool->type2Start = sfrCEPool->poolendRB + 1;
21532 sfrCEPool->bwAlloced = sfrCEPool->bw;
21533 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21535 /* set the remaining RBs for the requested UE */
21536 allocInfo->rbsReq = (sfrPool->bw - sfrPool->bwAlloced);
21537 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21538 noLyrs = allocInfo->tbInfo[0].noLyr;
21539 allocInfo->tbInfo[0].bytesReq =
21540 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21541 *sfrpoolInfo = sfrPool;
21552 /* Checking if no. of RBs required can be allocated from
21554 * 1. If available return the SFR pool.
21555 * 2. Else update the RBs required parameter based on the
21556 * BW available in the pool
21557 * 3. Return FALSE if no B/W is available.
21559 if (allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
21561 *sfrpoolInfo = sfrPool;
/* Track the pool with most free BW, to shrink the request as last resort */
21566 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
21568 if (bwAvlbl < sfrPool->bw - sfrPool->bwAlloced)
21572 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21574 bwAvlbl = sfrPool->bw - sfrPool->bwAlloced;
21575 poolWithMaxAvlblBw = sfrPool;
21577 n = cmLListNext(l);
21579 if ((isUeCellEdge == FALSE) && (n == NULLP))
21581 if(l != &dlSf->sfrTotalPoolInfo.cePool)
21583 l = &dlSf->sfrTotalPoolInfo.cePool;
21584 n = cmLListFirst(l);
21594 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
21598 dlSf->sfrTotalPoolInfo.ccBwFull = TRUE;
21604 /* set the remaining RBs for the requested UE */
21605 allocInfo->rbsReq = poolWithMaxAvlblBw->bw -
21606 poolWithMaxAvlblBw->bwAlloced;
21607 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21608 noLyrs = allocInfo->tbInfo[0].noLyr;
21609 allocInfo->tbInfo[0].bytesReq =
21610 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21611 *sfrpoolInfo = poolWithMaxAvlblBw;
21618 n = cmLListNext(l);
21620 if ((isUeCellEdge == FALSE) && (n == NULLP))
21622 if(l != &dlSf->sfrTotalPoolInfo.cePool)
21624 l = &dlSf->sfrTotalPoolInfo.cePool;
21625 n = cmLListFirst(l);
21641 #endif /* end of ifndef LTE_TDD*/
21642 /* LTE_ADV_FLAG_REMOVED_END */
21645 * @brief To check if DL BW available for non-DLFS allocation.
21649 * Function : rgSCHCmnNonDlfsBwAvlbl
21651 * Processing Steps:
21652 * - Determine availability based on RA Type.
21654 * @param[in] RgSchCellCb *cell
21655 * @param[in] RgSchDlSf *dlSf
21656 * @param[in] RgSchDlRbAlloc *allocInfo
/* Checks whether DL bandwidth is available in subframe dlSf for a non-DLFS
 * (non-SFR) UE allocation, based on the requested RA type.
 * @param[in,out] allocInfo Request; for a new TX (non-LAA) rbsReq/bytesReq
 *                          may be shrunk to the remaining BW.
 * @return Bool (TRUE when the - possibly reduced - request fits;
 *         FALSE otherwise; return statements elided in this view). */
21663 static Bool rgSCHCmnNonDlfsBwAvlbl
21667 RgSchDlRbAlloc *allocInfo
/* Set when the last (deficit) RBG was consumed below so the change can be
 * rolled back on the failure paths */
21672 uint8_t ignoredDfctRbg = FALSE;
21674 if (dlSf->bw <= dlSf->bwAlloced)
21676 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId, "(%d:%d)FAILED CRNTI:%d",
21677 dlSf->bw, dlSf->bwAlloced,allocInfo->rnti);
/* ---- RA type 0 (RBG-based) ---- */
21680 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
21682 /* Fix for ccpu00123919 : Number of RBs in case of RETX should be same as
21683 * that of initial transmission. */
21684 if(allocInfo->tbInfo[0].tbCb->txCntr)
21686 /* If RB assignment is being done for RETX. Then if reqRbs are
21687 * a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
21688 * not a multiple of rbgSize then check if lstRbgDfct exists */
21689 if (allocInfo->rbsReq % cell->rbgSize == 0)
21691 if (dlSf->lstRbgDfct)
21693 /* In this scenario we are wasting the last RBG for this dlSf */
21696 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
21697 /* Fix: MUE_PERTTI_DL */
21698 dlSf->lstRbgDfct = 0;
21699 ignoredDfctRbg = TRUE;
21705 if (dlSf->lstRbgDfct)
21707 /* Check if type0 allocation can cater to this RETX requirement */
21708 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
21715 /* cannot allocate same number of required RBs */
21721 /* Condition is modified appropriately to find
21722 * if rbsReq is less than available RBs*/
21723 if(allocInfo->rbsReq <= (((dlSf->type0End - dlSf->type2End + 1)*\
21724 cell->rbgSize) - dlSf->lstRbgDfct))
21728 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
21729 * allocation in TDD when requested RBs are more than available RBs*/
21732 /* MS_WORKAROUND for ccpu00122022 */
21733 if (dlSf->bw < dlSf->bwAlloced + cell->rbgSize)
21735 /* ccpu00132358- Re-assigning the values which were updated above
21736 * if it is RETX and Last RBG available*/
21737 if(ignoredDfctRbg == TRUE)
21740 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
21741 dlSf->lstRbgDfct = 1;
21747 /* Fix: Number of RBs in case of RETX should be same as
21748 * that of initial transmission. */
21749 if(allocInfo->tbInfo[0].tbCb->txCntr == 0
21751 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
21755 /* Setting the remaining RBs for the requested UE*/
21756 allocInfo->rbsReq = (((dlSf->type0End - dlSf->type2End + 1)*\
21757 cell->rbgSize) - dlSf->lstRbgDfct);
21758 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21759 noLyrs = allocInfo->tbInfo[0].noLyr;
21760 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21761 /* DwPts Scheduling Changes Start */
/* Special (DwPTS) subframe carries fewer symbols: scale TBS by ~3/4 */
21763 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
21765 allocInfo->tbInfo[0].bytesReq =
21766 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
21769 /* DwPts Scheduling Changes End */
21773 /* ccpu00132358- Re-assigning the values which were updated above
21774 * if it is RETX and Last RBG available*/
21775 if(ignoredDfctRbg == TRUE)
21778 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
21779 dlSf->lstRbgDfct = 1;
21782 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "FAILED for CRNTI:%d",
21784 printf ("RB Alloc failed for LAA TB type 0\n");
/* ---- RA type 2 (contiguous) ---- */
21790 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
21792 if (allocInfo->rbsReq <= (dlSf->bw - dlSf->bwAlloced))
21796 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
21797 * allocation in TDD when requested RBs are more than available RBs*/
21800 /* Fix: Number of RBs in case of RETX should be same as
21801 * that of initial transmission. */
21802 if((allocInfo->tbInfo[0].tbCb->txCntr == 0)
21804 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
21808 /* set the remaining RBs for the requested UE */
21809 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
21810 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
21811 noLyrs = allocInfo->tbInfo[0].noLyr;
21812 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
21813 /* DwPts Scheduling Changes Start */
21815 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
21817 allocInfo->tbInfo[0].bytesReq =
21818 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
21821 /* DwPts Scheduling Changes End */
21825 printf ("RB Alloc failed for LAA TB type 2\n");
21826 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
21829 /* Fix: Number of RBs in case of RETX should be same as
21830 * that of initial transmission. */
21834 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
21838 /* LTE_ADV_FLAG_REMOVED_START */
21841 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21845 * Function : rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
21847 * Processing Steps:
21849 * @param[in] RgSchCellCb *cell
21850 * @param[in] RgSchDlSf *dlSf
21851 * @param[in] uint8_t rbStrt
21852 * @param[in] uint8_t numRb
/* Updates the subframe-level and per-SFR-pool type-2 allocation pivots
 * after a common-channel RA type 2 allocation of numRb RBs at rbStrt.
 * Walks the CC pool list first, then the CE pool list, reconciling each
 * pool's type2Start/type2End/bwAlloced with the advanced subframe pivot. */
21856 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
21866 RgSchSFRPoolInfo *sfrPool;
21868 l = &dlSf->sfrTotalPoolInfo.ccPool;
/* Advance the subframe-wide type-2 cursor past this allocation */
21870 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
21871 dlSf->bwAlloced += numRb;
21872 dlSf->type2Start += numRb;
21873 n = cmLListFirst(l);
21877 sfrPool = (RgSchSFRPoolInfo*)(n->node);
21878 n = cmLListNext(l);
21880 /* If the pool contains some RBs allocated in this allocation, e.g: Pool is [30..50], Pool->type2Start is 40, dlSf->type2Start is 45: then update the variables in the pool */
21881 if((sfrPool->poolendRB >= dlSf->type2Start) && (sfrPool->type2Start < dlSf->type2Start))
21883 sfrPool->type2End = dlSf->type2End;
21884 sfrPool->bwAlloced = dlSf->type2Start - sfrPool->poolstartRB;
21885 sfrPool->type2Start = dlSf->type2Start;
21889 /* If the pool contains all RBs allocated in this allocation*/
/* Allocation consumed the pool entirely: mark it fully allocated */
21890 if(dlSf->type2Start > sfrPool->poolendRB)
21892 sfrPool->type2End = sfrPool->type0End + 1;
21893 sfrPool->bwAlloced = sfrPool->bw;
21894 sfrPool->type2Start = sfrPool->poolendRB + 1;
/* Finished the CC pool list: continue with the CE pool list */
21899 if (l != &dlSf->sfrTotalPoolInfo.cePool)
21901 l = &dlSf->sfrTotalPoolInfo.cePool;
21902 n = cmLListFirst(l);
21912 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
21916 * Function : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
21918 * Processing Steps:
21920 * @param[in] RgSchCellCb *cell
21921 * @param[in] RgSchDlSf *dlSf
21922 * @param[in] uint8_t rbStrt
21923 * @param[in] uint8_t numRb
/* DSFR variant of the type-2 allocation update: besides advancing the
 * subframe pivots, marks the cell-centre UE as high-power and records the
 * allocated PRBs in the RNTP bitmap when the grant lies inside a CC pool's
 * high-power range.
 * @return S16 (ROK/RFAILED by file convention; RFAILED path visible when
 *         rgSCHCmnBuildRntpInfo() fails or the CC pool is missing). */
21928 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
21939 RgSchSFRPoolInfo *sfrCCPool1 = NULL;
21940 RgSchSFRPoolInfo *sfrCCPool2 = NULL;
21943 /* Move the type2End pivot forward */
/* Fetch the first (and, if present, second) CC pool */
21946 l = &dlSf->sfrTotalPoolInfo.ccPool;
21947 n = cmLListFirst(l);
21950 sfrCCPool1 = (RgSchSFRPoolInfo*)(n->node);
21952 if (sfrCCPool1 == NULLP)
21954 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21955 "sfrCCPool1 is NULL for CRNTI:%d",ue->ueId);
21958 n = cmLListNext(l);
21961 sfrCCPool2 = (RgSchSFRPoolInfo*)(n->node);
21962 n = cmLListNext(l);
/* Two CC pools: grant may fall in either pool's high-power range */
21964 if((sfrCCPool1) && (sfrCCPool2))
21966 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
21967 if(((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
21968 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb)) ||
21969 ((dlSf->type2Start >= sfrCCPool2->pwrHiCCRange.startRb) &&
21970 (dlSf->type2Start + numRb < sfrCCPool2->pwrHiCCRange.endRb)))
21972 ue->lteAdvUeCb.isCCUePHigh = TRUE;
21974 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
21975 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
21978 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21979 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Single CC pool: same check against pool 1 only */
21986 if((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
21987 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb))
21989 ue->lteAdvUeCb.isCCUePHigh = TRUE;
21991 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
21992 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
21995 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
21996 "rgSCHCmnBuildRntpInfo() function returned RFAILED CRNTI:%d",ue->ueId);
/* Finally advance the subframe-wide type-2 bookkeeping */
22002 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22004 dlSf->bwAlloced += numRb;
22005 /*MS_FIX for ccpu00123918*/
22006 dlSf->type2Start += numRb;
22012 #endif /* end of ifndef LTE_TDD*/
22013 /* LTE_ADV_FLAG_REMOVED_END */
22015 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
22019 * Function : rgSCHCmnNonDlfsUpdTyp2Alloc
22021 * Processing Steps:
22023 * @param[in] RgSchCellCb *cell
22024 * @param[in] RgSchDlSf *dlSf
22025 * @param[in] uint8_t rbStrt
22026 * @param[in] uint8_t numRb
/* Plain (non-SFR) bookkeeping after an RA type 2 allocation of numRb RBs
 * starting at rbStrt: advances the subframe's type-2 start/end pivots and
 * the allocated-BW counter. */
22030 static Void rgSCHCmnNonDlfsUpdTyp2Alloc
22038 /* Move the type2End pivot forward */
22039 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22040 //#ifndef LTEMAC_SPS
22041 dlSf->bwAlloced += numRb;
22042 /*Fix for ccpu00123918*/
22043 dlSf->type2Start += numRb;
22049 * @brief To do DL allocation using TYPE0 RA.
22053 * Function : rgSCHCmnNonDlfsType0Alloc
22055 * Processing Steps:
22056 * - Perform TYPE0 allocation using the RBGs between
22057 * type0End and type2End.
22058 * - Build the allocation mask as per RBG positioning.
22059 * - Update the allocation parameters.
22061 * @param[in] RgSchCellCb *cell
22062 * @param[in] RgSchDlSf *dlSf
22063 * @param[in] RgSchDlRbAlloc *allocInfo
/* Performs a DL RA type 0 (RBG bitmap) allocation for a UE:
 *  - rounds the RB request up to whole RBGs (absorbing the last, possibly
 *    smaller, "deficit" RBG via rbgFiller),
 *  - caps the grant against UE-category limits (maxTbBits/maxTbSz/maxRb),
 *  - builds the RBG bitmask between type0End and type2End (type0End moves
 *    backwards), and
 *  - fills bytesAlloc per TB (RETX keeps its original TB size; DwPTS
 *    special subframes use a ~3/4-scaled TBS lookup). */
22068 static Void rgSCHCmnNonDlfsType0Alloc
22072 RgSchDlRbAlloc *allocInfo,
22076 uint32_t dlAllocMsk = 0;
22077 uint8_t rbgFiller = dlSf->lstRbgDfct;
22078 uint8_t noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
22079 //uint8_t noRbgs = (allocInfo->rbsReq + rbgFiller)/ cell->rbgSize;
22083 uint32_t tb1BytesAlloc = 0;
22084 uint32_t tb2BytesAlloc = 0;
22085 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22087 //if(noRbgs == 0) noRbgs = 1; /* Not required as ceiling is used above*/
22089 /* Fix for ccpu00123919*/
22090 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Rounding up overshot the subframe BW: drop one RBG (elided) and redo */
22091 if (dlSf->bwAlloced + noRbs > dlSf->bw)
22097 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22100 /* Fix for ccpu00138701: Ceiling is used to derive num of RBGs. Therefore,
22101 * after this operation, check that Max TB size and Max RBs are not crossed;
22102 * if crossed then decrement num of RBGs. */
22103 //if((noRbs + rbgFiller) % cell->rbgSize)
22104 if((noRbs > allocInfo->rbsReq) &&
22105 (allocInfo->rbsReq + rbgFiller) % cell->rbgSize)
22106 {/* considering ue category limitation
22107 * due to ceiling */
22110 if (rgSCHLaaIsLaaTB(allocInfo)== FALSE)
/* Tentative per-TB sizes for new transmissions only (RETX keeps its size) */
22113 if ((allocInfo->tbInfo[0].schdlngForTb) && (!allocInfo->tbInfo[0].tbCb->txCntr))
22115 iTbs = allocInfo->tbInfo[0].iTbs;
22116 noLyr = allocInfo->tbInfo[0].noLyr;
22117 tb1BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22120 if ((allocInfo->tbInfo[1].schdlngForTb) && (!allocInfo->tbInfo[1].tbCb->txCntr))
22122 iTbs = allocInfo->tbInfo[1].iTbs;
22123 noLyr = allocInfo->tbInfo[1].noLyr;
22124 tb2BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22128 /* Only Check for New Tx No need for Retx */
22129 if (tb1BytesAlloc || tb2BytesAlloc)
/* UE-category caps exceeded: shrink by one RBG (decrement elided) */
22131 if (( ue->dl.aggTbBits >= dlUe->maxTbBits) ||
22132 (tb1BytesAlloc >= dlUe->maxTbSz/8) ||
22133 (tb2BytesAlloc >= dlUe->maxTbSz/8) ||
22134 (noRbs >= dlUe->maxRb))
22140 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22144 /* type0End would have been initially (during subfrm Init) at the bit position
22145 * (cell->noOfRbgs - 1), 0 being the most significant.
22146 * Getting DlAllocMsk for noRbgs and at the appropriate position */
22147 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - dlSf->type0End));
22148 /* Move backwards the type0End pivot */
22149 dlSf->type0End -= noRbgs;
22150 /*Fix for ccpu00123919*/
22151 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
22152 /* Update the bwAlloced field accordingly */
22153 //#ifndef LTEMAC_SPS /* ccpu00129474*/
22154 dlSf->bwAlloced += noRbs;
22156 /* Update Type0 Alloc Info */
22157 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
22158 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
22159 allocInfo->rbsAlloc = noRbs;
22161 /* Update Tb info for each scheduled TB */
22162 iTbs = allocInfo->tbInfo[0].iTbs;
22163 noLyr = allocInfo->tbInfo[0].noLyr;
22164 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
22165 * RETX TB Size is same as Init TX TB Size */
22166 if (allocInfo->tbInfo[0].tbCb->txCntr)
22168 allocInfo->tbInfo[0].bytesAlloc =
22169 allocInfo->tbInfo[0].bytesReq;
22173 allocInfo->tbInfo[0].bytesAlloc =
22174 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22175 /* DwPts Scheduling Changes Start */
22177 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
22179 allocInfo->tbInfo[0].bytesAlloc =
22180 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
22183 /* DwPts Scheduling Changes End */
22186 if (allocInfo->tbInfo[1].schdlngForTb)
22188 iTbs = allocInfo->tbInfo[1].iTbs;
22189 noLyr = allocInfo->tbInfo[1].noLyr;
22190 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
22191 * RETX TB Size is same as Init TX TB Size */
22192 if (allocInfo->tbInfo[1].tbCb->txCntr)
22194 allocInfo->tbInfo[1].bytesAlloc =
22195 allocInfo->tbInfo[1].bytesReq;
22199 allocInfo->tbInfo[1].bytesAlloc =
22200 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22201 /* DwPts Scheduling Changes Start */
22203 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
22205 allocInfo->tbInfo[1].bytesAlloc =
22206 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
22209 /* DwPts Scheduling Changes End */
22213 /* The last RBG which can be smaller than the RBG size is considered
22214 * only for the first time allocation of TYPE0 UE */
22215 dlSf->lstRbgDfct = 0;
22222 * @brief To prepare RNTP value from the PRB allocation (P-High -> 1 and P-Low -> 0)
22226 * Function : rgSCHCmnBuildRntpInfo
22228 * Processing Steps:
22230 * @param[in] uint8_t *rntpPtr
22231 * @param[in] uint8_t startRb
22232 * @param[in] uint8_t numRb
/* Sets bits [startRb, startRb+nmbRb) in the RNTP (Relative Narrowband Tx
 * Power) bitmap rntpPtr, one bit per PRB, LSB-first within each byte.
 * Walks byte by byte; when the range spans multiple bytes, the per-byte
 * chunk (nmbRbPerByte) is set and startRb/nmbRb advance to the next byte.
 * @return S16 (ROK/RFAILED by file convention; RFAILED when rntpPtr is
 *         NULLP; remaining return statements elided in this view). */
22237 static S16 rgSCHCmnBuildRntpInfo
22246 uint16_t rbPtrStartIdx; /* Start Index of Octet Buffer to be filled */
22247 uint16_t rbPtrEndIdx; /* End Index of Octet Buffer to be filled */
22248 uint16_t rbBitLoc; /* Bit Location to be set as 1 in the current Byte */
22249 uint16_t nmbRbPerByte; /* PRB's to be set in the current Byte (in case of multiple Bytes) */
22252 rbPtrStartIdx = (startRb)/8;
22253 rbPtrEndIdx = (startRb + nmbRb)/8;
22255 if (rntpPtr == NULLP)
22257 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
22258 "rgSCHCmnBuildRntpInfo():"
22259 "rntpPtr can't be NULLP (Memory Allocation Failed)");
22263 while(rbPtrStartIdx <= rbPtrEndIdx)
22265 rbBitLoc = (startRb)%8;
22267 /* case 1: startRb and endRb lie in the same Byte */
22268 if (rbPtrStartIdx == rbPtrEndIdx)
22270 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
22271 | (((1<<nmbRb)-1)<<rbBitLoc);
22274 /* case 2: startRb and endRb lie in different Bytes */
22275 if (rbPtrStartIdx != rbPtrEndIdx)
22277 nmbRbPerByte = 8 - rbBitLoc;
22278 nmbRb = nmbRb - nmbRbPerByte;
22279 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
22280 | (((1<<nmbRbPerByte)-1)<<rbBitLoc);
22281 startRb = startRb + nmbRbPerByte;
22287 /* dsfr_pal_fixes ** 21-March-2013 ** SKS ** Adding Debug logs */
22289 /* dsfr_pal_fixes ** 25-March-2013 ** SKS ** Adding Debug logs to print RNTP */
22295 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
22299 * Function : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
22301 * Processing Steps:
/* After a TYPE2 (contiguous) allocation from an SFR pool: advances the
 * subframe-wide and pool-wide type2Start/type2End/bwAlloced bookkeeping,
 * and — when DSFR is enabled — updates the RNTP bitmap via
 * rgSCHCmnBuildRntpInfo (cell-centre UEs additionally get the P-High
 * marking when the allocation sits inside the pool's pwrHiCCRange). */
22303 * @param[in] RgSchCellCb *cell
22304 * @param[in] RgSchDlSf *dlSf
22305 * @param[in] uint8_t rbStrt
22306 * @param[in] uint8_t numRb
22310 static S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
22315 RgSchSFRPoolInfo *sfrPool,
/* type2End is kept in RBG units, hence the ceiling by cell->rbgSize. */
22324 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22325 sfrPool->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
22328 dlSf->type2Start += numRb;
22329 dlSf->bwAlloced += numRb;
22331 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
22333 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
22334 if(FALSE == ue->lteAdvUeCb.rgrLteAdvUeCfg.isUeCellEdge)
22336 if((sfrPool->type2Start >= sfrPool->pwrHiCCRange.startRb) &&
22337 (sfrPool->type2Start + numRb < sfrPool->pwrHiCCRange.endRb))
22339 ue->lteAdvUeCb.isCCUePHigh = TRUE;
22341 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
22342 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
22345 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
22346 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Cell-edge UE path (branch structure partially elided in this view):
 * RNTP bits are set unconditionally for the allocated range. */
22353 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
22354 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
22357 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
22358 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Finally advance the pool-local allocation pivot and budget. */
22363 sfrPool->type2Start += numRb;
22364 sfrPool->bwAlloced += numRb;
22371 * @brief To do DL allocation using TYPE0 RA.
22375 * Function : rgSCHCmnNonDlfsSFRPoolType0Alloc
22377 * Processing Steps:
22378 * - Perform TYPE0 allocation using the RBGs between type0End and type2End.
22379 * - Build the allocation mask as per RBG positioning.
22380 * - Update the allocation parameters.
22382 * @param[in] RgSchCellCb *cell
22383 * @param[in] RgSchDlSf *dlSf
22384 * @param[in] RgSchDlRbAlloc *allocInfo
22388 static Void rgSCHCmnNonDlfsSFRPoolType0Alloc
22392 RgSchSFRPoolInfo *poolInfo,
22393 RgSchDlRbAlloc *allocInfo
22396 uint32_t dlAllocMsk = 0;
22397 uint8_t rbgFiller = 0;
22398 uint8_t noRbgs = 0;
/* Only the pool that ends at the cell-bandwidth edge can own the (possibly
 * short) last RBG; absorb its deficit into the RBG count exactly once. */
22404 if (poolInfo->poolstartRB + poolInfo->bw == dlSf->bw)
22406 if (poolInfo->type0End == dlSf->bw/4)
22408 rbgFiller = dlSf->lstRbgDfct;
22409 /* The last RBG which can be smaller than the RBG size is considered
22410 * only for the first time allocation of TYPE0 UE */
22411 dlSf->lstRbgDfct = 0;
22415 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
22417 /* Abhinav to-do start */
22418 /* MS_FIX for ccpu00123919*/
22419 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Clamp to remaining subframe bandwidth (adjustment lines elided here). */
22420 if (dlSf->bwAlloced + noRbs > dlSf->bw)
22426 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
22428 /* Abhinav to-do end */
22432 /* type0End would have been initially (during subfrm Init) at the bit position
22433 * (cell->noOfRbgs - 1), 0 being the most significant.
22434 * Getting DlAllocMsk for noRbgs and at the appropriate position */
22435 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - poolInfo->type0End));
22436 /* Move backwards the type0End pivot */
22437 poolInfo->type0End -= noRbgs;
22438 /*MS_FIX for ccpu00123919*/
22439 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
22440 /* Update the bwAlloced field accordingly */
22441 poolInfo->bwAlloced += noRbs + dlSf->lstRbgDfct;
22442 dlSf->bwAlloced += noRbs + dlSf->lstRbgDfct;
22444 /* Update Type0 Alloc Info */
22445 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
22446 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
22447 allocInfo->rbsAlloc = noRbs;
22449 /* Update Tb info for each scheduled TB */
22450 iTbs = allocInfo->tbInfo[0].iTbs;
22451 noLyr = allocInfo->tbInfo[0].noLyr;
22452 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
22453 * RETX TB Size is same as Init TX TB Size */
22454 if (allocInfo->tbInfo[0].tbCb->txCntr)
22456 allocInfo->tbInfo[0].bytesAlloc =
22457 allocInfo->tbInfo[0].bytesReq;
/* Initial TX: TB size from the (layers x iTbs x RBs) table, bits -> bytes. */
22461 allocInfo->tbInfo[0].bytesAlloc =
22462 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22465 if (allocInfo->tbInfo[1].schdlngForTb)
22467 iTbs = allocInfo->tbInfo[1].iTbs;
22468 noLyr = allocInfo->tbInfo[1].noLyr;
22469 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
22470 * RETX TB Size is same as Init TX TB Size */
22471 if (allocInfo->tbInfo[1].tbCb->txCntr)
22473 allocInfo->tbInfo[1].bytesAlloc =
22474 allocInfo->tbInfo[1].bytesReq;
22478 allocInfo->tbInfo[1].bytesAlloc =
22479 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
22483 /* The last RBG which can be smaller than the RBG size is considered
22484 * only for the first time allocation of TYPE0 UE */
22485 dlSf->lstRbgDfct = 0;
22490 * @brief Computes RNTP Info for a subframe.
22494 * Function : rgSCHCmnNonDlfsDsfrRntpComp
22496 * Processing Steps:
22497 * - Computes RNTP info from individual pools.
/* ORs this subframe's RNTP bitmap into the cell-level aggregate each TTI;
 * after RG_SCH_MAX_RNTP_SAMPLES TTIs it packages the aggregate into an
 * RgrLoadInfIndInfo and hands it up via rgSCHUtlRgrLoadInfInd, then clears
 * the aggregate. NOTE(review): the reset of the static `samples` counter is
 * not visible in this view — confirm it happens in the elided lines. */
22499 * @param[in] RgSchDlSf *dlSf
22504 static void rgSCHCmnNonDlfsDsfrRntpComp(RgSchCellCb *cell,RgSchDlSf *dlSf)
/* Persists across calls: number of TTIs accumulated so far. */
22506 static uint16_t samples = 0;
22508 uint16_t bwBytes = (dlSf->bw-1)/8;
22509 RgrLoadInfIndInfo *rgrLoadInf;
22511 uint16_t ret = ROK;
/* Number of octets needed to hold one bit per PRB of DL bandwidth. */
22514 len = (dlSf->bw % 8 == 0) ? dlSf->bw/8 : dlSf->bw/8 + 1;
22516 /* RNTP info is ORed every TTI and the sample is stored in cell control block */
22517 for(i = 0; i <= bwBytes; i++)
22519 cell->rntpAggrInfo.val[i] |= dlSf->rntpInfo.val[i];
22521 samples = samples + 1;
22522 /* After every 1000 ms, the RNTP info will be sent to application to be further sent to all neighbouring eNB
22523 informing them about the load indication for cell edge users */
22524 if(RG_SCH_MAX_RNTP_SAMPLES == samples)
22527 ret = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&rgrLoadInf,
22528 sizeof(RgrLoadInfIndInfo));
22531 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
22532 "allocate memory for sending LoadInfo");
/* Shallow-shares the aggregate bitmap pointer with the indication. */
22536 rgrLoadInf->u.rntpInfo.pres = cell->rntpAggrInfo.pres;
22537 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
22538 rgrLoadInf->u.rntpInfo.len = len;
22540 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
22541 rgrLoadInf->u.rntpInfo.val = cell->rntpAggrInfo.val;
22542 rgrLoadInf->cellId = cell->cellId;
22544 /* dsfr_pal_fixes ** 22-March-2013 ** SKS */
22545 rgrLoadInf->bw = dlSf->bw;
22546 rgrLoadInf->type = RGR_SFR;
22548 ret = rgSCHUtlRgrLoadInfInd(cell, rgrLoadInf);
22551 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsDsfrRntpComp():"
22552 "rgSCHUtlRgrLoadInfInd() returned RFAILED");
/* Start a fresh accumulation window. */
22555 memset(cell->rntpAggrInfo.val,0,len);
22559 /* LTE_ADV_FLAG_REMOVED_END */
22561 /* LTE_ADV_FLAG_REMOVED_START */
22563 * @brief Performs RB allocation per UE from a pool.
22567 * Function : rgSCHCmnSFRNonDlfsUeRbAlloc
22569 * Processing Steps:
22570 * - Allocate consecutively available RBs.
/* SFR variant of per-UE non-DLFS allocation: picks an SFR pool for the UE
 * (cell-edge vs cell-centre), allocates a PDCCH, then performs either a
 * TYPE2 (contiguous) or TYPE0 (RBG mask) allocation from that pool. */
22572 * @param[in] RgSchCellCb *cell
22573 * @param[in] RgSchUeCb *ue
22574 * @param[in] RgSchDlSf *dlSf
22575 * @param[out] uint8_t *isDlBwAvail
22582 static S16 rgSCHCmnSFRNonDlfsUeRbAlloc
22587 uint8_t *isDlBwAvail
22590 RgSchDlRbAlloc *allocInfo;
22591 RgSchCmnDlUe *dlUe;
22593 RgSchSFRPoolInfo *sfrpoolInfo = NULLP;
22596 isUECellEdge = RG_SCH_CMN_IS_UE_CELL_EDGE(ue);
22598 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22599 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
22600 *isDlBwAvail = TRUE;
22602 /*Find which pool is available for this UE*/
22603 if (rgSCHCmnNonDlfsSFRBwAvlbl(cell, &sfrpoolInfo, dlSf, allocInfo, isUECellEdge) != TRUE)
22605 /* SFR_FIX - If this is CE UE there may be BW available in CC Pool
22606 So CC UEs will be scheduled */
22609 *isDlBwAvail = TRUE;
22613 *isDlBwAvail = FALSE;
/* DTX on either TB forces the PDCCH-allocation DTX handling path. */
22618 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX || dlUe->proc->tbInfo[1].isAckNackDtx)
22620 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
22624 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
22627 if (!(allocInfo->pdcch))
22629 /* Returning ROK since PDCCH might be available for another UE and further allocations could be done */
22634 allocInfo->rnti = ue->ueId;
22637 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22639 allocInfo->allocInfo.raType2.isLocal = TRUE;
22640 /* rg004.201 patch - ccpu00109921 fix end */
22641 /* MS_FIX for ccpu00123918*/
22642 allocInfo->allocInfo.raType2.rbStart = (uint8_t)sfrpoolInfo->type2Start;
22643 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22644 /* rg007.201 - Changes for MIMO feature addition */
22645 /* rg008.201 - Removed dependency on MIMO compile-time flag */
22646 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrpoolInfo, \
22647 allocInfo->allocInfo.raType2.rbStart, \
22648 allocInfo->allocInfo.raType2.numRb);
22649 allocInfo->rbsAlloc = allocInfo->rbsReq;
22650 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22652 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
22654 rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, sfrpoolInfo, allocInfo);
/* Recompute coding rate for each scheduled TB after the allocation. */
22658 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,0);
22659 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
22661 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,1);
22666 #if defined(LTEMAC_SPS)
22667 /* Update the sub-frame with new allocation */
22668 dlSf->bwAlloced += allocInfo->rbsReq;
22674 /* LTE_ADV_FLAG_REMOVED_END */
22675 #endif /* LTE_TDD */
22678 * @brief Performs RB allocation per UE for frequency non-selective cell.
22682 * Function : rgSCHCmnNonDlfsUeRbAlloc
22684 * Processing Steps:
22685 * - Allocate consecutively available RBs.
/* 5GTF per-UE allocation: validates per-beam VRBG budget, allocates a
 * PDCCH, then builds the DL grant (VRBG start/count, RIV via
 * rgSCHCmnCalcRiv) and advances the beam's VRBG accounting. */
22687 * @param[in] RgSchCellCb *cell
22688 * @param[in] RgSchUeCb *ue
22689 * @param[in] RgSchDlSf *dlSf
22690 * @param[out] uint8_t *isDlBwAvail
22696 static S16 rgSCHCmnNonDlfsUeRbAlloc
22701 uint8_t *isDlBwAvail
22704 RgSchDlRbAlloc *allocInfo;
22705 RgSchCmnDlUe *dlUe;
22707 uint32_t dbgRbsReq = 0;
22711 RgSch5gtfUeCb *ue5gtfCb = &(ue->ue5gtfCb);
22712 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[ue5gtfCb->BeamId]);
22714 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22715 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
22716 *isDlBwAvail = TRUE;
/* Sanity check: the beam's VRBG budget must not exceed MAX_5GTF_VRBG. */
22718 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
22720 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
22721 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
22723 printf("5GTF_ERROR vrbg allocated > 25\n");
/* DTX on either TB drives the DTX flag into PDCCH allocation. */
22727 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX
22728 || dlUe->proc->tbInfo[1].isAckNackDtx)
22730 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
22734 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
22736 if (!(allocInfo->pdcch))
22738 /* Returning ROK since PDCCH might be available for another UE and
22739 * further allocations could be done */
22740 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
22741 "5GTF_ERROR : PDCCH allocation failed :ue (%u)",
22743 printf("5GTF_ERROR PDCCH allocation failed\n");
22747 //maxPrb = RGSCH_MIN((allocInfo->vrbgReq * MAX_5GTF_VRBG_SIZE), ue5gtfCb->maxPrb);
22748 //maxPrb = RGSCH_MIN(maxPrb,
22749 //((beamInfo->totVrbgAvail - beamInfo->vrbgStart)* MAX_5GTF_VRBG_SIZE)));
22750 //TODO_SID Need to check for vrbg available after scheduling for same beam.
22751 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
22752 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
22753 //TODO_SID: Setting for max TP
22754 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
22755 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
22756 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
22757 allocInfo->tbInfo[0].tbCb->dlGrnt.SCID = 0;
22758 allocInfo->tbInfo[0].tbCb->dlGrnt.dciFormat = allocInfo->dciFormat;
22759 //Filling temporarily
22760 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
22761 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG cursor and total for subsequent UEs. */
22763 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
22764 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
22765 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22773 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
22777 * Function : rgSCHCmnNonDlfsCcchSduAlloc
22779 * Processing Steps:
22780 * - For each element in the list, Call rgSCHCmnNonDlfsCcchSduRbAlloc().
22781 * - If allocation is successful, add the ueCb to scheduled list of CCCH
22783 * - else, add UeCb to non-scheduled list.
/* Iterates the CCCH-SDU HARQ list (retx list when isRetx, else tx list);
 * each entry is allocated via rgSCHCmnNonDlfsCcchSduRbAlloc and moved to
 * the scheduled list. On the first failure, the current and all remaining
 * entries are moved to the non-scheduled list. */
22785 * @param[in] RgSchCellCb *cell
22786 * @param[in, out] RgSchCmnCcchSduRbAlloc *allocInfo
22787 * @param[in] uint8_t isRetx
22791 static Void rgSCHCmnNonDlfsCcchSduAlloc
22794 RgSchCmnCcchSduRbAlloc *allocInfo,
22799 CmLListCp *ccchSduLst = NULLP;
22800 CmLListCp *schdCcchSduLst = NULLP;
22801 CmLListCp *nonSchdCcchSduLst = NULLP;
22802 CmLList *schdLnkNode = NULLP;
22803 CmLList *toBeSchdLnk = NULLP;
22804 RgSchDlSf *dlSf = allocInfo->ccchSduDlSf;
22805 RgSchUeCb *ueCb = NULLP;
22806 RgSchDlHqProcCb *hqP = NULLP;
22810 /* Initialize re-transmitting lists */
22811 ccchSduLst = &(allocInfo->ccchSduRetxLst);
22812 schdCcchSduLst = &(allocInfo->schdCcchSduRetxLst);
22813 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduRetxLst);
22817 /* Initialize transmitting lists */
22818 ccchSduLst = &(allocInfo->ccchSduTxLst);
22819 schdCcchSduLst = &(allocInfo->schdCcchSduTxLst);
22820 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduTxLst);
22823 /* Perform allocations for the list */
22824 toBeSchdLnk = cmLListFirst(ccchSduLst);
22825 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
22827 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
22828 ueCb = hqP->hqE->ue;
22829 schdLnkNode = &hqP->schdLstLnk;
22830 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
22831 ret = rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf);
22834 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
22835 * list and return */
/* Drain the rest of the pending list into the non-scheduled list. */
22838 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
22839 ueCb = hqP->hqE->ue;
22840 schdLnkNode = &hqP->schdLstLnk;
22841 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
22842 cmLListAdd2Tail(nonSchdCcchSduLst, schdLnkNode);
22843 toBeSchdLnk = toBeSchdLnk->next;
22844 } while(toBeSchdLnk);
22848 /* Allocation successful: Add UE to the scheduled list */
22849 cmLListAdd2Tail(schdCcchSduLst, schdLnkNode);
22857 * @brief Performs RB allocation for CcchSdu for frequency non-selective cell.
22861 * Function : rgSCHCmnNonDlfsCcchSduRbAlloc
22863 * Processing Steps:
22865 * - Allocate consecutively available RBs
/* Allocates a contiguous TYPE2 chunk and a DCI format 1A PDCCH for a CCCH
 * SDU; on bandwidth or PDCCH exhaustion the UE is retried next TTI. */
22867 * @param[in] RgSchCellCb *cell
22868 * @param[in] RgSchUeCb *ueCb
22869 * @param[in] RgSchDlSf *dlSf
22874 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc
22881 RgSchDlRbAlloc *allocInfo;
22882 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
22886 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb,cell);
22888 /* [ccpu00138802]-MOD-If Bw is less than required, return fail
22889 It will be allocated in next TTI */
/* Two variants of the bandwidth-exhaustion check (SPS vs non-SPS build;
 * #ifdef lines elided in this view). */
22891 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
22892 (dlSf->bwAlloced == dlSf->bw))
22894 if((dlSf->bwAlloced == dlSf->bw) ||
22895 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
22900 /* Retrieve PDCCH */
22901 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
22902 if (ueDl->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
22904 /* allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, dlSf, y, ueDl->cqi,
22905 * TFU_DCI_FORMAT_1A, TRUE);*/
22906 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, TRUE);
22910 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE);
22912 if (!(allocInfo->pdcch))
22914 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
22918 /* Update allocation information */
22919 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
22920 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
22921 allocInfo->allocInfo.raType2.isLocal = TRUE;
22923 /*Fix for ccpu00123918*/
22924 /* Push this harq process back to the free queue */
22925 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
22926 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
22927 allocInfo->rbsAlloc = allocInfo->rbsReq;
22928 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
22929 /* Update the sub-frame with new allocation */
22931 /* LTE_ADV_FLAG_REMOVED_START */
/* SFR enabled: common-channel TYPE2 update keeps per-pool bookkeeping. */
22933 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
22935 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf,
22936 allocInfo->allocInfo.raType2.rbStart,
22937 allocInfo->allocInfo.raType2.numRb);
22940 #endif /* end of ifndef LTE_TDD*/
22942 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf,
22943 allocInfo->allocInfo.raType2.rbStart,
22944 allocInfo->allocInfo.raType2.numRb);
22947 /* LTE_ADV_FLAG_REMOVED_END */
22948 /* ccpu00131941 - bwAlloced is updated from SPS bandwidth */
22956 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
22960 * Function : rgSCHCmnNonDlfsMsg4RbAlloc
22962 * Processing Steps:
22964 * - Allocate consecutively available RBs
/* Allocates PDCCH + downlink resources for a Msg4 (contention resolution)
 * transmission. Two build variants coexist in this body (legacy TYPE2
 * path and 5GTF VRBG/RIV path); the selecting #ifdef lines are elided in
 * this view — consult the full source for which path compiles. */
22966 * @param[in] RgSchCellCb *cell
22967 * @param[in] RgSchRaCb *raCb
22968 * @param[in] RgSchDlSf *dlSf
22973 static S16 rgSCHCmnNonDlfsMsg4RbAlloc
22980 RgSchDlRbAlloc *allocInfo;
22983 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_RACB(raCb);
/* 5GTF: Msg4 always uses beam 0; validate its VRBG budget. */
22986 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
22987 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
22989 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
22990 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
22992 printf("5GTF_ERROR vrbg allocated > 25\n");
22997 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
22998 (dlSf->bwAlloced == dlSf->bw))
23000 if((dlSf->bwAlloced == dlSf->bw) ||
23001 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
23008 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
23009 if (raCb->dlHqE->msg4Proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
23011 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, TRUE);
23015 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, FALSE);
23017 if (!(allocInfo->pdcch))
23019 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
23024 /* SR_RACH_STATS : MSG4 TX Failed */
23025 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
23027 /* Update allocation information */
23028 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23029 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23030 allocInfo->allocInfo.raType2.isLocal = TRUE;
23033 /*Fix for ccpu00123918*/
23034 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23035 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23036 /* LTE_ADV_FLAG_REMOVED_START */
23038 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
23040 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
23041 allocInfo->allocInfo.raType2.rbStart, \
23042 allocInfo->allocInfo.raType2.numRb);
23045 #endif /* end of ifndef LTE_TDD */
23047 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
23048 allocInfo->allocInfo.raType2.rbStart, \
23049 allocInfo->allocInfo.raType2.numRb);
23051 /* LTE_ADV_FLAG_REMOVED_END */
23053 allocInfo->rbsAlloc = allocInfo->rbsReq;
23054 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* 5GTF variant: build the DL grant directly in VRBG terms. */
23058 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
23060 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
23061 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
23063 /* Update allocation information */
23064 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23066 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
23067 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
23068 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
23070 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
23071 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
23074 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
23075 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
23076 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23084 * @brief Performs RB allocation for Msg4 lists of frequency non-selective cell.
23088 * Function : rgSCHCmnNonDlfsMsg4Alloc
23090 * Processing Steps:
23091 * - For each element in the list, Call rgSCHCmnNonDlfsMsg4RbAlloc().
23092 * - If allocation is successful, add the raCb to scheduled list of MSG4.
23093 * - else, add RaCb to non-scheduled list.
/* Mirrors rgSCHCmnNonDlfsCcchSduAlloc, but over Msg4 HARQ lists keyed by
 * RA control blocks: on the first allocation failure the remaining pending
 * entries are drained into the non-scheduled list. */
23095 * @param[in] RgSchCellCb *cell
23096 * @param[in, out] RgSchCmnMsg4RbAlloc *allocInfo
23097 * @param[in] uint8_t isRetx
23101 static Void rgSCHCmnNonDlfsMsg4Alloc
23104 RgSchCmnMsg4RbAlloc *allocInfo,
23109 CmLListCp *msg4Lst = NULLP;
23110 CmLListCp *schdMsg4Lst = NULLP;
23111 CmLListCp *nonSchdMsg4Lst = NULLP;
23112 CmLList *schdLnkNode = NULLP;
23113 CmLList *toBeSchdLnk = NULLP;
23114 RgSchDlSf *dlSf = allocInfo->msg4DlSf;
23115 RgSchRaCb *raCb = NULLP;
23116 RgSchDlHqProcCb *hqP = NULLP;
23120 /* Initialize re-transmitting lists */
23121 msg4Lst = &(allocInfo->msg4RetxLst);
23122 schdMsg4Lst = &(allocInfo->schdMsg4RetxLst);
23123 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4RetxLst);
23127 /* Initialize transmitting lists */
23128 msg4Lst = &(allocInfo->msg4TxLst);
23129 schdMsg4Lst = &(allocInfo->schdMsg4TxLst);
23130 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4TxLst);
23133 /* Perform allocations for the list */
23134 toBeSchdLnk = cmLListFirst(msg4Lst);
23135 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
23137 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23138 raCb = hqP->hqE->raCb;
23139 schdLnkNode = &hqP->schdLstLnk;
23140 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23141 ret = rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf);
23144 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
23145 * list and return */
/* Drain the rest of the pending list into the non-scheduled list. */
23148 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23149 raCb = hqP->hqE->raCb;
23150 schdLnkNode = &hqP->schdLstLnk;
23151 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23152 cmLListAdd2Tail(nonSchdMsg4Lst, schdLnkNode);
23153 toBeSchdLnk = toBeSchdLnk->next;
23154 } while(toBeSchdLnk);
23158 /* Allocation successful: Add UE to the scheduled list */
23159 cmLListAdd2Tail(schdMsg4Lst, schdLnkNode);
23170 * @brief Performs RB allocation for the list of UEs of a frequency
23171 * non-selective cell.
23175 * Function : rgSCHCmnNonDlfsDedRbAlloc
23177 * Processing Steps:
23178 * - For each element in the list, Call rgSCHCmnNonDlfsUeRbAlloc().
23179 * - If allocation is successful, add the ueCb to scheduled list of UEs.
23180 * - else, add ueCb to non-scheduled list of UEs.
/* Dedicated-channel counterpart of the Msg4/CCCH loops: isDlBwAvail==FALSE
 * from the per-UE allocator aborts the whole pass (bandwidth exhausted)
 * and drains the remaining HARQ procs into nonSchdHqPLst. */
23182 * @param[in] RgSchCellCb *cell
23183 * @param[in, out] RgSchCmnUeRbAlloc *allocInfo
23184 * @param[in] CmLListCp *ueLst,
23185 * @param[in, out] CmLListCp *schdHqPLst,
23186 * @param[in, out] CmLListCp *nonSchdHqPLst
23190 Void rgSCHCmnNonDlfsDedRbAlloc
23193 RgSchCmnUeRbAlloc *allocInfo,
23195 CmLListCp *schdHqPLst,
23196 CmLListCp *nonSchdHqPLst
23200 CmLList *schdLnkNode = NULLP;
23201 CmLList *toBeSchdLnk = NULLP;
23202 RgSchDlSf *dlSf = allocInfo->dedDlSf;
23203 RgSchUeCb *ue = NULLP;
23204 RgSchDlHqProcCb *hqP = NULLP;
23205 uint8_t isDlBwAvail;
23208 /* Perform allocations for the list */
23209 toBeSchdLnk = cmLListFirst(ueLst);
23210 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
23212 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23214 schdLnkNode = &hqP->schdLstLnk;
23215 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23217 ret = rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, &isDlBwAvail);
23220 /* Allocation failed: Add remaining UEs to non-scheduled
23221 * list and return */
/* Bandwidth exhausted: move every remaining UE to non-scheduled. */
23224 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
23226 schdLnkNode = &hqP->schdLstLnk;
23227 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
23228 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
23229 toBeSchdLnk = toBeSchdLnk->next;
23230 } while(toBeSchdLnk);
23236 #if defined (TENB_STATS) && defined (RG_5GTF)
23237 cell->tenbStats->sch.dl5gtfRbAllocPass++;
23239 /* Allocation successful: Add UE to the scheduled list */
23240 cmLListAdd2Tail(schdHqPLst, schdLnkNode);
23244 #if defined (TENB_STATS) && defined (RG_5GTF)
23245 cell->tenbStats->sch.dl5gtfRbAllocFail++;
23247 /* Allocation failed : Add UE to the non-scheduled list */
23248 printf("5GTF_ERROR Dl rb alloc failed adding nonSchdHqPLst\n");
23249 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
23257 * @brief Handles RB allocation for frequency non-selective cell.
23261 * Function : rgSCHCmnNonDlfsRbAlloc
23263 * Invoking Module Processing:
23264 * - SCH shall invoke this if downlink frequency selective is disabled for
23265 * the cell for RB allocation.
23266 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
23267 * estimate and subframe for each allocation to be made to SCH.
23269 * Processing Steps:
23270 * - Allocate sequentially for common channels.
23271 * - For transmitting and re-transmitting UE list.
23273 * - Perform wide-band allocations for UE in increasing order of
23275 * - Determine Imcs for the allocation.
23276 * - Determine RA type.
23277 * - Determine DCI format.
/* Top-level non-DLFS pass for one TTI, in priority order:
 * Msg4 (retx, tx) -> CCCH SDU (retx, tx) -> RAR -> dedicated UEs
 * (tx+retx, retx, tx), then an over-allocation sanity log and the
 * DSFR RNTP aggregation when enabled. */
23279 * @param[in] RgSchCellCb *cell
23280 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
23284 Void rgSCHCmnNonDlfsRbAlloc
23287 RgSchCmnDlRbAllocInfo *allocInfo
23290 uint8_t raRspCnt = 0;
23291 RgSchDlRbAlloc *reqAllocInfo;
23293 /* Allocate for MSG4 retransmissions */
23294 if (allocInfo->msg4Alloc.msg4RetxLst.count)
23296 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc RetxLst\n");
23297 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), TRUE);
23300 /* Allocate for MSG4 transmissions */
23301 /* Assuming all the nodes in the list need allocations: rbsReq is valid */
23302 if (allocInfo->msg4Alloc.msg4TxLst.count)
23304 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc txLst\n");
23305 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), FALSE);
23308 /* Allocate for CCCH SDU (received after guard timer expiry)
23309 * retransmissions */
23310 if (allocInfo->ccchSduAlloc.ccchSduRetxLst.count)
23312 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
23313 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), TRUE);
23316 /* Allocate for CCCD SDU transmissions */
23317 /* Allocate for CCCH SDU (received after guard timer expiry) transmissions */
23318 if (allocInfo->ccchSduAlloc.ccchSduTxLst.count)
23320 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
23321 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), FALSE);
23325 /* Allocate for Random access response */
23326 for (raRspCnt = 0; raRspCnt < RG_SCH_CMN_MAX_CMN_PDCCH; ++raRspCnt)
23328 /* Assuming that the requests will be filled in sequentially */
23329 reqAllocInfo = &(allocInfo->raRspAlloc[raRspCnt]);
23330 if (!reqAllocInfo->rbsReq)
23334 printf("5GTF_ERROR calling RAR rgSCHCmnNonDlfsCmnRbAlloc\n");
23335 // if ((rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo)) != ROK)
23336 if ((rgSCHCmnNonDlfsCmnRbAllocRar(cell, reqAllocInfo)) != ROK)
23342 /* Allocate for RETX+TX UEs */
23343 if(allocInfo->dedAlloc.txRetxHqPLst.count)
23345 printf("5GTF_ERROR TX RETX rgSCHCmnNonDlfsDedRbAlloc\n");
23346 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23347 &(allocInfo->dedAlloc.txRetxHqPLst),
23348 &(allocInfo->dedAlloc.schdTxRetxHqPLst),
23349 &(allocInfo->dedAlloc.nonSchdTxRetxHqPLst));
23352 if((allocInfo->dedAlloc.retxHqPLst.count))
23354 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23355 &(allocInfo->dedAlloc.retxHqPLst),
23356 &(allocInfo->dedAlloc.schdRetxHqPLst),
23357 &(allocInfo->dedAlloc.nonSchdRetxHqPLst));
23360 /* Allocate for transmitting UEs */
23361 if((allocInfo->dedAlloc.txHqPLst.count))
23363 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
23364 &(allocInfo->dedAlloc.txHqPLst),
23365 &(allocInfo->dedAlloc.schdTxHqPLst),
23366 &(allocInfo->dedAlloc.nonSchdTxHqPLst));
/* Diagnostic only: log when the selected UE count exceeds the per-SF cap. */
23369 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cell);
23370 if ((allocInfo->dedAlloc.txRetxHqPLst.count +
23371 allocInfo->dedAlloc.retxHqPLst.count +
23372 allocInfo->dedAlloc.txHqPLst.count) >
23373 cmnCell->dl.maxUePerDlSf)
23375 #ifndef ALIGN_64BIT
23376 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
23377 " scheduler exceed maximumUePerDlSf(%u)tx-retx %ld retx %ld tx %ld\n",
23378 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
23379 allocInfo->dedAlloc.retxHqPLst.count,
23380 allocInfo->dedAlloc.txHqPLst.count));
23382 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
23383 " scheduler exceed maximumUePerDlSf(%u)tx-retx %d retx %d tx %d\n",
23384 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
23385 allocInfo->dedAlloc.retxHqPLst.count,
23386 allocInfo->dedAlloc.txHqPLst.count));
23391 /* LTE_ADV_FLAG_REMOVED_START */
23392 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
23394 printf("5GTF_ERROR RETX rgSCHCmnNonDlfsDsfrRntpComp\n");
23395 rgSCHCmnNonDlfsDsfrRntpComp(cell, allocInfo->dedAlloc.dedDlSf);
23397 /* LTE_ADV_FLAG_REMOVED_END */
23398 #endif /* LTE_TDD */
23402 /***********************************************************
23404 * Func : rgSCHCmnCalcRiv
23406 * Desc : This function calculates RIV.
/* Computes the Resource Indication Value for a contiguous allocation of
 * numRb RBs starting at rbStart in a bandwidth of bw RBs. The two-case
 * formula below matches the standard LTE type-2 RIV encoding
 * (see 3GPP TS 36.213, resource allocation for DCI format 1A). */
23412 * File : rg_sch_utl.c
23414 **********************************************************/
23416 uint32_t rgSCHCmnCalcRiv
23423 uint32_t rgSCHCmnCalcRiv
/* numRbMinus1 = L_CRBs - 1 in the spec's notation. */
23431 uint8_t numRbMinus1 = numRb - 1;
23435 if (numRbMinus1 <= bw/2)
23437 riv = bw * numRbMinus1 + rbStart;
/* "Large allocation" case: mirrored encoding keeps RIV within range. */
23441 riv = bw * (bw - numRbMinus1) + (bw - rbStart - 1);
23444 } /* rgSCHCmnCalcRiv */
23448 * @brief This function allocates and copies the RACH response scheduling
23449 * related information into cell control block.
23453 * Function: rgSCHCmnDlCpyRachInfo
23454 * Purpose: This function allocates and copies the RACH response
23455 * scheduling related information into cell control block
23456 * for each DL subframe.
23459 * Invoked by: Scheduler
23461 * @param[in] RgSchCellCb* cell
23462 * @param[in] RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES]
23463 * @param[in] uint8_t raArrSz
23467 static S16 rgSCHCmnDlCpyRachInfo
23470 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
23474 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
23477 uint16_t subfrmIdx;
23479 uint8_t numSubfrms;
23484 /* Allocate RACH response information for each DL
23485 * subframe in a radio frame */
23486 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cell->rachRspLst,
23487 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1] *
23488 sizeof(RgSchTddRachRspLst));
/* Walk SFN indices newest-first, mapping each raw subframe index to the
 * highest DL subframe index for this TDD UL/DL configuration. */
23494 for(sfnIdx=raArrSz-1; sfnIdx>=0; sfnIdx--)
23496 for(subfrmIdx=0; subfrmIdx < RGSCH_NUM_SUB_FRAMES; subfrmIdx++)
/* NOTE(review): the loop variable is overwritten by the table lookup
 * here; the loop-continuation behaviour depends on elided lines —
 * confirm against the full source. */
23498 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
23499 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
23504 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx],subfrmIdx);
/* NOTE(review): as shown this is a statement with no effect — the
 * assignment target (presumably numSubfrms) appears to be elided. */
23506 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
23508 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddNumDlSubfrmTbl[ulDlCfgIdx],subfrmIdx);
23509 sfNum = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][subfrmIdx]-1;
23510 numRfs = cell->rachRspLst[sfNum].numRadiofrms;
23511 /* For each DL subframe in which RACH response can
23512 * be sent is updated */
23515 cell->rachRspLst[sfNum].rachRsp[numRfs].sfnOffset =
23516 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset;
23517 for(sfcount=0; sfcount < numSubfrms; sfcount++)
23519 cell->rachRspLst[sfNum].rachRsp[numRfs].\
23520 subframe[sfcount] =
23521 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].\
23524 cell->rachRspLst[sfNum].rachRsp[numRfs].numSubfrms =
23525 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
23526 cell->rachRspLst[sfNum].numRadiofrms++;
23529 /* Copy the subframes to be deleted at this subframe */
23531 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
23534 cell->rachRspLst[sfNum].delInfo.sfnOffset =
23535 rachRspLst[sfnIdx][subfrmIdx].delInfo.sfnOffset;
23536 for(sfcount=0; sfcount < numSubfrms; sfcount++)
23538 cell->rachRspLst[sfNum].delInfo.subframe[sfcount] =
23539 rachRspLst[sfnIdx][subfrmIdx].delInfo.subframe[sfcount];
23541 cell->rachRspLst[sfNum].delInfo.numSubfrms =
23542 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
23550 * @brief This function determines the iTbs based on the new CFI,
23551 * CQI and BLER based delta iTbs
23555 * Function: rgSchCmnFetchItbs
23556 * Purpose: Fetch the new iTbs when CFI changes.
23558 * @param[in] RgSchCellCb *cell
23559 * @param[in] RgSchCmnDlUe *ueDl
23560 * @param[in] uint8_t cqi
/* NOTE(review): two signature fragments appear below — presumably two
 * conditionally-compiled variants of this function (the listing elides
 * the #if/#else lines between them); confirm in the full file. */
23566 static S32 rgSchCmnFetchItbs
23569 RgSchCmnDlUe *ueDl,
23577 static S32 rgSchCmnFetchItbs
23580 RgSchCmnDlUe *ueDl,
23589 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
23594 /* Special Handling for Spl Sf when CFI is 3 as
23595 * CFI in Spl Sf will be max 2 */
23596 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
23598 if((cellDl->currCfi == 3) ||
23599 ((cell->bwCfg.dlTotalBw <= 10) && (cellDl->currCfi == 1)))
23601 /* Use CFI 2 in this case */
/* deltaiTbs is a BLER-based correction in 1/100 units; the CQI->TBS table
 * value is scaled by 100 before adding so the division yields an iTbs. */
23602 iTbs = (ueDl->laCb[cwIdx].deltaiTbs +
23603 ((*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][2]))[cqi])* 100)/100;
23605 RG_SCH_CHK_ITBS_RANGE(iTbs, RGSCH_NUM_ITBS - 1);
23609 iTbs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1];
23611 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
23613 else /* CFI Changed. Update with new iTbs Reset the BLER*/
23616 S32 tmpiTbs = (*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][cfi]))[cqi];
23618 iTbs = (ueDl->laCb[cwIdx].deltaiTbs + tmpiTbs*100)/100;
23620 RG_SCH_CHK_ITBS_RANGE(iTbs, tmpiTbs);
23622 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
/* Cache the clipped iTbs per layer count and record the CFI it was
 * computed for; the BLER delta is consumed (reset) once applied. */
23624 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1] = iTbs;
23626 ueDl->lastCfi = cfi;
23627 ueDl->laCb[cwIdx].deltaiTbs = 0;
23634 * @brief This function determines the RBs and Bytes required for BO
23635 * transmission for UEs configured with TM 1/2/6/7.
23639 * Function: rgSCHCmnDlAllocTxRb1Tb1Cw
23640 * Purpose: Allocate TB1 on CW1.
23642 * Reference Parameter effBo is filled with alloced bytes.
23643 * Returns RFAILED if BO not satisfied at all.
23645 * Invoked by: rgSCHCmnDlAllocTxRbTM1/2/6/7
23647 * @param[in] RgSchCellCb *cell
23648 * @param[in] RgSchDlSf *subFrm
23649 * @param[in] RgSchUeCb *ue
23650 * @param[in] uint32_t bo
23651 * @param[out] uint32_t *effBo
23652 * @param[in] RgSchDlHqProcCb *proc
23653 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23657 static Void rgSCHCmnDlAllocTxRb1Tb1Cw
23664 RgSchDlHqProcCb *proc,
23665 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23668 RgSchDlRbAlloc *allocInfo;
23673 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* NOTE(review): the rank-based branch below selects 5GTF DCI formats
 * (B2 for rank 2, else B1); presumably guarded by a 5GTF compile flag
 * on elided lines — the plain-LTE path uses rgSCHCmnSlctPdcchFrmt. */
23675 if (ue->ue5gtfCb.rank == 2)
23677 allocInfo->dciFormat = TFU_DCI_FORMAT_B2;
23681 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23684 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
23685 allocInfo->raType);
23687 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
23688 bo, &numRb, effBo);
23689 if (ret == RFAILED)
23691 /* If allocation couldn't be made then return */
23694 /* Adding UE to RbAllocInfo TX Lst */
23695 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
23696 /* Fill UE alloc Info */
23697 allocInfo->rbsReq = numRb;
23698 allocInfo->dlSf = subFrm;
23700 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
23708 * @brief This function determines the RBs and Bytes required for BO
23709 * retransmission for UEs configured with TM 1/2/6/7.
23713 * Function: rgSCHCmnDlAllocRetxRb1Tb1Cw
23714 * Purpose: Allocate TB1 on CW1.
23716 * Reference Parameter effBo is filled with alloced bytes.
23717 * Returns RFAILED if BO not satisfied at all.
23719 * Invoked by: rgSCHCmnDlAllocRetxRbTM1/2/6/7
23721 * @param[in] RgSchCellCb *cell
23722 * @param[in] RgSchDlSf *subFrm
23723 * @param[in] RgSchUeCb *ue
23724 * @param[in] uint32_t bo
23725 * @param[out] uint32_t *effBo
23726 * @param[in] RgSchDlHqProcCb *proc
23727 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23731 static Void rgSCHCmnDlAllocRetxRb1Tb1Cw
23738 RgSchDlHqProcCb *proc,
23739 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23742 RgSchDlRbAlloc *allocInfo;
23747 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
23750 /* 5GTF: RETX DCI format same as TX */
23751 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
23752 &allocInfo->raType);
23755 /* Get the Allocation in terms of RBs that are required for
23756 * this retx of TB1 */
23757 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
23759 if (ret == RFAILED)
23761 /* Allocation couldn't be made for Retx */
23762 /* Fix : syed If TxRetx allocation failed then add the UE along with the proc
23763 * to the nonSchdTxRetxUeLst and let spfc scheduler take care of it during
/* On failure the proc is parked on the non-scheduled retx list instead of
 * being dropped, so the specific scheduler can retry at finalization. */
23765 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
23768 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
23769 /* Fill UE alloc Info */
23770 allocInfo->rbsReq = numRb;
23771 allocInfo->dlSf = subFrm;
23773 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
23781 * @brief This function determines the RBs and Bytes required for BO
/* NOTE(review): the @brief says "TM 2" but this is the TM1 wrapper —
 * the doc text appears copy-pasted; the code is a pure delegation. */
23782 * transmission for UEs configured with TM 2.
23786 * Function: rgSCHCmnDlAllocTxRbTM1
23789 * Reference Parameter effBo is filled with alloced bytes.
23790 * Returns RFAILED if BO not satisfied at all.
23792 * Invoked by: rgSCHCmnDlAllocTxRb
23794 * @param[in] RgSchCellCb *cell
23795 * @param[in] RgSchDlSf *subFrm
23796 * @param[in] RgSchUeCb *ue
23797 * @param[in] uint32_t bo
23798 * @param[out] uint32_t *effBo
23799 * @param[in] RgSchDlHqProcCb *proc
23800 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23804 static Void rgSCHCmnDlAllocTxRbTM1
23811 RgSchDlHqProcCb *proc,
23812 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM1 new-TX: single TB on single CW — delegate to the common 1TB/1CW path. */
23815 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23821 * @brief This function determines the RBs and Bytes required for BO
/* NOTE(review): @brief says "TM 2" but this is the TM1 retx wrapper —
 * doc text looks copy-pasted; the code is a pure delegation. */
23822 * retransmission for UEs configured with TM 2.
23826 * Function: rgSCHCmnDlAllocRetxRbTM1
23829 * Reference Parameter effBo is filled with alloced bytes.
23830 * Returns RFAILED if BO not satisfied at all.
23832 * Invoked by: rgSCHCmnDlAllocRetxRb
23834 * @param[in] RgSchCellCb *cell
23835 * @param[in] RgSchDlSf *subFrm
23836 * @param[in] RgSchUeCb *ue
23837 * @param[in] uint32_t bo
23838 * @param[out] uint32_t *effBo
23839 * @param[in] RgSchDlHqProcCb *proc
23840 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23844 static Void rgSCHCmnDlAllocRetxRbTM1
23851 RgSchDlHqProcCb *proc,
23852 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM1 re-TX: single TB on single CW — delegate to the common retx path. */
23855 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23861 * @brief This function determines the RBs and Bytes required for BO
23862 * transmission for UEs configured with TM 2.
23866 * Function: rgSCHCmnDlAllocTxRbTM2
23869 * Reference Parameter effBo is filled with alloced bytes.
23870 * Returns RFAILED if BO not satisfied at all.
23872 * Invoked by: rgSCHCmnDlAllocTxRb
23874 * @param[in] RgSchCellCb *cell
23875 * @param[in] RgSchDlSf *subFrm
23876 * @param[in] RgSchUeCb *ue
23877 * @param[in] uint32_t bo
23878 * @param[out] uint32_t *effBo
23879 * @param[in] RgSchDlHqProcCb *proc
23880 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23884 static Void rgSCHCmnDlAllocTxRbTM2
23891 RgSchDlHqProcCb *proc,
23892 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM2 (TX diversity) new-TX: single TB on single CW — common 1TB/1CW path. */
23895 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23901 * @brief This function determines the RBs and Bytes required for BO
23902 * retransmission for UEs configured with TM 2.
23906 * Function: rgSCHCmnDlAllocRetxRbTM2
23909 * Reference Parameter effBo is filled with alloced bytes.
23910 * Returns RFAILED if BO not satisfied at all.
23912 * Invoked by: rgSCHCmnDlAllocRetxRb
23914 * @param[in] RgSchCellCb *cell
23915 * @param[in] RgSchDlSf *subFrm
23916 * @param[in] RgSchUeCb *ue
23917 * @param[in] uint32_t bo
23918 * @param[out] uint32_t *effBo
23919 * @param[in] RgSchDlHqProcCb *proc
23920 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23924 static Void rgSCHCmnDlAllocRetxRbTM2
23931 RgSchDlHqProcCb *proc,
23932 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM2 re-TX: single TB on single CW — delegate to the common retx path. */
23935 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
23941 * @brief This function determines the RBs and Bytes required for BO
23942 * transmission for UEs configured with TM 3.
23946 * Function: rgSCHCmnDlAllocTxRbTM3
23949 * Reference Parameter effBo is filled with alloced bytes.
23950 * Returns RFAILED if BO not satisfied at all.
23952 * Invoked by: rgSCHCmnDlAllocTxRb
23954 * @param[in] RgSchCellCb *cell
23955 * @param[in] RgSchDlSf *subFrm
23956 * @param[in] RgSchUeCb *ue
23957 * @param[in] uint32_t bo
23958 * @param[out] uint32_t *effBo
23959 * @param[in] RgSchDlHqProcCb *proc
23960 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23964 static Void rgSCHCmnDlAllocTxRbTM3
23971 RgSchDlHqProcCb *proc,
23972 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
23977 /* Both TBs free for TX allocation */
/* TM3 (open-loop SM): fresh allocation with both TBs free goes through
 * the TxTx path, which decides 1-CW vs 2-CW transmission internally. */
23978 rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo,\
23979 proc, cellWdAllocInfo);
23986 * @brief This function determines the RBs and Bytes required for BO
23987 * retransmission for UEs configured with TM 3.
23991 * Function: rgSCHCmnDlAllocRetxRbTM3
23994 * Reference Parameter effBo is filled with alloced bytes.
23995 * Returns RFAILED if BO not satisfied at all.
23997 * Invoked by: rgSCHCmnDlAllocRetxRb
23999 * @param[in] RgSchCellCb *cell
24000 * @param[in] RgSchDlSf *subFrm
24001 * @param[in] RgSchUeCb *ue
24002 * @param[in] uint32_t bo
24003 * @param[out] uint32_t *effBo
24004 * @param[in] RgSchDlHqProcCb *proc
24005 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24009 static Void rgSCHCmnDlAllocRetxRbTM3
24016 RgSchDlHqProcCb *proc,
24017 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Dispatch on HARQ TB states: both NACKED -> retx both CWs; otherwise
 * one TB needs retx and the other may take a fresh TX (TxRetx path). */
24022 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
24023 (proc->tbInfo[1].state == HQ_TB_NACKED))
/* NOTE(review): raw printf in the scheduler datapath — consider the
 * project's RLOG macros instead (left untouched here). */
24026 printf ("RETX RB TM3 nack for both hqp %d cell %d \n", proc->procId, proc->hqE->cell->cellId);
24028 /* Both TBs require RETX allocation */
24029 rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo,\
24030 proc, cellWdAllocInfo);
24034 /* One of the TBs need RETX allocation. Other TB may/maynot
24035 * be available for new TX allocation. */
24036 rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo,\
24037 proc, cellWdAllocInfo);
24045 * @brief This function performs the DCI format selection in case of
24046 * Transmit Diversity scheme where there can be more
24047 * than 1 option for DCI format selection.
24051 * Function: rgSCHCmnSlctPdcchFrmt
24052 * Purpose: 1. If DLFS is enabled, then choose TM specific
24053 * DCI format for Transmit diversity. All the
24054 * TM Specific DCI Formats support Type0 and/or
24055 * Type1 resource allocation scheme. DLFS
24056 * supports only Type-0&1 Resource allocation.
24057 * 2. If DLFS is not enabled, select a DCI format
24058 * which is of smaller size. Since Non-DLFS
24059 * scheduler supports all Resource allocation
24060 * schemes, selection is based on efficiency.
24062 * Invoked by: DL UE Allocation by Common Scheduler.
24064 * @param[in] RgSchCellCb *cell
24065 * @param[in] RgSchUeCb *ue
24066 * @param[out] uint8_t *raType
24067 * @return TfuDciFormat
24070 TfuDciFormat rgSCHCmnSlctPdcchFrmt
24077 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
24080 /* ccpu00140894- Selective DCI Format and RA type should be selected only
24081 * after TX Mode transition is completed*/
/* Both branches index rgSchCmnDciFrmtOptns by txMode-1; txMode is assumed
 * to be >= 1 here (enum starts at TM1) — no bounds check on these lines. */
24082 if ((cellSch->dl.isDlFreqSel) && (ue->txModeTransCmplt))
24084 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciRAType;
24085 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciFrmt);
24089 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciRAType;
24090 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciFrmt);
24096 * @brief This function handles Retx allocation in case of TM3 UEs
24097 * where both the TBs were NACKED previously.
24101 * Function: rgSCHCmnDlTM3RetxRetx
24102 * Purpose: If forceTD flag enabled
24103 * TD for TB1 on CW1.
24105 * DCI Frmt 2A and RA Type 0
24106 * RI layered SM of both TBs on 2 CWs
24107 * Add UE to cell Alloc Info.
24108 * Fill UE alloc Info.
24111 * Successful allocation is indicated by non-zero effBo value.
24113 * Invoked by: rgSCHCmnDlAllocRbTM3
24115 * @param[in] RgSchCellCb *cell
24116 * @param[in] RgSchDlSf *subFrm
24117 * @param[in] RgSchUeCb *ue
24118 * @param[in] uint32_t bo
24119 * @param[out] uint32_t *effBo
24120 * @param[in] RgSchDlHqProcCb *proc
24121 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24125 static Void rgSCHCmnDlTM3RetxRetx
24132 RgSchDlHqProcCb *proc,
24133 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24137 RgSchDlRbAlloc *allocInfo;
24142 uint8_t precInfoAntIdx;
24146 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24148 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
24150 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
24151 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
24153 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
24155 if (ret == RFAILED)
24157 /* Allocation couldn't be made for Retx */
24158 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24161 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
24162 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
24163 #ifdef FOUR_TX_ANTENNA
24164 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1 should
24165 * have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
24166 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
24169 proc->cwSwpEnabled = TRUE;
/* precInfoAntIdx: 0 for 2 Tx antenna ports, 1 for 4; row [0] of
 * getPrecInfoFunc is the TM3 precoding-info derivation table. */
24172 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24173 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
24177 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24180 /* Adding UE to allocInfo RETX Lst */
24181 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24183 /* Fill UE alloc Info scratch pad */
24184 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24185 precInfo, noTxLyrs, subFrm);
24192 * @brief This function handles Retx allocation in case of TM4 UEs
24193 * where both the TBs were NACKED previously.
24197 * Function: rgSCHCmnDlTM4RetxRetx
24198 * Purpose: If forceTD flag enabled
24199 * TD for TB1 on CW1.
24201 * DCI Frmt 2 and RA Type 0
24203 * 1 layer SM of TB1 on CW1.
24205 * RI layered SM of both TBs on 2 CWs
24206 * Add UE to cell Alloc Info.
24207 * Fill UE alloc Info.
24210 * Successful allocation is indicated by non-zero effBo value.
24212 * Invoked by: rgSCHCmnDlAllocRbTM4
24214 * @param[in] RgSchCellCb *cell
24215 * @param[in] RgSchDlSf *subFrm
24216 * @param[in] RgSchUeCb *ue
24217 * @param[in] uint32_t bo
24218 * @param[out] uint32_t *effBo
24219 * @param[in] RgSchDlHqProcCb *proc
24220 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24224 static Void rgSCHCmnDlTM4RetxRetx
24231 RgSchDlHqProcCb *proc,
24232 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24236 RgSchDlRbAlloc *allocInfo;
24238 Bool swpFlg = FALSE;
24240 #ifdef FOUR_TX_ANTENNA
24241 uint8_t precInfoAntIdx;
24247 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24249 /* Irrespective of RI Schedule both CWs */
/* TM4 (closed-loop SM) retx of both TBs uses DCI format 2 (vs 2A in the
 * TM3 sibling, rgSCHCmnDlTM3RetxRetx) — the flows are otherwise parallel. */
24250 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
24251 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
24253 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
24255 if (ret == RFAILED)
24257 /* Allocation couldn't be made for Retx */
24258 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24261 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
24263 #ifdef FOUR_TX_ANTENNA
24264 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1
24265 * should have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
24266 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
24269 proc->cwSwpEnabled = TRUE;
/* Row [1] of getPrecInfoFunc holds the TM4 precoding-info derivation. */
24271 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24272 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
24276 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
24279 /* Adding UE to allocInfo RETX Lst */
24280 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
24282 /* Fill UE alloc Info scratch pad */
24283 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
24284 precInfo, noTxLyrs, subFrm);
24292 * @brief This function determines Transmission attributes
24293 * incase of Spatial multiplexing for TX and RETX TBs.
24297 * Function: rgSCHCmnDlSMGetAttrForTxRetx
24298 * Purpose: 1. Reached here for a TM3/4 UE's HqP whose one of the TBs is
24299 * NACKED and the other TB is either NACKED or WAITING.
24300 * 2. Select the NACKED TB for RETX allocation.
24301 * 3. Allocation preference for RETX TB by mapping it to a better
24302 * CW (better in terms of efficiency).
24303 * 4. Determine the state of the other TB.
24304 * Determine if swapFlag were to be set.
24305 * Swap flag would be set if Retx TB is cross
24307 * 5. If UE has new data available for TX and if the other TB's state
24308 * is ACKED then set furtherScope as TRUE.
24310 * Invoked by: rgSCHCmnDlTM3[4]TxRetx
24312 * @param[in] RgSchUeCb *ue
24313 * @param[in] RgSchDlHqProcCb *proc
24314 * @param[out] RgSchDlHqTbCb **retxTb
24315 * @param[out] RgSchDlHqTbCb **txTb
24316 * @param[out] Bool *frthrScp
24317 * @param[out] Bool *swpFlg
24321 static Void rgSCHCmnDlSMGetAttrForTxRetx
24324 RgSchDlHqProcCb *proc,
24325 RgSchDlHqTbCb **retxTb,
24326 RgSchDlHqTbCb **txTb,
24331 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,proc->hqE->cell);
24332 RgSchDlRbAlloc *allocInfo;
/* The two arms below are mirror images keyed on which TB is NACKED;
 * only the TB indices and the btrCwIdx comparison value differ. */
24335 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24337 *retxTb = &proc->tbInfo[0];
24338 *txTb = &proc->tbInfo[1];
24339 /* TENB_BRDCM_TM4- Currently disabling swapflag for TM3/TM4, since
24340 * HqFeedback processing does not consider a swapped hq feedback */
24341 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 1))
24344 proc->cwSwpEnabled = TRUE;
24346 if (proc->tbInfo[1].state == HQ_TB_ACKED)
/* Further scope for a fresh TX on the ACKED TB exists only if the UE
 * actually has new data pending (hasNewTxData). */
24348 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
24349 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
24354 *retxTb = &proc->tbInfo[1];
24355 *txTb = &proc->tbInfo[0];
24356 /* TENB_BRDCM_TM4 - Currently disabling swapflag for TM3/TM4, since
24357 * HqFeedback processing does not consider a swapped hq feedback */
24358 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 0))
24361 proc->cwSwpEnabled = TRUE;
24363 if (proc->tbInfo[0].state == HQ_TB_ACKED)
24365 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
24366 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
24374 * @brief Determine Precoding information for TM3 2 TX Antenna.
24378 * Function: rgSCHCmnDlTM3PrecInf2
24381 * Invoked by: rgSCHCmnDlGetAttrForTM3
24383 * @param[in] RgSchUeCb *ue
24384 * @param[in] uint8_t numTxLyrs
24385 * @param[in] Bool bothCwEnbld
/* NOTE(review): the function body is not visible in this listing; for
 * TM3 with 2 Tx antennas the precoding-information field is expected to
 * be trivial/constant (36.212 5.3.3.1.5A) — confirm in the full file. */
24389 static uint8_t rgSCHCmnDlTM3PrecInf2
24403 * @brief Determine Precoding information for TM4 2 TX Antenna.
24407 * Function: rgSCHCmnDlTM4PrecInf2
24408 * Purpose: To determine a logic of deriving precoding index
24409 * information from 36.212 table 5.3.3.1.5-4
24411 * Invoked by: rgSCHCmnDlGetAttrForTM4
24413 * @param[in] RgSchUeCb *ue
24414 * @param[in] uint8_t numTxLyrs
24415 * @param[in] Bool bothCwEnbld
24419 static uint8_t rgSCHCmnDlTM4PrecInf2
24427 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Three regimes: scheduled layers == reported RI, fewer than RI
 * (column selection within the reported precoder), or more than RI. */
24431 if (ueDl->mimoInfo.ri == numTxLyrs)
24433 if (ueDl->mimoInfo.ri == 2)
24435 /* PrecInfo corresponding to 2 CW
24437 if (ue->mimoInfo.puschFdbkVld)
/* pmi offsets below map the reported PMI into the DCI precoding-info
 * codepoints of 36.212 Table 5.3.3.1.5-4; exact offsets depend on the
 * elided branches — treat as table-index arithmetic, not math. */
24443 precIdx = ueDl->mimoInfo.pmi - 1;
24448 /* PrecInfo corresponding to 1 CW
24450 if (ue->mimoInfo.puschFdbkVld)
24456 precIdx = ueDl->mimoInfo.pmi + 1;
24460 else if (ueDl->mimoInfo.ri > numTxLyrs)
24462 /* In case of choosing among the columns of a
24463 * precoding matrix, choose the column corresponding
24464 * to the MAX-CQI */
24465 if (ue->mimoInfo.puschFdbkVld)
24471 precIdx = (ueDl->mimoInfo.pmi- 1)* 2 + 1;
24474 else /* if RI < numTxLyrs */
24476 precIdx = (ueDl->mimoInfo.pmi < 2)? 0:1;
24483 * @brief Determine Precoding information for TM3 4 TX Antenna.
24487 * Function: rgSCHCmnDlTM3PrecInf4
24488 * Purpose: To determine a logic of deriving precoding index
24489 * information from 36.212 table 5.3.3.1.5A-2
24491 * Invoked by: rgSCHCmnDlGetAttrForTM3
24493 * @param[in] RgSchUeCb *ue
24494 * @param[in] uint8_t numTxLyrs
24495 * @param[in] Bool bothCwEnbld
24499 static uint8_t rgSCHCmnDlTM3PrecInf4
/* 2-CW case: index into 36.212 Table 5.3.3.1.5A-2 by layer count. */
24512 precIdx = numTxLyrs - 2;
24514 else /* one 1 CW transmission */
24523 * @brief Determine Precoding information for TM4 4 TX Antenna.
24527 * Function: rgSCHCmnDlTM4PrecInf4
24528 * Purpose: To determine a logic of deriving precoding index
24529 * information from 36.212 table 5.3.3.1.5-5
24531 * Invoked by: rgSCHCmnDlGetAttrForTM4
24533 * @param[in] RgSchUeCb *ue
24534 * @param[in] uint8_t numTxLyrs
24535 * @param[in] Bool bothCwEnbld
24539 static uint8_t rgSCHCmnDlTM4PrecInf4
24547 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24548 uint8_t precInfoBaseIdx, precIdx;
/* Base index 16 means "precoding per latest PUSCH report"; otherwise the
 * reported PMI selects the codebook entry (36.212 Table 5.3.3.1.5-5). */
24551 precInfoBaseIdx = (ue->mimoInfo.puschFdbkVld)? (16):
24552 (ueDl->mimoInfo.pmi);
/* Each layer-count band of the table is 17 codepoints wide. */
24555 precIdx = precInfoBaseIdx + (numTxLyrs-2)*17;
24557 else /* one 1 CW transmission */
24559 precInfoBaseIdx += 1;
24560 precIdx = precInfoBaseIdx + (numTxLyrs-1)*17;
24567 * @brief This function determines Transmission attributes
24568 * incase of TM3 scheduling.
24572 * Function: rgSCHCmnDlGetAttrForTM3
24573 * Purpose: Determine retx TB and tx TB based on TB states.
24574 * If forceTD enabled
24575 * perform only retx TB allocation.
24576 * If retxTB == TB2 then DCI Frmt = 2A, RA Type = 0.
24577 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
24579 * perform retxTB allocation on CW1.
24581 * Determine further Scope and Swap Flag attributes
24582 * assuming a 2 CW transmission of RetxTB and new Tx TB.
24583 * If no further scope for new TX allocation
24584 * Allocate only retx TB using 2 layers if
24585 * this TB was previously transmitted using 2 layers AND
24586 * number of Tx antenna ports == 4.
24587 * otherwise do single layer precoding.
24589 * Invoked by: rgSCHCmnDlTM3TxRetx
24591 * @param[in] RgSchUeCb *ue
24592 * @param[in] RgSchDlHqProcCb *proc
24593 * @param[out] uint8_t *numTxLyrs
24594 * @param[out] Bool *isTraDiv
24595 * @param[out] uint8_t *prcdngInf
24596 * @param[out] uint8_t *raType
24600 static Void rgSCHCmnDlGetAttrForTM3
24604 RgSchDlHqProcCb *proc,
24605 uint8_t *numTxLyrs,
24606 TfuDciFormat *dciFrmt,
24607 uint8_t *prcdngInf,
24608 RgSchDlHqTbCb **retxTb,
24609 RgSchDlHqTbCb **txTb,
24615 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24616 uint8_t precInfoAntIdx;
24619 /* Avoiding Tx-Retx for LAA cell as firstSchedTime is associated with
24621 /* Integration_fix: SPS Proc shall always have only one Cw */
/* Force single-CW handling for SPS HARQ procs, forced transmit-diversity
 * UEs, and (when compiled in) LAA SCells. */
24623 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
24624 (ueDl->mimoInfo.forceTD))
24626 ||(TRUE == rgSCHLaaSCellEnabled(cell))
24630 if ((ueDl->mimoInfo.forceTD)
24632 || (TRUE == rgSCHLaaSCellEnabled(cell))
24637 /* Transmit Diversity. Format based on dlfsEnabled
24638 * No further scope */
24639 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24641 *retxTb = &proc->tbInfo[0];
24642 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24646 *retxTb = &proc->tbInfo[1];
/* TB2 retx cannot ride a TB1-only format: pin DCI 2A + RA type 0. */
24647 *dciFrmt = TFU_DCI_FORMAT_2A;
24648 *raType = RG_SCH_CMN_RA_TYPE0;
24656 /* Determine the 2 TB transmission attributes */
24657 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
24661 /* Prefer allocation of RETX TB over 2 layers rather than combining
24662 * it with a new TX. */
24663 if ((ueDl->mimoInfo.ri == 2)
24664 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
24666 /* Allocate TB on CW1, using 2 Lyrs,
24667 * Format 2, precoding accordingly */
/* Combined TX+RETX: total layers = retx TB layers + the other CW's
 * layer count from the current MIMO info. */
24673 *numTxLyrs= ((*retxTb)->numLyrs + ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)].noLyr);
/* 3-layer mapping constraint (36.211 Table 6.3.3.2-1): CW0 carries the
 * 1-layer TB, so swap codewords when the layer split is reversed. */
24675 if((*retxTb)->tbIdx == 0 && ((*retxTb)->numLyrs == 2 ) && *numTxLyrs ==3)
24678 proc->cwSwpEnabled = TRUE;
24680 else if((*retxTb)->tbIdx == 1 && ((*retxTb)->numLyrs == 1) && *numTxLyrs ==3)
24683 proc->cwSwpEnabled = TRUE;
24687 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24688 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])\
24689 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
24690 *dciFrmt = TFU_DCI_FORMAT_2A;
24691 *raType = RG_SCH_CMN_RA_TYPE0;
24693 else /* frthrScp == FALSE */
24695 if (cell->numTxAntPorts == 2)
24697 /* Transmit Diversity */
24699 if ((*retxTb)->tbIdx == 0)
24701 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24705 /* If retxTB is TB2 then use format 2A */
24706 *dciFrmt = TFU_DCI_FORMAT_2A;
24707 *raType = RG_SCH_CMN_RA_TYPE0;
24712 else /* NumAntPorts == 4 */
24714 if ((*retxTb)->numLyrs == 2)
24716 /* Allocate TB on CW1, using 2 Lyrs,
24717 * Format 2A, precoding accordingly */
24719 *dciFrmt = TFU_DCI_FORMAT_2A;
24720 *raType = RG_SCH_CMN_RA_TYPE0;
24721 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24722 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, *numTxLyrs, *frthrScp);
24727 /* Transmit Diversity */
24729 if ((*retxTb)->tbIdx == 0)
24731 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24735 /* If retxTB is TB2 then use format 2A */
24736 *dciFrmt = TFU_DCI_FORMAT_2A;
24737 *raType = RG_SCH_CMN_RA_TYPE0;
24751 * @brief This function determines Transmission attributes
24752 * incase of TM4 scheduling.
24756 * Function: rgSCHCmnDlGetAttrForTM4
24757 * Purpose: Determine retx TB and tx TB based on TB states.
24758 * If forceTD enabled
24759 * perform only retx TB allocation.
24760 * If retxTB == TB2 then DCI Frmt = 2, RA Type = 0.
24761 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
24763 * perform retxTB allocation on CW1.
24765 * Determine further Scope and Swap Flag attributes
24766 * assuming a 2 CW transmission of RetxTB and new Tx TB.
24767 * If no further scope for new TX allocation
24768 * Allocate only retx TB using 2 layers if
24769 * this TB was previously transmitted using 2 layers AND
24770 * number of Tx antenna ports == 4.
24771 * otherwise do single layer precoding.
24773 * Invoked by: rgSCHCmnDlTM4TxRetx
24775 * @param[in] RgSchUeCb *ue
24776 * @param[in] RgSchDlHqProcCb *proc
24777 * @param[out] uint8_t *numTxLyrs
24778 * @param[out] Bool *isTraDiv
24779 * @param[out] uint8_t *prcdngInf
24780 * @param[out] uint8_t *raType
24784 static Void rgSCHCmnDlGetAttrForTM4
24788 RgSchDlHqProcCb *proc,
24789 uint8_t *numTxLyrs,
24790 TfuDciFormat *dciFrmt,
24791 uint8_t *prcdngInf,
24792 RgSchDlHqTbCb **retxTb,
24793 RgSchDlHqTbCb **txTb,
24799 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
24800 uint8_t precInfoAntIdx;
24804 /* Integration_fix: SPS Proc shall always have only one Cw */
/* Mirror of rgSCHCmnDlGetAttrForTM3 but for closed-loop SM: DCI format 2
 * instead of 2A, and getPrecInfoFunc row [1] instead of [0]. */
24806 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
24807 (ueDl->mimoInfo.forceTD))
24809 ||(TRUE == rgSCHLaaSCellEnabled(cell))
24813 if ((ueDl->mimoInfo.forceTD)
24815 || (TRUE == rgSCHLaaSCellEnabled(cell))
24820 /* Transmit Diversity. Format based on dlfsEnabled
24821 * No further scope */
24822 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24824 *retxTb = &proc->tbInfo[0];
24825 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
24829 *retxTb = &proc->tbInfo[1];
24830 *dciFrmt = TFU_DCI_FORMAT_2;
24831 *raType = RG_SCH_CMN_RA_TYPE0;
24839 if (ueDl->mimoInfo.ri == 1)
24841 /* single layer precoding. Format 2.
24842 * No further scope */
24843 if (proc->tbInfo[0].state == HQ_TB_NACKED)
24845 *retxTb = &proc->tbInfo[0];
24849 *retxTb = &proc->tbInfo[1];
24852 *dciFrmt = TFU_DCI_FORMAT_2;
24853 *raType = RG_SCH_CMN_RA_TYPE0;
24855 *prcdngInf = 0; /*When RI= 1*/
24859 /* Determine the 2 TB transmission attributes */
24860 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
24862 *dciFrmt = TFU_DCI_FORMAT_2;
24863 *raType = RG_SCH_CMN_RA_TYPE0;
24866 /* Prefer allocation of RETX TB over 2 layers rather than combining
24867 * it with a new TX. */
24868 if ((ueDl->mimoInfo.ri == 2)
24869 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
24871 /* Allocate TB on CW1, using 2 Lyrs,
24872 * Format 2, precoding accordingly */
24876 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24877 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])
24878 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
24880 else /* frthrScp == FALSE */
24882 if (cell->numTxAntPorts == 2)
24884 /* single layer precoding. Format 2. */
24886 *prcdngInf = (getPrecInfoFunc[1][cell->numTxAntPorts/2 - 1])\
24887 (cell, ue, *numTxLyrs, *frthrScp);
24890 else /* NumAntPorts == 4 */
24892 if ((*retxTb)->numLyrs == 2)
24894 /* Allocate TB on CW1, using 2 Lyrs,
24895 * Format 2, precoding accordingly */
24897 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24898 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
24899 (cell, ue, *numTxLyrs, *frthrScp);
24904 /* Allocate TB with 1 lyr precoding,
24905 * Format 2, precoding info accordingly */
24907 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
24908 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
24909 (cell, ue, *numTxLyrs, *frthrScp);
24920 * @brief This function handles Retx allocation in case of TM3 UEs
24921 * where previously one of the TBs was NACKED and the other
24922 * TB is either ACKED/WAITING.
24926 * Function: rgSCHCmnDlTM3TxRetx
24927 * Purpose: Determine the TX attributes for TM3 TxRetx Allocation.
24928 * If futher Scope for New Tx Allocation on other TB
24929 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
24930 * Add UE to cell wide RetxTx List.
24932 * Perform only RETX alloc'n on CW1.
24933 * Add UE to cell wide Retx List.
24935 * effBo is set to a non-zero value if allocation is
24938 * Invoked by: rgSCHCmnDlAllocRbTM3
24940 * @param[in] RgSchCellCb *cell
24941 * @param[in] RgSchDlSf *subFrm
24942 * @param[in] RgSchUeCb *ue
24943 * @param[in] uint32_t bo
24944 * @param[out] uint32_t *effBo
24945 * @param[in] RgSchDlHqProcCb *proc
24946 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24950 static Void rgSCHCmnDlTM3TxRetx
24957 RgSchDlHqProcCb *proc,
24958 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
24962 RgSchDlRbAlloc *allocInfo;
24964 RgSchDlHqTbCb *retxTb, *txTb;
24973 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
24976 /* Determine the transmission attributes */
24977 rgSCHCmnDlGetAttrForTM3(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
24978 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
24979 &allocInfo->raType);
/* NOTE(review): raw printf in the scheduler datapath — consider RLOG. */
24984 printf ("TX RETX called from proc %d cell %d \n",proc->procId, cell->cellId);
/* frthrScp path (elided branch structure): 2-CW alloc carrying retx TB
 * plus a fresh TX TB; otherwise retx-only on one CW below. */
24986 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
24988 if (ret == RFAILED)
24990 /* Allocation couldn't be made for Retx */
24991 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
24994 /* Adding UE to RbAllocInfo RETX-TX Lst */
24995 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
24999 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
25000 numTxLyrs, &numRb, effBo);
25001 if (ret == RFAILED)
25003 /* Allocation couldn't be made for Retx */
25004 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25008 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25011 /* Adding UE to allocInfo RETX Lst */
25012 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
25015 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
25016 prcdngInf, numTxLyrs, subFrm);
25023 * @brief This function handles Retx allocation in case of TM4 UEs
25024 * where previously one of the TBs was NACKED and the other
25025 * TB is either ACKED/WAITING.
25029 * Function: rgSCHCmnDlTM4TxRetx
25030 * Purpose: Determine the TX attributes for TM4 TxRetx Allocation.
25031 * If futher Scope for New Tx Allocation on other TB
25032 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
25033 * Add UE to cell wide RetxTx List.
25035 * Perform only RETX alloc'n on CW1.
25036 * Add UE to cell wide Retx List.
25038 * effBo is set to a non-zero value if allocation is
25041 * Invoked by: rgSCHCmnDlAllocRbTM4
25043 * @param[in] RgSchCellCb *cell
25044 * @param[in] RgSchDlSf *subFrm
25045 * @param[in] RgSchUeCb *ue
25046 * @param[in] uint32_t bo
25047 * @param[out] uint32_t *effBo
25048 * @param[in] RgSchDlHqProcCb *proc
25049 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25053 static Void rgSCHCmnDlTM4TxRetx
25060 RgSchDlHqProcCb *proc,
25061 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25065 RgSchDlRbAlloc *allocInfo;
25067 RgSchDlHqTbCb *retxTb, *txTb;
25075 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25078 /* Determine the transmission attributes */
/* Parallel to rgSCHCmnDlTM3TxRetx, using the TM4 attribute derivation. */
25079 rgSCHCmnDlGetAttrForTM4(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
25080 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
25081 &allocInfo->raType);
25085 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
25087 if (ret == RFAILED)
25089 /* Fix : syed If TxRetx allocation failed then add the UE along
25090 * with the proc to the nonSchdTxRetxUeLst and let spfc scheduler
25091 * take care of it during finalization. */
25092 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25095 /* Adding UE to RbAllocInfo RETX-TX Lst */
25096 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
25100 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
25101 numTxLyrs, &numRb, effBo);
25102 if (ret == RFAILED)
25104 /* Allocation couldn't be made for Retx */
25105 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25109 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25112 /* Adding UE to allocInfo RETX Lst */
25113 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
/* NOTE(review): no trailing ';' after this macro invocation (the TM3
 * sibling has one) — confirm the macro body ends in a statement. */
25116 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
25117 prcdngInf, numTxLyrs, subFrm)
25124 * @brief This function handles fresh TX allocation in case of TM3 UEs
25125 * where both the TBs are free for new transmission
25130 * Function: rgSCHCmnDlTM3TxTx
25131 * Purpose: Reached here for a TM3 UE's HqP's fresh allocation
25132 * where both the TBs are free for TX scheduling.
25133 * If forceTD flag is set
25134 * perform TD on CW1 with TB1.
25139 * RI layered precoding 2 TB on 2 CW.
25140 * Set precoding info.
25141 * Add UE to cellAllocInfo.
25142 * Fill ueAllocInfo.
25144 * effBo is set to a non-zero value if allocation is
25147 * Invoked by: rgSCHCmnDlAllocRbTM3
25149 * @param[in] RgSchCellCb *cell
25150 * @param[in] RgSchDlSf *subFrm
25151 * @param[in] RgSchUeCb *ue
25152 * @param[in] uint32_t bo
25153 * @param[out] uint32_t *effBo
25154 * @param[in] RgSchDlHqProcCb *proc
25155 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM3 fresh-TX allocation: both TBs of the HARQ proc are free for new TX.
 * forceTD (or an SPS HARQ proc, or - under FOUR_TX_ANTENNA - a proc carrying
 * DCCH) selects single-CW transmit diversity; otherwise 2-CW spatial
 * multiplexing with DCI format 2A is attempted and the precoding info is
 * derived from the reported RI.
 * NOTE(review): excerpted listing - braces and some branch/return lines
 * between the numbered lines are elided.
 */
25159 static Void rgSCHCmnDlTM3TxTx
25166 RgSchDlHqProcCb *proc,
25167 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25170 RgSchCmnDlUe *ueDl;
25171 RgSchDlRbAlloc *allocInfo;
25176 uint8_t precInfoAntIdx;
25180 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25181 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25183 /* Integration_fix: SPS Proc shall always have only one Cw */
25185 #ifdef FOUR_TX_ANTENNA
25186 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25187 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
25189 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25190 (ueDl->mimoInfo.forceTD))
25193 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
25196 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
25197 &allocInfo->raType);
/* Single-CW allocation on TB0 for the TD case */
25198 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25199 bo, &numRb, effBo);
25200 if (ret == RFAILED)
25202 /* If allocation couldn't be made then return */
25206 precInfo = 0; /* TD */
25208 else /* Precoding */
25210 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
25211 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
25213 /* Spatial Multiplexing using 2 CWs */
25214 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
25215 if (ret == RFAILED)
25217 /* If allocation couldn't be made then return */
25220 noTxLyrs = ueDl->mimoInfo.ri;
/* Index into getPrecInfoFunc by antenna-port count (2 -> 0, 4 -> 1) */
25221 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
25222 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, getPrecInfoFunc[0], precInfoAntIdx);
25223 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are not queued on the common TX list */
25227 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25230 /* Adding UE to RbAllocInfo TX Lst */
25231 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25233 /* Fill UE allocInfo scratch pad */
25234 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
25235 precInfo, noTxLyrs, subFrm);
25242 * @brief This function handles fresh TX allocation in case of TM4 UEs
25243 * where both the TBs are free for new transmission
25248 * Function: rgSCHCmnDlTM4TxTx
25249 * Purpose: Reached here for a TM4 UE's HqP's fresh allocation
25250 * where both the TBs are free for TX scheduling.
25251 * If forceTD flag is set
25252 * perform TD on CW1 with TB1.
25258 * Single layer precoding of TB1 on CW1.
25259 * Set precoding info.
25261 * RI layered precoding 2 TB on 2 CW.
25262 * Set precoding info.
25263 * Add UE to cellAllocInfo.
25264 * Fill ueAllocInfo.
25266 * effBo is set to a non-zero value if allocation is
25269 * Invoked by: rgSCHCmnDlAllocRbTM4
25271 * @param[in] RgSchCellCb *cell
25272 * @param[in] RgSchDlSf *subFrm
25273 * @param[in] RgSchUeCb *ue
25274 * @param[in] uint32_t bo
25275 * @param[out] uint32_t *effBo
25276 * @param[in] RgSchDlHqProcCb *proc
25277 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM4 fresh-TX allocation: both TBs of the HARQ proc are free for new TX.
 * forceTD (or SPS HARQ proc / DCCH under FOUR_TX_ANTENNA) selects single-CW
 * transmit diversity.  Otherwise DCI format 2 is used: RI==1 gives
 * single-layer SM on one CW, RI>1 gives 2-CW spatial multiplexing with
 * precoding info from getPrecInfoFunc[1][].
 * NOTE(review): excerpted listing - braces and some branch/return lines
 * between the numbered lines are elided.
 */
25281 static Void rgSCHCmnDlTM4TxTx
25288 RgSchDlHqProcCb *proc,
25289 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25292 RgSchCmnDlUe *ueDl;
25293 RgSchDlRbAlloc *allocInfo;
25297 uint8_t precInfoAntIdx;
25302 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25303 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25305 /* Integration_fix: SPS Proc shall always have only one Cw */
25307 #ifdef FOUR_TX_ANTENNA
25308 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25309 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
25311 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
25312 (ueDl->mimoInfo.forceTD))
25315 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
25318 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
25319 &allocInfo->raType);
25321 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25322 bo, &numRb, effBo);
25323 if (ret == RFAILED)
25325 /* If allocation couldn't be made then return */
25329 precInfo = 0; /* TD */
25331 else /* Precoding */
25333 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
25334 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
25336 if (ueDl->mimoInfo.ri == 1)
25338 /* Single Layer SM using FORMAT 2 */
25339 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25340 bo, &numRb, effBo);
25341 if (ret == RFAILED)
25343 /* If allocation couldn't be made then return */
25347 precInfo = 0; /* PrecInfo as 0 for RI=1*/
25351 /* Spatial Multiplexing using 2 CWs */
25352 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
25353 if (ret == RFAILED)
25355 /* If allocation couldn't be made then return */
25358 noTxLyrs = ueDl->mimoInfo.ri;
/* Index into getPrecInfoFunc by antenna-port count (2 -> 0, 4 -> 1) */
25359 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
25360 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are not queued on the common TX list */
25366 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25369 /* Adding UE to RbAllocInfo TX Lst */
25370 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25373 /* Fill UE allocInfo scratch pad */
25374 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
25375 precInfo, noTxLyrs, subFrm);
25382 * @brief This function determines the RBs and Bytes required for BO
25383 * transmission for UEs configured with TM 4.
25387 * Function: rgSCHCmnDlAllocTxRbTM4
25388 * Purpose: Invokes the functionality particular to the
25389 * current state of the TBs of the "proc".
25391 * Reference Parameter effBo is filled with alloced bytes.
25392 * Returns RFAILED if BO not satisfied at all.
25394 * Invoked by: rgSCHCmnDlAllocTxRb
25396 * @param[in] RgSchCellCb *cell
25397 * @param[in] RgSchDlSf *subFrm
25398 * @param[in] RgSchUeCb *ue
25399 * @param[in] uint32_t bo
25400 * @param[out] uint32_t *effBo
25401 * @param[in] RgSchDlHqProcCb *proc
25402 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM4 fresh-TX dispatch: both TBs are free for new transmission, so this
 * thin wrapper simply delegates to rgSCHCmnDlTM4TxTx().
 * NOTE(review): excerpted listing - parameter list and braces are
 * partially elided.
 */
25406 static Void rgSCHCmnDlAllocTxRbTM4
25413 RgSchDlHqProcCb *proc,
25414 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25418 /* Both TBs free for TX allocation */
25419 rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo,\
25420 proc, cellWdAllocInfo);
25427 * @brief This function determines the RBs and Bytes required for BO
25428 * retransmission for UEs configured with TM 4.
25432 * Function: rgSCHCmnDlAllocRetxRbTM4
25433 * Purpose: Invokes the functionality particular to the
25434 * current state of the TBs of the "proc".
25436 * Reference Parameter effBo is filled with alloced bytes.
25437 * Returns RFAILED if BO not satisfied at all.
25439 * Invoked by: rgSCHCmnDlAllocRetxRb
25441 * @param[in] RgSchCellCb *cell
25442 * @param[in] RgSchDlSf *subFrm
25443 * @param[in] RgSchUeCb *ue
25444 * @param[in] uint32_t bo
25445 * @param[out] uint32_t *effBo
25446 * @param[in] RgSchDlHqProcCb *proc
25447 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM4 RETX dispatch: routes to the RetxRetx handler when both TBs are
 * NACKED, otherwise to the TxRetx handler (one TB NACKED, the other
 * possibly free for a fresh TX).
 * NOTE(review): excerpted listing - braces/else lines are elided.
 */
25451 static Void rgSCHCmnDlAllocRetxRbTM4
25458 RgSchDlHqProcCb *proc,
25459 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25463 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
25464 (proc->tbInfo[1].state == HQ_TB_NACKED))
25466 /* Both TBs require RETX allocation */
25467 rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo,\
25468 proc, cellWdAllocInfo);
25472 /* One of the TBs need RETX allocation. Other TB may/maynot
25473 * be available for new TX allocation. */
25474 rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo,\
25475 proc, cellWdAllocInfo);
25484 * @brief This function determines the RBs and Bytes required for BO
25485 * transmission for UEs configured with TM 5.
25489 * Function: rgSCHCmnDlAllocTxRbTM5
25492 * Reference Parameter effBo is filled with alloced bytes.
25493 * Returns RFAILED if BO not satisfied at all.
25495 * Invoked by: rgSCHCmnDlAllocTxRb
25497 * @param[in] RgSchCellCb *cell
25498 * @param[in] RgSchDlSf *subFrm
25499 * @param[in] RgSchUeCb *ue
25500 * @param[in] uint32_t bo
25501 * @param[out] uint32_t *effBo
25502 * @param[in] RgSchDlHqProcCb *proc
25503 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM5 fresh-TX allocation is not supported by this scheduler: the visible
 * body only logs an error (under ERRCLS_DEBUG); no RBs are allocated.
 */
25507 static Void rgSCHCmnDlAllocTxRbTM5
25514 RgSchDlHqProcCb *proc,
25515 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25518 #if (ERRCLASS & ERRCLS_DEBUG)
25519 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
25526 * @brief This function determines the RBs and Bytes required for BO
25527 * retransmission for UEs configured with TM 5.
25531 * Function: rgSCHCmnDlAllocRetxRbTM5
25534 * Reference Parameter effBo is filled with alloced bytes.
25535 * Returns RFAILED if BO not satisfied at all.
25537 * Invoked by: rgSCHCmnDlAllocRetxRb
25539 * @param[in] RgSchCellCb *cell
25540 * @param[in] RgSchDlSf *subFrm
25541 * @param[in] RgSchUeCb *ue
25542 * @param[in] uint32_t bo
25543 * @param[out] uint32_t *effBo
25544 * @param[in] RgSchDlHqProcCb *proc
25545 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM5 RETX allocation is not supported by this scheduler: the visible
 * body only logs an error (under ERRCLS_DEBUG); no RBs are allocated.
 */
25549 static Void rgSCHCmnDlAllocRetxRbTM5
25556 RgSchDlHqProcCb *proc,
25557 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25560 #if (ERRCLASS & ERRCLS_DEBUG)
25561 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
25569 * @brief This function determines the RBs and Bytes required for BO
25570 * transmission for UEs configured with TM 6.
25574 * Function: rgSCHCmnDlAllocTxRbTM6
25577 * Reference Parameter effBo is filled with alloced bytes.
25578 * Returns RFAILED if BO not satisfied at all.
25580 * Invoked by: rgSCHCmnDlAllocTxRb
25582 * @param[in] RgSchCellCb *cell
25583 * @param[in] RgSchDlSf *subFrm
25584 * @param[in] RgSchUeCb *ue
25585 * @param[in] uint32_t bo
25586 * @param[out] uint32_t *effBo
25587 * @param[in] RgSchDlHqProcCb *proc
25588 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM6 fresh-TX allocation (single-antenna-port closed-loop precoding).
 * forceTD selects DCI format 1A, otherwise format 1B with the PMI /
 * PUSCH-feedback-confirmation packed into precIdxInfo.  A single-CW
 * allocation is then attempted on TB0 and the UE is queued on the TX list
 * (unless the proc is SPS-managed).
 * NOTE(review): excerpted listing - braces/else/return lines are elided.
 */
25592 static Void rgSCHCmnDlAllocTxRbTM6
25599 RgSchDlHqProcCb *proc,
25600 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25603 RgSchDlRbAlloc *allocInfo;
25604 RgSchCmnDlUe *ueDl;
25610 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25611 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25613 if (ueDl->mimoInfo.forceTD)
25615 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25616 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25620 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
25621 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25622 /* Fill precoding information for FORMAT 1B */
25623 /* First 4 least significant bits to indicate PMI.
25624 * 4th most significant corresponds to pmi Confirmation.
25626 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
25627 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
25629 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
25630 bo, &numRb, effBo);
25631 if (ret == RFAILED)
25633 /* If allocation couldn't be made then return */
/* SPS HARQ procs are not queued on the common TX list */
25638 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
25641 /* Adding UE to RbAllocInfo TX Lst */
25642 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
25644 /* Fill UE alloc Info */
25645 allocInfo->rbsReq = numRb;
25646 allocInfo->dlSf = subFrm;
25652 * @brief This function determines the RBs and Bytes required for BO
25653 * retransmission for UEs configured with TM 6.
25657 * Function: rgSCHCmnDlAllocRetxRbTM6
25660 * Reference Parameter effBo is filled with alloced bytes.
25661 * Returns RFAILED if BO not satisfied at all.
25663 * Invoked by: rgSCHCmnDlAllocRetxRb
25665 * @param[in] RgSchCellCb *cell
25666 * @param[in] RgSchDlSf *subFrm
25667 * @param[in] RgSchUeCb *ue
25668 * @param[in] uint32_t bo
25669 * @param[out] uint32_t *effBo
25670 * @param[in] RgSchDlHqProcCb *proc
25671 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM6 RETX allocation.  Same DCI-format / precoding-info selection as the
 * TM6 TX path (1A under forceTD, else 1B with PMI + confirmation bits),
 * then a single-CW RETX allocation for TB1's pending retransmission.  On
 * failure the UE/proc goes to the non-scheduled RETX list; on success it
 * is queued on the RETX list and the RB request is recorded.
 * NOTE(review): excerpted listing - braces/else/return lines are elided.
 */
25675 static Void rgSCHCmnDlAllocRetxRbTM6
25682 RgSchDlHqProcCb *proc,
25683 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25686 RgSchDlRbAlloc *allocInfo;
25687 RgSchCmnDlUe *ueDl;
25693 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25694 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
25696 if (ueDl->mimoInfo.forceTD)
25698 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25699 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25703 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
25704 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25705 /* Fill precoding information for FORMAT 1B */
25706 /* First 4 least significant bits to indicate PMI.
25707 * 4th most significant corresponds to pmi Confirmation.
25709 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
25710 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
25713 /* Get the Allocation in terms of RBs that are required for
25714 * this retx of TB1 */
25715 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
25717 if (ret == RFAILED)
25719 /* Allocation couldn't be made for Retx */
25720 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
25723 /* Adding UE to allocInfo RETX Lst */
25724 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
25725 /* Fill UE alloc Info */
25726 allocInfo->rbsReq = numRb;
25727 allocInfo->dlSf = subFrm;
25733 * @brief This function determines the RBs and Bytes required for BO
25734 * transmission for UEs configured with TM 7.
25738 * Function: rgSCHCmnDlAllocTxRbTM7
25741 * Reference Parameter effBo is filled with alloced bytes.
25742 * Returns RFAILED if BO not satisfied at all.
25744 * Invoked by: rgSCHCmnDlAllocTxRb
25746 * @param[in] RgSchCellCb *cell
25747 * @param[in] RgSchDlSf *subFrm
25748 * @param[in] RgSchUeCb *ue
25749 * @param[in] uint32_t bo
25750 * @param[out] uint32_t *effBo
25751 * @param[in] RgSchDlHqProcCb *proc
25752 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM7 fresh-TX dispatch: thin wrapper delegating to the generic
 * 1-TB/1-CW TX allocation routine.
 */
25756 static Void rgSCHCmnDlAllocTxRbTM7
25763 RgSchDlHqProcCb *proc,
25764 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25767 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
25773 * @brief This function determines the RBs and Bytes required for BO
25774 * retransmission for UEs configured with TM 7.
25778 * Function: rgSCHCmnDlAllocRetxRbTM7
25781 * Reference Parameter effBo is filled with alloced bytes.
25782 * Returns RFAILED if BO not satisfied at all.
25784 * Invoked by: rgSCHCmnDlAllocRetxRb
25786 * @param[in] RgSchCellCb *cell
25787 * @param[in] RgSchDlSf *subFrm
25788 * @param[in] RgSchUeCb *ue
25789 * @param[in] uint32_t bo
25790 * @param[out] uint32_t *effBo
25791 * @param[in] RgSchDlHqProcCb *proc
25792 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * TM7 RETX dispatch: thin wrapper delegating to the generic
 * 1-TB/1-CW RETX allocation routine.
 */
25796 static Void rgSCHCmnDlAllocRetxRbTM7
25803 RgSchDlHqProcCb *proc,
25804 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25807 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
25813 * @brief This function invokes the TM specific DL TX RB Allocation routine.
25817 * Function: rgSCHCmnDlAllocTxRb
25818 * Purpose: This function invokes the TM specific
25819 * DL TX RB Allocation routine.
25821 * Invoked by: Specific Schedulers
25823 * @param[in] RgSchCellCb *cell
25824 * @param[in] RgSchDlSf *subFrm
25825 * @param[in] RgSchUeCb *ue
25826 * @param[in] uint32_t bo
25827 * @param[out] uint32_t *effBo
25828 * @param[in] RgSchDlHqProcCb *proc
25829 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * Common entry point for DL fresh-TX RB allocation.  Resets the UE's
 * aggregated-TB-bits counter on a new TTI, snapshots bytes already
 * requested for both TBs, dispatches to the TM-specific allocator via
 * dlAllocTxRbFunc[txMode-1], then accounts the delta of newly requested
 * bytes (in bits) into ue->dl.aggTbBits and stamps the scheduling time.
 * NOTE(review): excerpted listing - braces and the return statement are
 * elided.
 */
25833 S16 rgSCHCmnDlAllocTxRb
25840 RgSchDlHqProcCb *proc,
25841 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
25844 uint32_t newSchBits = 0;
25845 uint32_t prevSchBits = 0;
25846 RgSchDlRbAlloc *allocInfo;
/* New TTI: reset the per-TTI aggregate allocation counter */
25849 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
25851 ue->dl.aggTbBits = 0;
25855 /* Calculate totals bits previously allocated */
25856 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25857 if (allocInfo->tbInfo[0].schdlngForTb)
25859 prevSchBits += allocInfo->tbInfo[0].bytesReq;
25861 if (allocInfo->tbInfo[1].schdlngForTb)
25863 prevSchBits += allocInfo->tbInfo[1].bytesReq;
25866 /* Call TM specific RB allocation routine */
25867 (dlAllocTxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
25868 proc, cellWdAllocInfo);
25872 /* Calculate totals bits newly allocated */
25873 if (allocInfo->tbInfo[0].schdlngForTb)
25875 newSchBits += allocInfo->tbInfo[0].bytesReq;
25877 if (allocInfo->tbInfo[1].schdlngForTb)
25879 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* Only the increment over the previous request is added (bytes -> bits) */
25881 if (newSchBits > prevSchBits)
25883 ue->dl.aggTbBits += ((newSchBits - prevSchBits) * 8);
25884 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
25891 /* DwPTS Scheduling Changes Start */
25894 * @brief Retransmit decision for TDD. Retx is avoided in below cases
25895 * 1) DL Sf -> Spl Sf
25896 * 2) DL SF -> DL SF 0
25900 * Function: rgSCHCmnRetxAvoidTdd
25901 * Purpose: Avoid allocating RETX for cases 1, 2
25903 * Invoked by: rgSCHCmnRetxAvoidTdd
25905 * @param[in] RgSchDlSf *curSf
25906 * @param[in] RgSchCellCb *cell
25907 * @param[in] RgSchDlHqProcCb *proc
/*
 * TDD RETX-avoidance decision.  Picks the "largest" original TX subframe
 * type among the NACKED TBs (ascending RE capacity: special SF, DL SF 0,
 * normal DL SF) and avoids retransmitting in a subframe with fewer REs
 * than the original transmission.  Returns TRUE when the RETX should be
 * avoided (per the final comparison visible at the bottom).
 * NOTE(review): excerpted listing - braces and the return statements are
 * elided.
 */
25911 Bool rgSCHCmnRetxAvoidTdd
25915 RgSchDlHqProcCb *proc
25918 RgSchTddSfType txSfType = 0;
25921 /* Get the RBs of TB that will be retransmitted */
25922 if (proc->tbInfo[0].state == HQ_TB_NACKED)
25924 txSfType = proc->tbInfo[0].sfType;
25926 #ifdef XEON_SPECIFIC_CHANGES
25927 #ifndef XEON_TDD_SPCL
25928 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
25929 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
25936 if (proc->tbInfo[1].state == HQ_TB_NACKED)
25938 /* Select the TxSf with the highest num of possible REs
25939 * In ascending order -> 1) SPL SF 2) DL_SF_0 3) DL_SF */
25940 txSfType = RGSCH_MAX(txSfType, proc->tbInfo[1].sfType);
25942 #ifdef XEON_SPECIFIC_CHANGES
25943 #ifndef XEON_TDD_SPCL
25944 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
25945 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
/* Original TX used a richer subframe than the current one -> avoid RETX */
25953 if (txSfType > curSf->sfType)
25964 /* DwPTS Scheduling Changes End */
25967 * @brief Avoid allocating RETX in case of collision
25968 * with reserved resources for BCH/PSS/SSS occasions.
25972 * Function: rgSCHCmnRetxAllocAvoid
25973 * Purpose: Avoid allocating RETX incase of collision
25974 * with reserved resources for BCH/PSS/SSS occassions
25976 * Invoked by: rgSCHCmnDlAllocRetxRb
25978 * @param[in] RgSchDlSf *subFrm
25979 * @param[in] RgSchUeCb *ue
25980 * @param[in] RgSchDlHqProcCb *proc
/*
 * Decides whether a RETX allocation must be avoided because it would
 * collide with RBs reserved for BCH/PSS/SSS.  Uses the RB count of the
 * NACKED TB's existing grant and the subframe's already-assigned BW;
 * the PBCH/PSS/SSS check applies on subframes 0 and 5 (sfNum % 5 == 0).
 * NOTE(review): excerpted listing - braces and the return statements are
 * elided.
 */
25984 Bool rgSCHCmnRetxAllocAvoid
25988 RgSchDlHqProcCb *proc
25994 if (proc->tbInfo[0].state == HQ_TB_NACKED)
25996 reqRbs = proc->tbInfo[0].dlGrnt.numRb;
26000 reqRbs = proc->tbInfo[1].dlGrnt.numRb;
26002 /* Consider the dlGrnt.numRb of the Retransmitting proc->tbInfo
26003 * and current available RBs to determine if this RETX TB
26004 * will collide with the BCH/PSS/SSS occasion */
26005 if (subFrm->sfNum % 5 == 0)
26007 if ((subFrm->bwAssigned < cell->pbchRbEnd) &&
26008 (((subFrm->bwAssigned + reqRbs) - cell->pbchRbStart) > 0))
26020 * @brief This function invokes the TM specific DL RETX RB Allocation routine.
26024 * Function: rgSCHCmnDlAllocRetxRb
26025 * Purpose: This function invokes the TM specific
26026 * DL RETX RB Allocation routine.
26028 * Invoked by: Specific Schedulers
26030 * @param[in] RgSchCellCb *cell
26031 * @param[in] RgSchDlSf *subFrm
26032 * @param[in] RgSchUeCb *ue
26033 * @param[in] uint32_t bo
26034 * @param[out] uint32_t *effBo
26035 * @param[in] RgSchDlHqProcCb *proc
26036 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/*
 * Common entry point for DL RETX RB allocation.  Resets the per-TTI
 * aggregate counter on a new TTI, bails out when subframe BW is
 * exhausted, dispatches to the TM-specific RETX allocator via
 * dlAllocRetxRbFunc[txMode-1], then accounts all newly requested bytes
 * (in bits) into ue->dl.aggTbBits and stamps the scheduling time.
 * Unlike the TX path, there is no previous-bytes snapshot here: RETX
 * requests are counted in full.
 * NOTE(review): excerpted listing - braces and the return statement are
 * elided.
 */
26040 S16 rgSCHCmnDlAllocRetxRb
26047 RgSchDlHqProcCb *proc,
26048 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26051 uint32_t newSchBits = 0;
26052 RgSchDlRbAlloc *allocInfo;
/* New TTI: reset the per-TTI aggregate allocation counter */
26055 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
26057 ue->dl.aggTbBits = 0;
26061 /* Check for DL BW exhaustion */
26062 if (subFrm->bw <= subFrm->bwAssigned)
26066 /* Call TM specific RB allocation routine */
26067 (dlAllocRetxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
26068 proc, cellWdAllocInfo);
26072 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26073 /* Calculate totals bits newly allocated */
26074 if (allocInfo->tbInfo[0].schdlngForTb)
26076 newSchBits += allocInfo->tbInfo[0].bytesReq;
26078 if (allocInfo->tbInfo[1].schdlngForTb)
26080 newSchBits += allocInfo->tbInfo[1].bytesReq;
26082 ue->dl.aggTbBits += (newSchBits * 8);
26083 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
26091 * @brief This function determines the RBs and Bytes required for
26092 * Transmission on 1 CW.
26096 * Function: rgSCHCmnDlAlloc1CwTxRb
26097 * Purpose: This function determines the RBs and Bytes required
26098 * for Transmission of DL SVC BO on 1 CW.
26099 * Also, takes care of SVC by SVC allocation by tracking
26100 * previous SVCs allocations.
26101 * Returns RFAILED if BO not satisfied at all.
26103 * Invoked by: DL UE Allocation
26105 * @param[in] RgSchCellCb *cell
26106 * @param[in] RgSchDlSf *subFrm
26107 * @param[in] RgSchUeCb *ue
26108 * @param[in] RgSchDlHqTbCb *tbInfo
26109 * @param[in] uint32_t bo
26110 * @param[out] uint8_t *numRb
26111 * @param[out] uint32_t *effBo
/*
 * Single-CW TX sizing.  The visible body is the 5GTF variant: iTbs/rank
 * come from ue->ue5gtfCb, the RB count is pinned to MAX_5GTF_PRBS and the
 * TB size is capped by the 5GTF TB-size table.  Tracks cumulative BO in
 * ueDl->outStndAlloc so per-SVC calls only report the incremental bytes
 * (*effBo = tbSz - oldReq, capped by reqBytes), and adjusts the
 * subframe's assigned BW by the delta over the previous RB request.
 * NOTE(review): excerpted listing - braces, some locals (oldReq, tbSz,
 * imcs, reqBytes) and return lines are elided.
 */
26115 static S16 rgSCHCmnDlAlloc1CwTxRb
26120 RgSchDlHqTbCb *tbInfo,
26129 RgSchCmnDlUe *ueDl;
26130 RgSchDlRbAlloc *allocInfo;
26133 /* Correcting wrap around issue.
26134 * This change has been done at multiple places in this function.*/
26135 uint32_t tempNumRb;
26138 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26139 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26140 oldReq = ueDl->outStndAlloc;
26143 //TODO_SID: Currently setting max Tb size wrt to 5GTF TM3
26144 iTbs = ue->ue5gtfCb.mcs;
26145 ueDl->maxTbSz = MAX_5GTF_TB_SIZE * ue->ue5gtfCb.rank;
26146 ueDl->maxRb = MAX_5GTF_PRBS;
26148 ueDl->outStndAlloc += bo;
26149 /* consider Cumulative amount of this BO and bytes so far allocated */
26150 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbSz/8);
26151 /* Get the number of REs needed for this bo. */
26152 //noRes = ((bo * 8 * 1024) / eff);
26154 /* Get the number of RBs needed for this transmission */
26155 /* Number of RBs = No of REs / No of REs per RB */
26156 //tempNumRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
26157 tempNumRb = MAX_5GTF_PRBS;
26158 tbSz = RGSCH_MIN(bo, (rgSch5gtfTbSzTbl[iTbs]/8) * ue->ue5gtfCb.rank);
26160 /* DwPts Scheduling Changes End */
/* Report only the bytes added by this call, bounded by the request */
26161 *effBo = RGSCH_MIN(tbSz - oldReq, reqBytes);
26164 //RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs);
26169 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbSz, \
26170 iTbs, imcs, tbInfo, ue->ue5gtfCb.rank);
26171 *numRb = (uint8_t) tempNumRb;
26173 /* Update the subframe Allocated BW field */
26174 subFrm->bwAssigned = subFrm->bwAssigned + tempNumRb - allocInfo->rbsReq;
26181 * @brief This function is invoked in the event of any TB's allocation
26182 * being underutilized by the specific scheduler. Here we reduce iMcs
26183 * to increase redundancy and hence increase reception quality at UE.
26187 * Function: rgSCHCmnRdcImcsTxTb
26188 * Purpose: This function shall reduce the iMcs in accordance with
26189 * the total consumed bytes by the UE at allocation
26192 * Invoked by: UE DL Allocation finalization routine
26193 * of specific scheduler.
26195 * @param[in] RgSchDlRbAlloc *allocInfo
26196 * @param[in] uint8_t tbInfoIdx
26197 * @param[in] uint32_t cnsmdBytes
/*
 * Reduces a TB's iMcs when the specific scheduler consumed fewer bytes
 * than the allocated TB size, lowering iTbs until the TB-size table entry
 * no longer exceeds the consumed bytes, then mapping iTbs back to iMcs.
 * An early note in the body says this functionality is not needed -
 * presumably short-circuited by elided code; TODO confirm.
 * NOTE(review): excerpted listing - locals, braces and return lines are
 * elided.
 */
26201 Void rgSCHCmnRdcImcsTxTb
26203 RgSchDlRbAlloc *allocInfo,
26205 uint32_t cnsmdBytes
26209 /*The below functionality is not needed.*/
26215 iTbs = allocInfo->tbInfo[tbInfoIdx].iTbs;
26216 noLyr = allocInfo->tbInfo[tbInfoIdx].noLyr;
26217 numRb = allocInfo->rbsAlloc;
/* Exact fit: the current iTbs already matches the consumed bytes */
26220 if ((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) == cnsmdBytes)
26225 /* Get iTbs as suitable for the consumed bytes */
26226 while((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) > cnsmdBytes)
26230 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].\
26231 tbCb->dlGrnt.iMcs);
26237 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].tbCb->dlGrnt.iMcs);
26244 * @brief This function determines the RBs and Bytes required for
26245 * Transmission on 2 CWs.
26249 * Function: rgSCHCmnDlAlloc2CwTxRb
26250 * Purpose: This function determines the RBs and Bytes required
26251 * for Transmission of DL SVC BO on 2 CWs.
26252 * Also, takes care of SVC by SVC allocation by tracking
26253 * previous SVCs allocations.
26254 * Returns RFAILED if BO not satisfied at all.
26256 * Invoked by: TM3 and TM4 DL UE Allocation
26258 * @param[in] RgSchCellCb *cell
26259 * @param[in] RgSchDlSf *subFrm
26260 * @param[in] RgSchUeCb *ue
26261 * @param[in] RgSchDlHqProcCb *proc
26262 * @param[in] RgSchDlHqProcCb bo
26263 * @param[out] uint8_t *numRb
26264 * @param[out] uint32_t *effBo
/*
 * 2-CW TX sizing for TM3/TM4 spatial multiplexing.  Steps visible here:
 *  1) cap this call's BO by the UE's remaining per-TTI bit budget and
 *     reject when aggregate/per-TB/RB limits are already hit;
 *  2) pick iTbs per CW - from the BLER-adapted values when CFI is
 *     unchanged, else re-fetched from CQI via rgSchCmnFetchItbs();
 *  3) TDD/non-freq-sel tweaks: back off iTbs on SF0 (and SF3/SF8 under
 *     CA_PHY_BRDCM_61765), and use the SPS-SF CFI on DwPTS data SFs;
 *  4) convert BO to REs using per-CW efficiencies, then to RBs, clamp to
 *     maxRb and refine with the TB-size table;
 *  5) clamp to remaining subframe BW, recompute both TB sizes (DwPTS SFs
 *     get a 3/4-scaled table lookup), update subFrm->bwAssigned, report
 *     the incremental bytes in *effBo and fill both TB info entries.
 * NOTE(review): excerpted listing - braces, else-arms, some locals
 * (oldReq, noRes, numRb, availBw, reqBytes, numRbRef) and return lines
 * are elided, so the exact control flow is partial.
 */
26268 static S16 rgSCHCmnDlAlloc2CwTxRb
26273 RgSchDlHqProcCb *proc,
26280 uint32_t eff1, eff2;
26281 uint32_t tb1Sz, tb2Sz;
26282 uint8_t imcs1, imcs2;
26283 uint8_t noLyr1, noLyr2;
26284 uint8_t iTbs1, iTbs2;
26285 RgSchCmnDlCell *cellDl;
26286 RgSchCmnDlUe *ueDl;
26287 RgSchDlRbAlloc *allocInfo;
26290 /* Fix: MUE_PERTTI_DL */
26292 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
26293 uint8_t cfi = cellSch->dl.currCfi;
26295 uint32_t availBits = 0;
26297 uint32_t boTmp = bo;
26302 cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26303 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26304 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26305 oldReq = ueDl->outStndAlloc;
/* Remaining bit budget for this UE in the current TTI */
26308 if (ueDl->maxTbBits > ue->dl.aggTbBits)
26310 availBits = ueDl->maxTbBits - ue->dl.aggTbBits;
26312 /* check if we can further allocate to this UE */
26313 if ((ue->dl.aggTbBits >= ueDl->maxTbBits) ||
26314 (allocInfo->tbInfo[0].bytesReq >= ueDl->maxTbSz/8) ||
26315 (allocInfo->tbInfo[1].bytesReq >= ueDl->maxTbSz/8) ||
26316 (allocInfo->rbsReq >= ueDl->maxRb))
26318 RLOG_ARG0(L_DEBUG,DBG_CELLID,cell->cellId,
26319 "rgSCHCmnDlAllocRb(): UEs max allocation exceed");
26323 noLyr1 = ueDl->mimoInfo.cwInfo[0].noLyr;
26324 noLyr2 = ueDl->mimoInfo.cwInfo[1].noLyr;
26326 /* If there is no CFI change, continue to use the BLER based
26328 if (ueDl->lastCfi == cfi)
26330 iTbs1 = ueDl->mimoInfo.cwInfo[0].iTbs[noLyr1 - 1];
26331 iTbs2 = ueDl->mimoInfo.cwInfo[1].iTbs[noLyr2 - 1];
/* CFI changed: re-derive iTbs per CW from the reported CQI */
26335 uint8_t cqi = ueDl->mimoInfo.cwInfo[0].cqi;
26337 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 0, noLyr1);
26339 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 0, noLyr1);
26342 cqi = ueDl->mimoInfo.cwInfo[1].cqi;
26344 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 1, noLyr2);
26346 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 1, noLyr2);
26350 /*ccpu00131191 and ccpu00131317 - Fix for RRC Reconfig failure
26351 * issue for VoLTE call */
26352 //if ((proc->hasDcch) || (TRUE == rgSCHLaaSCellEnabled(cell)))
26372 else if(!cellSch->dl.isDlFreqSel)
26375 /* for Tdd reduce iTbs only for SF0. SF5 contains only
26376 * SSS and can be ignored */
26377 if (subFrm->sfNum == 0)
26379 (iTbs1 > 1)? (iTbs1 -= 1) : (iTbs1 = 0);
26380 (iTbs2 > 1)? (iTbs2 -= 1) : (iTbs2 = 0);
26382 /* For SF 3 and 8 CRC is getting failed in DL.
26383 Need to do proper fix after the replay from
26385 #ifdef CA_PHY_BRDCM_61765
26386 if ((subFrm->sfNum == 3) || (subFrm->sfNum == 8))
26388 (iTbs1 > 2)? (iTbs1 -= 2) : (iTbs1 = 0);
26389 (iTbs2 > 2)? (iTbs2 -= 2) : (iTbs2 = 0);
/* DwPTS data subframe: use the SPS-subframe CFI for sizing */
26397 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26399 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
26403 eff1 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr1 - 1][cfi]))[iTbs1];
26404 eff2 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr2 - 1][cfi]))[iTbs2];
26407 bo = RGSCH_MIN(bo,availBits/8);
26408 ueDl->outStndAlloc += bo;
26409 /* consider Cumulative amount of this BO and bytes so far allocated */
26410 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbBits/8);
/* Weighted split of BO across CWs in proportion to their efficiencies */
26411 bo = RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff1)/(eff1+eff2)),
26413 RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff2)/(eff1+eff2)),
26414 (ueDl->maxTbSz)/8) +
26415 1; /* Add 1 to adjust the truncation at weighted averaging */
26416 /* Get the number of REs needed for this bo. */
26417 noRes = ((bo * 8 * 1024) / (eff1 + eff2));
26419 /* Get the number of RBs needed for this transmission */
26420 /* Number of RBs = No of REs / No of REs per RB */
26421 numRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
26422 /* Cannot exceed the maximum number of RBs per UE */
26423 if (numRb > ueDl->maxRb)
26425 numRb = ueDl->maxRb;
26430 if(RFAILED == rgSCHLaaCmn2CwAdjustPrb(allocInfo, boTmp, &numRb, ueDl, noLyr1, noLyr2, iTbs1, iTbs2))
/* Grow numRb while both TB sizes stay within limits and the BO */
26433 while ((numRb <= ueDl->maxRb) &&
26434 (rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1] <= ueDl->maxTbSz) &&
26435 (rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1] <= ueDl->maxTbSz) &&
26436 ((rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8 +
26437 rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8) <= bo))
26443 availBw = subFrm->bw - subFrm->bwAssigned;
26444 /* Cannot exceed the total number of RBs in the cell */
26445 if ((S16)(numRb - allocInfo->rbsReq) > availBw)
26447 numRb = availBw + allocInfo->rbsReq;
26449 tb1Sz = rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8;
26450 tb2Sz = rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8;
26451 /* DwPts Scheduling Changes Start */
26453 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26455 /* Max Rb for Special Sf is approximated as 4/3 of maxRb */
26456 rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, (uint8_t*)&numRb, ueDl->maxRb*4/3,
26457 &iTbs1, &iTbs2, noLyr1,
26458 noLyr2, &tb1Sz, &tb2Sz, cfi);
26459 /* Check for available Bw */
26460 if ((S16)numRb - allocInfo->rbsReq > availBw)
26462 numRb = availBw + allocInfo->rbsReq;
26463 tb1Sz = rgTbSzTbl[noLyr1-1][iTbs1][RGSCH_MAX(numRb*3/4,1)-1]/8;
26464 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs2][RGSCH_MAX(numRb*3/4,1)-1]/8;
26468 /* DwPts Scheduling Changes End */
26469 /* Update the subframe Allocated BW field */
26470 subFrm->bwAssigned = subFrm->bwAssigned + numRb - \
/* Report only the bytes added by this call, bounded by the request */
26473 *effBo = RGSCH_MIN((tb1Sz + tb2Sz) - oldReq, reqBytes);
26476 if (ROK != rgSCHLaaCmn2TBPrbCheck(allocInfo, tb1Sz, tb2Sz, boTmp, effBo, iTbs1, iTbs2, numRb, proc))
26482 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs1, imcs1);
26483 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs2, imcs2);
26484 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tb1Sz, \
26485 iTbs1, imcs1, &proc->tbInfo[0], noLyr1);
26486 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
26487 iTbs2, imcs2, &proc->tbInfo[1], noLyr2);
26488 *numRbRef = (uint8_t)numRb;
26496 * @brief This function determines the RBs and Bytes required for
26497 * Transmission & Retransmission on 2 CWs.
26501 * Function: rgSCHCmnDlAlloc2CwTxRetxRb
26502 * Purpose: This function determines the RBs and Bytes required
26503 * for Transmission & Retransmission on 2 CWs. Allocate
26504 * RETX TB on a better CW and restrict new TX TB by
26506 * Returns RFAILED if BO not satisfied at all.
26508 * Invoked by: TM3 and TM4 DL UE Allocation
26510 * @param[in] RgSchCellCb *cell
26511 * @param[in] RgSchDlSf *subFrm
26512 * @param[in] RgSchUeCb *ue
26513 * @param[in] RgSchDlHqTbCb *reTxTb
26514 * @param[in] RgSchDlHqTbCb *txTb
26515 * @param[out] uint8_t *numRb
26516 * @param[out] uint32_t *effBo
/* Determines RBs/bytes for a combined RETX (on the better CW) + new TX (on the
 * other CW) allocation. Reuses the RETX TB's original grant size instead of
 * recomputing it (ccpu00123919), then sizes the new-TX TB from the other CW's
 * iTbs/CQI. NOTE(review): this listing is an elided excerpt (source line
 * numbers are non-contiguous); verify control flow against the full file. */
26520 static S16 rgSCHCmnDlAlloc2CwTxRetxRb
26525 RgSchDlHqTbCb *reTxTb,
26526 RgSchDlHqTbCb *txTb,
26531 RgSchCmnDlUe *ueDl;
26532 RgSchDlRbAlloc *allocInfo;
26533 uint8_t imcs1, imcs2;
26536 RgSchCmnDlUeCwInfo *otherCw;
26538 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26539 uint8_t cfi = cellDl->currCfi;
26543 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
26544 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* otherCw = the codeword that is NOT the "better CW"; the RETX TB rides the
 * better CW, the new TX TB is sized for this one. */
26545 otherCw = &ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)];
26548 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26549 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26551 availBw = subFrm->bw - subFrm->bwAssigned;
/* RETX keeps the RB count of its original grant. */
26552 *numRb = reTxTb->dlGrnt.numRb;
26554 #ifdef XEON_TDD_SPCL
26555 *numRb = (reTxTb->initTxNumRbs);
/* Initial TX was on a DwPTS (special) subframe but this one is normal:
 * scale RBs by 3/4 to account for the shorter DwPTS region. */
26556 if(reTxTb->sfType == RG_SCH_SPL_SF_DATA && subFrm->sfType != RG_SCH_SPL_SF_DATA)
26558 *numRb = (reTxTb->initTxNumRbs*3/4);
26562 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
/* RETX cannot be resized; fail if the leftover BW cannot hold it. */
26568 if ((S16)*numRb > availBw)
26572 /* Update the subframe Allocated BW field */
26573 subFrm->bwAssigned += *numRb;
26574 noLyr2 = otherCw->noLyr;
26575 RG_SCH_CMN_GET_MCS_FOR_RETX(reTxTb, imcs1);
26577 /* If there is no CFI change, continue to use the BLER based
26579 if (ueDl->lastCfi == cfi)
26581 iTbs = otherCw->iTbs[noLyr2-1];
/* CFI changed: re-derive iTbs from the other CW's CQI for the new CFI.
 * NOTE(review): two rgSchCmnFetchItbs call shapes appear (with/without
 * subFrm) — presumably under different compile-time flags elided here. */
26586 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, otherCw->cqi, cfi,
26587 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
26589 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, otherCw->cqi, cfi,
26590 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
/* TB size table is in bits; /8 converts to bytes. */
26593 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs][*numRb-1]/8;
26594 /* DwPts Scheduling Changes Start */
26597 /* DwPts Scheduling Changes End */
26598 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs2);
/* RETX TB: iTbs is irrelevant for a retransmission, hence 0. */
26600 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], reTxTb->tbSz, \
26601 0, imcs1, reTxTb, reTxTb->numLyrs);
26603 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
26604 iTbs, imcs2, txTb, noLyr2);
26606 *effBo = reTxTb->tbSz + tb2Sz;
26613 * @brief This function determines the RBs and Bytes required for BO
26614 * Retransmission on 2 CWs.
26618 * Function: rgSCHCmnDlAlloc2CwRetxRb
26619 * Purpose: This function determines the RBs and Bytes required
26620 * for BO Retransmission on 2 CWs. Allocate larger TB
26621 * on a better CW and check if the smaller TB can be
26622 accommodated on the other CW.
26623 * Returns RFAILED if BO not satisfied at all.
26625 * Invoked by: Common Scheduler
26627 * @param[in] RgSchCellCb *cell
26628 * @param[in] RgSchDlSf *subFrm
26629 * @param[in] RgSchUeCb *ue
26630 * @param[in] RgSchDlHqProcCb *proc
26631 * @param[out] uint8_t *numRb
26632 * @param[out] Bool *swpFlg
26633 * @param[out] uint32_t *effBo
/* Determines RBs/bytes for retransmitting BOTH TBs of a HARQ process (2-CW
 * RETX). Both TBs reuse their original grant parameters (ccpu00123919); only
 * the RB budget against the subframe's remaining BW is re-checked.
 * NOTE(review): elided excerpt — interior braces/lines are missing; confirm
 * against the full file before reasoning about branch structure. */
26637 static S16 rgSCHCmnDlAlloc2CwRetxRb
26642 RgSchDlHqProcCb *proc,
26648 RgSchDlRbAlloc *allocInfo;
26651 RgSchDlHqTbCb *lrgTbInfo, *othrTbInfo;
26654 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26657 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26658 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
/* tbInfo[0] is treated as the larger TB, tbInfo[1] as the other. */
26660 lrgTbInfo = &proc->tbInfo[0];
26661 othrTbInfo = &proc->tbInfo[1];
26662 *numRb = lrgTbInfo->dlGrnt.numRb;
26663 #ifdef XEON_TDD_SPCL
/* If either TB was first sent on a DwPTS subframe, base the RB count on the
 * initial-TX RBs of whichever TB used the special subframe. */
26664 if((lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA || othrTbInfo->sfType == RG_SCH_SPL_SF_DATA))
26666 if(lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA)
26668 *numRb = (lrgTbInfo->initTxNumRbs);
26672 *numRb = (othrTbInfo->initTxNumRbs);
/* Retransmitting on a normal subframe what was sized for DwPTS: 3/4 scaling. */
26675 if(subFrm->sfType != RG_SCH_SPL_SF_DATA)
26677 *numRb = (*numRb)*3/4;
26682 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
/* RETX RB count is fixed; fail if it no longer fits in the leftover BW. */
26687 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
26691 /* Update the subframe Allocated BW field */
26692 subFrm->bwAssigned += *numRb;
26693 RG_SCH_CMN_GET_MCS_FOR_RETX(lrgTbInfo, imcs1);
26694 RG_SCH_CMN_GET_MCS_FOR_RETX(othrTbInfo, imcs2);
/* iTbs set to 0: irrelevant for RETX TBs (MCS carries the grant info). */
26695 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], lrgTbInfo->tbSz, \
26696 0, imcs1, lrgTbInfo, lrgTbInfo->numLyrs);
26697 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], othrTbInfo->tbSz, \
26698 0, imcs2, othrTbInfo, othrTbInfo->numLyrs);
26699 *effBo = lrgTbInfo->tbSz + othrTbInfo->tbSz;
26708 * @brief This function determines the RBs and Bytes required for BO
26709 * Retransmission on 1 CW.
26713 * Function: rgSCHCmnDlAlloc1CwRetxRb
26714 * Purpose: This function determines the RBs and Bytes required
26715 * for BO Retransmission on 1 CW, the first CW.
26716 * Returns RFAILED if BO not satisfied at all.
26718 * Invoked by: Common Scheduler
26720 * @param[in] RgSchCellCb *cell
26721 * @param[in] RgSchDlSf *subFrm
26722 * @param[in] RgSchUeCb *ue
26723 * @param[in] RgSchDlHqTbCb *tbInfo
26724 * @param[in] uint8_t noLyr
26725 * @param[out] uint8_t *numRb
26726 * @param[out] uint32_t *effBo
/* Determines RBs/bytes for a single-CW (first CW) retransmission. The RETX TB
 * reuses its original grant (RBs, MCS, DCI format); only the remaining
 * subframe BW is checked. NOTE(review): elided excerpt — failure-path lines
 * are not visible here. */
26730 static S16 rgSCHCmnDlAlloc1CwRetxRb
26735 RgSchDlHqTbCb *tbInfo,
26741 RgSchDlRbAlloc *allocInfo;
26745 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26748 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
26749 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
26751 *numRb = tbInfo->dlGrnt.numRb;
/* RETX cannot be resized; fail if it exceeds the unassigned BW. */
26752 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
26756 /* Update the subframe Allocated BW field */
26757 subFrm->bwAssigned += *numRb;
26758 imcs = tbInfo->dlGrnt.iMcs;
26759 allocInfo->dciFormat = tbInfo->dlGrnt.dciFormat;
26760 /* Fix: For a RETX TB the iTbs is irrelevant, hence setting 0 */
26761 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbInfo->tbSz, \
26762 0, imcs, tbInfo, tbInfo->numLyrs);
26763 *effBo = tbInfo->tbSz;
26771 * @brief This function is called to handle Release PDCCH feedback for SPS UE
26775 * Function: rgSCHCmnDlRelPdcchFbk
26776 * Purpose: Invokes SPS module to handle release PDCCH feedback
26780 * @param[in] RgSchCellCb *cell
26781 * @param[in] RgSchUeCb *ue
26782 * @param[in] Bool isAck
/* Thin pass-through: forwards release-PDCCH HARQ feedback for an SPS UE to
 * the DL SPS module. (Parameter lines are elided in this listing.) */
26786 Void rgSCHCmnDlRelPdcchFbk
26794 rgSCHCmnSpsDlRelPdcchFbk(cell, ue, isAck);
26801 * @brief This function is invoked to handle Ack processing for a HARQ proc.
26805 * Function: rgSCHCmnDlProcAck
26806 * Purpose: DTX processing for HARQ proc
26810 * @param[in] RgSchCellCb *cell
26811 * @param[in] RgSchDlHqProcCb *hqP
/* ACK/DTX processing hook for a DL HARQ process: delegates to the SPS module
 * only when the HARQ proc belongs to an SPS service; otherwise (elided path)
 * presumably falls through to non-SPS handling — confirm in full file. */
26815 Void rgSCHCmnDlProcAck
26818 RgSchDlHqProcCb *hqP
26823 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
26825 /* Invoke SPS module if SPS service was scheduled for this HARQ proc */
26826 rgSCHCmnSpsDlProcAck(cell, hqP);
26830 #ifdef RGSCH_SPS_STATS
26831 uint32_t rgSchStatCrntiCeRcvCnt;
26834 * @brief This function is invoked to handle CRNTI CE reception for an UE
26838 * Function: rgSCHCmnHdlCrntiCE
26839 * Purpose: Handle CRNTI CE reception
26843 * @param[in] RgSchCellCb *cell
26844 * @param[in] RgSchDlHqProcCb *hqP
/* Handles reception of a C-RNTI MAC CE (typically in msg3 after UL sync
 * loss): re-activates a PDCCH-order-inactive UE and resets DL/UL SPS state,
 * mirroring UE-RESET handling. */
26848 Void rgSCHCmnHdlCrntiCE
26855 #ifdef RGSCH_SPS_STATS
26856 rgSchStatCrntiCeRcvCnt++;
26859 /* When UL sync lost happened due to TA timer expiry UE is being moved to
26860 PDCCH order inactivity list.But when CRNTI CE received in msg3 from UE
26861 we are not moving UE into active state due to that RRC Reconfiguration is
26863 So here we are moving UE to active list whenever we receive the CRNTI CE and
26865 /* CR ccpu00144525 */
26866 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
26868 /* Activate this UE if it was inactive */
26869 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
26870 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
26873 /* Handling is same as reception of UE RESET for both DL and UL */
26874 if (ue->dl.dlSpsCfg.isDlSpsEnabled)
26876 rgSCHCmnSpsDlUeReset(cell, ue);
26878 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26880 rgSCHCmnSpsUlUeReset(cell, ue);
26888 * @brief This function is called to handle relInd from MAC for a UE
26892 * Function: rgSCHCmnUlSpsRelInd
26893 * Purpose: Invokes SPS module to handle UL SPS release for a UE
26895 * Invoked by: SCH_UTL
26897 * @param[in] RgSchCellCb *cell
26898 * @param[in] RgSchUeCb *ue
26899 * @param[in] Bool isExplRel
/* Thin pass-through: forwards a UL SPS release indication (from MAC, via
 * SCH_UTL) to the UL SPS module. isExplRel flags an explicit release. */
26903 Void rgSCHCmnUlSpsRelInd
26911 rgSCHCmnSpsUlProcRelInd(cell, ue, isExplRel);
26914 } /* end of rgSCHCmnUlSpsRelInd */
26917 * @brief This function is called to handle SPS Activate Ind from MAC for a UE
26921 * Function: rgSCHCmnUlSpsActInd
26922 * Purpose: Invokes SPS module to handle UL SPS activate for a UE
26924 * Invoked by: SCH_UTL
26926 * @param[in] RgSchCellCb *cell
26927 * @param[in] RgSchUeCb *ue
/* Forwards a UL SPS activation indication to the SPS module, but only when
 * UL SPS is enabled for this UE; otherwise it is a no-op. */
26931 Void rgSCHCmnUlSpsActInd
26935 uint16_t spsSduSize
26940 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26942 rgSCHCmnSpsUlProcActInd(cell, ue,spsSduSize);
26946 } /* end of rgSCHCmnUlSpsActInd */
26949 * @brief This function is called to handle CRC in UL for UEs
26950 * undergoing SPS release
26954 * Function: rgSCHCmnUlCrcInd
26955 * Purpose: Invokes SPS module to handle CRC in UL for SPS UE
26957 * Invoked by: SCH_UTL
26959 * @param[in] RgSchCellCb *cell
26960 * @param[in] RgSchUeCb *ue
26961 * @param[in] CmLteTimingInfo crcTime
/* Forwards a UL CRC-pass indication (for a UE undergoing SPS release) to the
 * SPS module when UL SPS is enabled; no-op otherwise. Note the trailing
 * comment says "CrcFailInd" — it mislabels this function's name. */
26965 Void rgSCHCmnUlCrcInd
26969 CmLteTimingInfo crcTime
26973 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
26975 rgSCHCmnSpsUlProcCrcInd(cell, ue, crcTime);
26979 } /* end of rgSCHCmnUlCrcFailInd */
26982 * @brief This function is called to handle CRC failure in UL
26986 * Function: rgSCHCmnUlCrcFailInd
26987 * Purpose: Invokes SPS module to handle CRC failure in UL for SPS UE
26989 * Invoked by: SCH_UTL
26991 * @param[in] RgSchCellCb *cell
26992 * @param[in] RgSchUeCb *ue
26993 * @param[in] CmLteTimingInfo crcTime
/* Forwards a UL CRC-failure (treated as DTX by the SPS module) indication to
 * the UL SPS module when UL SPS is enabled; no-op otherwise. */
26997 Void rgSCHCmnUlCrcFailInd
27001 CmLteTimingInfo crcTime
27005 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
27007 rgSCHCmnSpsUlProcDtxInd(cell, ue, crcTime);
27011 } /* end of rgSCHCmnUlCrcFailInd */
27013 #endif /* LTEMAC_SPS */
27016 * @brief BCH,BCCH,PCCH Downlink Scheduling Handler.
27020 * Function: rgSCHCmnDlBcchPcchAlloc
27021 * Purpose: This function calls common scheduler APIs to
27022 * schedule for BCCH/PCCH.
27023 * It then invokes Allocator for actual RB
27024 * allocations. It processes on the actual resources allocated
27025 * against requested to the allocator module.
27027 * Invoked by: Common Scheduler
27029 * @param[in] RgSchCellCb *cell
/* Per-TTI BCCH/PCCH scheduling driver: resets the next subframe's common-
 * channel allocation mask, refreshes SI configuration/selection, runs the
 * BCCH/PCCH scheduler, allocates RBs, and finalizes requested-vs-allocated.
 * The three nextSfIdx computations are alternatives under elided #if/#else
 * branches (plain, HDFDD, non-HDFDD) — only one is compiled in. */
27032 static Void rgSCHCmnDlBcchPcchAlloc(RgSchCellCb *cell)
27035 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
27037 #ifdef LTEMAC_HDFDD
/* HDFDD schedules RG_SCH_CMN_HARQ_INTERVAL subframes further ahead. */
27038 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
27040 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
27043 RgInfSfAlloc *nextsfAlloc = &(cell->sfAllocArr[nextSfIdx]);
27044 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27045 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
27049 /*Reset the bitmask for BCCH/PCCH*/
27050 rgSCHUtlResetSfAlloc(nextsfAlloc,TRUE,FALSE);
27051 #ifndef DISABLE_MIB_SIB /* Not sending MIB and SIB to CL */
/* Pick up any pending SI config change and select the SI to transmit. */
27053 rgSCHChkNUpdSiCfg(cell);
27054 rgSCHSelectSi(cell);
27057 /*Perform the scheduling for BCCH,PCCH*/
27058 rgSCHCmnDlBcchPcch(cell, allocInfo, nextsfAlloc);
27060 /* Call common allocator for RB Allocation */
27061 rgSCHBcchPcchDlRbAlloc(cell, allocInfo);
27063 /* Finalize the Allocations for reqested Against alloced */
27064 rgSCHCmnDlBcchPcchFnlz(cell, allocInfo);
27065 #endif /* DISABLE_MIB_SIB */
27070 * @brief Handles RB allocation for BCCH/PCCH for downlink.
27074 * Function : rgSCHBcchPcchDlRbAlloc
27076 * Invoking Module Processing:
27077 * - This function is invoked for DL RB allocation of BCCH/PCCH
27079 * Processing Steps:
27080 * - If cell is frequency selecive,
27081 * - Call rgSCHDlfsBcchPcchAllocRb().
27083 * - Do the processing
27085 * @param[in] RgSchCellCb *cell
27086 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* Dispatches BCCH/PCCH DL RB allocation to the frequency-selective allocator
 * (DLFS API) when the cell is DL frequency-selective, else to the common
 * non-DLFS allocator. */
27090 static Void rgSCHBcchPcchDlRbAlloc
27093 RgSchCmnDlRbAllocInfo *allocInfo
27096 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27100 if (cellSch->dl.isDlFreqSel)
27102 cellSch->apisDlfs->rgSCHDlfsBcchPcchAllocRb(cell, allocInfo);
27106 rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo);
27113 * @brief Handles RB allocation for BCCH,PCCH for frequency
27114 * non-selective cell.
27118 * Function : rgSCHCmnNonDlfsBcchPcchRbAlloc
27120 * Invoking Module Processing:
27121 * - SCH shall invoke this if downlink frequency selective is disabled for
27122 * the cell for RB allocation.
27123 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
27124 * estimate and subframe for each allocation to be made to SCH.
27126 * Processing Steps:
27127 * - Allocate sequentially for BCCH,PCCH common channels.
27129 * @param[in] RgSchCellCb *cell
27130 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Non-frequency-selective RB allocation for common channels: allocates
 * sequentially, PCCH first then BCCH-on-DLSCH, each only if RBs were
 * requested for it. */
27134 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc
27137 RgSchCmnDlRbAllocInfo *allocInfo
27140 RgSchDlRbAlloc *reqAllocInfo;
27144 /* Allocate for PCCH */
27145 reqAllocInfo = &(allocInfo->pcchAlloc);
27146 if (reqAllocInfo->rbsReq)
27148 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
27150 /* Allocate for BCCH on DLSCH */
27151 reqAllocInfo = &(allocInfo->bcchAlloc);
27152 if (reqAllocInfo->rbsReq)
27154 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
27162 * @brief This function implements the handling to check and
27163 * update the SI cfg at the start of the modification period.
27167 * Function: rgSCHChkNUpdSiCfg
27168 * Purpose: This function implements handling for update of SI Cfg
27169 * at the start of modification period.
27171 * Invoked by: Scheduler
27173 * @param[in] RgSchCellCb* cell
/* Applies pending System Information configuration changes. PWS (warning)
 * SIB1 updates are applied immediately at the next SIB1 occasion; all other
 * updates (MIB, SIB1, SIs, SI config) are deferred to the start of the next
 * BCCH modification period. siBitMask flags which items have pending updates
 * and is cleared per item as each is applied. */
27178 static Void rgSCHChkNUpdSiCfg
27183 CmLteTimingInfo pdSchTmInfo;
27187 pdSchTmInfo = cell->crntTime;
27188 #ifdef LTEMAC_HDFDD
27189 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27190 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27191 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27193 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA);
27197 /* Updating the SIB1 for Warning SI message immediately after it is received
27198 * from application. No need to wait for next modification period.
27200 if((pdSchTmInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
27201 && (RGSCH_SIB1_TX_SF_NUM == (pdSchTmInfo.slot % RGSCH_NUM_SUB_FRAMES)))
27203 /*Check whether SIB1 with PWS has been updated*/
27204 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_PWS_UPD)
27206 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
27207 cell->siCb.newSiInfo.sib1Info.sib1);
27208 cell->siCb.crntSiInfo.sib1Info.mcs =
27209 cell->siCb.newSiInfo.sib1Info.mcs;
27210 cell->siCb.crntSiInfo.sib1Info.nPrb =
27211 cell->siCb.newSiInfo.sib1Info.nPrb;
27212 cell->siCb.crntSiInfo.sib1Info.msgLen =
27213 cell->siCb.newSiInfo.sib1Info.msgLen;
27214 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_PWS_UPD;
27218 /*Check if this SFN and SF No marks the start of next modification
27219 period. If current SFN,SF No doesn't marks the start of next
27220 modification period, then return. */
27221 if(!((pdSchTmInfo.sfn % cell->siCfg.modPrd == 0)
27222 && (0 == pdSchTmInfo.slot)))
27223 /*if(!((((pdSchTmInfo.hSfn * 1024) + pdSchTmInfo.sfn) % cell->siCfg.modPrd == 0)
27224 && (0 == pdSchTmInfo.slot)))*/
27229 /*Check whether MIB has been updated*/
27230 if(cell->siCb.siBitMask & RGSCH_SI_MIB_UPD)
27232 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.mib,
27233 cell->siCb.newSiInfo.mib);
27234 cell->siCb.siBitMask &= ~RGSCH_SI_MIB_UPD;
27237 /*Check whether SIB1 has been updated*/
27238 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_UPD)
27240 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
27241 cell->siCb.newSiInfo.sib1Info.sib1);
27242 cell->siCb.crntSiInfo.sib1Info.mcs = cell->siCb.newSiInfo.sib1Info.mcs;
27243 cell->siCb.crntSiInfo.sib1Info.nPrb = cell->siCb.newSiInfo.sib1Info.nPrb;
27244 cell->siCb.crntSiInfo.sib1Info.msgLen =
27245 cell->siCb.newSiInfo.sib1Info.msgLen;
27246 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_UPD;
27249 /*Check whether SIs have been updated*/
27250 if(cell->siCb.siBitMask & RGSCH_SI_SI_UPD)
27254 /*Check if SI cfg have been modified And Check if numSi have
27255 been changed, if yes then we would need to update the
27256 pointers for all the SIs */
27257 if((cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD) &&
27258 (cell->siCfg.numSi != cell->siCb.newSiCfg.numSi))
27260 for(idx = 0;idx < cell->siCb.newSiCfg.numSi;idx++)
27262 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
27263 cell->siCb.newSiInfo.siInfo[idx].si);
27264 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
27265 cell->siCb.siArray[idx].isWarningSi = FALSE;
27267 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
27268 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
27269 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
27272 /*If numSi have been reduced then we need to free the
27273 pointers at the indexes in crntSiInfo which haven't
27274 been exercised. If numSi has increased then nothing
27275 additional is requires as above handling has taken
27277 if(cell->siCfg.numSi > cell->siCb.newSiCfg.numSi)
27279 for(idx = cell->siCb.newSiCfg.numSi;
27280 idx < cell->siCfg.numSi;idx++)
27282 RGSCH_FREE_MSG(cell->siCb.crntSiInfo.siInfo[idx].si);
27283 cell->siCb.siArray[idx].si = NULLP;
27289 /*numSi has not been updated, we just need to update the
27290 pointers for the SIs which are set to NON NULLP */
27291 /*ccpu00118260 - Correct Update of SIB2 */
27292 for(idx = 0;idx < cell->siCfg.numSi;idx++)
27294 if(NULLP != cell->siCb.newSiInfo.siInfo[idx].si)
27296 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
27297 cell->siCb.newSiInfo.siInfo[idx].si);
27299 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
27300 cell->siCb.siArray[idx].isWarningSi = FALSE;
27301 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
27302 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
27303 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
27307 cell->siCb.siBitMask &= ~RGSCH_SI_SI_UPD;
27310 /*Check whether SI cfg have been updated*/
27311 if(cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD)
27313 cell->siCfg = cell->siCb.newSiCfg;
27314 cell->siCb.siBitMask &= ~RGSCH_SI_SICFG_UPD;
27322 * @brief This function implements the selection of the SI
27323 * that is to be scheduled.
27327 * Function: rgSCHSelectSi
27328 * Purpose: This function implements the selection of SI
27329 * that is to be scheduled.
27331 * Invoked by: Scheduler
27333 * @param[in] RgSchCellCb* cell
/* Selects which SI message (if any) is to be transmitted in the upcoming SI
 * window. Uses an inWindow down-counter so the selection happens only once
 * per window; at a window boundary it maps (sfn, slot) to a window id and,
 * if that window's SI is due per its periodicity, (re)initializes the SI
 * transmission context (siCtx: id, retx count, tx window bounds). */
27338 static Void rgSCHSelectSi
27343 CmLteTimingInfo crntTmInfo;
27350 crntTmInfo = cell->crntTime;
27351 #ifdef LTEMAC_HDFDD
27352 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27353 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27354 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27356 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA);
27359 siWinSize = cell->siCfg.siWinSize;
27361 /* Select SI only once at the starting of the new window */
27362 if(cell->siCb.inWindow)
27364 if ((crntTmInfo.sfn % cell->siCfg.minPeriodicity) == 0 &&
27365 crntTmInfo.slot == 0)
27367 /* Reinit inWindow at the beginning of every SI window */
27368 cell->siCb.inWindow = siWinSize - 1;
27372 cell->siCb.inWindow--;
27376 else /* New window. Re-init the winSize counter with the window length */
/* A warning SI whose previous window expired with retransmissions left:
 * release its PDU before starting the new window. */
27378 if((cell->siCb.siArray[cell->siCb.siCtx.siId - 1].isWarningSi == TRUE)&&
27379 (cell->siCb.siCtx.retxCntRem != 0))
27381 rgSCHUtlFreeWarningSiPdu(cell);
27382 cell->siCb.siCtx.warningSiFlag = FALSE;
27385 cell->siCb.inWindow = siWinSize - 1;
/* x = index of the current SI "set" (one set = minPeriodicity frames). */
27388 x = rgSCHCmnGetSiSetId(crntTmInfo.sfn, crntTmInfo.slot,
27389 cell->siCfg.minPeriodicity);
27391 /* Window Id within a SI set. This window Id directly maps to a
27393 windowId = (((crntTmInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) +
27394 crntTmInfo.slot) - (x * (cell->siCfg.minPeriodicity * 10)))
27397 if(windowId >= RGR_MAX_NUM_SI)
27400 /* Update the siCtx if there is a valid SI and its periodicity
27402 if (NULLP != cell->siCb.siArray[windowId].si)
27404 /* Warning SI Periodicity is same as SIB2 Periodicity */
27405 if(((cell->siCb.siArray[windowId].isWarningSi == FALSE) &&
27406 (x % (cell->siCfg.siPeriodicity[windowId]
27407 /cell->siCfg.minPeriodicity) == 0)) ||
27408 ((cell->siCb.siArray[windowId].isWarningSi == TRUE) &&
27409 (x % (cell->siCfg.siPeriodicity[0]
27410 /cell->siCfg.minPeriodicity) == 0)))
27412 cell->siCb.siCtx.siId = windowId+1;
27413 cell->siCb.siCtx.retxCntRem = cell->siCfg.retxCnt;
27414 cell->siCb.siCtx.warningSiFlag = cell->siCb.siArray[windowId].
27416 cell->siCb.siCtx.timeToTx.sfn = crntTmInfo.sfn;
27417 cell->siCb.siCtx.timeToTx.slot = crntTmInfo.slot;
/* maxTimeToTx = timeToTx advanced by (siWinSize - 1) subframes: the
 * last subframe in which this SI may still be transmitted. */
27419 RG_SCH_ADD_TO_CRNT_TIME(cell->siCb.siCtx.timeToTx,
27420 cell->siCb.siCtx.maxTimeToTx, (siWinSize - 1))
27424 {/* Update the siCtx with invalid si Id */
27425 cell->siCb.siCtx.siId = 0;
27433 * @brief This function implements scheduler DL allocation for
27438 * Function: rgSCHDlSiSched
27439 * Purpose: This function implements scheduler for DL allocation
27442 * Invoked by: Scheduler
27444 * @param[in] RgSchCellCb* cell
/* DL scheduler for broadcast system information. Per invocation it:
 *  1) schedules MIB on PBCH at (sfn % MIB_PERIODICITY == 0, MIB sf),
 *     patching the SFN bits directly into the first two MIB octets;
 *  2) schedules SIB1 at its repetition occasions;
 *  3) otherwise schedules the SI selected in siCtx, honoring the SI window,
 *     remaining retx count, and ABS muting; computes the RB need from the
 *     TB-size table (with DwPTS compensation) and fills bcchAlloc.
 * NOTE(review): elided excerpt — early-return bodies and some #if/#else
 * alternatives are not visible; verify flow against the full file. */
27449 static Void rgSCHDlSiSched
27452 RgSchCmnDlRbAllocInfo *allocInfo,
27453 RgInfSfAlloc *subfrmAlloc
27456 CmLteTimingInfo crntTimInfo;
27462 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27463 /* DwPTS Scheduling Changes Start */
27466 uint8_t cfi = cellDl->currCfi;
27468 /* DwPTS Scheduling Changes End */
27472 crntTimInfo = cell->crntTime;
27473 #ifdef LTEMAC_HDFDD
27474 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
27475 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
27476 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
27478 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA);
27481 /* Compute the subframe for which allocation is being made.
27482 Essentially, we need pointer to the dl frame for this subframe */
27483 sf = rgSCHUtlSubFrmGet(cell, crntTimInfo);
27485 /*Check if scheduling of MIB is required */
27487 /* since we are adding the MIB repetition logic for EMTC UEs, checking if
27488 * emtcEnabled or not, If enabled MIB would be repeted at as part of EMTC
27489 * feature, otherwise scheduling at (n,0) */
27490 if(0 == cell->emtcEnable)
27493 if((crntTimInfo.sfn % RGSCH_MIB_PERIODICITY == 0)
27494 && (RGSCH_MIB_TX_SF_NUM == crntTimInfo.slot))
27497 uint8_t sfnOctet, mibOct2 = 0;
27498 uint8_t mibOct1 = 0;
27499 /*If MIB has not been yet setup by Application, return*/
27500 if(NULLP == cell->siCb.crntSiInfo.mib)
27503 SFndLenMsg(cell->siCb.crntSiInfo.mib, &mibLen);
27504 sf->bch.tbSize = mibLen;
27505 /*Fill the interface information */
27506 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, NULLD, NULLD);
27508 /*Set the bits of MIB to reflect SFN */
27509 /*First get the Most signficant 8 bits of SFN */
27510 sfnOctet = (uint8_t)(crntTimInfo.sfn >> 2);
27511 /*Get the first two octets of MIB, and then update them
27512 using the SFN octet value obtained above.*/
27513 if(ROK != SExamMsg((Data *)(&mibOct1),
27514 cell->siCb.crntSiInfo.mib, 0))
27517 if(ROK != SExamMsg((Data *)(&mibOct2),
27518 cell->siCb.crntSiInfo.mib, 1))
27521 /* ccpu00114572- Fix for improper way of MIB Octet setting for SFN */
/* Splice the 8 SFN MSBs across the octet boundary: top 2 bits into the
 * low bits of octet 0, remaining 6 bits into the high bits of octet 1. */
27522 mibOct1 = (mibOct1 & 0xFC) | (sfnOctet >> 6);
27523 mibOct2 = (mibOct2 & 0x03) | (sfnOctet << 2);
27524 /* ccpu00114572- Fix ends*/
27526 /*Now, replace the two octets in MIB */
27527 if(ROK != SRepMsg((Data)(mibOct1),
27528 cell->siCb.crntSiInfo.mib, 0))
27531 if(ROK != SRepMsg((Data)(mibOct2),
27532 cell->siCb.crntSiInfo.mib, 1))
27535 /*Copy the MIB msg buff into interface buffer */
27536 SCpyMsgMsg(cell->siCb.crntSiInfo.mib,
27537 rgSchCb[cell->instIdx].rgSchInit.region,
27538 rgSchCb[cell->instIdx].rgSchInit.pool,
27539 &subfrmAlloc->cmnLcInfo.bchInfo.pdu);
27540 /* Added Dl TB count for MIB message transmission
27541 * This counter is incremented 4 times to consider
27542 * the retransmission at the PHY level on PBCH channel*/
27544 cell->dlUlTbCnt.tbTransDlTotalCnt += RG_SCH_MIB_CNT;
27551 allocInfo->bcchAlloc.schdFirst = FALSE;
27552 /*Check if scheduling of SIB1 is required.
27553 Check of (crntTimInfo.sfn % RGSCH_SIB1_PERIODICITY == 0)
27554 is not required here since the below check takes care
27555 of SFNs applicable for this one too.*/
27556 if((crntTimInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
27557 && (RGSCH_SIB1_TX_SF_NUM == crntTimInfo.slot))
27559 /*If SIB1 has not been yet setup by Application, return*/
27560 if(NULLP == (cell->siCb.crntSiInfo.sib1Info.sib1))
27565 allocInfo->bcchAlloc.schdFirst = TRUE;
27566 mcs = cell->siCb.crntSiInfo.sib1Info.mcs;
27567 nPrb = cell->siCb.crntSiInfo.sib1Info.nPrb;
27568 msgLen = cell->siCb.crntSiInfo.sib1Info.msgLen;
27572 /*Check if scheduling of SI can be performed.*/
27573 Bool invalid = FALSE;
27575 if(cell->siCb.siCtx.siId == 0)
27578 /*Check if the Si-Window for the current Si-Context is completed*/
27579 invalid = rgSCHCmnChkPastWin(crntTimInfo, cell->siCb.siCtx.maxTimeToTx);
27582 /* LTE_ADV_FLAG_REMOVED_START */
/* Window expired with retx budget remaining: log it — the SI never got
 * all its transmissions in its window. */
27583 if(cell->siCb.siCtx.retxCntRem)
27585 RGSCHLOGERROR(cell->instIdx,ERRCLS_INT_PAR,ERG011,(ErrVal)cell->siCb.siCtx.siId,
27586 "rgSCHDlSiSched(): SI not scheduled and window expired");
27588 /* LTE_ADV_FLAG_REMOVED_END */
27589 if(cell->siCb.siCtx.warningSiFlag == TRUE)
27591 rgSCHUtlFreeWarningSiPdu(cell);
27592 cell->siCb.siCtx.warningSiFlag = FALSE;
27597 /*Check the timinginfo of the current SI-Context to see if its
27598 transmission can be scheduled. */
27599 if(FALSE == (rgSCHCmnChkInWin(crntTimInfo,
27600 cell->siCb.siCtx.timeToTx,
27601 cell->siCb.siCtx.maxTimeToTx)))
27606 /*Check if retransmission count has become 0*/
27607 if(0 == cell->siCb.siCtx.retxCntRem)
27612 /* LTE_ADV_FLAG_REMOVED_START */
27613 /* Check if ABS is enabled/configured */
27614 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
27616 /* The pattern type is RGR_ABS_MUTE, then eNB need to blank the subframe */
27617 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
27619 /* Determine next scheduling subframe is ABS or not */
27620 if(RG_SCH_ABS_ENABLED_ABS_SF == (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern
27621 [((crntTimInfo.sfn*RGSCH_NUM_SUB_FRAMES) + crntTimInfo.slot) % RGR_ABS_PATTERN_LEN]))
27623 /* Skip the SI scheduling to next tti */
27628 /* LTE_ADV_FLAG_REMOVED_END */
27630 /*Schedule the transmission of the current SI-Context */
27631 /*Find out the messg length for the SI message */
27632 /* warningSiFlag is to differentiate between Warning SI
27634 if((rgSCHUtlGetMcsAndNPrb(cell, &nPrb, &mcs, &msgLen)) != ROK)
/* i = offset (in subframes) of this transmission within the SI window. */
27639 cell->siCb.siCtx.i = RGSCH_CALC_SF_DIFF(crntTimInfo,
27640 cell->siCb.siCtx.timeToTx);
27644 /*Get the number of rb required */
27645 /*rgSCHCmnClcRbAllocForFxdTb(cell, msgLen, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: walk the 1-layer TBS table row until it fits msgLen. */
27646 if(cellDl->bitsPerRb==0)
27648 while ((rgTbSzTbl[0][0][rb]) < (uint32_t) (msgLen*8))
27656 rb = RGSCH_CEIL((msgLen*8), cellDl->bitsPerRb);
27658 /* DwPTS Scheduling Changes Start */
27660 if (sf->sfType == RG_SCH_SPL_SF_DATA)
27662 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
27664 /* Calculate the less RE's because of DwPTS */
27665 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
27667 /* Increase number of RBs in Spl SF to compensate for lost REs */
27668 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
27671 /* DwPTS Scheduling Changes End */
27672 /*ccpu00115595- end*/
27673 /* Additional check to see if required RBs
27674 * exceeds the available */
27675 if (rb > sf->bw - sf->bwAssigned)
27677 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHDlSiSched(): "
27678 "BW allocation failed CRNTI:%d",RGSCH_SI_RNTI);
27682 /* Update the subframe Allocated BW field */
27683 sf->bwAssigned = sf->bwAssigned + rb;
27685 /*Fill the parameters in allocInfo */
27686 allocInfo->bcchAlloc.rnti = RGSCH_SI_RNTI;
27687 allocInfo->bcchAlloc.dlSf = sf;
27688 allocInfo->bcchAlloc.rbsReq = rb;
27689 /*ccpu00116710- MCS is not getting assigned */
27690 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
27692 /* ccpu00117510 - ADD - Assignment of nPrb and other information */
27693 allocInfo->bcchAlloc.nPrb = nPrb;
27694 allocInfo->bcchAlloc.tbInfo[0].bytesReq = msgLen;
27695 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
27698 #endif /*RGR_SI_SCH*/
27698 #endif /*RGR_SI_SCH*/
27701 /* ccpu00117452 - MOD - Changed macro name from
27702 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
27703 #ifdef RGR_CQI_REPT
27705 * @brief This function Updates the DL CQI for the UE.
27709 * Function: rgSCHCmnUeDlPwrCtColltCqiRept
27710 * Purpose: Manages PUSH N CQI reporting
27711 * Step 1: Store the CQI in collation array
27712 * Step 2: Increment the tracking count
27713 * Step 3: Check whether it is time to send the report
27714 * Step 4: if yes, Send StaInd to RRM
27715 * Step 4.1: Fill StaInd for sending collated N CQI reports
27716 * Step 4.2: Call utility function (rgSCHUtlRgrStaInd) to send rpts to RRM
27717 * Step 4.2.1: If sending was not successful, return RFAILED
27718 * Step 4.2.2: If sending was successful, return ROK
27719 * Step 5: If no, return
27720 * Invoked by: rgSCHCmnDlCqiInd
27722 * @param[in] RgSchCellCb *cell
27723 * @param[in] RgSchUeCb *ue
27724 * @param[in] RgrUeCqiRept *ueCqiRpt
/* Collates periodic DL CQI reports for a UE (PUSH-N reporting): stores the
 * new report, and once the configured count is reached, allocates a
 * RgrStaIndInfo and sends the batched reports to RRM via
 * rgSCHUtlFillSndStaInd. Returns RFAILED on allocation failure (elided
 * paths presumably return ROK — confirm in full file). */
27728 static S16 rgSCHCmnUeDlPwrCtColltCqiRept
27732 RgrUeCqiRept *ueCqiRpt
27735 uint8_t *cqiCount = NULLP;
27737 RgrStaIndInfo *staInfo = NULLP;
27740 /* Step 1: Store the CQI in collation array */
27741 /* Step 2: Increment the tracking count */
27742 cqiCount = &(ue->schCqiInfo.cqiCount);
27743 ue->schCqiInfo.cqiRept[(*cqiCount)++] =
27747 /* Step 3: Check whether it is time to send the report */
27748 if(RG_SCH_CQIR_IS_TIMTOSEND_CQIREPT(ue))
27750 /* Step 4: if yes, Send StaInd to RRM */
27751 retVal = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&staInfo,
27752 sizeof(RgrStaIndInfo));
27755 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
27756 "allocate memory for sending StaInd CRNTI:%d",ue->ueId);
27760 /* Step 4.1: Fill StaInd for sending collated N CQI reports */
/* NOTE(review): gCqiReptToAppCount is declared locally here and
 * incremented while uninitialized in this visible excerpt — looks like a
 * debug counter; verify its real declaration in the full file. */
27763 uint32_t gCqiReptToAppCount;
27764 gCqiReptToAppCount++;
27769 retVal = rgSCHUtlFillSndStaInd(cell, ue, staInfo,
27770 ue->cqiReptCfgInfo.numColltdCqiRept);
27776 } /* End of rgSCHCmnUeDlPwrCtColltCqiRept */
27778 #endif /* End of RGR_CQI_REPT */
27781 * @brief This function checks for the retransmisson
27782 * for a DTX scenario.
27789 * @param[in] RgSchCellCb *cell
27790 * @param[in] RgSchUeCb *ue
/* Disallows adaptive retransmission when TB0's HARQ feedback was DTX (the UE
 * likely missed the grant, so a retx decision cannot be based on it).
 * reTxAllwd is an out-parameter set to FALSE on DTX. */
27795 Void rgSCHCmnChkRetxAllowDtx
27799 RgSchDlHqProcCb *proc,
27807 if ((proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX))
27809 *reTxAllwd = FALSE;
27816 * @brief API for calculating the SI Set Id
27820 * Function: rgSCHCmnGetSiSetId
27822 * This API is used for calculating the SI Set Id, as shown below
27824 * siSetId = 0 siSetId = 1
27825 * |******************|******************|---------------->
27826 * (0,0) (8,0) (16,0) (SFN, SF)
27829 * @param[in] uint16_t sfn
27830 * @param[in] uint8_t sf
27831 * @return uint16_t siSetId
/* Maps an absolute (sfn, sf) position to an SI-set index: one set spans
 * minPeriodicity frames (minPeriodicity * 10 subframes). */
27833 uint16_t rgSCHCmnGetSiSetId
27837 uint16_t minPeriodicity
27840 /* 80 is the minimum SI periodicity in sf. Also
27841 * all other SI periodicities are multiples of 80 */
27842 return (((sfn * RGSCH_NUM_SUB_FRAMES_5G) + sf) / (minPeriodicity * 10));
27846 * @brief API for calculating the DwPts Rb, Itbs and tbSz
27850 * Function: rgSCHCmnCalcDwPtsTbSz
27852 * @param[in] RgSchCellCb *cell
27853 * @param[in] uint32_t bo
27854 * @param[in/out] uint8_t *rb
27855 * @param[in/out] uint8_t *iTbs
27856 * @param[in] uint8_t lyr
27857 * @param[in] uint8_t cfi
27858 * @return uint32_t tbSz
/* Computes the TB size (bits, per the rgTbSzTbl lookup) achievable in a DwPTS
 * special subframe: converts the normal-subframe RB count into an equivalent
 * DwPTS RB count (fewer REs per RB), adjusts iTbs via the static DwPTS delta
 * table, then grows the RB count until the TB covers the BO or the per-UE BW
 * cap is hit. *rb and *iTbs are in/out. The 3/4 factor mirrors the DwPTS RB
 * scaling used by the callers. */
27860 static uint32_t rgSCHCmnCalcDwPtsTbSz
27871 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27872 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
27873 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
27876 /* DwPts Rb cannot exceed the cell Bw */
27877 numDwPtsRb = RGSCH_MIN(numDwPtsRb, cellDl->maxDlBwPerUe);
27879 /* Adjust the iTbs for optimum usage of the DwPts region.
27880 * Using the same iTbs adjustment will not work for all
27881 * special subframe configurations and iTbs levels. Hence use the
27882 * static iTbs Delta table for adjusting the iTbs */
27883 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs);
/* RGSCH_MAX(...,1) guards the [index-1] table access against RB==0. */
27887 while(rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1] < bo*8 &&
27888 numDwPtsRb < cellDl->maxDlBwPerUe)
27893 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
27897 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
27905 * @brief API for calculating the DwPts Rb, Itbs and tbSz
27909 * Function: rgSCHCmnCalcDwPtsTbSz2Cw
27911 * @param[in] RgSchCellCb *cell
27912 * @param[in] uint32_t bo - compared as bo*8 below, i.e. bytes
27913 * @param[in/out] uint8_t *rb
27914 * @param[in] uint8_t maxRb
27915 * @param[in/out] uint8_t *iTbs1
27916 * @param[in/out] uint8_t *iTbs2
27917 * @param[in] uint8_t lyr1
27918 * @param[in] uint8_t lyr2
27919 * @return[in/out] uint32_t *tb1Sz
27920 * @return[in/out] uint32_t *tb2Sz
27921 * @param[in] uint8_t cfi
27923 static Void rgSCHCmnCalcDwPtsTbSz2Cw
/* Convert the requested normal-subframe RB count into the equivalent
 * number of DwPTS RBs (same resource-element budget), as in the
 * single-codeword variant rgSCHCmnCalcDwPtsTbSz. */
27938 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
27939 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
27940 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
27943 /* DwPts Rb cannot exceed the cell Bw */
27944 numDwPtsRb = RGSCH_MIN(numDwPtsRb, maxRb);
27946 /* Adjust the iTbs for optimum usage of the DwPts region.
27947 * Using the same iTbs adjustment will not work for all
27948 * special subframe configurations and iTbs levels. Hence use the
27949 * static iTbs Delta table for adjusting the iTbs */
27950 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs1);
27951 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs2);
/* Two-codeword variant: grow numDwPtsRb until the combined TBS of both
 * codewords (3/4-scaled RB index) covers bo*8 bits, or maxRb is hit. */
27953 while((rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1] +
27954 rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1])< bo*8 &&
27955 numDwPtsRb < maxRb)
/* Table entries are in bits; /8 returns each TB size in bytes. */
27960 *tb1Sz = rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
27961 *tb2Sz = rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
27971 * @brief Updates the GBR LCGs when datInd is received from MAC
27975 * Function: rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
27976 * Purpose: This function updates the GBR LCGs
27977 * when datInd is received from MAC.
27981 * @param[in] RgSchCellCb *cell
27982 * @param[in] RgSchUeCb *ue
27983 * @param[in] RgInfUeDatInd *datInd
27986 Void rgSCHCmnUpdUeDataIndLcg
27990 RgInfUeDatInd *datInd
27994 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27996 Inst inst = cell->instIdx;
/* Walk the per-LCG byte counts reported by MAC in the data indication.
 * NOTE(review): the bound is RGINF_MAX_LCG_PER_UE - 1, so the last
 * lcgInfo[] entry is never examined -- confirm this is intentional. */
28000 for (idx = 0; (idx < RGINF_MAX_LCG_PER_UE - 1); idx++)
28002 if (datInd->lcgInfo[idx].bytesRcvd != 0)
28004 uint8_t lcgId = datInd->lcgInfo[idx].lcgId;
28005 uint32_t bytesRcvd = datInd->lcgInfo[idx].bytesRcvd;
/* Only adjust accounting for LCGs that are actually configured. */
28007 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
28009 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
28010 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* GBR LCG: consume the received bytes first from the effective GBR
 * allowance, then from the effective delta-MBR, flooring both at 0. */
28012 if(bytesRcvd > cmnLcg->effGbr)
28014 bytesRcvd -= cmnLcg->effGbr;
28015 cmnLcg->effDeltaMbr = (cmnLcg->effDeltaMbr > bytesRcvd) ? \
28016 (cmnLcg->effDeltaMbr - bytesRcvd) : (0);
28017 cmnLcg->effGbr = 0;
28021 cmnLcg->effGbr -= bytesRcvd;
28023 /* To keep BS updated with the amount of data received for the GBR */
28024 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
28025 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
28026 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr+cmnLcg->effDeltaMbr);
/* Non-GBR LCG (LCG0 excluded): charge the UE-wide effective AMBR and
 * the non-GBR buffer-status aggregates, each floored at 0. */
28028 else if(lcgId != 0)
28030 ue->ul.effAmbr = (ue->ul.effAmbr > datInd->lcgInfo[idx].bytesRcvd) ? \
28031 (ue->ul.effAmbr - datInd->lcgInfo[idx].bytesRcvd) : (0);
28032 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
28033 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
28034 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
28035 ue->ul.nonGbrLcgBs = (ue->ul.nonGbrLcgBs > datInd->lcgInfo[idx].bytesRcvd) ? \
28036 (ue->ul.nonGbrLcgBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
28038 ue->ul.nonLcg0Bs = (ue->ul.nonLcg0Bs > datInd->lcgInfo[idx].bytesRcvd) ? \
28039 (ue->ul.nonLcg0Bs - datInd->lcgInfo[idx].bytesRcvd) : (0);
/* Refresh scheduler-specific LCG state via the EMTC-specific or the
 * standard UL scheduler hook; failures are only logged. */
28048 if(TRUE == ue->isEmtcUe)
28050 if (cellSch->apisEmtcUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
28052 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
28059 if (cellSch->apisUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
28061 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
28067 /** @brief This function initializes DL allocation lists and prepares
28072 * Function: rgSCHCmnInitRbAlloc
28074 * @param [in] RgSchCellCb *cell
28079 static Void rgSCHCmnInitRbAlloc
28084 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
28085 CmLteTimingInfo frm;
/* Reset the per-TTI DL RB-allocation bookkeeping before scheduling. */
28090 /* Initializing RgSchCmnUlRbAllocInfo structure.*/
28091 rgSCHCmnInitDlRbAllocInfo(&cellSch->allocInfo);
28093 frm = cellSch->dl.time;
28095 dlSf = rgSCHUtlSubFrmGet(cell, frm);
/* 5GTF: seed per-subframe UE-group limits and clear per-beam VRBG state. */
28097 dlSf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
28098 dlSf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
28099 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
28101 dlSf->sfBeamInfo[idx].totVrbgAllocated = 0;
28102 dlSf->sfBeamInfo[idx].totVrbgRequired = 0;
28103 dlSf->sfBeamInfo[idx].vrbgStart = 0;
28106 dlSf->remUeCnt = cellSch->dl.maxUePerDlSf;
28107 /* Updating the Subframe information in RBAllocInfo */
28108 cellSch->allocInfo.dedAlloc.dedDlSf = dlSf;
28109 cellSch->allocInfo.msg4Alloc.msg4DlSf = dlSf;
28111 /* LTE_ADV_FLAG_REMOVED_START */
28112 /* Determine next scheduling subframe is ABS or not */
28113 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
/* Index the ABS (almost-blank subframe) pattern by the absolute
 * subframe number, modulo the pattern length. */
28115 cell->lteAdvCb.absPatternDlIdx =
28116 ((frm.sfn*RGSCH_NUM_SUB_FRAMES_5G) + frm.slot) % RGR_ABS_PATTERN_LEN;
28117 cell->lteAdvCb.absDlSfInfo = (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern[
28118 cell->lteAdvCb.absPatternDlIdx]);
28123 cell->lteAdvCb.absDlSfInfo = RG_SCH_ABS_DISABLED;
28125 /* LTE_ADV_FLAG_REMOVED_END */
28128 cellSch->allocInfo.ccchSduAlloc.ccchSduDlSf = dlSf;
28131 /* Update subframe-wide allocation information with SPS allocation */
28132 rgSCHCmnSpsDlUpdDlSfAllocWithSps(cell, frm, dlSf);
28141 * @brief Sends the TX mode change indication to RRM
28146 * Function: rgSCHCmnSendTxModeInd(cell, ueUl, newTxMode)
28147 * Purpose: This function sends the TX mode Change
28148 * indication to RRM
28153 * @param[in] RgSchCellCb *cell
28154 * @param[in] RgSchUeCb *ue
28155 * @param[in] uint8_t newTxMode
28158 static Void rgSCHCmnSendTxModeInd
28165 RgmTransModeInd *txModeChgInd;
28166 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Send only when a forced transmit-diversity due to a TX-mode
 * reconfiguration is NOT already in progress for this UE. */
28169 if(!(ueDl->mimoInfo.forceTD & RG_SCH_CMN_TD_TXMODE_RECFG))
/* Allocate the indication from the RGM SAP's memory pool; on failure
 * the indication is dropped (no retry visible in this excerpt). */
28172 if(SGetSBuf(cell->rgmSap->sapCfg.sapPst.region,
28173 cell->rgmSap->sapCfg.sapPst.pool, (Data**)&txModeChgInd,
28174 sizeof(RgmTransModeInd)) != ROK)
28178 RG_SCH_FILL_RGM_TRANSMODE_IND(ue->ueId, cell->cellId, newTxMode, txModeChgInd);
28179 RgUiRgmChangeTransModeInd(&(cell->rgmSap->sapCfg.sapPst),
28180 cell->rgmSap->sapCfg.suId, txModeChgInd);
/* Reset the mode-change hysteresis counters and the link-adaptation
 * iTbs delta once the indication has been issued. */
28183 ue->mimoInfo.txModUpChgFactor = 0;
28184 ue->mimoInfo.txModDownChgFactor = 0;
28185 ueDl->laCb[0].deltaiTbs = 0;
28191 * @brief Check & Updates the TM Mode change threshold based on cqiiTbs and
28196 * Function: rgSchCheckAndTriggerModeChange(cell, ueUl, iTbsNew)
28197 * Purpose: This function updates and checks the threshold for TM mode
28202 * @param[in] RgSchCellCb *cell
28203 * @param[in] RgSchUeCb *ue
28204 * @param[in] uint8_t iTbs
28207 Void rgSchCheckAndTriggerModeChange
28211 uint8_t reportediTbs,
28216 RgrTxMode txMode; /*!< UE's Transmission Mode */
28217 RgrTxMode modTxMode; /*!< UE's Transmission Mode */
28220 txMode = ue->mimoInfo.txMode;
28222 /* Check for Step down */
28223 /* Step down only when TM4 is configured. */
28224 if(RGR_UE_TM_4 == txMode)
/* Hysteresis: good reports (reported iTbs sufficiently above previous)
 * raise the step-down counter; otherwise it is decremented.
 * NOTE(review): the increment uses RG_SCH_MODE_CHNG_STEPUP_FACTOR in
 * the step-DOWN path -- confirm the constant choice is intentional. */
28226 if((previTbs <= reportediTbs) && ((reportediTbs - previTbs) >= RG_SCH_MODE_CHNG_STEPDOWN_CHECK_FACTOR))
28228 ue->mimoInfo.txModDownChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
28232 ue->mimoInfo.txModDownChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the counter from below so one long bad stretch cannot delay a
 * future trigger indefinitely. */
28235 ue->mimoInfo.txModDownChgFactor =
28236 RGSCH_MAX(ue->mimoInfo.txModDownChgFactor, -(RG_SCH_MODE_CHNG_STEPDOWN_THRSHD));
28238 if(ue->mimoInfo.txModDownChgFactor >= RG_SCH_MODE_CHNG_STEPDOWN_THRSHD)
28240 /* Trigger Mode step down */
28241 modTxMode = RGR_UE_TM_3;
28242 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
28246 /* Check for Step up */
28247 /* Step Up only when TM3 is configured, Max possible Mode is TM4*/
28248 if(RGR_UE_TM_3 == txMode)
28250 if((previTbs > reportediTbs) || (maxiTbs == previTbs))
28252 ue->mimoInfo.txModUpChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
28256 ue->mimoInfo.txModUpChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp from below, mirroring the step-down counter handling. */
28259 ue->mimoInfo.txModUpChgFactor =
28260 RGSCH_MAX(ue->mimoInfo.txModUpChgFactor, -(RG_SCH_MODE_CHNG_STEPUP_THRSHD));
28262 /* Check if TM step up need to be triggered */
28263 if(ue->mimoInfo.txModUpChgFactor >= RG_SCH_MODE_CHNG_STEPUP_THRSHD)
28265 /* Trigger mode change */
28266 modTxMode = RGR_UE_TM_4;
28267 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
28276 * @brief Returns whether CSG UEs currently have DL priority
28280 * Function: rgSCHCmnIsDlCsgPrio (cell)
28281 * Purpose: This function returns if csg UEs are
28282 * having priority at current time
28284 * Invoked by: Scheduler
28286 * @param[in] RgSchCellCb *cell
28288 * @return Bool
28291 Bool rgSCHCmnIsDlCsgPrio(RgSchCellCb *cell)
28294 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
28296 /* Calculating the percentage resource allocated */
/* CSG prioritisation only applies to hybrid-access cells. */
28297 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* Compare the non-CSG share of allocated DL PRBs against the configured
 * minimum. NOTE(review): divides by totPrbCnt -- confirm it cannot be 0
 * on this path. */
28303 if(((cmnDlCell->ncsgPrbCnt * 100) / cmnDlCell->totPrbCnt) < cell->minDlResNonCsg)
28315 * @brief Returns whether CSG UEs currently have UL priority
28319 * Function: rgSCHCmnIsUlCsgPrio (cell)
28320 * Purpose: This function returns if csg UEs are
28321 * having priority at current time
28323 * Invoked by: Scheduler
28325 * @param[in] RgSchCellCb *cell
28328 * @return Bool
28330 Bool rgSCHCmnIsUlCsgPrio(RgSchCellCb *cell)
28332 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
28335 /* Calculating the percentage resource allocated */
/* CSG prioritisation only applies to hybrid-access cells (mirrors the
 * DL variant rgSCHCmnIsDlCsgPrio). */
28336 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* Compare the non-CSG share of allocated UL PRBs against the configured
 * minimum. NOTE(review): divides by totPrbCnt -- confirm non-zero. */
28342 if (((cmnUlCell->ncsgPrbCnt * 100) /cmnUlCell->totPrbCnt) < cell->minUlResNonCsg)
28353 /** @brief Pre-scheduling step: runs the specific DL pre-scheduler for
28354 * the first cell and orders the cells for subsequent processing.
28357 * Function: rgSchCmnPreDlSch
28359 * @param [in] Inst schInst;
28363 Void rgSchCmnPreDlSch
28365 RgSchCellCb **cell,
28367 RgSchCellCb **cellLst
28370 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell[0]);
/* Guard against an out-of-range cell count. */
28375 if(nCell > CM_LTE_MAX_CELLS)
28380 if (cell[0]->isDlDataAllwd && (cell[0]->stopDlSch == FALSE))
28382 /* Specific DL scheduler to perform UE scheduling */
28383 cellSch->apisDl->rgSCHDlPreSched(cell[0]);
28385 /* Rearranging the cell entries based on their remueCnt in SF.
28386 * cells will be processed in the order of number of ue scheduled
28388 for (idx = 0; idx < nCell; idx++)
/* Insertion sort into cellLst: each cell is placed before the first
 * already-placed cell whose subframe has a larger remaining-UE count,
 * yielding ascending order of remUeCnt. */
28391 cellSch = RG_SCH_CMN_GET_CELL(cell[idx]);
28392 sf = cellSch->allocInfo.dedAlloc.dedDlSf;
28396 cellLst[idx] = cell[idx];
28400 for(j = 0; j < idx; j++)
28402 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cellLst[j]);
28403 RgSchDlSf *subfrm = cmnCell->allocInfo.dedAlloc.dedDlSf;
28405 if(sf->remUeCnt < subfrm->remUeCnt)
28408 for(k = idx; k > j; k--)
28410 cellLst[k] = cellLst[k-1];
28415 cellLst[j] = cell[idx];
/* Fallback path: copy the cells through in their given order. */
28420 for (idx = 0; idx < nCell; idx++)
28422 cellLst[idx] = cell[idx];
28428 /** @brief Post-scheduling hook run after DL scheduling completes
28431 * Function: rgSchCmnPstDlSch
28433 * @param [in] RgSchCellCb *cell
28437 Void rgSchCmnPstDlSch(RgSchCellCb *cell)
28439 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Invoke the scheduler-specific post-scheduling callback only when DL
 * data scheduling is allowed and not stopped for this cell. */
28442 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
28444 cellSch->apisDl->rgSCHDlPstSched(cell->instIdx);
/* Computes the periodic CQI (PUCCH) report payload size in bits for the
 * UE's configured periodic reporting mode.  The reported RI is consulted
 * only for TM3/TM4; the size further depends on the antenna count and,
 * for sub-band modes, on cqiCb->label.  NOTE(review): large parts of the
 * switch fall outside this excerpt; the visible sizes (4 + label,
 * 7 + label) appear to follow 36.213 Table 7.2.2-3 -- confirm. */
28448 uint8_t rgSCHCmnCalcPcqiBitSz(RgSchUeCb *ueCb, uint8_t numTxAnt)
28450 uint8_t confRepMode;
28453 RgSchUePCqiCb *cqiCb = ueCb->nPCqiCb;
28456 confRepMode = cqiCb->cqiCfg.cqiSetup.prdModeEnum;
/* RI reporting applies only to spatial-multiplexing modes TM3/TM4. */
28457 if((ueCb->mimoInfo.txMode != RGR_UE_TM_3) &&
28458 (ueCb->mimoInfo.txMode != RGR_UE_TM_4))
28464 ri = cqiCb->perRiVal;
28466 switch(confRepMode)
28468 case RGR_PRD_CQI_MOD10:
28474 case RGR_PRD_CQI_MOD11:
28487 else if(numTxAnt == 4)
28500 /* This is number of antenna case 1.
28501 * This is not applicable for Mode 1-1.
28502 * So setting it to invalid value */
28508 case RGR_PRD_CQI_MOD20:
28516 pcqiSz = 4 + cqiCb->label;
28521 case RGR_PRD_CQI_MOD21:
28536 else if(numTxAnt == 4)
28549 /* This might be number of antenna case 1.
28550 * For mode 2-1 wideband case only antenna port 2 or 4 is supported.
28551 * So setting invalid value.*/
28559 pcqiSz = 4 + cqiCb->label;
28563 pcqiSz = 7 + cqiCb->label;
28576 /** @brief DL scheduler for SPS, and all other downlink data
28580 * Function: rgSCHCmnDlSch
28582 * @param [in] RgSchCellCb *cell
28587 Void rgSCHCmnDlSch(RgSchCellCb *cell)
28590 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
28592 RgSchDynTddCb *rgSchDynTddInfo = &(rgSchCb[cell->instIdx].rgSchDynTdd);
28593 uint16_t dlCntrlSfIdx;
28597 dlSf = rgSCHUtlSubFrmGet(cell, cellSch->dl.time);
/* Dynamic TDD: if the subframe RG_SCH_CMN_DL_DELTA ahead is already
 * marked UL, count and log the clash (counter bumped for cell 1 only);
 * the hidden lines presumably bail out of DL scheduling -- confirm. */
28599 if (rgSchDynTddInfo->isDynTddEnbld)
28601 RG_SCH_DYN_TDD_GET_SFIDX(dlCntrlSfIdx, rgSchDynTddInfo->crntDTddSfIdx,
28602 RG_SCH_CMN_DL_DELTA);
28603 if(RG_SCH_DYNTDD_DLC_ULD == rgSchDynTddInfo->sfInfo[dlCntrlSfIdx].sfType)
28605 if(1 == cell->cellId)
28607 ul5gtfsidDlAlreadyMarkUl++;
/* NOTE(review): printf on the scheduling fast path; consider RLOG. */
28609 printf("ul5gtfsidDlAlreadyMarkUl: %d, [sfn:sf] [%04d:%02d]\n",
28610 ul5gtfsidDlAlreadyMarkUl, cellSch->dl.time.sfn,
28611 cellSch->dl.time.slot);
28619 /* Specific DL scheduler to perform UE scheduling */
28620 cellSch->apisDl->rgSCHDlNewSched(cell, &cellSch->allocInfo);
28621 /* LTE_ADV_FLAG_REMOVED_END */
28623 /* call common allocator for RB Allocation */
28624 rgSCHCmnDlRbAlloc(cell, &cellSch->allocInfo);
28626 /* Finalize the Allocations for reqested Against alloced */
28627 rgSCHCmnDlAllocFnlz(cell);
28629 /* Perform Pdcch allocations for PDCCH Order Q.
28630 * As of now, giving this the least preference.
28631 * This func call could be moved above other allocations
28633 rgSCHCmnGenPdcchOrder(cell, dlSf);
28635 /* Do group power control for PUCCH */
28636 rgSCHCmnGrpPwrCntrlPucch(cell, dlSf);
28641 /**********************************************************************
28644 **********************************************************************/