1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /************************************************************************
Desc: C source code for Entry point functions
29 **********************************************************************/
/** @file rg_sch_cmn.c
@brief This file implements the schedulers main access to MAC layer code. */
/* Identifiers used by the RLOG radio-logging framework for this file. */
static const char* RLOG_MODULE_NAME="MAC";
static int RLOG_FILE_ID=187;
static int RLOG_MODULE_ID=4096;
39 /* header include files -- defines (.h) */
40 #include "common_def.h"
46 #include "rg_sch_err.h"
47 #include "rg_sch_inf.h"
49 #include "rg_sch_cmn.h"
50 #include "rl_interface.h"
51 #include "rl_common.h"
53 /* header/extern include files (.x) */
54 #include "tfu.x" /* TFU types */
55 #include "lrg.x" /* layer management typedefs for MAC */
56 #include "rgr.x" /* layer management typedefs for MAC */
57 #include "rgm.x" /* layer management typedefs for MAC */
58 #include "rg_sch_inf.x" /* typedefs for Scheduler */
59 #include "rg_sch.x" /* typedefs for Scheduler */
60 #include "rg_sch_cmn.x" /* typedefs for Scheduler */
62 #include "lrg.x" /* Stats Structures */
63 #endif /* MAC_SCH_STATS */
66 #endif /* __cplusplus */
/* eMTC UL statistics counters — SR indications and BSR-timer-triggered
 * transmissions; presumably incremented in the eMTC UL scheduling paths
 * (not visible in this chunk). */
uint32_t emtcStatsUlTomSrInd;
uint32_t emtcStatsUlBsrTmrTxp;
/* Absolute difference of two ITBS values.
 * NOTE: both arguments are evaluated twice — do not pass expressions
 * with side effects. */
#define RG_ITBS_DIFF(_x, _y) (((_x) < (_y)) ? ((_y) - (_x)) : ((_x) - (_y)))
74 Void rgSCHSc1UlInit ARGS((RgUlSchdApis *apis));
75 #ifdef RG_PHASE2_SCHED
76 Void rgSCHRrUlInit ARGS((RgUlSchdApis *apis));
78 Void rgSCHEmtcHqInfoFree ARGS((RgSchCellCb *cell, RgSchDlHqProcCb *hqP));
79 Void rgSCHEmtcRrUlInit ARGS((RgUlSchdApis *apis));
80 Void rgSCHEmtcCmnDlInit ARGS((Void));
81 Void rgSCHEmtcCmnUlInit ARGS((Void));
82 Void rgSCHEmtcCmnUeNbReset ARGS((RgSchUeCb *ueCb));
83 RgSchCmnCqiToTbs *rgSchEmtcCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
85 Void rgSCHMaxciUlInit ARGS((RgUlSchdApis *apis));
86 Void rgSCHPfsUlInit ARGS((RgUlSchdApis *apis));
88 Void rgSCHSc1DlInit ARGS((RgDlSchdApis *apis));
89 #ifdef RG_PHASE2_SCHED
90 Void rgSCHRrDlInit ARGS((RgDlSchdApis *apis));
92 Void rgSCHEmtcRrDlInit ARGS((RgDlEmtcSchdApis *apis));
94 Void rgSCHMaxciDlInit ARGS((RgDlSchdApis *apis));
95 Void rgSCHPfsDlInit ARGS((RgDlSchdApis *apis));
97 Void rgSCHDlfsInit ARGS((RgDlfsSchdApis *apis));
101 Void rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl ARGS((RgSchCellCb *cell));
102 Void rgSCHCmnGetEmtcDciFrmtSizes ARGS((RgSchCellCb *cell));
103 Void rgSCHEmtcRrUlProcRmvFrmRetx ARGS((RgSchCellCb *cell, RgSchUlHqProcCb *proc));
104 S16 rgSCHCmnPrecompEmtcMsg3Vars
106 RgSchCmnUlCell *cellUl,
112 Void rgSCHEmtcCmnUeCcchSduDel
117 Void rgSCHEmtcRmvFrmTaLst
119 RgSchCmnDlCell *cellDl,
122 Void rgSCHEmtcInitTaLst
124 RgSchCmnDlCell *cellDl
126 Void rgSCHEmtcAddToTaLst
128 RgSchCmnDlCell *cellDl,
135 static Void rgSCHDlSiSched ARGS((RgSchCellCb *cell,
136 RgSchCmnDlRbAllocInfo *allocInfo,
137 RgInfSfAlloc *subfrmAlloc));
138 static Void rgSCHChkNUpdSiCfg ARGS((RgSchCellCb *cell));
139 static Void rgSCHSelectSi ARGS((RgSchCellCb *cell));
140 #endif /*RGR_SI_SCH*/
141 /* LTE_ADV_FLAG_REMOVED_START */
144 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
152 static S16 rgSCHCmnBuildRntpInfo (
160 static Void rgSCHCmnNonDlfsType0Alloc
164 RgSchDlRbAlloc *allocInfo,
167 static uint8_t rgSchCmnUlRvIdxToIMcsTbl[4] = {32, 30, 31, 29};
168 static Void rgSCHCmnUlNonadapRetx ARGS((
169 RgSchCmnUlCell *cellUl,
173 static Void rgSCHCmnUlSfRlsRetxProcs ARGS((
179 static S16 rgSCHCmnUlMdfyGrntForCqi ARGS((
186 uint32_t stepDownItbs,
190 static Void rgSCHCmnFillHqPPdcchDciFrmt1 ARGS((
192 RgSchDlRbAlloc *rbAllocInfo,
193 RgSchDlHqProcCb *hqP,
197 static Void rgSCHCmnFillHqPPdcchDciFrmt1A ARGS((
199 RgSchDlRbAlloc *rbAllocInfo,
200 RgSchDlHqProcCb *hqP,
204 static Void rgSCHCmnFillHqPPdcchDciFrmt1B ARGS((
206 RgSchDlRbAlloc *rbAllocInfo,
207 RgSchDlHqProcCb *hqP,
211 static Void rgSCHCmnFillHqPPdcchDciFrmt2 ARGS((
213 RgSchDlRbAlloc *rbAllocInfo,
214 RgSchDlHqProcCb *hqP,
218 static Void rgSCHCmnFillHqPPdcchDciFrmt2A ARGS((
220 RgSchDlRbAlloc *rbAllocInfo,
221 RgSchDlHqProcCb *hqP,
228 Void rgSCHCmnDlSpsSch
232 /* LTE_ADV_FLAG_REMOVED_END */
234 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc ARGS((
236 RgSchCmnDlRbAllocInfo *allocInfo
238 static Void rgSCHBcchPcchDlRbAlloc ARGS((
240 RgSchCmnDlRbAllocInfo *allocInfo
242 static Void rgSCHCmnDlBcchPcchAlloc ARGS((
246 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
249 TfuDlCqiPucch *pucchCqi,
250 RgrUeCqiRept *ueCqiRept,
252 Bool *is2ndCwCqiAvail
254 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
257 TfuDlCqiPusch *puschCqi,
258 RgrUeCqiRept *ueCqiRept,
260 Bool *is2ndCwCqiAvail
263 static Void rgSCHCmnDlCqiOnPucchInd ARGS ((
266 TfuDlCqiPucch *pucchCqi
268 static Void rgSCHCmnDlCqiOnPuschInd ARGS ((
271 TfuDlCqiPusch *puschCqi
274 /* ccpu00117452 - MOD - Changed macro name from
275 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
277 static S16 rgSCHCmnUeDlPwrCtColltCqiRept ARGS((
280 RgrUeCqiRept *ueCqiRept));
281 #endif /* End of RGR_CQI_REPT */
282 /* Fix: syed align multiple UEs to refresh at same time */
283 static Void rgSCHCmnGetRefreshPer ARGS((
287 static S16 rgSCHCmnApplyUeRefresh ARGS((
291 Void rgSCHCmnDlSetUeAllocLmtLa ARGS
296 static Void rgSCHCheckAndSetTxScheme ARGS
304 static uint32_t rgSCHCmnCalcDwPtsTbSz ARGS
314 static Void rgSCHCmnCalcDwPtsTbSz2Cw ARGS
330 static Void rgSCHCmnInitRbAlloc ARGS
336 #endif /* __cplusplus */
/* Common scheduler API dispatch table registered with the MAC layer. */
RgSchdApis rgSchCmnApis;
/* Per-scheduler-type UL/DL API tables — likely populated from the
 * *SchdInits arrays declared below (confirm at init code). */
static RgUlSchdApis rgSchUlSchdTbl[RGSCH_NUM_SCHEDULERS];
static RgDlSchdApis rgSchDlSchdTbl[RGSCH_NUM_SCHEDULERS];
/* eMTC-specific UL/DL scheduler API tables. */
static RgUlSchdApis rgSchEmtcUlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
static RgDlEmtcSchdApis rgSchEmtcDlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
/* DLFS scheduler API table (phase-2 scheduling only).
 * The matching #endif was lost in extraction; restored here. */
#ifdef RG_PHASE2_SCHED
static RgDlfsSchdApis rgSchDlfsSchdTbl[RGSCH_NUM_DLFS_SCHEDULERS];
#endif /* RG_PHASE2_SCHED */
350 RgUlSchdInits rgSchUlSchdInits = RGSCH_ULSCHED_INITS;
351 RgDlSchdInits rgSchDlSchdInits = RGSCH_DLSCHED_INITS;
353 static RgEmtcUlSchdInits rgSchEmtcUlSchdInits = RGSCH_EMTC_ULSCHED_INITS;
354 static RgEmtcDlSchdInits rgSchEmtcDlSchdInits = RGSCH_EMTC_DLSCHED_INITS;
/* DLFS scheduler init table (phase-2 scheduling with TFU upgrade only).
 * The matching #endif was lost in extraction; restored here. */
#if (defined (RG_PHASE2_SCHED) && defined (TFU_UPGRADE))
static RgDlfsSchdInits rgSchDlfsSchdInits = RGSCH_DLFSSCHED_INITS;
#endif /* RG_PHASE2_SCHED && TFU_UPGRADE */
360 typedef Void (*RgSchCmnDlAllocRbFunc) ARGS((RgSchCellCb *cell, RgSchDlSf *subFrm,
361 RgSchUeCb *ue, uint32_t bo, uint32_t *effBo, RgSchDlHqProcCb *proc,
362 RgSchCmnDlRbAllocInfo *cellWdAllocInfo));
363 typedef uint8_t (*RgSchCmnDlGetPrecInfFunc) ARGS((RgSchCellCb *cell, RgSchUeCb *ue,
364 uint8_t numLyrs, Bool bothCwEnbld));
365 static Void rgSCHCmnDlAllocTxRbTM1 ARGS((
371 RgSchDlHqProcCb *proc,
372 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
374 static Void rgSCHCmnDlAllocTxRbTM2 ARGS((
380 RgSchDlHqProcCb *proc,
381 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
383 static Void rgSCHCmnDlAllocTxRbTM3 ARGS((
389 RgSchDlHqProcCb *proc,
390 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
392 static Void rgSCHCmnDlAllocTxRbTM4 ARGS((
398 RgSchDlHqProcCb *proc,
399 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
402 static Void rgSCHCmnDlAllocTxRbTM5 ARGS((
408 RgSchDlHqProcCb *proc,
409 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
412 static Void rgSCHCmnDlAllocTxRbTM6 ARGS((
418 RgSchDlHqProcCb *proc,
419 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
421 static Void rgSCHCmnDlAllocTxRbTM7 ARGS((
427 RgSchDlHqProcCb *proc,
428 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
430 static Void rgSCHCmnDlAllocRetxRbTM1 ARGS((
436 RgSchDlHqProcCb *proc,
437 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
439 static Void rgSCHCmnDlAllocRetxRbTM2 ARGS((
445 RgSchDlHqProcCb *proc,
446 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
448 static Void rgSCHCmnDlAllocRetxRbTM3 ARGS((
454 RgSchDlHqProcCb *proc,
455 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
457 static Void rgSCHCmnDlAllocRetxRbTM4 ARGS((
463 RgSchDlHqProcCb *proc,
464 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
467 static Void rgSCHCmnDlAllocRetxRbTM5 ARGS((
473 RgSchDlHqProcCb *proc,
474 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
477 static Void rgSCHCmnDlAllocRetxRbTM6 ARGS((
483 RgSchDlHqProcCb *proc,
484 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
486 static Void rgSCHCmnDlAllocRetxRbTM7 ARGS((
492 RgSchDlHqProcCb *proc,
493 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
497 static uint8_t rgSchGetN1ResCount ARGS ((
501 Bool rgSchCmnChkDataOnlyOnPcell
507 uint8_t rgSCHCmnCalcPcqiBitSz
/* Functions specific to each transmission mode for DL Tx RB Allocation*/
/* NOTE(review): the [7] and [9] variants of dlAllocTxRbFunc /
 * dlAllocRetxRbFunc below are alternate definitions that were originally
 * separated by preprocessor conditionals lost in extraction; as written
 * they are duplicate definitions — restore the guards before building. */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[7] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7};

/* Functions specific to each transmission mode for DL Retx RB Allocation*/
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[7] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7};

/* Functions specific to each transmission mode for DL Tx RB Allocation
 * (9-entry variant; trailing NULLP slots presumably for TM8/TM9). */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[9] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7, NULLP, NULLP};

/* Functions specific to each transmission mode for DL Retx RB Allocation
 * (9-entry variant). */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[9] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7, NULLP, NULLP};
537 static uint8_t rgSCHCmnDlTM3PrecInf2 ARGS((
543 static uint8_t rgSCHCmnDlTM3PrecInf4 ARGS((
549 static uint8_t rgSCHCmnDlTM4PrecInf2 ARGS((
555 static uint8_t rgSCHCmnDlTM4PrecInf4 ARGS((
561 /* Functions specific to each transmission mode for DL RB Allocation*/
562 RgSchCmnDlGetPrecInfFunc getPrecInfoFunc[2][2] = {
563 {rgSCHCmnDlTM3PrecInf2, rgSCHCmnDlTM3PrecInf4},
564 {rgSCHCmnDlTM4PrecInf2, rgSCHCmnDlTM4PrecInf4}
567 static S16 rgSCHCmnDlAlloc1CwRetxRb ARGS((
571 RgSchDlHqTbCb *tbInfo,
576 static S16 rgSCHCmnDlAlloc2CwRetxRb ARGS((
580 RgSchDlHqProcCb *proc,
585 static Void rgSCHCmnDlTM3TxTx ARGS((
591 RgSchDlHqProcCb *proc,
592 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
594 static Void rgSCHCmnDlTM3TxRetx ARGS((
600 RgSchDlHqProcCb *proc,
601 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
603 static Void rgSCHCmnDlTM3RetxRetx ARGS((
609 RgSchDlHqProcCb *proc,
610 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
613 static Void rgSCHCmnNonDlfsUpdTyp2Alloc ARGS((
619 /* LTE_ADV_FLAG_REMOVED_START */
621 static Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc ARGS((
628 /* LTE_ADV_FLAG_REMOVED_END */
629 static Void rgSCHCmnDlRbInfoAddUeTx ARGS((
631 RgSchCmnDlRbAllocInfo *allocInfo,
633 RgSchDlHqProcCb *proc
635 static Void rgSCHCmnDlRbInfoAddUeRetx ARGS((
637 RgSchCmnDlRbAllocInfo *allocInfo,
641 static Void rgSCHCmnDlAdd2NonSchdRetxLst ARGS((
642 RgSchCmnDlRbAllocInfo *allocInfo,
644 RgSchDlHqProcCb *proc
646 static S16 rgSCHCmnDlAlloc2CwTxRetxRb ARGS((
650 RgSchDlHqTbCb *reTxTb,
655 static S16 rgSCHCmnDlAlloc2CwTxRb ARGS((
659 RgSchDlHqProcCb *proc,
664 static S16 rgSCHCmnDlAlloc1CwTxRb ARGS((
668 RgSchDlHqTbCb *tbInfo,
674 static Void rgSCHCmnFillHqPTb ARGS((
676 RgSchDlRbAlloc *rbAllocInfo,
682 static Void rgSCHCmnDlGetBestFitHole ARGS((
685 uint32_t *crntAllocMask,
688 uint8_t *allocNumRbs,
691 #ifdef RGSCH_SPS_UNUSED
692 static uint32_t rgSCHCmnGetRaType1Mask ARGS((
698 static uint32_t rgSCHCmnGetRaType0Mask ARGS((
702 static uint32_t rgSCHCmnGetRaType2Mask ARGS((
708 Bool rgSCHCmnRetxAllocAvoid ARGS((
711 RgSchDlHqProcCb *proc
714 uint16_t rgSCHCmnGetSiSetId ARGS((
717 uint16_t minPeriodicity
//TODO_SID: Currently table is only for 100 Prbs. Need to modify wrt VRBG table 8.1.5.2.1-1 V5G_213
/* 5GTF transport block sizes indexed by MCS (0..MAX_5GTF_MCS-1);
 * units presumably bits — confirm against V5G_213. */
uint32_t rgSch5gtfTbSzTbl[MAX_5GTF_MCS] =
{1864, 5256, 8776, 13176, 17576, 21976, 26376, 31656, 35176, 39576, 43976, 47496, 52776, 59376, 66392};
/* 5GTF debug/statistics counters (zero-initialised globals; incremented
 * at various scheduler stages not visible in this chunk). */
uint32_t g5gtfTtiCnt = 0;
uint32_t gUl5gtfSrRecv = 0;
uint32_t gUl5gtfBsrRecv = 0;
uint32_t gUl5gtfUeSchPick = 0;
uint32_t gUl5gtfPdcchSchd = 0;
uint32_t gUl5gtfAllocAllocated = 0;
uint32_t gUl5gtfUeRbAllocDone = 0;
uint32_t gUl5gtfUeRmvFnlzZeroBo = 0;
uint32_t gUl5gtfUeFnlzReAdd = 0;
uint32_t gUl5gtfPdcchSend = 0;
uint32_t gUl5gtfRbAllocFail = 0;
uint32_t ul5gtfsidUlMarkUl = 0;
uint32_t ul5gtfsidDlSchdPass = 0;
uint32_t ul5gtfsidDlAlreadyMarkUl = 0;
uint32_t ul5gtfTotSchdCnt = 0;
/* Beta-offset index to beta value mappings used for PUSCH control
 * multiplexing, stored as parts per 1000; reserved entries are 0.
 * CQI table: Refer 36.213 sec 8.6.3 Tbl 8.6.3-3. The HARQ-ACK table
 * presumably follows Tbl 8.6.3-1 — confirm. */
uint32_t rgSchCmnBetaCqiOffstTbl[16] = {
   0,    0,    1125, 1250,
   1375, 1625, 1750, 2000,
   2250, 2500, 2875, 3125,
   3500, 4000, 5000, 6250
};
uint32_t rgSchCmnBetaHqOffstTbl[16] = {
   2000,  2500,  3125,   4000,
   5000,  6250,  8000,   10000,
   12625, 15875, 20000,  31000,
   50000, 80000, 126000, 0
};
751 uint32_t rgSchCmnBetaRiOffstTbl[16] = {1250, 1625, 2000,
752 2500, 3125, 4000, 5000, 6250, 8000, 10000,12625,
/* Differential CQI offset per 3-bit differential-CQI level
 * (presumably 36.213 Table 7.2-2 — confirm). */
S8 rgSchCmnDlCqiDiffOfst[8] = {0, 1, 2, 3, -4, -3, -2, -1};
/* Include CRS REs while calculating Efficiency. */

/* Appears to map antenna-port count (1/2/4) to a compact index 0/1/2
 * for tables dimensioned by antenna-port index — confirm at use sites. */
const static uint8_t rgSchCmnAntIdx[5] = {0, 0, 1, 0, 2};

/* CRS resource elements, indexed directly by antenna-port count
 * (values 6/12/16 for 1/2/4 ports; per-RB-pair — TODO confirm). */
const static uint8_t rgSchCmnNumResForCrs[5] = {0, 6, 12, 0, 16};

/* Counter of CFI switches (zero-initialised; updated elsewhere). */
uint32_t cfiSwitchCnt;
/* Aperiodic-CQI related differential-CQI tables (UE-selected vs
 * eNB-configured). NOTE(review): exact semantics not visible in this
 * chunk — confirm at use sites. */
S8 rgSchCmnApUeSelDiffCqi[4] = {1, 2, 3, 4};
S8 rgSchCmnApEnbConfDiffCqi[4] = {0, 1, 2, -1};
769 typedef struct rgSchCmnDlUeDciFrmtOptns
771 TfuDciFormat spfcDciFrmt; /* TM(Transmission Mode) specific DCI format.
772 * Search space : UE Specific by C-RNTI only. */
773 uint8_t spfcDciRAType; /* Resource Alloctn(RA) type for spfcDciFrmt */
774 TfuDciFormat prfrdDciFrmt; /* Preferred DCI format among the available
775 * options for TD (Transmit Diversity) */
776 uint8_t prfrdDciRAType; /* Resource Alloctn(RA) type for prfrdDciFrmt */
777 }RgSchCmnDlUeDciFrmtOptns;
780 /* DCI Format options for each Transmission Mode */
781 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[7] = {
782 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
783 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
784 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
785 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
786 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
787 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
788 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
792 /* DCI Format options for each Transmission Mode */
793 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[9] = {
794 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
795 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
796 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
797 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
798 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
799 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
800 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
/* IMCS to (modulation order, ITBS) mapping entry; the typedef names an
 * array of 29 such entries, one per IMCS value 0..28.
 * (The struct's opening brace was lost in extraction; restored.) */
typedef struct rgSchCmnDlImcsTbl
{
   uint8_t modOdr; /* Modulation Order */
   uint8_t iTbs;   /* ITBS */
}RgSchCmnDlImcsTbl[29];
811 const struct rgSchCmnMult235Info
813 uint8_t match; /* Closest number satisfying 2^a.3^b.5^c, with a bias
814 * towards the smaller number */
815 uint8_t prvMatch; /* Closest number not greater than array index
816 * satisfying 2^a.3^b.5^c */
817 } rgSchCmnMult235Tbl[110+1] = {
819 {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {6, 6}, {8, 8},
820 {9, 9}, {10, 10}, {10, 10}, {12, 12}, {12, 12}, {15, 12}, {15, 15},
821 {16, 16}, {16, 16}, {18, 18}, {18, 18}, {20, 20}, {20, 20}, {20, 20},
822 {24, 20}, {24, 24}, {25, 25}, {25, 25}, {27, 27}, {27, 27}, {30, 27},
823 {30, 30}, {30, 30}, {32, 32}, {32, 32}, {32, 32}, {36, 32}, {36, 36},
824 {36, 36}, {36, 36}, {40, 36}, {40, 40}, {40, 40}, {40, 40}, {45, 40},
825 {45, 40}, {45, 45}, {45, 45}, {48, 45}, {48, 48}, {48, 48}, {50, 50},
826 {50, 50}, {50, 50}, {54, 50}, {54, 54}, {54, 54}, {54, 54}, {54, 54},
827 {60, 54}, {60, 54}, {60, 60}, {60, 60}, {60, 60}, {64, 60}, {64, 64},
828 {64, 64}, {64, 64}, {64, 64}, {64, 64}, {72, 64}, {72, 64}, {72, 64},
829 {72, 72}, {72, 72}, {75, 72}, {75, 75}, {75, 75}, {75, 75}, {80, 75},
830 {80, 75}, {80, 80}, {81, 81}, {81, 81}, {81, 81}, {81, 81}, {81, 81},
831 {90, 81}, {90, 81}, {90, 81}, {90, 81}, {90, 90}, {90, 90}, {90, 90},
832 {90, 90}, {96, 90}, {96, 90}, {96, 96}, {96, 96}, {96, 96}, {100, 96},
833 {100, 100}, {100, 100}, {100, 100}, {100, 100}, {100, 100}, {108, 100},
834 {108, 100}, {108, 100}, {108, 108}, {108, 108}, {108, 108}
/* BI table from 36.321 Table 7.2-1: Backoff Indicator index to backoff
 * time in milliseconds. */
const static S16 rgSchCmnBiTbl[RG_SCH_CMN_NUM_BI_VAL] = {
0, 10, 20, 30,40,60,80,120,160,240,320,480,960};
841 RgSchCmnUlCqiInfo rgSchCmnUlCqiTbl[RG_SCH_CMN_UL_NUM_CQI] = {
843 {RGSCH_CMN_QM_CQI_1,RGSCH_CMN_UL_EFF_CQI_1 },
844 {RGSCH_CMN_QM_CQI_2,RGSCH_CMN_UL_EFF_CQI_2 },
845 {RGSCH_CMN_QM_CQI_3,RGSCH_CMN_UL_EFF_CQI_3 },
846 {RGSCH_CMN_QM_CQI_4,RGSCH_CMN_UL_EFF_CQI_4 },
847 {RGSCH_CMN_QM_CQI_5,RGSCH_CMN_UL_EFF_CQI_5 },
848 {RGSCH_CMN_QM_CQI_6,RGSCH_CMN_UL_EFF_CQI_6 },
849 {RGSCH_CMN_QM_CQI_7,RGSCH_CMN_UL_EFF_CQI_7 },
850 {RGSCH_CMN_QM_CQI_8,RGSCH_CMN_UL_EFF_CQI_8 },
851 {RGSCH_CMN_QM_CQI_9,RGSCH_CMN_UL_EFF_CQI_9 },
852 {RGSCH_CMN_QM_CQI_10,RGSCH_CMN_UL_EFF_CQI_10 },
853 {RGSCH_CMN_QM_CQI_11,RGSCH_CMN_UL_EFF_CQI_11 },
854 {RGSCH_CMN_QM_CQI_12,RGSCH_CMN_UL_EFF_CQI_12 },
855 {RGSCH_CMN_QM_CQI_13,RGSCH_CMN_UL_EFF_CQI_13 },
856 {RGSCH_CMN_QM_CQI_14,RGSCH_CMN_UL_EFF_CQI_14 },
857 {RGSCH_CMN_QM_CQI_15,RGSCH_CMN_UL_EFF_CQI_15 },
861 /* This table maps a (delta_offset * 2 + 2) to a (beta * 8)
862 * where beta is 10^-(delta_offset/10) rounded off to nearest 1/8
864 static uint16_t rgSchCmnUlBeta8Tbl[29] = {
865 6, RG_SCH_CMN_UL_INVALID_BETA8, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23,
866 25, 28, 32, RG_SCH_CMN_UL_INVALID_BETA8, 40, RG_SCH_CMN_UL_INVALID_BETA8,
867 50, RG_SCH_CMN_UL_INVALID_BETA8, 64, RG_SCH_CMN_UL_INVALID_BETA8, 80,
868 RG_SCH_CMN_UL_INVALID_BETA8, 101, RG_SCH_CMN_UL_INVALID_BETA8, 127,
869 RG_SCH_CMN_UL_INVALID_BETA8, 160
/* QCI to SVC priority mapping. Index specifies the Qci.
 * (Priority ordering convention not visible here — confirm at use.) */
static uint8_t rgSchCmnDlQciPrio[RG_SCH_CMN_MAX_QCI] = RG_SCH_CMN_QCI_TO_PRIO;
/* CQI to PDSCH allocation-efficiency tables, measured per 1024 REs,
 * indexed [CFI][CQI]. The first element of each row stands for when
 * the CQI is not known. Values are currently derived from 36.213;
 * just these tables need to be edited to modify the resource
 * allocation behaviour. (The original single-dimensional array was
 * replaced by 2 dimensions for different CFI values.) */
static uint16_t rgSchCmnCqiPdschEff[4][16] = {RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI2,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI3};

/* Same mapping for two-codeword/2-layer transmission. */
static uint16_t rgSchCmn2LyrCqiPdschEff[4][16] = {RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI2, RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI3};
/* This configuration determines the translation of a UE's CQI to its */
/* PDCCH coding efficiency. This may be edited based on the installation */
/* DL redundancy-version sequence, presumably indexed by (re)transmission
 * number. (Original note: "RVIdx sequence is corrected".) */
static uint8_t rgSchCmnDlRvTbl[4] = {
   0, 2, 3, 1
};
/* Indexed by [DciFrmt].
 * Considering the following definition in determining the dciFrmt index.
 * (The remainder of this comment and its terminator were lost in
 * extraction.) */
/* Per-DCI-format sizes, indexed by DCI format per the comment above;
 * uninitialised here, so presumably computed at configuration time —
 * confirm. */
static uint16_t rgSchCmnDciFrmtSizes[10];
/* CQI to PDCCH coding-efficiency mapping. */
static uint16_t rgSchCmnCqiPdcchEff[16] = RG_SCH_CMN_CQI_TO_PDCCH_EFF;
917 RgSchTddUlDlSubfrmTbl rgSchTddUlDlSubfrmTbl = {
918 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME},
919 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
920 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
921 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
922 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
923 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
924 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME}
929 uint8_t rgSchTddSpsDlMaxRetxTbl[RGSCH_MAX_TDD_UL_DL_CFG] = {
941 /* Special Subframes in OFDM symbols */
942 /* ccpu00134197-MOD-Correct the number of symbols */
943 RgSchTddSplSubfrmInfoTbl rgSchTddSplSubfrmInfoTbl = {
947 {11, 1, 1, 10, 1, 1},
955 /* PHICH 'm' value Table */
956 RgSchTddPhichMValTbl rgSchTddPhichMValTbl = {
957 {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
958 {0, 1, 0, 0, 1, 0, 1, 0, 0, 1},
959 {0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
960 {1, 0, 0, 0, 0, 0, 0, 0, 1, 1},
961 {0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
962 {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
963 {1, 1, 0, 0, 0, 1, 1, 0, 0, 1}
966 /* PHICH 'K' value Table */
967 RgSchTddKPhichTbl rgSchTddKPhichTbl = {
968 {0, 0, 4, 7, 6, 0, 0, 4, 7, 6},
969 {0, 0, 4, 6, 0, 0, 0, 4, 6, 0},
970 {0, 0, 6, 0, 0, 0, 0, 6, 0, 0},
971 {0, 0, 6, 6, 6, 0, 0, 0, 0, 0},
972 {0, 0, 6, 6, 0, 0, 0, 0, 0, 0},
973 {0, 0, 6, 0, 0, 0, 0, 0, 0, 0},
974 {0, 0, 4, 6, 6, 0, 0, 4, 7, 0}
977 /* Uplink association index 'K' value Table */
978 RgSchTddUlAscIdxKDashTbl rgSchTddUlAscIdxKDashTbl = {
979 {0, 0, 6, 4, 0, 0, 0, 6, 4, 0},
980 {0, 0, 4, 0, 0, 0, 0, 4, 0, 0},
981 {0, 0, 4, 4, 4, 0, 0, 0, 0, 0},
982 {0, 0, 4, 4, 0, 0, 0, 0, 0, 0},
983 {0, 0, 4, 0, 0, 0, 0, 0, 0, 0},
984 {0, 0, 7, 7, 5, 0, 0, 7, 7, 0}
988 /* PUSCH 'K' value Table */
989 RgSchTddPuschTxKTbl rgSchTddPuschTxKTbl = {
990 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
991 {0, 6, 0, 0, 4, 0, 6, 0, 0, 4},
992 {0, 0, 0, 4, 0, 0, 0, 0, 4, 0},
993 {4, 0, 0, 0, 0, 0, 0, 0, 4, 4},
994 {0, 0, 0, 0, 0, 0, 0, 0, 4, 4},
995 {0, 0, 0, 0, 0, 0, 0, 0, 4, 0},
996 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
/* PDSCH to PUCCH Table for DL Harq Feed back. Based on the
 * Downlink association set index 'K' table; indexed
 * [TDD UL/DL cfg][subframe].
 * (The initializer's closing `};` was lost in extraction; restored.) */
uint8_t rgSchTddPucchTxTbl[7][10] = {
   {4,  6,  0, 0, 0, 4, 6, 0, 0, 0},
   {7,  6,  0, 0, 4, 7, 6, 0, 0, 4},
   {7,  6,  0, 4, 8, 7, 6, 0, 4, 8},
   {4,  11, 0, 0, 0, 7, 6, 6, 5, 5},
   {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
   {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
   {7,  7,  0, 0, 0, 7, 7, 0, 0, 5}
};
1011 /* Table to fetch the next DL sf idx for applying the
1012 new CFI. The next Dl sf Idx at which the new CFI
1013 is applied is always the starting Sf of the next ACK/NACK
1016 Ex: In Cfg-2, sf4 and sf9 are the only subframes at which
1017 a new ACK/NACK bundle of DL subframes can start
1019 D S U D D D S U D D D S U D D D S U D D
1022 dlSf Array for Cfg-2:
1023 sfNum: 0 1 3 4 5 6 8 9 0 1 3 4 5 6 8 9
sfIdx: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1026 If CFI changes at sf0, nearest DL SF bundle >= 4 TTI is sf4
1027 So at sf4 the new CFI can be applied. To arrive at sf4 from
1028 sf0, the sfIdx has to be increased by 3 */
/* DL subframe-index increment used to locate the start of the next
 * ACK/NACK bundle when applying a new CFI (see the explanation in the
 * comment above); indexed [TDD UL/DL cfg][subframe].
 * (The initializer's closing `};` was lost in extraction; restored.) */
uint8_t rgSchTddPdcchSfIncTbl[7][10] = {
/* A/N Bundl: 0,1,5,6*/   {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
/* A/N Bundl: 0,4,5,9*/   {2, 2, 0, 0, 3, 2, 2, 0, 0, 3},
/* A/N Bundl: 4,9*/       {3, 6, 0, 5, 4, 3, 6, 0, 5, 4},
/* A/N Bundl: 1,7,9*/     {4, 3, 0, 0, 0, 4, 5, 4, 6, 5},
/* A/N Bundl: 0,6*/       {4, 3, 0, 0, 6, 5, 4, 7, 6, 5},
/* A/N Bundl: 9*/         {8, 7, 0, 6, 5, 4, 12, 11, 10, 9},
/* A/N Bundl: 0,1,5,6,9*/ {2, 1, 0, 0, 0, 2, 2, 0, 0, 3}
};
1041 /* combine compilation fixes */
1043 /* subframe offset values to be used when twoIntervalsConfig is enabled in UL
1045 RgSchTddSfOffTbl rgSchTddSfOffTbl = {
1046 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1047 {0, 0, 1, -1, 0, 0, 0, 1, -1, 0},
1048 {0, 0, 5, 0, 0, 0, 0, -5, 0, 0},
1049 {0, 0, 1, 1, -2, 0, 0, 0, 0, 0},
1050 {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
1051 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1052 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1056 /* Table to determine when uplink SPS configured grants should
1057 * explicitly be reserved in a subframe. When enries are same
1058 * as that of Msg3SubfrmTbl, indicates competition with msg3.
1059 * As of now, this is same as Msg3SubfrmTbl (leaving out uldlcfg 2),
1060 * except that all 255s are now zeros. */
1061 RgSchTddSpsUlRsrvTbl rgSchTddSpsUlRsrvTbl = {
1062 {0, 0, 0, 6, 8, 0, 0, 0, 6, 8},
1063 {0, 0, 6, 9, 0, 0, 0, 6, 9, 0},
1064 {0, 0, 10, 0, 0, 0, 0, 10, 0, 0},
1065 {0, 0, 0, 0, 8, 0, 7, 7, 14, 0},
1066 {0, 0, 0, 9, 0, 0, 7, 15, 0, 0},
1067 {0, 0, 10, 0, 0, 0, 16, 0, 0, 0},
1068 {0, 0, 0, 0, 8, 0, 0, 0, 9, 0}
1071 /* Inverse DL Assoc Set index Table */
1072 RgSchTddInvDlAscSetIdxTbl rgSchTddInvDlAscSetIdxTbl = {
1073 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1074 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1075 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1076 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1077 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1078 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1079 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1082 #endif /* (LTEMAC_SPS ) */
/* Number of uplink subframes per radio frame for each TDD UL/DL
 * configuration 0..6 (consistent with rgSchTddUlDlSubfrmTbl above). */
static uint8_t rgSchTddNumUlSf[] = {
   6, 4, 2, 3, 2, 1, 5
};
/* Uplink HARQ processes Table: number of UL HARQ processes per TDD
 * UL/DL configuration. (The original comment here said "Downlink" —
 * the two comments were swapped; type names and values identify the
 * tables.) */
RgSchTddUlNumHarqProcTbl rgSchTddUlNumHarqProcTbl = { 7, 4, 2, 3, 2, 1, 6};

/* Downlink HARQ processes Table: maximum number of DL HARQ processes
 * per TDD UL/DL configuration. (Original comment said "Uplink" —
 * swapped.) */
RgSchTddDlNumHarqProcTbl rgSchTddDlNumHarqProcTbl = { 4, 7, 10, 9, 12, 15, 6};
1093 /* Downlink association index set 'K' value Table */
1094 RgSchTddDlAscSetIdxKTbl rgSchTddDlAscSetIdxKTbl = {
1095 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1097 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1099 { {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}} },
1101 { {0, {0}}, {0, {0}}, {3, {7, 6, 11}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1103 { {0, {0}}, {0, {0}}, {4, {12, 8, 7, 11}}, {4, {6, 5, 4, 7}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1105 { {0, {0}}, {0, {0}}, {9, {13, 12, 9, 8, 7, 5, 4, 11, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1107 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1110 /* ccpu132282-ADD-the table rgSchTddDlAscSetIdxKTbl is rearranged in
1111 * decreasing order of Km, this is used to calculate the NCE used for
1112 * calculating N1Pucch Resource for Harq*/
1113 RgSchTddDlAscSetIdxKTbl rgSchTddDlHqPucchResCalTbl = {
1114 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1116 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1118 { {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}} },
1120 { {0, {0}}, {0, {0}}, {3, {11, 7, 6}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1122 { {0, {0}}, {0, {0}}, {4, {12, 11, 8, 7}}, {4, {7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1124 { {0, {0}}, {0, {0}}, {9, {13, 12, 11, 9, 8, 7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1126 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
/* Minimum number of Ack/Nack feedback information entries to be
   stored for each UL-DL configuration */
RgSchTddANFdbkMapTbl rgSchTddANFdbkMapTbl = {4, 4, 2, 3, 2, 1, 5};
1133 /* Uplink switch points and number of UL subframes Table */
1134 RgSchTddMaxUlSubfrmTbl rgSchTddMaxUlSubfrmTbl = {
1135 {2,3,3}, {2,2,2}, {2,1,1}, {1,3,0}, {1,2,0}, {1,1,0}, {2,3,2}
1138 /* Uplink switch points and number of DL subframes Table */
1139 RgSchTddMaxDlSubfrmTbl rgSchTddMaxDlSubfrmTbl = {
1140 {2,2,2}, {2,3,3}, {2,4,4}, {1,7,0}, {1,8,0}, {1,9,0}, {2,2,3}
1143 /* Number of UL subframes present before a particular subframe */
1144 RgSchTddNumUlSubfrmTbl rgSchTddNumUlSubfrmTbl = {
1145 {0, 0, 1, 2, 3, 3, 3, 4, 5, 6},
1146 {0, 0, 1, 2, 2, 2, 2, 3, 4, 4},
1147 {0, 0, 1, 1, 1, 1, 1, 2, 2, 2},
1148 {0, 0, 1, 2, 3, 3, 3, 3, 3, 3},
1149 {0, 0, 1, 2, 2, 2, 2, 2, 2, 2},
1150 {0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
1151 {0, 0, 1, 2, 3, 3, 3, 4, 5, 5}
1154 /* Number of DL subframes present till a particular subframe */
1155 RgSchTddNumDlSubfrmTbl rgSchTddNumDlSubfrmTbl = {
1156 {1, 2, 2, 2, 2, 3, 4, 4, 4, 4},
1157 {1, 2, 2, 2, 3, 4, 5, 5, 5, 6},
1158 {1, 2, 2, 3, 4, 5, 6, 6, 7, 8},
1159 {1, 2, 2, 2, 2, 3, 4, 5, 6, 7},
1160 {1, 2, 2, 2, 3, 4, 5, 6, 7, 8},
1161 {1, 2, 2, 3, 4, 5, 6, 7, 8, 9},
1162 {1, 2, 2, 2, 2, 3, 4, 4, 4, 5}
1166 /* Nearest possible UL subframe Index from UL subframe
1167 * DL Index < UL Index */
1168 RgSchTddLowDlSubfrmIdxTbl rgSchTddLowDlSubfrmIdxTbl = {
1169 {0, 1, 1, 1, 1, 5, 6, 6, 6, 6},
1170 {0, 1, 1, 1, 4, 5, 6, 6, 6, 9},
1171 {0, 1, 1, 3, 4, 5, 6, 6, 8, 9},
1172 {0, 1, 1, 1, 1, 5, 6, 7, 8, 9},
1173 {0, 1, 1, 1, 4, 5, 6, 7, 8, 9},
1174 {0, 1, 1, 3, 4, 5, 6, 7, 8, 9},
1175 {0, 1, 1, 1, 1, 5, 6, 6, 6, 9}
1178 /* Nearest possible DL subframe Index from UL subframe
1179 * DL Index > UL Index
1180 * 10 represents Next SFN low DL Idx */
1181 RgSchTddHighDlSubfrmIdxTbl rgSchTddHighDlSubfrmIdxTbl = {
1182 {0, 1, 5, 5, 5, 5, 6, 10, 10, 10},
1183 {0, 1, 4, 4, 4, 5, 6, 9, 9, 9},
1184 {0, 1, 3, 3, 4, 5, 6, 8, 8, 9},
1185 {0, 1, 5, 5, 5, 5, 6, 7, 8, 9},
1186 {0, 1, 4, 4, 4, 5, 6, 7, 8, 9},
1187 {0, 1, 3, 3, 4, 5, 6, 7, 8, 9},
1188 {0, 1, 5, 5, 5, 5, 6, 9, 9, 9}
1191 /* RACH Message3 related information */
1192 RgSchTddMsg3SubfrmTbl rgSchTddMsg3SubfrmTbl = {
1193 {7, 6, 255, 255, 255, 7, 6, 255, 255, 255},
1194 {7, 6, 255, 255, 8, 7, 6, 255, 255, 8},
1195 {7, 6, 255, 9, 8, 7, 6, 255, 9, 8},
1196 {12, 11, 255, 255, 255, 7, 6, 6, 6, 13},
1197 {12, 11, 255, 255, 8, 7, 6, 6, 14, 13},
1198 {12, 11, 255, 9, 8, 7, 6, 15, 14, 13},
1199 {7, 6, 255, 255, 255, 7, 6, 255, 255, 8}
/* ccpu00132341-DEL Removed rgSchTddRlsDlSubfrmTbl and used Kset table for
 * releasing DL HARQs */
/* DwPTS Scheduling Changes Start */
/* Provides the number of Cell Reference Signals in DwPTS
 * region for the two groups of special-subframe configurations below. */
static uint8_t rgSchCmnDwptsCrs[2][3] = { /* [Spl Sf cfg][Ant Port] */
   {4, 8, 16},  /* Spl Sf cfg 1,2,3,6,7,8 */
   {6, 12, 20}, /* Spl Sf cfg 4 */
};
1213 static S8 rgSchCmnSplSfDeltaItbs[9] = RG_SCH_DWPTS_ITBS_ADJ;
1214 /* DwPTS Scheduling Changes End */
/* Mapping from BSR index (0..63) to reported buffer size in bytes.
 * NOTE(review): values appear to follow the MAC Buffer Status Report
 * buffer-size levels of 3GPP TS 36.321 Table 6.1.3.1-1 - confirm. */
static uint32_t rgSchCmnBsrTbl[64] = {
   0, 10, 12, 14, 17, 19, 22, 26,
   31, 36, 42, 49, 57, 67, 78, 91,
   107, 125, 146, 171, 200, 234, 274, 321,
   376, 440, 515, 603, 706, 826, 967, 1132,
   1326, 1552, 1817, 2127, 2490, 2915, 3413, 3995,
   4677, 5476, 6411, 7505, 8787, 10287, 12043, 14099,
   16507, 19325, 22624, 26487, 31009, 36304, 42502, 49759,
   58255, 68201, 79846, 93479, 109439, 128125, 150000, 220000
};
/* Extended mapping from BSR index (0..63) to buffer size in bytes,
 * covering larger buffer occupancies than rgSchCmnBsrTbl.
 * NOTE(review): values appear to follow the extended BSR buffer-size
 * levels of 3GPP TS 36.321 Table 6.1.3.1-2 - confirm. */
static uint32_t rgSchCmnExtBsrTbl[64] = {
   0, 10, 13, 16, 19, 23, 29, 35,
   43, 53, 65, 80, 98, 120, 147, 181,
   223, 274, 337, 414, 509, 625, 769, 945,
   1162, 1429, 1757, 2161, 2657, 3267, 4017, 4940,
   6074, 7469, 9185, 11294, 13888, 17077, 20999, 25822,
   31752, 39045, 48012, 59039, 72598, 89272, 109774, 134986,
   165989, 204111, 250990, 308634, 379519, 466683, 573866, 705666,
   867737, 1067031, 1312097, 1613447, 1984009, 2439678, 3000000, 3100000
};
1240 uint8_t rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_MAX_CP][RG_SCH_CMN_UL_NUM_CQI];
1242 RgSchTbSzTbl rgTbSzTbl = {
1244 {16, 32, 56, 88, 120, 152, 176, 208, 224, 256, 288, 328, 344, 376, 392, 424, 456, 488, 504, 536, 568, 600, 616, 648, 680, 712, 744, 776, 776, 808, 840, 872, 904, 936, 968, 1000, 1032, 1032, 1064, 1096, 1128, 1160, 1192, 1224, 1256, 1256, 1288, 1320, 1352, 1384, 1416, 1416, 1480, 1480, 1544, 1544, 1608, 1608, 1608, 1672, 1672, 1736, 1736, 1800, 1800, 1800, 1864, 1864, 1928, 1928, 1992, 1992, 2024, 2088, 2088, 2088, 2152, 2152, 2216, 2216, 2280, 2280, 2280, 2344, 2344, 2408, 2408, 2472, 2472, 2536, 2536, 2536, 2600, 2600, 2664, 2664, 2728, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 2984, 2984, 3112},
1245 {24, 56, 88, 144, 176, 208, 224, 256, 328, 344, 376, 424, 456, 488, 520, 568, 600, 632, 680, 712, 744, 776, 808, 872, 904, 936, 968, 1000, 1032, 1064, 1128, 1160, 1192, 1224, 1256, 1288, 1352, 1384, 1416, 1416, 1480, 1544, 1544, 1608, 1608, 1672, 1736, 1736, 1800, 1800, 1864, 1864, 1928, 1992, 1992, 2024, 2088, 2088, 2152, 2152, 2216, 2280, 2280, 2344, 2344, 2408, 2472, 2472, 2536, 2536, 2600, 2600, 2664, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008},
1246 {32, 72, 144, 176, 208, 256, 296, 328, 376, 424, 472, 520, 568, 616, 648, 696, 744, 776, 840, 872, 936, 968, 1000, 1064, 1096, 1160, 1192, 1256, 1288, 1320, 1384, 1416, 1480, 1544, 1544, 1608, 1672, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2024, 2088, 2088, 2152, 2216, 2216, 2280, 2344, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2664, 2728, 2792, 2856, 2856, 2856, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968},
1247 {40, 104, 176, 208, 256, 328, 392, 440, 504, 568, 616, 680, 744, 808, 872, 904, 968, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 1992, 2024, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6200, 6456, 6456},
1248 {56, 120, 208, 256, 328, 408, 488, 552, 632, 696, 776, 840, 904, 1000, 1064, 1128, 1192, 1288, 1352, 1416, 1480, 1544, 1608, 1736, 1800, 1864, 1928, 1992, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2600, 2664, 2728, 2792, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992},
1249 {72, 144, 224, 328, 424, 504, 600, 680, 776, 872, 968, 1032, 1128, 1224, 1320, 1384, 1480, 1544, 1672, 1736, 1864, 1928, 2024, 2088, 2216, 2280, 2344, 2472, 2536, 2664, 2728, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4264, 4392, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9528},
1250 {328, 176, 256, 392, 504, 600, 712, 808, 936, 1032, 1128, 1224, 1352, 1480, 1544, 1672, 1736, 1864, 1992, 2088, 2216, 2280, 2408, 2472, 2600, 2728, 2792, 2984, 2984, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3880, 4008, 4136, 4136, 4264, 4392, 4584, 4584, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448},
1251 {104, 224, 328, 472, 584, 712, 840, 968, 1096, 1224, 1320, 1480, 1608, 1672, 1800, 1928, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536},
1252 {120, 256, 392, 536, 680, 808, 968, 1096, 1256, 1384, 1544, 1672, 1800, 1928, 2088, 2216, 2344, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15264},
1253 {136, 296, 456, 616, 776, 936, 1096, 1256, 1416, 1544, 1736, 1864, 2024, 2216, 2344, 2536, 2664, 2856, 2984, 3112, 3368, 3496, 3624, 3752, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 5160, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568},
1254 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080},
1255 {176, 376, 584, 776, 1000, 1192, 1384, 1608, 1800, 2024, 2216, 2408, 2600, 2792, 2984, 3240, 3496, 3624, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152},
1256 {208, 440, 680, 904, 1128, 1352, 1608, 1800, 2024, 2280, 2472, 2728, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 23688, 24496, 24496, 24496, 24496, 25456},
1257 {224, 488, 744, 1000, 1256, 1544, 1800, 2024, 2280, 2536, 2856, 3112, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 26416, 27376, 27376, 27376, 27376, 28336, 28336},
1258 {256, 552, 840, 1128, 1416, 1736, 1992, 2280, 2600, 2856, 3112, 3496, 3752, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 5992, 6200, 6456, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704},
1259 {280, 600, 904, 1224, 1544, 1800, 2152, 2472, 2728, 3112, 3368, 3624, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 6200, 6456, 6712, 6968, 7224, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008},
1260 {328, 632, 968, 1288, 1608, 1928, 2280, 2600, 2984, 3240, 3624, 3880, 4264, 4584, 4968, 5160, 5544, 5992, 6200, 6456, 6712, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 35160},
1261 {336, 696, 1064, 1416, 1800, 2152, 2536, 2856, 3240, 3624, 4008, 4392, 4776, 5160, 5352, 5736, 6200, 6456, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 39232},
1262 {376, 776, 1160, 1544, 1992, 2344, 2792, 3112, 3624, 4008, 4392, 4776, 5160, 5544, 5992, 6200, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14112, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816},
1263 {408, 840, 1288, 1736, 2152, 2600, 2984, 3496, 3880, 4264, 4776, 5160, 5544, 5992, 6456, 6968, 7224, 7736, 8248, 8504, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19080, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888},
1264 {440, 904, 1384, 1864, 2344, 2792, 3240, 3752, 4136, 4584, 5160, 5544, 5992, 6456, 6968, 7480, 7992, 8248, 8760, 9144, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 20616, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 48936, 51024, 51024, 51024},
1265 {488, 1000, 1480, 1992, 2472, 2984, 3496, 4008, 4584, 4968, 5544, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 9912, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056},
1266 {520, 1064, 1608, 2152, 2664, 3240, 3752, 4264, 4776, 5352, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 10296, 10680, 11448, 11832, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 19080, 19080, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256},
1267 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776},
1268 {584, 1192, 1800, 2408, 2984, 3624, 4264, 4968, 5544, 5992, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 22920, 23688, 24496, 25456, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592},
1269 {616, 1256, 1864, 2536, 3112, 3752, 4392, 5160, 5736, 6200, 6968, 7480, 8248, 8760, 9528, 10296, 10680, 11448, 12216, 12576, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592, 68808, 68808, 68808, 71112},
1270 {712, 1480, 2216, 2984, 3752, 4392, 5160, 5992, 6712, 7480, 8248, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 14688, 15264, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376}
1273 {32, 88, 152, 208, 256, 328, 376, 424, 488, 536, 600, 648, 712, 776, 808, 872, 936, 1000, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2088, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 5992, 6200},
1274 {56, 144, 208, 256, 344, 424, 488, 568, 632, 712, 776, 872, 936, 1000, 1064, 1160, 1224, 1288, 1384, 1416, 1544, 1608, 1672, 1736, 1800, 1864, 1992, 2024, 2088, 2152, 2280, 2344, 2408, 2472, 2536, 2600, 2728, 2792, 2856, 2856, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5352, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992},
1275 {72, 176, 256, 328, 424, 520, 616, 696, 776, 872, 968, 1064, 1160, 1256, 1320, 1416, 1544, 1608, 1672, 1800, 1864, 1992, 2088, 2152, 2216, 2344, 2408, 2536, 2600, 2664, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3368, 3496, 3624, 3624, 3752, 3880, 4008, 4008, 4136, 4264, 4264, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912},
1276 {104, 208, 328, 440, 568, 680, 808, 904, 1032, 1160, 1256, 1384, 1480, 1608, 1736, 1864, 1992, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2856, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4392, 4584, 4776, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12576, 12960, 12960},
1277 {120, 256, 408, 552, 696, 840, 1000, 1128, 1288, 1416, 1544, 1736, 1864, 1992, 2152, 2280, 2408, 2600, 2728, 2856, 2984, 3112, 3240, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840},
1278 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19080},
1279 {176, 392, 600, 808, 1032, 1224, 1480, 1672, 1864, 2088, 2280, 2472, 2728, 2984, 3112, 3368, 3496, 3752, 4008, 4136, 4392, 4584, 4776, 4968, 5160, 5352, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7992, 8248, 8248, 8504, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920},
1280 {224, 472, 712, 968, 1224, 1480, 1672, 1928, 2216, 2472, 2664, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376},
1281 {256, 536, 808, 1096, 1384, 1672, 1928, 2216, 2536, 2792, 3112, 3368, 3624, 3880, 4264, 4584, 4776, 4968, 5352, 5544, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576},
1282 {296, 616, 936, 1256, 1544, 1864, 2216, 2536, 2856, 3112, 3496, 3752, 4136, 4392, 4776, 5160, 5352, 5736, 5992, 6200, 6712, 6968, 7224, 7480, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160},
1283 {328, 680, 1032, 1384, 1736, 2088, 2472, 2792, 3112, 3496, 3880, 4264, 4584, 4968, 5352, 5736, 5992, 6200, 6712, 6968, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 21384, 21384, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888},
1284 {376, 776, 1192, 1608, 2024, 2408, 2792, 3240, 3624, 4008, 4392, 4776, 5352, 5736, 5992, 6456, 6968, 7224, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816},
1285 {440, 904, 1352, 1800, 2280, 2728, 3240, 3624, 4136, 4584, 4968, 5544, 5992, 6456, 6712, 7224, 7736, 8248, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 17568, 17568, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024},
1286 {488, 1000, 1544, 2024, 2536, 3112, 3624, 4136, 4584, 5160, 5736, 6200, 6712, 7224, 7736, 8248, 8760, 9144, 9912, 10296, 10680, 11448, 11832, 12216, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336},
1287 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776},
1288 {600, 1224, 1800, 2472, 3112, 3624, 4264, 4968, 5544, 6200, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 23688, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808},
1289 {632, 1288, 1928, 2600, 3240, 3880, 4584, 5160, 5992, 6456, 7224, 7736, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 68808, 71112, 71112, 71112, 71112},
1290 {696, 1416, 2152, 2856, 3624, 4392, 5160, 5736, 6456, 7224, 7992, 8760, 9528, 10296, 10680, 11448, 12216, 12960, 13536, 14688, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 71112, 73712, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 78704},
1291 {776, 1544, 2344, 3112, 4008, 4776, 5544, 6200, 7224, 7992, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 27376, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936},
1292 {840, 1736, 2600, 3496, 4264, 5160, 5992, 6968, 7736, 8504, 9528, 10296, 11064, 12216, 12960, 13536, 14688, 15264, 16416, 16992, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800},
1293 {904, 1864, 2792, 3752, 4584, 5544, 6456, 7480, 8248, 9144, 10296, 11064, 12216, 12960, 14112, 14688, 15840, 16992, 17568, 18336, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 42368, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 97896, 101840, 101840, 101840},
1294 {1000, 1992, 2984, 4008, 4968, 5992, 6968, 7992, 9144, 9912, 11064, 12216, 12960, 14112, 15264, 15840, 16992, 18336, 19080, 19848, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 31704, 32856, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136},
1295 {1064, 2152, 3240, 4264, 5352, 6456, 7480, 8504, 9528, 10680, 11832, 12960, 14112, 15264, 16416, 16992, 18336, 19080, 20616, 21384, 22920, 23688, 24496, 25456, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816},
1296 {1128, 2280, 3496, 4584, 5736, 6968, 7992, 9144, 10296, 11448, 12576, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 45352, 45352, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840,101840,101840,101840,105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496},
1297 {1192, 2408, 3624, 4968, 5992, 7224, 8504, 9912, 11064, 12216, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 42368, 43816, 45352, 46888, 46888, 48936, 51024, 51024, 52752, 52752, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208},
1298 {1256, 2536, 3752, 5160, 6200, 7480, 8760, 10296, 11448, 12576, 14112, 15264, 16416, 17568, 19080, 20616, 21384, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 43816, 43816, 45352, 46888, 48936, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040,115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208, 137792, 137792, 137792, 142248},
1299 {1480, 2984, 4392, 5992, 7480, 8760, 10296, 11832, 13536, 14688, 16416, 17568, 19080, 20616, 22152, 23688, 25456, 26416, 28336, 29296, 30576, 32856, 34008, 35160, 36696, 37888, 40576, 40576, 42368, 43816, 45352, 46888, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 63776, 63776, 66592, 68808, 68808, 71112, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 81176, 84760, 84760, 87936, 87936, 90816, 90816, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 128496, 128496, 128496, 133208, 133208, 133208, 137792, 137792, 137792, 142248, 142248, 142248, 146856, 146856,149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776}
1302 RgSchUlIMcsTbl rgUlIMcsTbl = {
1303 {2, 0}, {2, 1}, {2, 2}, {2, 3}, {2, 4}, {2, 5},
1304 {2, 6}, {2, 7}, {2, 8}, {2, 9}, {2, 10},
1305 {4, 10}, {4, 11}, {4, 12}, {4, 13}, {4, 14},
1306 {4, 15}, {4, 16}, {4, 17}, {4, 18}, {4, 19},
1307 {6, 19}, {6, 20}, {6, 21}, {6, 22}, {6, 23},
1308 {6, 24}, {6, 25}, {6, 26}
1310 RgSchUeCatTbl rgUeCatTbl = {
1311 /*Column1:Maximum number of bits of an UL-SCH
1312 transport block transmitted within a TTI
1314 Column2:Maximum number of bits of a DLSCH
1315 transport block received within a TTI
1317 Column3:Total number of soft channel bits
1319 Column4:Support for 64QAM in UL
1321 Column5:Maximum number of DL-SCH transport
1322 block bits received within a TTI
1324 Column6:Maximum number of supported layers for
1325 spatial multiplexing in DL
1327 {5160, {10296,0}, 250368, FALSE, 10296, 1},
1328 {25456, {51024,0}, 1237248, FALSE, 51024, 2},
1329 {51024, {75376,0}, 1237248, FALSE, 102048, 2},
1330 {51024, {75376,0}, 1827072, FALSE, 150752, 2},
1331 {75376, {149776,0}, 3667200, TRUE, 299552, 4},
1332 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1333 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1334 {149776,{299856,0}, 35982720,TRUE, 2998560, 8}
1337 /* [ccpu00138532]-ADD-The below table stores the min HARQ RTT time
1338 in Downlink for TDD and FDD. Indices 0 to 6 map to TDD UL/DL config 0-6.
1339 Index 7 maps to FDD. */
1340 uint8_t rgSchCmnHarqRtt[8] = {4,7,10,9,12,15,6,8};
1341 /* CFI switchover window lengths. The number of entries equals
7 TDD UL/DL config indexes (entries 0-6) + 1 FDD index (entry 7). */
1342 uint8_t rgSchCfiSwitchOvrWinLen[] = {7, 4, 2, 3, 2, 1, 6, 8};
1344 /* EffTbl is calculated for single layer and two layers.
1345  * CqiToTbs is calculated for single layer and two layers */
/* Per-CFI DL efficiency tables for normal CP; one element per layer index
 * (RGSCH_MAX_NUM_LYR_PERCW). Populated via rgSCHCmnCompEff()/
 * rgSCHCmn2LyrCompEff() during rgSCHCmnDlInit(). */
1346 RgSchCmnTbSzEff rgSchCmnNorCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1347 RgSchCmnTbSzEff rgSchCmnNorCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1348 /* UL efficiency values for normal and extended CP */
1349 RgSchCmnTbSzEff rgSchCmnNorUlEff[1],rgSchCmnExtUlEff[1];
/* CQI -> TBS-index mapping tables for normal CP, one pair per CFI. */
1350 RgSchCmnCqiToTbs rgSchCmnNorCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1351 RgSchCmnCqiToTbs rgSchCmnNorCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
/* Lookup over the CQI->TBS tables above, indexed
 * [layerIdx][cpType][cfiIdx] (see the assignments in rgSCHCmnDlInit). */
1352 RgSchCmnCqiToTbs *rgSchCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
/* Extended-CP counterparts of the normal-CP efficiency and CQI->TBS
 * tables above. */
1353 RgSchCmnTbSzEff rgSchCmnExtCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1354 RgSchCmnTbSzEff rgSchCmnExtCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1355 RgSchCmnCqiToTbs rgSchCmnExtCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1356 RgSchCmnCqiToTbs rgSchCmnExtCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1357 /* Include CRS REs while calculating Efficiency */
/* DL efficiency-table lookup, indexed
 * [layerIdx][cpType][txAntConfIdx][cfiIdx] (see rgSCHCmnDlInit). */
1358 RgSchCmnTbSzEff *rgSchCmnEffTbl[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_ANT_CONF][RG_SCH_CMN_MAX_CFI];
/* UL efficiency-table lookup, indexed by CP type. */
1359 RgSchCmnTbSzEff *rgSchCmnUlEffTbl[RG_SCH_CMN_MAX_CP];
/* NOTE(review): rgRaPrmblToRaFrmTbl is defined twice below, once with a
 * 5-entry initializer and once with a 4-entry initializer. Presumably the
 * two definitions sit under mutually exclusive preprocessor guards (e.g.
 * TDD vs FDD builds) that are not visible in this excerpt -- confirm;
 * otherwise this is a duplicate definition of the same symbol. */
1361 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3, 1};
1363 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA sub-frames from RA preamble */
1364 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3};
/* Global scheduler-initializer structures for the UL, DL and DLFS
 * (presumably DL frequency-selective) schedulers. */
1367 RgUlSchdInits rgSchUlSchdInits;
1368 RgDlSchdInits rgSchDlSchdInits;
1369 RgDlfsSchdInits rgSchDlfsSchdInits;
/* EMTC scheduler initializers -- presumably compiled only under an EMTC
 * build flag elided from this excerpt; confirm against the full file. */
1371 RgEmtcUlSchdInits rgSchEmtcUlSchdInits;
1372 RgEmtcDlSchdInits rgSchEmtcDlSchdInits;
1376 static S16 rgSCHCmnUeIdleExdThrsld ARGS((
1380 RgSchUeCb* rgSCHCmnGetHoUe ARGS((
1384 static Void rgSCHCmnDelDedPreamble ARGS((
1388 RgSchUeCb* rgSCHCmnGetPoUe ARGS((
1391 CmLteTimingInfo timingInfo
1393 static Void rgSCHCmnDelRachInfo ARGS((
1397 static S16 rgSCHCmnUlRbAllocForPoHoUe ARGS((
1403 static Void rgSCHCmnHdlHoPo ARGS((
1405 CmLListCp *raRspLst,
1406 RgSchRaReqInfo *raReq
1408 static Void rgSCHCmnAllocPoHoGrnt ARGS((
1410 CmLListCp *raRspLst,
1412 RgSchRaReqInfo *raReq
1414 static Void rgSCHCmnFillPdcchOdr2Sf ARGS((
1421 static Void rgSCHCmnDlAdd2PdcchOdrQ ARGS((
1425 static Void rgSCHCmnDlRmvFrmPdcchOdrQ ARGS((
1429 static Void rgSCHCmnUpdNxtPrchMskIdx ARGS((
1432 static Void rgSCHCmnUpdRachParam ARGS((
1435 static S16 rgSCHCmnAllocPOParam ARGS((
1441 uint8_t *prachMskIdx
1443 static Void rgSCHCmnGenPdcchOrder ARGS((
1447 static Void rgSCHCmnCfgRachDedPrm ARGS((
1452 static Void rgSCHCmnHdlUlInactUes ARGS((
1455 static Void rgSCHCmnHdlDlInactUes ARGS((
1458 static Void rgSCHCmnUlInit ARGS((Void
1460 static Void rgSCHCmnDlInit ARGS((Void
1462 static Void rgSCHCmnInitDlRbAllocInfo ARGS((
1463 RgSchCmnDlRbAllocInfo *allocInfo
1465 static Void rgSCHCmnUpdUlCompEffBsr ARGS((
1469 static Void rgSCHCmnUlSetAllUnSched ARGS((
1470 RgSchCmnUlRbAllocInfo *allocInfo
1472 static Void rgSCHCmnUlUpdSf ARGS((
1474 RgSchCmnUlRbAllocInfo *allocInfo,
1477 static Void rgSCHCmnUlHndlAllocRetx ARGS((
1479 RgSchCmnUlRbAllocInfo *allocInfo,
1484 static Void rgSCHCmnGrpPwrCntrlPucch ARGS((
1488 static Void rgSCHCmnGrpPwrCntrlPusch ARGS((
1492 static Void rgSCHCmnDelUeFrmRefreshQ ARGS((
1496 static S16 rgSCHCmnTmrExpiry ARGS((
1497 PTR cb, /* Pointer to timer control block */
1498 S16 tmrEvnt /* Timer Event */
1500 static S16 rgSCHCmnTmrProc ARGS((
1503 static Void rgSCHCmnAddUeToRefreshQ ARGS((
1508 static Void rgSCHCmnDlCcchRetx ARGS((
1510 RgSchCmnDlRbAllocInfo *allocInfo
1512 static Void rgSCHCmnUpdUeMimoInfo ARGS((
1516 RgSchCmnCell *cellSchd
1518 static Void rgSCHCmnUpdUeUlCqiInfo ARGS((
1522 RgSchCmnUe *ueSchCmn,
1523 RgSchCmnCell *cellSchd,
1527 static Void rgSCHCmnDlCcchSduRetx ARGS((
1529 RgSchCmnDlRbAllocInfo *allocInfo
1531 static Void rgSCHCmnDlCcchSduTx ARGS((
1533 RgSchCmnDlRbAllocInfo *allocInfo
1535 static S16 rgSCHCmnCcchSduAlloc ARGS((
1538 RgSchCmnDlRbAllocInfo *allocInfo
1540 static S16 rgSCHCmnCcchSduDedAlloc ARGS((
1544 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc ARGS((
1550 static Void rgSCHCmnInitVars ARGS((
1554 /*ccpu00117180 - DEL - Moved rgSCHCmnUpdVars to .x as its access is now */
1555 static Void rgSCHCmnUlRbAllocForLst ARGS((
1561 CmLListCp *nonSchdLst,
1564 static S16 rgSCHCmnUlRbAllocForUe ARGS((
1571 static Void rgSCHCmnMsg3GrntReq ARGS((
1575 RgSchUlHqProcCb *hqProc,
1576 RgSchUlAlloc **ulAllocRef,
1577 uint8_t *hqProcIdRef
1579 static Void rgSCHCmnDlCcchRarAlloc ARGS((
1582 static Void rgSCHCmnDlCcchTx ARGS((
1584 RgSchCmnDlRbAllocInfo *allocInfo
1586 static Void rgSCHCmnDlBcchPcch ARGS((
1588 RgSchCmnDlRbAllocInfo *allocInfo,
1589 RgInfSfAlloc *subfrmAlloc
1591 Bool rgSCHCmnChkInWin ARGS((
1592 CmLteTimingInfo frm,
1593 CmLteTimingInfo start,
1596 Bool rgSCHCmnChkPastWin ARGS((
1597 CmLteTimingInfo frm,
1600 static Void rgSCHCmnClcAlloc ARGS((
1603 RgSchClcDlLcCb *lch,
1605 RgSchCmnDlRbAllocInfo *allocInfo
1608 static Void rgSCHCmnClcRbAlloc ARGS((
1619 static S16 rgSCHCmnMsg4Alloc ARGS((
1622 RgSchCmnDlRbAllocInfo *allocInfo
1624 static S16 rgSCHCmnMsg4DedAlloc ARGS((
1628 static Void rgSCHCmnDlRaRsp ARGS((
1630 RgSchCmnDlRbAllocInfo *allocInfo
1632 static S16 rgSCHCmnRaRspAlloc ARGS((
1638 RgSchCmnDlRbAllocInfo *allocInfo
1640 static Void rgSCHCmnUlUeDelAllocs ARGS((
1644 static Void rgSCHCmnDlSetUeAllocLmt ARGS((
1649 static S16 rgSCHCmnDlRgrCellCfg ARGS((
1654 static Void rgSCHCmnUlAdapRetx ARGS((
1655 RgSchUlAlloc *alloc,
1656 RgSchUlHqProcCb *proc
1658 static Void rgSCHCmnUlUpdAllocRetx ARGS((
1662 static Void rgSCHCmnUlSfReTxAllocs ARGS((
1666 /* Fix: syed Adaptive Msg3 Retx crash. */
1668 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1672 RgrUeRecfg *ueRecfg,
1676 static Void rgSCHCmnDlHdlTxModeRecfg ARGS
1686 * DL RB allocation specific functions
1689 static Void rgSCHCmnDlRbAlloc ARGS((
1691 RgSchCmnDlRbAllocInfo *allocInfo
1693 static Void rgSCHCmnNonDlfsRbAlloc ARGS((
1695 RgSchCmnDlRbAllocInfo *allocInfo
1697 static S16 rgSCHCmnNonDlfsCmnRbAlloc ARGS((
1699 RgSchDlRbAlloc *cmnAllocInfo));
1702 static Void rgSCHCmnNonDlfsPbchRbAllocAdj ARGS((
1704 RgSchDlRbAlloc *cmnAllocInfo,
1705 uint8_t pbchSsRsSym,
1708 /* Added function to adjust TBSize*/
1709 static Void rgSCHCmnNonDlfsPbchTbSizeAdj ARGS((
1710 RgSchDlRbAlloc *allocInfo,
1711 uint8_t numOvrlapgPbchRb,
1712 uint8_t pbchSsRsSym,
1717 /* Added function to find num of overlapping PBCH rb*/
1718 static Void rgSCHCmnFindNumPbchOvrlapRbs ARGS((
1721 RgSchDlRbAlloc *allocInfo,
1722 uint8_t *numOvrlapgPbchRb
1725 static uint8_t rgSCHCmnFindNumAddtlRbsAvl ARGS((
1728 RgSchDlRbAlloc *allocInfo
1732 static Void rgSCHCmnFindCodeRate ARGS((
1735 RgSchDlRbAlloc *allocInfo,
1741 static Void rgSCHCmnNonDlfsMsg4Alloc ARGS((
1743 RgSchCmnMsg4RbAlloc *msg4AllocInfo,
1746 static S16 rgSCHCmnNonDlfsMsg4RbAlloc ARGS((
1752 static S16 rgSCHCmnNonDlfsUeRbAlloc ARGS((
1756 uint8_t *isDlBwAvail
1759 static uint32_t rgSCHCmnCalcRiv ARGS(( uint8_t bw,
1765 static Void rgSCHCmnUpdHqAndDai ARGS((
1766 RgSchDlHqProcCb *hqP,
1768 RgSchDlHqTbCb *tbCb,
1771 static S16 rgSCHCmnUlCalcAvailBw ARGS((
1773 RgrCellCfg *cellCfg,
1775 uint8_t *rbStartRef,
1778 static S16 rgSCHCmnDlKdashUlAscInit ARGS((
1781 static S16 rgSCHCmnDlANFdbkInit ARGS((
1784 static S16 rgSCHCmnDlNpValInit ARGS((
1787 static S16 rgSCHCmnDlCreateRachPrmLst ARGS((
1790 static S16 rgSCHCmnDlCpyRachInfo ARGS((
1792 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
1795 static S16 rgSCHCmnDlRachInfoInit ARGS((
1798 static S16 rgSCHCmnDlPhichOffsetInit ARGS((
1803 static Void rgSCHCmnFindUlCqiUlTxAnt ARGS
1809 static RgSchCmnRank rgSCHCmnComputeRank ARGS
1812 uint32_t *pmiBitMap,
1816 static RgSchCmnRank rgSCHCmnComp2TxMode3 ARGS
1821 static RgSchCmnRank rgSCHCmnComp4TxMode3 ARGS
1826 static RgSchCmnRank rgSCHCmnComp2TxMode4 ARGS
1831 static RgSchCmnRank rgSCHCmnComp4TxMode4 ARGS
1836 static uint8_t rgSCHCmnCalcWcqiFrmSnr ARGS
1843 /* comcodsepa : start */
1846 * @brief This function computes efficiency and stores in a table.
1850 * Function: rgSCHCmnCompEff
1851 * Purpose: this function computes the efficiency as number of
1852 * bytes per 1024 symbols. The CFI table is also filled
1853 * with the same information such that comparison is valid
1855 * Invoked by: Scheduler
1857 * @param[in] uint8_t noPdcchSym
1858 * @param[in] uint8_t cpType
1859 * @param[in] uint8_t txAntIdx
1860 * @param[in] RgSchCmnTbSzEff* effTbl
1865 static Void rgSCHCmnCompEff
1870 RgSchCmnTbSzEff *effTbl
1873 static Void rgSCHCmnCompEff(noPdcchSym, cpType, txAntIdx, effTbl)
1877 RgSchCmnTbSzEff *effTbl;
1882 uint8_t resOfCrs; /* Effective REs occupied by CRS */
1888 case RG_SCH_CMN_NOR_CP:
1891 case RG_SCH_CMN_EXT_CP:
1895 /* Generate a log error. This case should never be executed */
1899 /* Depending on the Tx Antenna Index, deduct the
1900 * Resource elements for the CRS */
1904 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
1907 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
1910 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
1913 /* Generate a log error. This case should never be executed */
1916 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
1917 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1920 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1922 /* This line computes the coding efficiency per 1024 REs */
1923 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1925 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1930 * @brief This function computes efficiency and stores in a table.
1934 * Function: rgSCHCmnCompUlEff
1935 * Purpose: this function computes the efficiency as number of
1936 * bytes per 1024 symbols. The CFI table is also filled
1937 * with the same information such that comparison is valid
1939 * Invoked by: Scheduler
1941 * @param[in] uint8_t noUlRsSym
1942 * @param[in] uint8_t cpType
1943 * @param[in] uint8_t txAntIdx
1944 * @param[in] RgSchCmnTbSzEff* effTbl
1949 static Void rgSCHCmnCompUlEff
1953 RgSchCmnTbSzEff *effTbl
1956 static Void rgSCHCmnCompUlEff(noUlRsSym, cpType, effTbl)
1959 RgSchCmnTbSzEff *effTbl;
1969 case RG_SCH_CMN_NOR_CP:
1972 case RG_SCH_CMN_EXT_CP:
1976 /* Generate a log error. This case should never be executed */
1980 noResPerRb = ((noSymPerRb - noUlRsSym) * RB_SCH_CMN_NUM_SCS_PER_RB);
1981 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1984 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1986 /* This line computes the coding efficiency per 1024 REs */
1987 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1989 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1995 * @brief This function computes efficiency for 2 layers and stores in a table.
1999 * Function: rgSCHCmn2LyrCompEff
2000 * Purpose: this function computes the efficiency as number of
2001 * bytes per 1024 symbols. The CFI table is also filled
2002 * with the same information such that comparison is valid
2004 * Invoked by: Scheduler
2006 * @param[in] uint8_t noPdcchSym
2007 * @param[in] uint8_t cpType
2008 * @param[in] uint8_t txAntIdx
2009 * @param[in] RgSchCmnTbSzEff* effTbl2Lyr
2014 static Void rgSCHCmn2LyrCompEff
2019 RgSchCmnTbSzEff *effTbl2Lyr
2022 static Void rgSCHCmn2LyrCompEff(noPdcchSym, cpType, txAntIdx, effTbl2Lyr)
2026 RgSchCmnTbSzEff *effTbl2Lyr;
2031 uint8_t resOfCrs; /* Effective REs occupied by CRS */
2037 case RG_SCH_CMN_NOR_CP:
2040 case RG_SCH_CMN_EXT_CP:
2044 /* Generate a log error. This case should never be executed */
2048 /* Depending on the Tx Antenna Index, deduct the
2049 * Resource elements for the CRS */
2053 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
2056 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
2059 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
2062 /* Generate a log error. This case should never be executed */
2066 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
2067 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
2069 (*effTbl2Lyr)[i] = 0;
2070 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
2072 /* This line computes the coding efficiency per 1024 REs */
2073 (*effTbl2Lyr)[i] += (rgTbSzTbl[1][i][j] * 1024) / (noResPerRb * (j+1));
2075 (*effTbl2Lyr)[i] /= RG_SCH_CMN_NUM_RBS;
2082 * @brief This function initializes the rgSchCmnDciFrmtSizes table.
2086 * Function: rgSCHCmnGetDciFrmtSizes
2087 * Purpose: This function determines the sizes of all
2088 * the available DCI Formats. The order of
2089 * bits addition for each format is inaccordance
2091 * Invoked by: rgSCHCmnRgrCellCfg
2097 static Void rgSCHCmnGetDciFrmtSizes
2102 static Void rgSCHCmnGetDciFrmtSizes(cell)
2108 /* DCI Format 0 size determination */
2109 rgSchCmnDciFrmtSizes[0] = 1 +
2111 rgSCHUtlLog32bitNbase2((cell->bwCfg.ulTotalBw * \
2112 (cell->bwCfg.ulTotalBw + 1))/2) +
2122 /* DCI Format 1 size determination */
2123 rgSchCmnDciFrmtSizes[1] = 1 +
2124 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2129 4 + 2 + /* HqProc Id and DAI */
2135 /* DCI Format 1A size determination */
2136 rgSchCmnDciFrmtSizes[2] = 1 + /* Flag for format0/format1a differentiation */
2137 1 + /* Localized/distributed VRB assignment flag */
2140 3 + /* Harq process Id */
2142 4 + /* Harq process Id */
2143 2 + /* UL Index or DAI */
2145 1 + /* New Data Indicator */
2148 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2149 (cell->bwCfg.dlTotalBw + 1))/2);
2150 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
2151 Since VRB is local */
2153 /* DCI Format 1B size determination */
2154 rgSchCmnDciFrmtSizes[3] = 1 +
2155 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2156 (cell->bwCfg.dlTotalBw + 1))/2) +
2166 ((cell->numTxAntPorts == 4)? 4:2) +
2169 /* DCI Format 1C size determination */
2170 /* Approximation: NDLVrbGap1 ~= Nprb for DL */
2171 rgSchCmnDciFrmtSizes[4] = (cell->bwCfg.dlTotalBw < 50)? 0:1 +
2172 (cell->bwCfg.dlTotalBw < 50)?
2173 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/2 * \
2174 (cell->bwCfg.dlTotalBw/2 + 1))/2)) :
2175 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/4 * \
2176 (cell->bwCfg.dlTotalBw/4 + 1))/2)) +
2179 /* DCI Format 1D size determination */
2180 rgSchCmnDciFrmtSizes[5] = 1 +
2181 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2182 (cell->bwCfg.dlTotalBw + 1))/2) +
2191 ((cell->numTxAntPorts == 4)? 4:2) +
2194 /* DCI Format 2 size determination */
2195 rgSchCmnDciFrmtSizes[6] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2196 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2204 ((cell->numTxAntPorts == 4)? 6:3);
2206 /* DCI Format 2A size determination */
2207 rgSchCmnDciFrmtSizes[7] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2208 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2216 ((cell->numTxAntPorts == 4)? 2:0);
2218 /* DCI Format 3 size determination */
2219 rgSchCmnDciFrmtSizes[8] = rgSchCmnDciFrmtSizes[0];
2221 /* DCI Format 3A size determination */
2222 rgSchCmnDciFrmtSizes[9] = rgSchCmnDciFrmtSizes[0];
2229 * @brief This function initializes the cmnCell->dciAggrLvl table.
2233 * Function: rgSCHCmnGetCqiDciFrmt2AggrLvl
2234 * Purpose: This function determines the Aggregation level
2235 * for each CQI level against each DCI format.
2236 * Invoked by: rgSCHCmnRgrCellCfg
2242 static Void rgSCHCmnGetCqiDciFrmt2AggrLvl
2247 static Void rgSCHCmnGetCqiDciFrmt2AggrLvl(cell)
2251 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2256 for (i = 0; i < RG_SCH_CMN_MAX_CQI; i++)
2258 for (j = 0; j < 10; j++)
2260 uint32_t pdcchBits; /* Actual number of phy bits needed for a given DCI Format
2261 * for a given CQI Level */
2262 pdcchBits = (rgSchCmnDciFrmtSizes[j] * 1024)/rgSchCmnCqiPdcchEff[i];
2264 if (pdcchBits < 192)
2266 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL2;
2269 if (pdcchBits < 384)
2271 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL4;
2274 if (pdcchBits < 768)
2276 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL8;
2279 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL16;
2286 * @brief This function initializes all the data for the scheduler.
2290 * Function: rgSCHCmnDlInit
2291 * Purpose: This function initializes the following information:
2292 * 1. Efficiency table
2293 * 2. CQI to table index - It is one row for upto 3 RBs
2294 * and another row for greater than 3 RBs
2295 * currently extended prefix is compiled out.
2296 * Invoked by: MAC intialization code..may be ActvInit
2302 static Void rgSCHCmnDlInit
2306 static Void rgSCHCmnDlInit()
2313 RgSchCmnTbSzEff *effTbl;
2314 RgSchCmnCqiToTbs *tbsTbl;
2317 /* 0 corresponds to Single layer case, 1 corresponds to 2 layers case*/
2318 /* Init Efficiency table for normal cyclic prefix */
2319 /*Initialize Efficiency table for Layer Index 0 */
2320 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2321 /*Initialize Efficiency table for each of the CFI indices. The
2322 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2323 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[0];
2324 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[0];
2325 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[0];
2326 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[0];
2327 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2328 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[0];
2329 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[0];
2330 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[0];
2331 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[0];
2332 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2333 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[0];
2334 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[0];
2335 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[0];
2336 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[0];
2338 /*Initialize CQI to TBS table for Layer Index 0 for Normal CP */
2339 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[0];
2340 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[0];
2341 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[0];
2342 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[0];
2344 /*Intialize Efficency table for Layer Index 1 */
2345 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2346 /*Initialize Efficiency table for each of the CFI indices. The
2347 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2348 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[1];
2349 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[1];
2350 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[1];
2351 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[1];
2352 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2353 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[1];
2354 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[1];
2355 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[1];
2356 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[1];
2357 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2358 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[1];
2359 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[1];
2360 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[1];
2361 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[1];
2363 /*Initialize CQI to TBS table for Layer Index 1 for Normal CP */
2364 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[1];
2365 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[1];
2366 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[1];
2367 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[1];
2369 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2371 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2373 /* EfficiencyTbl calculation incase of 2 layers for normal CP */
2374 rgSCHCmnCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx,\
2375 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i]);
2376 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx, \
2377 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i]);
2381 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2383 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2385 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i];
2386 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][i];
2387 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2388 (j >= 0) && (k > 0); --j)
2390 /* ADD CQI to MCS mapping correction
2391 * single dimensional array is replaced by 2 dimensions for different CFI*/
2392 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2394 (*tbsTbl)[k--] = (uint8_t)j;
2401 /* effTbl,tbsTbl calculation incase of 2 layers for normal CP */
2402 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i];
2403 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][i];
2404 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2405 (j >= 0) && (k > 0); --j)
2407 /* ADD CQI to MCS mapping correction
2408 * single dimensional array is replaced by 2 dimensions for different CFI*/
2409 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2411 (*tbsTbl)[k--] = (uint8_t)j;
2421 /* Efficiency Table for Extended CP */
2422 /*Initialize Efficiency table for Layer Index 0 */
2423 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2424 /*Initialize Efficiency table for each of the CFI indices. The
2425 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2426 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[0];
2427 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[0];
2428 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[0];
2429 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[0];
2430 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2431 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[0];
2432 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[0];
2433 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[0];
2434 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[0];
2435 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2436 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[0];
2437 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[0];
2438 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[0];
2439 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[0];
2441 /*Initialize CQI to TBS table for Layer Index 0 for Extended CP */
2442 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[0];
2443 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[0];
2444 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[0];
2445 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[0];
2447 /*Initialize Efficiency table for Layer Index 1 */
2448 /*Initialize Efficiency table for each of the CFI indices. The
2449 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2450 /*Initialize Efficency table for Tx Antenna Port Index 0 */
2451 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[1];
2452 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[1];
2453 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[1];
2454 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[1];
2455 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2456 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[1];
2457 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[1];
2458 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[1];
2459 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[1];
2460 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2461 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[1];
2462 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[1];
2463 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[1];
2464 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[1];
2466 /*Initialize CQI to TBS table for Layer Index 1 for Extended CP */
2467 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[1];
2468 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[1];
2469 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[1];
2470 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[1];
2471 /* Activate this code when extended cp is supported */
2472 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2474 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2476 /* EfficiencyTbl calculation incase of 2 layers for extendedl CP */
2477 rgSCHCmnCompEff( (uint8_t)(i + 1 ), (uint8_t)RG_SCH_CMN_EXT_CP, idx,\
2478 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i]);
2479 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), (uint8_t) RG_SCH_CMN_EXT_CP,idx, \
2480 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i]);
2484 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2486 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2488 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i];
2489 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][i];
2490 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2491 (j >= 0) && (k > 0); --j)
2493 /* ADD CQI to MCS mapping correction
2494 * single dimensional array is replaced by 2 dimensions for different CFI*/
2495 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2497 (*tbsTbl)[k--] = (uint8_t)j;
2504 /* effTbl,tbsTbl calculation incase of 2 layers for extended CP */
2505 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i];
2506 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][i];
2507 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2508 (j >= 0) && (k > 0); --j)
2510 /* ADD CQI to MCS mapping correction
2511 * single dimensional array is replaced by 2 dimensions for different CFI*/
2512 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2514 (*tbsTbl)[k--] = (uint8_t)j;
2527 * @brief This function initializes all the data for the scheduler.
2531 * Function: rgSCHCmnUlInit
2532 * Purpose: This function initializes the following information:
2533 * 1. Efficiency table
2534 * 2. CQI to table index - It is one row for upto 3 RBs
2535 * and another row for greater than 3 RBs
2536 * currently extended prefix is compiled out.
2537 * Invoked by: MAC intialization code..may be ActvInit
2543 static Void rgSCHCmnUlInit
2547 static Void rgSCHCmnUlInit()
/* Build the UL CQI -> iTbs mapping, first for normal CP, then for
 * extended CP. mapTbl/effTbl/cqiTbl initially reference the
 * normal-CP tables and are re-pointed for the extended-CP pass. */
2550 uint8_t *mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_NOR_CP][0];
2551 RgSchCmnTbSzEff *effTbl = &rgSchCmnNorUlEff[0];
2552 const RgSchCmnUlCqiInfo *cqiTbl = &rgSchCmnUlCqiTbl[0];
2556 /* Initaializing new variable added for UL eff */
2557 rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP] = &rgSchCmnNorUlEff[0];
2558 /* Reason behind using 3 as the number of symbols to rule out for
2559 * efficiency table computation would be that we are using 2 symbols for
2560 * DMRS(1 in each slot) and 1 symbol for SRS*/
2561 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_NOR_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP]);
/* Walk iTbs indices from highest to lowest and record, for each CQI,
 * the largest iTbs whose efficiency does not exceed that CQI's
 * efficiency (cqiTbl[j].eff). */
2563 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2564 i >= 0 && j > 0; --i)
2566 if ((*effTbl)[i] <= cqiTbl[j].eff)
2568 mapTbl[j--] = (uint8_t)i;
/* Second pass: identical computation against the extended-CP tables. */
2575 effTbl = &rgSchCmnExtUlEff[0];
2576 mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_EXT_CP][0];
2578 /* Initaializing new variable added for UL eff */
2579 rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP] = &rgSchCmnExtUlEff[0];
2580 /* Reason behind using 3 as the number of symbols to rule out for
2581 * efficiency table computation would be that we are using 2 symbols for
2582 * DMRS(1 in each slot) and 1 symbol for SRS*/
/* Fix: use the named constant RGSCH_UL_SYM_DMRS_SRS instead of the
 * magic number 3 so the extended-CP path stays consistent with the
 * normal-CP call above (the comment above documents the value as 3). */
2583 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_EXT_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP]);
2585 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2586 i >= 0 && j > 0; --i)
2588 if ((*effTbl)[i] <= cqiTbl[j].eff)
2590 mapTbl[j--] = (uint8_t)i;
2602 * @brief This function initializes all the data for the scheduler.
2606 * Function: rgSCHCmnInit
2607 * Purpose: This function initializes the following information:
2608 * 1. Efficiency table
2609 * 2. CQI to table index - It is one row for upto 3 RBs
2610 * and another row for greater than 3 RBs
2611 * currently extended prefix is compiled out.
2612 * Invoked by: MAC intialization code..may be ActvInit
/* EMTC-specific common DL/UL scheduler initialization.
 * NOTE(review): in this excerpt these calls appear unconditionally;
 * presumably they are guarded by an EMTC compile flag in the full
 * source — confirm against the complete file. */
2630 rgSCHEmtcCmnDlInit();
2631 rgSCHEmtcCmnUlInit();
/* Populate the common-scheduler API dispatch table (rgSchCmnApis)
 * with the rgSCHCmn* handlers; callers invoke the scheduler only
 * through these function pointers. */
2637 /* Init the function pointers */
2638 rgSchCmnApis.rgSCHRgrUeCfg = rgSCHCmnRgrUeCfg;
2639 rgSchCmnApis.rgSCHRgrUeRecfg = rgSCHCmnRgrUeRecfg;
2640 rgSchCmnApis.rgSCHFreeUe = rgSCHCmnUeDel;
2641 rgSchCmnApis.rgSCHRgrCellCfg = rgSCHCmnRgrCellCfg;
2642 rgSchCmnApis.rgSCHRgrCellRecfg = rgSCHCmnRgrCellRecfg;
2643 rgSchCmnApis.rgSCHFreeCell = rgSCHCmnCellDel;
2644 rgSchCmnApis.rgSCHRgrLchCfg = rgSCHCmnRgrLchCfg;
2645 rgSchCmnApis.rgSCHRgrLcgCfg = rgSCHCmnRgrLcgCfg;
2646 rgSchCmnApis.rgSCHRgrLchRecfg = rgSCHCmnRgrLchRecfg;
2647 rgSchCmnApis.rgSCHRgrLcgRecfg = rgSCHCmnRgrLcgRecfg;
2648 rgSchCmnApis.rgSCHFreeDlLc = rgSCHCmnFreeDlLc;
2649 rgSchCmnApis.rgSCHFreeLcg = rgSCHCmnLcgDel;
2650 rgSchCmnApis.rgSCHRgrLchDel = rgSCHCmnRgrLchDel;
2651 rgSchCmnApis.rgSCHActvtUlUe = rgSCHCmnActvtUlUe;
2652 rgSchCmnApis.rgSCHActvtDlUe = rgSCHCmnActvtDlUe;
2653 rgSchCmnApis.rgSCHHdlUlTransInd = rgSCHCmnHdlUlTransInd;
2654 rgSchCmnApis.rgSCHDlDedBoUpd = rgSCHCmnDlDedBoUpd;
2655 rgSchCmnApis.rgSCHUlRecMsg3Alloc = rgSCHCmnUlRecMsg3Alloc;
2656 rgSchCmnApis.rgSCHUlCqiInd = rgSCHCmnUlCqiInd;
2657 rgSchCmnApis.rgSCHPucchDeltaPwrInd = rgSCHPwrPucchDeltaInd;
2658 rgSchCmnApis.rgSCHUlHqProcForUe = rgSCHCmnUlHqProcForUe;
2660 rgSchCmnApis.rgSCHUpdUlHqProc = rgSCHCmnUpdUlHqProc;
2662 rgSchCmnApis.rgSCHUpdBsrShort = rgSCHCmnUpdBsrShort;
2663 rgSchCmnApis.rgSCHUpdBsrTrunc = rgSCHCmnUpdBsrTrunc;
2664 rgSchCmnApis.rgSCHUpdBsrLong = rgSCHCmnUpdBsrLong;
2665 rgSchCmnApis.rgSCHUpdPhr = rgSCHCmnUpdPhr;
2666 rgSchCmnApis.rgSCHUpdExtPhr = rgSCHCmnUpdExtPhr;
2667 rgSchCmnApis.rgSCHContResUlGrant = rgSCHCmnContResUlGrant;
2668 rgSchCmnApis.rgSCHSrRcvd = rgSCHCmnSrRcvd;
2669 rgSchCmnApis.rgSCHFirstRcptnReq = rgSCHCmnFirstRcptnReq;
2670 rgSchCmnApis.rgSCHNextRcptnReq = rgSCHCmnNextRcptnReq;
2671 rgSchCmnApis.rgSCHFirstHqFdbkAlloc = rgSCHCmnFirstHqFdbkAlloc;
2672 rgSchCmnApis.rgSCHNextHqFdbkAlloc = rgSCHCmnNextHqFdbkAlloc;
2673 rgSchCmnApis.rgSCHDlProcAddToRetx = rgSCHCmnDlProcAddToRetx;
2674 rgSchCmnApis.rgSCHDlCqiInd = rgSCHCmnDlCqiInd;
/* NOTE(review): UL retx hook points at the EMTC variant here —
 * presumably under an EMTC compile flag in the full source; verify. */
2676 rgSchCmnApis.rgSCHUlProcAddToRetx = rgSCHCmnEmtcUlProcAddToRetx;
2679 rgSchCmnApis.rgSCHSrsInd = rgSCHCmnSrsInd;
2681 rgSchCmnApis.rgSCHDlTARpt = rgSCHCmnDlTARpt;
2682 rgSchCmnApis.rgSCHDlRlsSubFrm = rgSCHCmnDlRlsSubFrm;
2683 rgSchCmnApis.rgSCHUeReset = rgSCHCmnUeReset;
2685 rgSchCmnApis.rgSCHHdlCrntiCE = rgSCHCmnHdlCrntiCE;
2686 rgSchCmnApis.rgSCHDlProcAck = rgSCHCmnDlProcAck;
2687 rgSchCmnApis.rgSCHDlRelPdcchFbk = rgSCHCmnDlRelPdcchFbk;
2688 rgSchCmnApis.rgSCHUlSpsRelInd = rgSCHCmnUlSpsRelInd;
2689 rgSchCmnApis.rgSCHUlSpsActInd = rgSCHCmnUlSpsActInd;
2690 rgSchCmnApis.rgSCHUlCrcFailInd = rgSCHCmnUlCrcFailInd;
2691 rgSchCmnApis.rgSCHUlCrcInd = rgSCHCmnUlCrcInd;
2693 rgSchCmnApis.rgSCHDrxStrtInActvTmrInUl = rgSCHCmnDrxStrtInActvTmrInUl;
2694 rgSchCmnApis.rgSCHUpdUeDataIndLcg = rgSCHCmnUpdUeDataIndLcg;
/* Run each registered scheduler's init callback to fill the UL/DL
 * scheduler dispatch tables (and the EMTC / DLFS variants below). */
2696 for (idx = 0; idx < RGSCH_NUM_SCHEDULERS; ++idx)
2698 rgSchUlSchdInits[idx](&rgSchUlSchdTbl[idx]);
2699 rgSchDlSchdInits[idx](&rgSchDlSchdTbl[idx]);
2702 for (idx = 0; idx < RGSCH_NUM_EMTC_SCHEDULERS; ++idx)
2704 rgSchEmtcUlSchdInits[idx](&rgSchEmtcUlSchdTbl[idx]);
2705 rgSchEmtcDlSchdInits[idx](&rgSchEmtcDlSchdTbl[idx]);
2708 #if (defined (RG_PHASE2_SCHED) && defined(TFU_UPGRADE))
2709 for (idx = 0; idx < RGSCH_NUM_DLFS_SCHEDULERS; ++idx)
2711 rgSchDlfsSchdInits[idx](&rgSchDlfsSchdTbl[idx]);
/* Secondary-cell (carrier aggregation) UE cfg/del hooks. */
2715 rgSchCmnApis.rgSCHRgrSCellUeCfg = rgSCHCmnRgrSCellUeCfg;
2716 rgSchCmnApis.rgSCHRgrSCellUeDel = rgSCHCmnRgrSCellUeDel;
2723 * @brief This function is a wrapper to call scheduler specific API.
2727 * Function: rgSCHCmnDlRlsSubFrm
2728 * Purpose: Releases scheduler Information from DL SubFrm.
2732 * @param[in] RgSchCellCb *cell
2733 * @param[out] CmLteTimingInfo frm
2738 Void rgSCHCmnDlRlsSubFrm
2744 Void rgSCHCmnDlRlsSubFrm(cell, frm)
2746 CmLteTimingInfo frm;
2749 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Look up the DL subframe control block for the given timing and
 * hand it back via rgSCHUtlSubFrmPut.
 * NOTE(review): Put presumably resets/releases the subframe's
 * allocation state — confirm in rg_sch_utl.c. */
2753 /* Get the pointer to the subframe */
2754 sf = rgSCHUtlSubFrmGet(cell, frm);
2756 rgSCHUtlSubFrmPut(cell, sf);
/* When DLFS (frequency-selective) scheduling is active, let the DLFS
 * module reset its per-subframe bookkeeping as well. */
2759 /* Re-initialize DLFS specific information for the sub-frame */
2760 cellSch->apisDlfs->rgSCHDlfsReinitSf(cell, sf);
2768 * @brief This function is the starting function for DL allocation.
2772 * Function: rgSCHCmnDlCmnChAlloc
2773 * Purpose: Scheduling for downlink. It performs allocation in the order
2774 * of priority wich BCCH/PCH first, CCCH, Random Access and TA.
2776 * Invoked by: Scheduler
2778 * @param[in] RgSchCellCb* cell
2779 * @param[out] RgSchCmnDlRbAllocInfo* allocInfo
2784 static Void rgSCHCmnDlCcchRarAlloc
2789 static Void rgSCHCmnDlCcchRarAlloc(cell)
/* Order of work: CCCH (Msg4) retx first, then fresh Msg4 tx, then
 * CCCH SDU retx/tx, then RAR (Msg3 response). Each tx step is
 * suppressed when the current subframe is an ABS (almost blank
 * subframe) with the MUTE pattern — eICIC muting. */
2793 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2796 rgSCHCmnDlCcchRetx(cell, &cellSch->allocInfo);
2797 /* LTE_ADV_FLAG_REMOVED_START */
2798 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2800 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2802 /*eNodeB need to blank the subframe */
2806 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2811 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2813 /* LTE_ADV_FLAG_REMOVED_END */
2817 /*Added these function calls for processing CCCH SDU arriving
2818 * after guard timer expiry.Functions differ from above two functions
2819 * in using ueCb instead of raCb.*/
2820 rgSCHCmnDlCcchSduRetx(cell, &cellSch->allocInfo);
2821 /* LTE_ADV_FLAG_REMOVED_START */
2822 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2824 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2826 /*eNodeB need to blank the subframe */
2830 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2835 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2837 /* LTE_ADV_FLAG_REMOVED_END */
/* RAR scheduling: only when a Msg3 subframe index is pending, and
 * never while a CFI switch is in progress (currCfi != newCfi). */
2841 if(cellSch->ul.msg3SchdIdx != RGSCH_INVALID_INFO)
2843 /* Do not schedule msg3 if there is a CFI change ongoing */
2844 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2846 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2850 /* LTE_ADV_FLAG_REMOVED_START */
2851 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2853 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2855 /*eNodeB need to blank the subframe */
2859 /* Do not schedule msg3 if there is a CFI change ongoing */
2860 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2862 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2868 /* Do not schedule msg3 if there is a CFI change ongoing */
2869 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2871 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2874 /* LTE_ADV_FLAG_REMOVED_END */
2882 * @brief Scheduling for CCCH SDU.
2886 * Function: rgSCHCmnCcchSduAlloc
2887 * Purpose: Scheduling for CCCH SDU
2889 * Invoked by: Scheduler
2891 * @param[in] RgSchCellCb* cell
2892 * @param[in] RgSchUeCb* ueCb
2893 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2898 static S16 rgSCHCmnCcchSduAlloc
2902 RgSchCmnDlRbAllocInfo *allocInfo
2905 static S16 rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)
2908 RgSchCmnDlRbAllocInfo *allocInfo;
2911 RgSchDlRbAlloc *rbAllocInfo;
2912 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2913 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
/* Guard 1: the target DL subframe must still have unassigned BW. */
2916 /* Return if subframe BW exhausted */
2917 if (allocInfo->ccchSduAlloc.ccchSduDlSf->bw <=
2918 allocInfo->ccchSduAlloc.ccchSduDlSf->bwAssigned)
2920 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2921 "bw<=bwAssigned for UEID:%d",ueCb->ueId);
/* Guard 2: a DL HARQ process must be obtainable for this CCCH SDU. */
2925 if (rgSCHDhmGetCcchSduHqProc(ueCb, cellSch->dl.time, &(ueDl->proc)) != ROK)
2927 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2928 "rgSCHDhmGetCcchSduHqProc failed UEID:%d",ueCb->ueId);
2932 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
2933 rbAllocInfo->dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
/* Dedicated RB allocation; on failure the HARQ process acquired
 * above is released again so it is not leaked. */
2935 if (rgSCHCmnCcchSduDedAlloc(cell, ueCb) != ROK)
2937 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
2938 rgSCHDhmRlsHqpTb(ueDl->proc, 0, FALSE);
2939 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2940 "rgSCHCmnCcchSduDedAlloc failed UEID:%d",ueCb->ueId);
/* Success: queue the HARQ process on the CCCH-SDU tx list and count
 * one more scheduled CCCH UE for this subframe. */
2943 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduTxLst, &ueDl->proc->reqLnk);
2944 ueDl->proc->reqLnk.node = (PTR)ueDl->proc;
2945 allocInfo->ccchSduAlloc.ccchSduDlSf->schdCcchUe++;
2950 * @brief This function scheduler for downlink CCCH messages.
2954 * Function: rgSCHCmnDlCcchSduTx
2955 * Purpose: Scheduling for downlink CCCH
2957 * Invoked by: Scheduler
2959 * @param[in] RgSchCellCb *cell
2960 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2965 static Void rgSCHCmnDlCcchSduTx
2968 RgSchCmnDlRbAllocInfo *allocInfo
2971 static Void rgSCHCmnDlCcchSduTx(cell, allocInfo)
2973 RgSchCmnDlRbAllocInfo *allocInfo;
2978 RgSchCmnDlUe *ueCmnDl;
2979 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2981 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
/* Iterate the cell's pending CCCH-SDU UE list; stop early once the
 * per-subframe cap on scheduled CCCH UEs (maxCcchPerDlSf) is hit. */
2984 node = cell->ccchSduUeLst.first;
2987 if(cellSch->dl.maxCcchPerDlSf &&
2988 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2994 ueCb = (RgSchUeCb *)(node->node);
2995 ueCmnDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2997 /* Fix : syed postpone scheduling for this
2998 * until msg4 is done */
2999 /* Fix : syed RLC can erroneously send CCCH SDU BO
3000 * twice. Hence an extra guard to avoid if already
3001 * scheduled for RETX */
/* Skip UEs whose HARQ entity is inactive (Msg4 not done) or already
 * queued for retransmission (second condition elided in excerpt). */
3002 if ((!(ueCb->dl.dlInactvMask & RG_HQENT_INACTIVE)) &&
3005 if ((rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)) != ROK)
3012 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"ERROR!! THIS SHOULD "
3013 "NEVER HAPPEN for UEID:%d", ueCb->ueId);
3023 * @brief This function scheduler for downlink CCCH messages.
3027 * Function: rgSCHCmnDlCcchTx
3028 * Purpose: Scheduling for downlink CCCH
3030 * Invoked by: Scheduler
3032 * @param[in] RgSchCellCb *cell
3033 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3038 static Void rgSCHCmnDlCcchTx
3041 RgSchCmnDlRbAllocInfo *allocInfo
3044 static Void rgSCHCmnDlCcchTx(cell, allocInfo)
3046 RgSchCmnDlRbAllocInfo *allocInfo;
3051 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3052 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
/* Walk the RA control blocks awaiting Msg4 scheduling, honouring the
 * per-subframe CCCH cap, and attempt a Msg4 allocation for each. */
3055 node = cell->raInfo.toBeSchdLst.first;
3058 if(cellSch->dl.maxCcchPerDlSf &&
3059 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3066 raCb = (RgSchRaCb *)(node->node);
3068 /* Address allocation for this UE for MSG 4 */
3069 /* Allocation for Msg4 */
3070 if ((rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)) != ROK)
3081 * @brief This function scheduler for downlink CCCH messages.
3085 * Function: rgSCHCmnDlCcchSduRetx
3086 * Purpose: Scheduling for downlink CCCH
3088 * Invoked by: Scheduler
3090 * @param[in] RgSchCellCb *cell
3091 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3096 static Void rgSCHCmnDlCcchSduRetx
3099 RgSchCmnDlRbAllocInfo *allocInfo
3102 static Void rgSCHCmnDlCcchSduRetx(cell, allocInfo)
3104 RgSchCmnDlRbAllocInfo *allocInfo;
3107 RgSchDlRbAlloc *rbAllocInfo;
3109 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3111 RgSchDlHqProcCb *hqP;
3114 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
/* Walk the CCCH-SDU retransmission queue; the per-subframe CCCH UE
 * cap applies to retransmissions as well. */
3117 node = cellSch->dl.ccchSduRetxLst.first;
3120 if(cellSch->dl.maxCcchPerDlSf &&
3121 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3128 hqP = (RgSchDlHqProcCb *)(node->node);
3131 /* DwPts Scheduling Changes Start */
/* TDD: skip retx that cannot fit the special (DwPTS) subframe. */
3133 if (rgSCHCmnRetxAvoidTdd(allocInfo->ccchSduAlloc.ccchSduDlSf,
3139 /* DwPts Scheduling Changes End */
/* Skip if the retx grant's RB demand exceeds the remaining BW. */
3141 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3145 ueCb = (RgSchUeCb*)(hqP->hqE->ue);
3146 ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
3148 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
/* Rebuild the RB allocation request entirely from the HARQ process's
 * stored TB info (bytes, RBs, retx MCS, layers) — no stale TX data. */
3149 /* Fill RB Alloc Info */
3150 rbAllocInfo->dlSf = dlSf;
3151 rbAllocInfo->tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3152 rbAllocInfo->rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3153 /* Fix : syed iMcs setting did not correspond to RETX */
3154 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3155 rbAllocInfo->tbInfo[0].imcs);
3156 rbAllocInfo->rnti = ueCb->ueId;
3157 rbAllocInfo->tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3158 /* Fix : syed Copying info in entirety without depending on stale TX information */
3159 rbAllocInfo->tbInfo[0].tbCb = &hqP->tbInfo[0];
3160 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
3161 /* Fix : syed Assigning proc to scratchpad */
/* Accumulate the RBs consumed by all queued retx, then commit the
 * total to the subframe's assigned BW after the loop. */
3164 retxBw += rbAllocInfo->rbsReq;
3166 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduRetxLst, \
3168 hqP->reqLnk.node = (PTR)hqP;
3172 dlSf->bwAssigned += retxBw;
3178 * @brief This function scheduler for downlink CCCH messages.
3182 * Function: rgSCHCmnDlCcchRetx
3183 * Purpose: Scheduling for downlink CCCH
3185 * Invoked by: Scheduler
3187 * @param[in] RgSchCellCb *cell
3188 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3193 static Void rgSCHCmnDlCcchRetx
3196 RgSchCmnDlRbAllocInfo *allocInfo
3199 static Void rgSCHCmnDlCcchRetx(cell, allocInfo)
3201 RgSchCmnDlRbAllocInfo *allocInfo;
3205 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3207 RgSchDlHqProcCb *hqP;
3209 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
/* Msg4 retransmission pass: same structure as the CCCH-SDU retx
 * loop, but driven off the RA control block (raCb) rather than a
 * connected UE's ueCb. */
3212 node = cellSch->dl.msg4RetxLst.first;
3215 if(cellSch->dl.maxCcchPerDlSf &&
3216 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3222 hqP = (RgSchDlHqProcCb *)(node->node);
3226 /* DwPts Scheduling Changes Start */
3228 if (rgSCHCmnRetxAvoidTdd(allocInfo->msg4Alloc.msg4DlSf,
3234 /* DwPts Scheduling Changes End */
/* Skip if the retx grant's RB demand exceeds the remaining BW. */
3236 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3240 raCb = (RgSchRaCb*)(hqP->hqE->raCb);
3241 /* Fill RB Alloc Info */
3242 raCb->rbAllocInfo.dlSf = dlSf;
3243 raCb->rbAllocInfo.tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3244 raCb->rbAllocInfo.rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3245 /* Fix : syed iMcs setting did not correspond to RETX */
3246 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3247 raCb->rbAllocInfo.tbInfo[0].imcs);
/* Msg4 is addressed to the temporary C-RNTI assigned during RA. */
3248 raCb->rbAllocInfo.rnti = raCb->tmpCrnti;
3249 raCb->rbAllocInfo.tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3250 /* Fix; syed Copying info in entirety without depending on stale TX information */
3251 raCb->rbAllocInfo.tbInfo[0].tbCb = &hqP->tbInfo[0];
3252 raCb->rbAllocInfo.tbInfo[0].schdlngForTb = TRUE;
/* Total the RBs for all queued Msg4 retx; committed after the loop. */
3254 retxBw += raCb->rbAllocInfo.rbsReq;
3256 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4RetxLst, \
3258 hqP->reqLnk.node = (PTR)hqP;
3262 dlSf->bwAssigned += retxBw;
3268 * @brief This function implements scheduler DL allocation for
3269 * for broadcast (on PDSCH) and paging.
3273 * Function: rgSCHCmnDlBcchPcch
3274 * Purpose: This function implements scheduler for DL allocation
3275 * for broadcast (on PDSCH) and paging.
3277 * Invoked by: Scheduler
3279 * @param[in] RgSchCellCb* cell
3285 static Void rgSCHCmnDlBcchPcch
3288 RgSchCmnDlRbAllocInfo *allocInfo,
3289 RgInfSfAlloc *subfrmAlloc
3292 static Void rgSCHCmnDlBcchPcch(cell, allocInfo, subfrmAlloc)
3294 RgSchCmnDlRbAllocInfo *allocInfo;
3295 RgInfSfAlloc *subfrmAlloc;
3298 CmLteTimingInfo frm;
3300 RgSchClcDlLcCb *pcch;
3304 RgSchClcDlLcCb *bcch, *bch;
3305 #endif/*RGR_SI_SCH*/
/* Work on a future subframe: current time advanced by the DL delta
 * plus the HARQ interval, as required for HD-FDD lookahead. */
3309 frm = cell->crntTime;
3311 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
3312 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
3313 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
3315 // RGSCH_SUBFRAME_INDEX(frm);
3316 //RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
3319 /* Compute the subframe for which allocation is being made */
3320 /* essentially, we need pointer to the dl frame for this subframe */
3321 sf = rgSCHUtlSubFrmGet(cell, frm);
/* --- BCCH carried on BCH (MIB) --- */
3325 bch = rgSCHDbmGetBcchOnBch(cell);
3326 #if (ERRCLASS & ERRCLS_DEBUG)
3329 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on BCH is not configured");
3333 if (bch->boLst.first != NULLP)
3335 bo = (RgSchClcBoRpt *)(bch->boLst.first->node);
/* Serve the BO report whose transmission time matches this subframe,
 * then dequeue and free it. */
3336 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3338 sf->bch.tbSize = bo->bo;
3339 cmLListDelFrm(&bch->boLst, bch->boLst.first);
3340 /* ccpu00117052 - MOD - Passing double pointer
3341 for proper NULLP assignment*/
3342 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(*bo));
3343 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, bch->lcId,TRUE);
/* --- BCCH on DLSCH: SIB1 occasions are (sfn % 4 == 0, subframe 0) --- */
3348 if ((frm.sfn % 4 == 0) && (frm.subframe == 0))
3353 allocInfo->bcchAlloc.schdFirst = FALSE;
3354 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
3355 #if (ERRCLASS & ERRCLS_DEBUG)
3358 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3362 if (bcch->boLst.first != NULLP)
3364 bo = (RgSchClcBoRpt *)(bcch->boLst.first->node);
3366 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3368 allocInfo->bcchAlloc.schdFirst = TRUE;
3369 /* Time to perform allocation for this BCCH transmission */
3370 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
/* If SIB1 was not scheduled, scan the second BCCH-on-DLSCH (other
 * SIs): schedule BOs whose [timeToTx, maxTimeToTx] window covers
 * this subframe, and drop BOs whose window has already passed. */
3374 if(!allocInfo->bcchAlloc.schdFirst)
3377 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
3378 #if (ERRCLASS & ERRCLS_DEBUG)
3381 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3385 lnk = bcch->boLst.first;
3386 while (lnk != NULLP)
3388 bo = (RgSchClcBoRpt *)(lnk->node);
3390 valid = rgSCHCmnChkInWin(frm, bo->timeToTx, bo->maxTimeToTx);
3394 bo->i = RGSCH_CALC_SF_DIFF(frm, bo->timeToTx);
3395 /* Time to perform allocation for this BCCH transmission */
3396 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3401 valid = rgSCHCmnChkPastWin(frm, bo->maxTimeToTx);
3404 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
3405 /* ccpu00117052 - MOD - Passing double pointer
3406 for proper NULLP assignment*/
3407 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo,
3408 sizeof(RgSchClcBoRpt));
/* When RGR_SI_SCH is defined, the SI scheduler replaces the manual
 * BCCH handling above. */
3414 rgSCHDlSiSched(cell, allocInfo, subfrmAlloc);
3415 #endif/*RGR_SI_SCH*/
/* --- PCCH (paging) --- */
3417 pcch = rgSCHDbmGetPcch(cell);
3421 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"PCCH on DLSCH is not configured");
3425 if (pcch->boLst.first != NULLP)
3427 bo = (RgSchClcBoRpt *)(pcch->boLst.first->node);
3429 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3431 /* Time to perform allocation for this PCCH transmission */
3432 rgSCHCmnClcAlloc(cell, sf, pcch, RGSCH_P_RNTI, allocInfo);
3440 * Fun: rgSCHCmnChkInWin
3442 * Desc: This function checks if frm occurs in window
3444 * Ret: TRUE - if in window
3449 * File: rg_sch_cmn.c
3453 Bool rgSCHCmnChkInWin
3455 CmLteTimingInfo frm,
3456 CmLteTimingInfo start,
3460 Bool rgSCHCmnChkInWin(frm, start, end)
3461 CmLteTimingInfo frm;
3462 CmLteTimingInfo start;
3463 CmLteTimingInfo end;
/* Inclusive window check on (sfn, slot) timing, split into three
 * cases: normal window (end.sfn > start.sfn), SFN wrap-around
 * (end.sfn < start.sfn), and a window inside a single SFN. */
3469 if (end.sfn > start.sfn)
3471 if (frm.sfn > start.sfn
3472 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3474 if (frm.sfn < end.sfn
3476 || (frm.sfn == end.sfn && frm.slot <= end.slot))
/* NOTE(review): the next line repeats the '||' continuation but
 * compares against start.slot — it appears to be an alternate
 * (conditionally compiled) variant of the line above whose guard
 * lines are elided in this excerpt; confirm against the full file. */
3478 || (frm.sfn == end.sfn && frm.slot <= start.slot))
3485 /* Testing for wrap around, sfn wraparound check should be enough */
3486 else if (end.sfn < start.sfn)
3488 if (frm.sfn > start.sfn
3489 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3495 if (frm.sfn < end.sfn
3496 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3502 else /* start.sfn == end.sfn */
3504 if (frm.sfn == start.sfn
3505 && (frm.slot >= start.slot
3506 && frm.slot <= end.slot))
3513 } /* end of rgSCHCmnChkInWin*/
3517 * Fun: rgSCHCmnChkPastWin
3519 * Desc: This function checks if frm has gone past window edge
3521 * Ret: TRUE - if past window edge
3526 * File: rg_sch_cmn.c
3530 Bool rgSCHCmnChkPastWin
3532 CmLteTimingInfo frm,
3536 Bool rgSCHCmnChkPastWin(frm, end)
3537 CmLteTimingInfo frm;
3538 CmLteTimingInfo end;
/* "Past the window" is implemented by reusing the in-window check:
 * frm is past 'end' iff it falls inside (end + 1 subframe,
 * end + 1 frame]. refFrm marks the far edge one full frame ahead;
 * end is advanced one subframe to make the old edge exclusive. */
3541 CmLteTimingInfo refFrm = end;
3545 RGSCH_INCR_FRAME(refFrm.sfn);
3546 RGSCH_INCR_SUB_FRAME(end, 1);
3547 pastWin = rgSCHCmnChkInWin(frm, end, refFrm);
3550 } /* end of rgSCHCmnChkPastWin*/
3553 * @brief This function implements allocation of the resources for common
3554 * channels BCCH, PCCH.
3558 * Function: rgSCHCmnClcAlloc
3559 * Purpose: This function implements selection of number of RBs based
3560 * the allowed grant for the service. It is also responsible
3561 * for selection of MCS for the transmission.
3563 * Invoked by: Scheduler
3565 * @param[in] RgSchCellCb *cell,
3566 * @param[in] RgSchDlSf *sf,
3567 * @param[in] RgSchClcDlLcCb *lch,
3568 * @param[in] uint16_t rnti,
3569 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3574 static Void rgSCHCmnClcAlloc
3578 RgSchClcDlLcCb *lch,
3580 RgSchCmnDlRbAllocInfo *allocInfo
3583 static Void rgSCHCmnClcAlloc(cell, sf, lch, rnti, allocInfo)
3586 RgSchClcDlLcCb *lch;
3588 RgSchCmnDlRbAllocInfo *allocInfo;
3591 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3598 uint8_t cfi = cellDl->currCfi;
/* The head BO report of the logical channel drives tbs/rb sizing
 * (intermediate sizing lines are elided in this excerpt). */
3602 bo = (RgSchClcBoRpt *)(lch->boLst.first->node);
3606 /* rgSCHCmnClcRbAllocForFxdTb(cell, bo->bo, cellDl->ccchCqi, &rb);*/
/* Two sizing paths: when bitsPerRb is unset, search the TB-size
 * table for the smallest RB count carrying tbs bytes; otherwise
 * derive RBs directly from bits-per-RB. */
3607 if(cellDl->bitsPerRb==0)
3609 while ((rgTbSzTbl[0][0][rb]) < (tbs*8))
3617 rb = RGSCH_CEIL((tbs*8), cellDl->bitsPerRb);
3619 /* DwPTS Scheduling Changes Start */
/* TDD special subframe: fewer REs per RB in DwPTS, so pad the RB
 * count to compensate for the lost REs. */
3621 if(sf->sfType == RG_SCH_SPL_SF_DATA)
3623 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
3625 /* Calculate the less RE's because of DwPTS */
3626 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
3628 /* Increase number of RBs in Spl SF to compensate for lost REs */
3629 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
3632 /* DwPTS Scheduling Changes End */
3633 /*ccpu00115595- end*/
3634 /* additional check to see if required RBs
3635 * exceeds the available */
3636 if (rb > sf->bw - sf->bwAssigned)
3638 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"BW allocation "
3639 "failed for CRNTI:%d",rnti);
3643 /* Update the subframe Allocated BW field */
3644 sf->bwAssigned = sf->bwAssigned + rb;
/* Record the computed grant under bcchAlloc (SI-RNTI) or pcchAlloc
 * (P-RNTI); both carry one TB, one layer. */
3645 /* Fill in the BCCH/PCCH transmission info to the RBAllocInfo struct */
3646 if (rnti == RGSCH_SI_RNTI)
3648 allocInfo->bcchAlloc.rnti = rnti;
3649 allocInfo->bcchAlloc.dlSf = sf;
3650 allocInfo->bcchAlloc.tbInfo[0].bytesReq = tbs;
3651 allocInfo->bcchAlloc.rbsReq = rb;
3652 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
3653 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
3654 /* Nprb indication at PHY for common Ch */
3655 allocInfo->bcchAlloc.nPrb = bo->nPrb;
3659 allocInfo->pcchAlloc.rnti = rnti;
3660 allocInfo->pcchAlloc.dlSf = sf;
3661 allocInfo->pcchAlloc.tbInfo[0].bytesReq = tbs;
3662 allocInfo->pcchAlloc.rbsReq = rb;
3663 allocInfo->pcchAlloc.tbInfo[0].imcs = mcs;
3664 allocInfo->pcchAlloc.tbInfo[0].noLyr = 1;
3665 allocInfo->pcchAlloc.nPrb = bo->nPrb;
3672 * @brief This function implements PDCCH allocation for common channels.
3676 * Function: rgSCHCmnCmnPdcchAlloc
3677 * Purpose: This function implements allocation of PDCCH for a UE.
3678 * 1. This uses index 0 of PDCCH table for efficiency.
3679 * 2. Uses he candidate PDCCH count for the aggr level.
3680 * 3. Look for availability for each candidate and choose
3681 * the first one available.
3683 * Invoked by: Scheduler
3685 * @param[in] RgSchCellCb *cell
3686 * @param[in] RgSchDlSf *sf
3687 * @return RgSchPdcch *
3688 * -# NULLP when unsuccessful
3692 RgSchPdcch *rgSCHCmnCmnPdcchAlloc
3698 RgSchPdcch *rgSCHCmnCmnPdcchAlloc(cell, subFrm)
3703 CmLteAggrLvl aggrLvl;
3704 RgSchPdcchInfo *pdcchInfo;
3706 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3707 uint8_t numCce; /*store num CCEs based on
3708 aggregation level */
/* Common-channel DCIs always use the cell-configured aggregation
 * level for the common search space. */
3710 aggrLvl = cellSch->dl.cmnChAggrLvl;
3712 pdcchInfo = &(subFrm->pdcchInfo);
/* Re-init per-subframe PDCCH state if the CCE count changed (e.g.
 * after a CFI switch); the two branches are alternate compile-time
 * variants (per-subframe vs per-cell nCce) in the full source. */
3714 /* Updating the no. of nCce in pdcchInfo, in case if CFI
3717 if(subFrm->nCce != pdcchInfo->nCce)
3719 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3722 if(cell->nCce != pdcchInfo->nCce)
3724 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Map aggregation level to CCE count (switch bodies elided here). */
3730 case CM_LTE_AGGR_LVL4:
3733 case CM_LTE_AGGR_LVL8:
3736 case CM_LTE_AGGR_LVL16:
/* Take the first available candidate; account for the CCEs used
 * and tag the PDCCH as belonging to the common search space. */
3743 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3746 pdcch->isSpsRnti = FALSE;
3748 /* Increment the CCE used counter in the current subframe */
3749 subFrm->cceCnt += numCce;
3750 pdcch->pdcchSearchSpace = RG_SCH_CMN_SEARCH_SPACE;
3755 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3756 subFrm->isCceFailure = TRUE;
3758 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
3759 "PDCCH ERR: NO PDDCH AVAIL IN COMMON SEARCH SPACE aggr:%u",
3766 * @brief This function implements bandwidth allocation for common channels.
3770 * Function: rgSCHCmnClcRbAlloc
3771 * Purpose: This function implements bandwith allocation logic
3772 * for common control channels.
3774 * Invoked by: Scheduler
3776 * @param[in] RgSchCellCb* cell
3777 * @param[in] uint32_t bo
3778 * @param[in] uint8_t cqi
3779 * @param[in] uint8_t *rb
3780 * @param[in] uint32_t *tbs
3781 * @param[in] uint8_t *mcs
3782 * @param[in] RgSchDlSf *sf
3788 Void rgSCHCmnClcRbAlloc
3801 Void rgSCHCmnClcRbAlloc(cell, bo, cqi, rb, tbs, mcs, iTbs, isSpsBo)
3814 static Void rgSCHCmnClcRbAlloc
3825 static Void rgSCHCmnClcRbAlloc(cell, bo, cqi, rb, tbs, mcs, sf)
3834 #endif /* LTEMAC_SPS */
3837 RgSchCmnTbSzEff *effTbl;
3840 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3841 uint8_t cfi = cellSch->dl.currCfi;
/* Step 1: map the CQI to an iTbs via the per-CFI lookup tables
 * built at init, and derive the MCS from that iTbs. */
3844 /* first get the CQI to MCS table and determine the number of RBs */
3845 effTbl = (RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]);
3846 iTbsVal = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[cqi];
3847 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3849 /* Efficiency is number of bits per 1024 REs */
3850 eff = (*effTbl)[iTbsVal];
/* Step 2: REs needed = bo bytes * 8 bits * 1024 / efficiency. */
3852 /* Get the number of REs needed for this bo */
3853 noRes = ((bo * 8 * 1024) / eff );
3855 /* Get the number of RBs needed for this transmission */
3856 /* Number of RBs = No of REs / No of REs per RB */
3857 tmpRb = RGSCH_CEIL(noRes, cellSch->dl.noResPerRb[cfi]);
3858 /* KWORK_FIX: added check to see if rb has crossed maxRb*/
3859 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
/* Step 3: clamp to the per-UE DL BW cap, then grow the RB count
 * until the TB size table can actually carry 'bo' bytes (or the
 * cap is reached). */
3860 if (tmpRb > cellSch->dl.maxDlBwPerUe)
3862 tmpRb = cellSch->dl.maxDlBwPerUe;
3864 while ((rgTbSzTbl[0][iTbsVal][tmpRb-1]/8) < bo &&
3865 (tmpRb < cellSch->dl.maxDlBwPerUe))
3868 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3870 *tbs = rgTbSzTbl[0][iTbsVal][tmpRb-1]/8;
3871 *rb = (uint8_t)tmpRb;
3872 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3880 * @brief Scheduling for MSG4.
3884 * Function: rgSCHCmnMsg4Alloc
3885 * Purpose: Scheduling for MSG4
3887 * Invoked by: Scheduler
3889 * @param[in] RgSchCellCb* cell
3890 * @param[in] RgSchRaCb* raCb
3891 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3896 static S16 rgSCHCmnMsg4Alloc
3900 RgSchCmnDlRbAllocInfo *allocInfo
3903 static S16 rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)
3906 RgSchCmnDlRbAllocInfo *allocInfo;
3909 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3912 /* SR_RACH_STATS : MSG4 TO BE TXED */
3914 /* Return if subframe BW exhausted */
3915 if (allocInfo->msg4Alloc.msg4DlSf->bw <=
3916 allocInfo->msg4Alloc.msg4DlSf->bwAssigned)
3918 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId ,
3923 if (rgSCHDhmGetMsg4HqProc(raCb, cellSch->dl.time) != ROK)
3925 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3926 "rgSCHDhmGetMsg4HqProc failed");
3930 raCb->rbAllocInfo.dlSf = allocInfo->msg4Alloc.msg4DlSf;
3932 if (rgSCHCmnMsg4DedAlloc(cell, raCb) != ROK)
3934 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
3935 rgSCHDhmRlsHqpTb(raCb->dlHqE->msg4Proc, 0, FALSE);
3936 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3937 "rgSCHCmnMsg4DedAlloc failed.");
3940 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4TxLst, &raCb->dlHqE->msg4Proc->reqLnk);
3941 raCb->dlHqE->msg4Proc->reqLnk.node = (PTR)raCb->dlHqE->msg4Proc;
3942 allocInfo->msg4Alloc.msg4DlSf->schdCcchUe++;
3949 * @brief This function implements PDCCH allocation for an UE.
3953 * Function: rgSCHCmnPdcchAlloc
3954 * Purpose: This function implements allocation of PDCCH for an UE.
3955 * 1. Get the aggregation level for the CQI of the UE.
3956 * 2. Get the candidate PDCCH count for the aggr level.
3957 * 3. Look for availability for each candidate and choose
3958 * the first one available.
3960 * Invoked by: Scheduler
3965 * @param[in] dciFrmt
3966 * @return RgSchPdcch *
3967 * -# NULLP when unsuccessful
3971 RgSchPdcch *rgSCHCmnPdcchAlloc
3977 TfuDciFormat dciFrmt,
3981 RgSchPdcch *rgSCHCmnPdcchAlloc(cell, subFrm, cqi, dciFrmt, isDtx)
3986 TfuDciFormat dciFrmt;
3990 CmLteAggrLvl aggrLvl;
3991 RgSchPdcchInfo *pdcchInfo;
3995 /* 3.1 consider the selected DCI format size in determining the
3996 * aggregation level */
3997 //TODO_SID Need to update. Currently using 4 aggregation level
3998 aggrLvl = CM_LTE_AGGR_LVL2;//cellSch->dciAggrLvl[cqi][dciFrmt];
4001 if((dciFrmt == TFU_DCI_FORMAT_1A) &&
4002 ((ue) && (ue->allocCmnUlPdcch)) )
4004 pdcch = rgSCHCmnCmnPdcchAlloc(cell, subFrm);
4005 /* Since CRNTI Scrambled */
4008 pdcch->dciNumOfBits = ue->dciSize.cmnSize[dciFrmt];
4009 // prc_trace_format_string(PRC_TRACE_GROUP_PS, PRC_TRACE_INFO_LOW,"Forcing alloc in CMN search spc size %d fmt %d \n",
4010 // pdcch->dciNumOfBits, dciFrmt);
4016 /* Incrementing aggrLvl by 1 if it not AGGR_LVL8(MAX SIZE)
4017 * inorder to increse the redudancy bits for better decoding of UE */
4020 if (aggrLvl != CM_LTE_AGGR_LVL16)
4024 case CM_LTE_AGGR_LVL2:
4025 aggrLvl = CM_LTE_AGGR_LVL4;
4027 case CM_LTE_AGGR_LVL4:
4028 aggrLvl = CM_LTE_AGGR_LVL8;
4030 case CM_LTE_AGGR_LVL8:
4031 aggrLvl = CM_LTE_AGGR_LVL16;
4040 pdcchInfo = &subFrm->pdcchInfo;
4042 /* Updating the no. of nCce in pdcchInfo, in case if CFI
4045 if(subFrm->nCce != pdcchInfo->nCce)
4047 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
4050 if(cell->nCce != pdcchInfo->nCce)
4052 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
4056 if (pdcchInfo->nCce < (1 << (aggrLvl - 1)))
4058 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
4059 subFrm->isCceFailure = TRUE;
4060 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
4061 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
4067 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
4069 /* SR_RACH_STATS : Reset isTBMsg4 */
4070 pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4= FALSE;
4071 pdcch->dci.u.format0Info.isSrGrant = FALSE;
4073 pdcch->isSpsRnti = FALSE;
4075 /* Increment the CCE used counter in the current subframe */
4076 subFrm->cceCnt += aggrLvl;
4077 pdcch->pdcchSearchSpace = RG_SCH_UE_SPECIFIC_SEARCH_SPACE;
4081 if (ue->cell != cell)
4083 /* Secondary Cell */
4084 //pdcch->dciNumOfBits = ue->dciSize.noUlCcSize[dciFrmt];
4085 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
4090 //pdcch->dciNumOfBits = ue->dciSize.dedSize[dciFrmt];
4091 //TODO_SID Need to update dci size.
4092 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
4098 pdcch->dciNumOfBits = cell->dciSize.size[dciFrmt];
4103 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
4104 subFrm->isCceFailure = TRUE;
4106 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
4107 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
4114 * @brief This function implements BW allocation for CCCH SDU
4118 * Function: rgSCHCmnCcchSduDedAlloc
4119 * Purpose: Downlink bandwidth Allocation for CCCH SDU.
4121 * Invoked by: Scheduler
4123 * @param[in] RgSchCellCb* cell
4124 * @param[out] RgSchUeCb *ueCb
4129 static S16 rgSCHCmnCcchSduDedAlloc
4135 static S16 rgSCHCmnCcchSduDedAlloc(cell, ueCb)
4140 RgSchDlHqEnt *hqE = NULLP;
4142 RgSchDlRbAlloc *rbAllocinfo = NULLP;
4143 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4147 uint8_t cfi = cellDl->currCfi;
4151 rbAllocinfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
4153 effBo = ueCb->dlCcchInfo.bo + RGSCH_CCCH_SDU_HDRSIZE;
4156 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
4157 &rbAllocinfo->tbInfo[0].bytesReq,
4158 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
4159 #else /* LTEMAC_SPS */
4160 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
4161 &rbAllocinfo->tbInfo[0].bytesReq,\
4162 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
4164 #endif /* LTEMAC_SPS */
4167 /* Cannot exceed the total number of RBs in the cell */
4168 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
4169 rbAllocinfo->dlSf->bwAssigned)))
4171 /* Check if atleast one allocation was possible.
4172 This may be the case where the Bw is very less and
4173 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
4174 if (rbAllocinfo->dlSf->bwAssigned == 0)
4176 numRb = rbAllocinfo->dlSf->bw;
4177 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
4178 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
4182 rbAllocinfo->rbsReq = numRb;
4183 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
4184 /* DwPTS Scheduling Changes Start */
4186 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4188 rbAllocinfo->tbInfo[0].bytesReq =
4189 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1,cfi);
4192 /* DwPTS Scheduling Changes End */
4193 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4201 /* Update the subframe Allocated BW field */
4202 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4203 rbAllocinfo->rbsReq;
4204 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
4205 rbAllocinfo->tbInfo[0].tbCb = &hqE->ccchSduProc->tbInfo[0];
4206 rbAllocinfo->rnti = ueCb->ueId;
4207 rbAllocinfo->tbInfo[0].noLyr = 1;
4214 * @brief This function implements BW allocation for MSG4
4218 * Function: rgSCHCmnMsg4DedAlloc
4219 * Purpose: Downlink bandwidth Allocation for MSG4.
4221 * Invoked by: Scheduler
4223 * @param[in] RgSchCellCb* cell
4224 * @param[out] RgSchRaCb *raCb
4229 static S16 rgSCHCmnMsg4DedAlloc
4235 static S16 rgSCHCmnMsg4DedAlloc(cell, raCb)
4241 RgSchDlRbAlloc *rbAllocinfo = &raCb->rbAllocInfo;
4245 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4246 uint8_t cfi = cellDl->currCfi;
4250 effBo = raCb->dlCcchInfo.bo + RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE;
4253 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
4254 &rbAllocinfo->tbInfo[0].bytesReq,\
4255 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
4256 #else /* LTEMAC_SPS */
4257 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
4258 &rbAllocinfo->tbInfo[0].bytesReq,\
4259 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
4261 #endif /* LTEMAC_SPS */
4264 /* Cannot exceed the total number of RBs in the cell */
4265 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
4266 rbAllocinfo->dlSf->bwAssigned)))
4268 /* Check if atleast one allocation was possible.
4269 This may be the case where the Bw is very less and
4270 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
4271 if (rbAllocinfo->dlSf->bwAssigned == 0)
4273 numRb = rbAllocinfo->dlSf->bw;
4274 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
4275 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
4279 rbAllocinfo->rbsReq = numRb;
4280 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
4281 /* DwPTS Scheduling Changes Start */
4283 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4285 rbAllocinfo->tbInfo[0].bytesReq =
4286 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1, cfi);
4289 /* DwPTS Scheduling Changes End */
4290 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4298 /* Update the subframe Allocated BW field */
4299 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4300 rbAllocinfo->rbsReq;
4301 rbAllocinfo->rnti = raCb->tmpCrnti;
4302 rbAllocinfo->tbInfo[0].tbCb = &raCb->dlHqE->msg4Proc->tbInfo[0];
4303 rbAllocinfo->tbInfo[0].schdlngForTb = TRUE;
4304 rbAllocinfo->tbInfo[0].noLyr = 1;
4311 * @brief This function implements scheduling for RA Response.
4315 * Function: rgSCHCmnDlRaRsp
4316 * Purpose: Downlink scheduling for RA responses.
4318 * Invoked by: Scheduler
4320 * @param[in] RgSchCellCb* cell
4325 static Void rgSCHCmnDlRaRsp
4328 RgSchCmnDlRbAllocInfo *allocInfo
4331 static Void rgSCHCmnDlRaRsp(cell, allocInfo)
4333 RgSchCmnDlRbAllocInfo *allocInfo;
4336 CmLteTimingInfo frm;
4337 CmLteTimingInfo schFrm;
4343 RgSchTddRachRspLst *rachRsp;
4344 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
4349 frm = cell->crntTime;
4350 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4352 /* Compute the subframe for which allocation is being made */
4353 /* essentially, we need pointer to the dl frame for this subframe */
4354 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4356 /* Get the RACH Response scheduling related information
4357 * for the subframe with RA index */
4358 raIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][frm.subframe]-1;
4360 rachRsp = &cell->rachRspLst[raIdx];
4362 for(sfnIdx = 0; sfnIdx < rachRsp->numRadiofrms; sfnIdx++)
4364 /* For all scheduled RACH Responses in SFNs */
4366 RG_SCH_CMN_DECR_FRAME(schFrm.sfn, rachRsp->rachRsp[sfnIdx].sfnOffset);
4367 /* For all scheduled RACH Responses in subframes */
4369 subfrmIdx < rachRsp->rachRsp[sfnIdx].numSubfrms; subfrmIdx++)
4371 schFrm.subframe = rachRsp->rachRsp[sfnIdx].subframe[subfrmIdx];
4372 /* compute the last RA RNTI used in the previous subframe */
4373 raIdx = (((schFrm.sfn % cell->raInfo.maxRaSize) * \
4374 RGSCH_NUM_SUB_FRAMES * RGSCH_MAX_RA_RNTI_PER_SUBFRM) \
4377 /* For all RA RNTIs within a subframe */
4379 for(i=0; (i < RGSCH_MAX_RA_RNTI_PER_SUBFRM) && \
4380 (noRaRnti < RGSCH_MAX_TDD_RA_RSP_ALLOC); i++)
4382 rarnti = (schFrm.subframe + RGSCH_NUM_SUB_FRAMES*i + 1);
4383 rntiIdx = (raIdx + RGSCH_NUM_SUB_FRAMES*i);
4385 if (cell->raInfo.raReqLst[rntiIdx].first != NULLP)
4387 /* compute the next RA RNTI */
4388 if (rgSCHCmnRaRspAlloc(cell, subFrm, rntiIdx,
4389 rarnti, noRaRnti, allocInfo) != ROK)
4391 /* The resources are exhausted */
4405 * @brief This function implements scheduling for RA Response.
4409 * Function: rgSCHCmnDlRaRsp
4410 * Purpose: Downlink scheduling for RA responses.
4412 * Invoked by: Scheduler
4414 * @param[in] RgSchCellCb* cell
4415 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4420 static Void rgSCHCmnDlRaRsp //FDD
4423 RgSchCmnDlRbAllocInfo *allocInfo
4426 static Void rgSCHCmnDlRaRsp(cell, allocInfo)
4428 RgSchCmnDlRbAllocInfo *allocInfo;
4431 CmLteTimingInfo frm;
4432 CmLteTimingInfo winStartFrm;
4434 uint8_t winStartIdx;
4438 RgSchCmnCell *sched;
4439 uint8_t i,noRaRnti=0;
4441 frm = cell->crntTime;
4442 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4444 /* Compute the subframe for which allocation is being made */
4445 /* essentially, we need pointer to the dl frame for this subframe */
4446 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4447 sched = RG_SCH_CMN_GET_CELL(cell);
4449 /* ccpu00132523 - Window Start calculated by considering RAR window size,
4450 * RAR Wait period, Subframes occuppied for respective preamble format*/
4451 winGap = (sched->dl.numRaSubFrms-1) + (cell->rachCfg.raWinSize-1)
4452 +RGSCH_RARSP_WAIT_PERIOD;
4454 /* Window starting occassion is retrieved using the gap and tried to
4455 * fit to the size of raReqLst array*/
4456 RGSCHDECRFRMCRNTTIME(frm, winStartFrm, winGap);
4458 //5G_TODO TIMING update. Need to check
4459 winStartIdx = (winStartFrm.sfn & 1) * RGSCH_MAX_RA_RNTI+ winStartFrm.slot;
4461 for(i = 0; ((i < cell->rachCfg.raWinSize) && (noRaRnti < RG_SCH_CMN_MAX_CMN_PDCCH)); i++)
4463 raIdx = (winStartIdx + i) % RGSCH_RAREQ_ARRAY_SIZE;
4465 if (cell->raInfo.raReqLst[raIdx].first != NULLP)
4467 allocInfo->raRspAlloc[noRaRnti].biEstmt = \
4468 (!i * RGSCH_ONE_BIHDR_SIZE);
4469 rarnti = raIdx % RGSCH_MAX_RA_RNTI+ 1;
4470 if (rgSCHCmnRaRspAlloc(cell, subFrm, raIdx,
4471 rarnti, noRaRnti, allocInfo) != ROK)
4473 /* The resources are exhausted */
4476 /* ccpu00132523- If all the RAP ids are not scheduled then need not
4477 * proceed for next RA RNTIs*/
4478 if(allocInfo->raRspAlloc[noRaRnti].numRapids < cell->raInfo.raReqLst[raIdx].count)
4482 noRaRnti++; /* Max of RG_SCH_CMN_MAX_CMN_PDCCH RARNTIs
4483 for response allocation */
4492 * @brief This function allocates the resources for an RARNTI.
4496 * Function: rgSCHCmnRaRspAlloc
4497 * Purpose: Allocate resources to a RARNTI.
4498 * 0. Allocate PDCCH for sending the response.
4499 * 1. Locate the number of RA requests pending for the RARNTI.
4500 * 2. Compute the size of data to be built.
4501 * 3. Using common channel CQI, compute the number of RBs.
4503 * Invoked by: Scheduler
4505 * @param[in] RgSchCellCb *cell,
4506 * @param[in] RgSchDlSf *subFrm,
4507 * @param[in] uint16_t rarnti,
4508 * @param[in] uint8_t noRaRnti
4509 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4514 static S16 rgSCHCmnRaRspAlloc
4521 RgSchCmnDlRbAllocInfo *allocInfo
4524 static S16 rgSCHCmnRaRspAlloc(cell,subFrm,raIndex,rarnti,noRaRnti,allocInfo)
4530 RgSchCmnDlRbAllocInfo *allocInfo;
4533 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4534 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4538 /*ccpu00116700,ccpu00116708- Corrected the wrong type for mcs*/
4541 /* RACH handling related changes */
4542 Bool isAlloc = FALSE;
4543 static uint8_t schdNumRapid = 0;
4544 uint8_t remNumRapid = 0;
4549 uint8_t cfi = cellDl->currCfi;
4556 /* ccpu00132523: Resetting the schdRap Id count in every scheduling subframe*/
4563 if (subFrm->bw == subFrm->bwAssigned)
4565 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4566 "bw == bwAssigned RARNTI:%d",rarnti);
4570 reqLst = &cell->raInfo.raReqLst[raIndex];
4571 if (reqLst->count == 0)
4573 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4574 "reqLst Count=0 RARNTI:%d",rarnti);
4577 remNumRapid = reqLst->count;
4580 /* Limit number of rach rsps to maxMsg3PerUlsf */
4581 if ( schdNumRapid+remNumRapid > cellUl->maxMsg3PerUlSf )
4583 remNumRapid = cellUl->maxMsg3PerUlSf-schdNumRapid;
4589 /* Try allocating for as many RAPIDs as possible */
4590 /* BI sub-header size to the tbSize requirement */
4591 noBytes = RGSCH_GET_RAR_BYTES(remNumRapid) +\
4592 allocInfo->raRspAlloc[noRaRnti].biEstmt;
4593 if ((allwdTbSz = rgSCHUtlGetAllwdCchTbSz(noBytes*8, &nPrb, &mcs)) == -1)
4599 /* rgSCHCmnClcRbAllocForFxdTb(cell, allwdTbSz/8, cellDl->ccchCqi, &rb);*/
4600 if(cellDl->bitsPerRb==0)
4602 while ((rgTbSzTbl[0][0][rb]) <(uint32_t) allwdTbSz)
4610 rb = RGSCH_CEIL(allwdTbSz, cellDl->bitsPerRb);
4612 /* DwPTS Scheduling Changes Start */
4614 if (subFrm->sfType == RG_SCH_SPL_SF_DATA)
4616 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
4618 /* Calculate the less RE's because of DwPTS */
4619 lostRe = rb * (cellDl->noResPerRb[cfi] -
4620 cellDl->numReDwPts[cfi]);
4622 /* Increase number of RBs in Spl SF to compensate for lost REs */
4623 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
4626 /* DwPTS Scheduling Changes End */
4628 /*ccpu00115595- end*/
4629 if (rb > subFrm->bw - subFrm->bwAssigned)
4634 /* Allocation succeeded for 'remNumRapid' */
4637 printf("\n!!!RAR alloc noBytes:%u,allwdTbSz:%u,tbs:%u,rb:%u\n",
4638 noBytes,allwdTbSz,tbs,rb);
4643 RLOG_ARG0(L_INFO,DBG_CELLID,cell->cellId,"BW alloc Failed");
4647 subFrm->bwAssigned = subFrm->bwAssigned + rb;
4649 /* Fill AllocInfo structure */
4650 allocInfo->raRspAlloc[noRaRnti].rnti = rarnti;
4651 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].bytesReq = tbs;
4652 allocInfo->raRspAlloc[noRaRnti].rbsReq = rb;
4653 allocInfo->raRspAlloc[noRaRnti].dlSf = subFrm;
4654 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].imcs = mcs;
4655 allocInfo->raRspAlloc[noRaRnti].raIndex = raIndex;
4656 /* RACH changes for multiple RAPID handling */
4657 allocInfo->raRspAlloc[noRaRnti].numRapids = remNumRapid;
4658 allocInfo->raRspAlloc[noRaRnti].nPrb = nPrb;
4659 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].noLyr = 1;
4660 allocInfo->raRspAlloc[noRaRnti].vrbgReq = RGSCH_CEIL(nPrb,MAX_5GTF_VRBG_SIZE);
4661 schdNumRapid += remNumRapid;
4665 /***********************************************************
4667 * Func : rgSCHCmnUlAllocFillRbInfo
4669 * Desc : Fills the start RB and the number of RBs for
4670 * uplink allocation.
4678 **********************************************************/
4680 Void rgSCHCmnUlAllocFillRbInfo
4687 Void rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc)
4690 RgSchUlAlloc *alloc;
4693 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4694 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4695 uint8_t cfi = cellDl->currCfi;
4698 alloc->grnt.rbStart = (alloc->sbStart * cellUl->sbSize) +
4699 cell->dynCfiCb.bwInfo[cfi].startRb;
4701 /* Num RBs = numSbAllocated * sbSize - less RBs in the last SB */
4702 alloc->grnt.numRb = (alloc->numSb * cellUl->sbSize);
4708 * @brief Grant request for Msg3.
4712 * Function : rgSCHCmnMsg3GrntReq
4714 * This is invoked by downlink scheduler to request allocation
4717 * - Attempt to allocate msg3 in the current msg3 subframe
4718 * Allocation attempt based on whether preamble is from group A
4719 * and the value of MESSAGE_SIZE_GROUP_A
4720 * - Link allocation with passed RNTI and msg3 HARQ process
4721 * - Set the HARQ process ID (*hqProcIdRef)
4723 * @param[in] RgSchCellCb *cell
4724 * @param[in] CmLteRnti rnti
4725 * @param[in] Bool preamGrpA
4726 * @param[in] RgSchUlHqProcCb *hqProc
4727 * @param[out] RgSchUlAlloc **ulAllocRef
4728 * @param[out] uint8_t *hqProcIdRef
4732 static Void rgSCHCmnMsg3GrntReq
4737 RgSchUlHqProcCb *hqProc,
4738 RgSchUlAlloc **ulAllocRef,
4739 uint8_t *hqProcIdRef
4742 static Void rgSCHCmnMsg3GrntReq(cell, rnti, preamGrpA, hqProc,
4743 ulAllocRef, hqProcIdRef)
4747 RgSchUlHqProcCb *hqProc;
4748 RgSchUlAlloc **ulAllocRef;
4749 uint8_t *hqProcIdRef;
4752 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4753 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
4755 RgSchUlAlloc *alloc;
4760 *ulAllocRef = NULLP;
4762 /* Fix: ccpu00120610 Use remAllocs from subframe during msg3 allocation */
4763 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
4767 if (preamGrpA == FALSE)
4769 numSb = cellUl->ra.prmblBNumSb;
4770 iMcs = cellUl->ra.prmblBIMcs;
4774 numSb = cellUl->ra.prmblANumSb;
4775 iMcs = cellUl->ra.prmblAIMcs;
4778 if ((hole = rgSCHUtlUlHoleFirst(sf)) != NULLP)
4780 if(*sf->allocCountRef == 0)
4782 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4783 /* Reinitialize the hole */
4784 if (sf->holeDb->count == 1 && (hole->start == 0)) /* Sanity check of holeDb */
4786 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4787 /* Re-Initialize available subbands because of CFI change*/
4788 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4792 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4793 "Error! holeDb sanity check failed RNTI:%d",rnti);
4796 if (numSb <= hole->num)
4799 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
4800 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
4801 alloc->grnt.iMcs = iMcs;
4802 alloc->grnt.iMcsCrnt = iMcs;
4803 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
4804 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
4805 /* To include the length and ModOrder in DataRecp Req.*/
4806 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
4807 RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
4808 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
4809 alloc->grnt.nDmrs = 0;
4810 alloc->grnt.hop = 0;
4811 alloc->grnt.delayBit = 0;
4812 alloc->grnt.isRtx = FALSE;
4813 *ulAllocRef = alloc;
4814 *hqProcIdRef = (cellUl->msg3SchdHqProcIdx);
4815 hqProc->procId = *hqProcIdRef;
4816 hqProc->ulSfIdx = (cellUl->msg3SchdIdx);
4819 alloc->pdcch = FALSE;
4820 alloc->forMsg3 = TRUE;
4821 alloc->hqProc = hqProc;
4822 rgSCHUhmNewTx(hqProc, (uint8_t)(cell->rachCfg.maxMsg3Tx - 1), alloc);
4823 //RLOG_ARG4(L_DEBUG,DBG_CELLID,cell->cellId,
4825 "\nRNTI:%d MSG3 ALLOC proc(%lu)procId(%d)schdIdx(%d)\n",
4827 ((PTR)alloc->hqProc),
4828 alloc->hqProc->procId,
4829 alloc->hqProc->ulSfIdx);
4830 RLOG_ARG2(L_DEBUG,DBG_CELLID,cell->cellId,
4831 "alloc(%p)maxMsg3Tx(%d)",
4833 cell->rachCfg.maxMsg3Tx);
4842 * @brief This function determines the allocation limits and
4843 * parameters that aid in DL scheduling.
4847 * Function: rgSCHCmnDlSetUeAllocLmt
4848 * Purpose: This function determines the Maximum RBs
4849 * a UE is eligible to get based on softbuffer
4850 * limitation and cellSch->dl.maxDlBwPerUe. The Codeword
4851 * specific parameters like iTbs, eff and noLyrs
4852 * are also set in this function. This function
4853 * is called while UE configuration and UeDlCqiInd.
4855 * Invoked by: Scheduler
4857 * @param[in] RgSchCellCb *cellCb
4858 * @param[in] RgSchCmnDlUe *ueDl
4863 static Void rgSCHCmnDlSetUeAllocLmt
4870 static Void rgSCHCmnDlSetUeAllocLmt(cell, ueDl, isEmtcUe)
4878 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4879 uint8_t cfi = cellSch->dl.currCfi;
4883 if(TRUE == isEmtcUe)
4885 /* ITbs for CW0 for 1 Layer Tx */
4886 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4887 [ueDl->mimoInfo.cwInfo[0].cqi];
4888 /* ITbs for CW0 for 2 Layer Tx */
4889 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4890 [ueDl->mimoInfo.cwInfo[0].cqi];
4891 /* Eff for CW0 for 1 Layer Tx */
4892 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4893 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4894 /* Eff for CW0 for 2 Layer Tx */
4895 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4896 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4898 /* ITbs for CW1 for 1 Layer Tx */
4899 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4900 [ueDl->mimoInfo.cwInfo[1].cqi];
4901 /* ITbs for CW1 for 2 Layer Tx */
4902 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4903 [ueDl->mimoInfo.cwInfo[1].cqi];
4904 /* Eff for CW1 for 1 Layer Tx */
4905 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4906 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4907 /* Eff for CW1 for 2 Layer Tx */
4908 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4909 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4914 /* ITbs for CW0 for 1 Layer Tx */
4915 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4916 [ueDl->mimoInfo.cwInfo[0].cqi];
4917 /* ITbs for CW0 for 2 Layer Tx */
4918 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4919 [ueDl->mimoInfo.cwInfo[0].cqi];
4920 /* Eff for CW0 for 1 Layer Tx */
4921 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4922 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4923 /* Eff for CW0 for 2 Layer Tx */
4924 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4925 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4927 /* ITbs for CW1 for 1 Layer Tx */
4928 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4929 [ueDl->mimoInfo.cwInfo[1].cqi];
4930 /* ITbs for CW1 for 2 Layer Tx */
4931 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4932 [ueDl->mimoInfo.cwInfo[1].cqi];
4933 /* Eff for CW1 for 1 Layer Tx */
4934 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4935 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4936 /* Eff for CW1 for 2 Layer Tx */
4937 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4938 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4942 // ueDl->laCb.cqiBasediTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0] * 100;
4944 /* Assigning noLyrs to each CW assuming optimal Spatial multiplexing
4946 (ueDl->mimoInfo.ri/2 == 0)? (ueDl->mimoInfo.cwInfo[0].noLyr = 1) : \
4947 (ueDl->mimoInfo.cwInfo[0].noLyr = ueDl->mimoInfo.ri/2);
4948 ueDl->mimoInfo.cwInfo[1].noLyr = ueDl->mimoInfo.ri - ueDl->mimoInfo.cwInfo[0].noLyr;
4949 /* rg002.101:ccpu00102106: correcting DL harq softbuffer limitation logic.
4950 * The maxTbSz is the maximum number of PHY bits a harq process can
4951 * hold. Hence we limit our allocation per harq process based on this.
4952 * Earlier implementation we misinterpreted the maxTbSz to be per UE
4953 * per TTI, but in fact it is per Harq per TTI. */
4954 /* rg002.101:ccpu00102106: cannot exceed the harq Tb Size
4955 * and harq Soft Bits limit.*/
4957 /* Considering iTbs corresponding to 2 layer transmission for
4958 * codeword0(approximation) and the maxLayers supported by
4959 * this UE at this point of time. */
4960 RG_SCH_CMN_TBS_TO_MODODR(ueDl->mimoInfo.cwInfo[0].iTbs[1], modOrder);
4962 /* Bits/modOrder gives #REs, #REs/noResPerRb gives #RBs */
4963 /* rg001.301 -MOD- [ccpu00119213] : avoiding wraparound */
4964 maxRb = ((ueDl->maxSbSz)/(cellSch->dl.noResPerRb[cfi] * modOrder *\
4965 ueDl->mimoInfo.ri));
4966 if (cellSch->dl.isDlFreqSel)
4968 /* Rounding off to left nearest multiple of RBG size */
4969 maxRb -= maxRb % cell->rbgSize;
4971 ueDl->maxRb = RGSCH_MIN(maxRb, cellSch->dl.maxDlBwPerUe);
4972 if (cellSch->dl.isDlFreqSel)
4974 /* Rounding off to right nearest multiple of RBG size */
4975 if (ueDl->maxRb % cell->rbgSize)
4977 ueDl->maxRb += (cell->rbgSize -
4978 (ueDl->maxRb % cell->rbgSize));
4982 /* Set the index of the cwInfo, which is better in terms of
4983 * efficiency. If RI<2, only 1 CW, hence btrCwIdx shall be 0 */
4984 if (ueDl->mimoInfo.ri < 2)
4986 ueDl->mimoInfo.btrCwIdx = 0;
4990 if (ueDl->mimoInfo.cwInfo[0].eff[ueDl->mimoInfo.cwInfo[0].noLyr-1] <\
4991 ueDl->mimoInfo.cwInfo[1].eff[ueDl->mimoInfo.cwInfo[1].noLyr-1])
4993 ueDl->mimoInfo.btrCwIdx = 1;
4997 ueDl->mimoInfo.btrCwIdx = 0;
5007 * @brief This function updates TX Scheme.
5011 * Function: rgSCHCheckAndSetTxScheme
5012 * Purpose: Compares the link-adaptation (CQI-based) iTbs
5013 * with the iTbs currently in use for codeword 0 and,
5014 * based on the configured thresholds, forces or releases
5015 * the TD transmission-scheme change indication
5016 * (RG_SCH_CMN_TD_TXSCHEME_CHNG) for the UE.
5017 * (Purpose text previously duplicated from rgSCHCmnDlSetUeAllocLmt.)
5019 * Invoked by: Scheduler
5021 * @param[in] RgSchCellCb *cell
5022 * @param[in] RgSchUeCb *ue
5027 static Void rgSCHCheckAndSetTxScheme
5033 static Void rgSCHCheckAndSetTxScheme(cell, ue)
5038 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
5039 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue ,cell);
5040 uint8_t cfi = cellSch->dl.currCfi;
5042 uint8_t cqiBasediTbs;
5046 maxiTbs = (*(RgSchCmnCqiToTbs*)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
5047 [RG_SCH_CMN_MAX_CQI - 1];
5048 cqiBasediTbs = (ueDl->laCb[0].cqiBasediTbs)/100;
5049 actualiTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0];
5051 if((actualiTbs < RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR) && (cqiBasediTbs >
5052 actualiTbs) && ((cqiBasediTbs - actualiTbs) > RG_SCH_TXSCHEME_CHNG_THRSHD))
5054 RG_SCH_CMN_SET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
5057 if(actualiTbs >= maxiTbs)
5059 RG_SCH_CMN_UNSET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
5066 * @brief This function determines the allocation limits and
5067 * parameters that aid in DL scheduling.
5071 * Function: rgSCHCmnDlSetUeAllocLmtLa
5072 * Purpose: This function determines the Maximum RBs
5073 * a UE is eligible to get based on softbuffer
5074 * limitation and cellSch->dl.maxDlBwPerUe. The Codeword
5075 * specific parameters like iTbs, eff and noLyrs
5076 * are also set in this function. This function
5077 * is called while UE configuration and UeDlCqiInd.
5079 * Invoked by: Scheduler
5081 * @param[in] RgSchCellCb *cell
5082 * @param[in] RgSchUeCb *ue
5087 Void rgSCHCmnDlSetUeAllocLmtLa
5093 Void rgSCHCmnDlSetUeAllocLmtLa(cell, ue)
5100 uint8_t reportediTbs;
5101 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
5102 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
5103 uint8_t cfi = cellSch->dl.currCfi;
5108 maxiTbs = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[RG_SCH_CMN_MAX_CQI - 1];
5109 if(ueDl->cqiFlag == TRUE)
5111 for(cwIdx=0; cwIdx < RG_SCH_CMN_MAX_CW_PER_UE; cwIdx++)
5115 /* Calcluating the reported iTbs for code word 0 */
5116 reportediTbs = ue->ue5gtfCb.mcs;
5118 iTbsNew = (S32) reportediTbs;
5120 if(!ueDl->laCb[cwIdx].notFirstCqi)
5122 /* This is the first CQI report from UE */
5123 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
5124 ueDl->laCb[cwIdx].notFirstCqi = TRUE;
5126 else if ((RG_ITBS_DIFF(reportediTbs, ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0])) > 5)
5128 /* Ignore this iTBS report and mark that last iTBS report was */
5129 /* ignored so that subsequently we reset the LA algorithm */
5130 ueDl->laCb[cwIdx].lastiTbsIgnored = TRUE;
5131 ueDl->laCb[cwIdx].numLastiTbsIgnored++;
5132 if( ueDl->laCb[cwIdx].numLastiTbsIgnored > 10)
5134 /* CQI reported by UE is not catching up. Reset the LA algorithm */
5135 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
5136 ueDl->laCb[cwIdx].deltaiTbs = 0;
5137 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
5138 ueDl->laCb[cwIdx].numLastiTbsIgnored = 0;
5143 if (ueDl->laCb[cwIdx].lastiTbsIgnored != TRUE)
5145 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
5146 (80 * ueDl->laCb[cwIdx].cqiBasediTbs))/100;
5150 /* Reset the LA as iTbs in use caught up with the value */
5151 /* reported by UE. */
5152 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
5153 (80 * ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] * 100))/100;
5154 ueDl->laCb[cwIdx].deltaiTbs = 0;
5155 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
5159 iTbsNew = (ueDl->laCb[cwIdx].cqiBasediTbs + ueDl->laCb[cwIdx].deltaiTbs)/100;
5161 RG_SCH_CHK_ITBS_RANGE(iTbsNew, maxiTbs);
5163 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] = RGSCH_MIN(iTbsNew, cell->thresholds.maxDlItbs);
5164 //ueDl->mimoInfo.cwInfo[cwIdx].iTbs[1] = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
5166 ue->ue5gtfCb.mcs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
5168 printf("reportediTbs[%d] cqiBasediTbs[%d] deltaiTbs[%d] iTbsNew[%d] mcs[%d] cwIdx[%d]\n",
5169 reportediTbs, ueDl->laCb[cwIdx].cqiBasediTbs, ueDl->laCb[cwIdx].deltaiTbs,
5170 iTbsNew, ue->ue5gtfCb.mcs, cwIdx);
5174 if((ue->mimoInfo.txMode != RGR_UE_TM_3) && (ue->mimoInfo.txMode != RGR_UE_TM_4))
5179 ueDl->cqiFlag = FALSE;
5186 /***********************************************************
5188 * Func : rgSCHCmnDlHqPResetTemp
5190 * Desc : Reset whatever variables where temporarily used
5191 * during UE scheduling.
5199 **********************************************************/
/* Clear the per-TTI scheduling list linkage of a DL HARQ process.
 * After this call the hqP is detached from both the RB-assignment
 * request list (reqLnk) and the scheduled list (schdLstLnk). */
5201 Void rgSCHCmnDlHqPResetTemp
5203 RgSchDlHqProcCb *hqP
5206 Void rgSCHCmnDlHqPResetTemp(hqP)
5207 RgSchDlHqProcCb *hqP;
5212 /* Fix: syed having a hqP added to Lists for RB assignment rather than
5213 * a UE, as adding UE was limiting handling some scenarios */
/* Null the list-node back-pointers so list macros treat this hqP as unlinked */
5214 hqP->reqLnk.node = (PTR)NULLP;
5215 hqP->schdLstLnk.node = (PTR)NULLP;
5218 } /* rgSCHCmnDlHqPResetTemp */
5220 /***********************************************************
5222 * Func : rgSCHCmnDlUeResetTemp
5224 * Desc : Reset whatever variables were temporarily used
5225 * during UE scheduling.
5233 **********************************************************/
/* Reset the per-TTI DL allocation state of a UE for the given HARQ proc.
 * Clears the UE's RB-allocation control block (preserving rnti and laaCb)
 * and then unlinks the hqP from the scheduling lists via
 * rgSCHCmnDlHqPResetTemp(). The reset is applied only when hqP is the
 * currently scheduled proc (cmnUe->proc) or no proc is scheduled. */
5235 Void rgSCHCmnDlUeResetTemp
5238 RgSchDlHqProcCb *hqP
5241 Void rgSCHCmnDlUeResetTemp(ue, hqP)
5243 RgSchDlHqProcCb *hqP;
5246 RgSchDlRbAlloc *allocInfo;
5247 RgSchCmnDlUe *cmnUe = RG_SCH_CMN_GET_DL_UE(ue,hqP->hqE->cell);
5253 /* Fix : syed check for UE's existence was useless.
5254 * Instead we need to check that reset is done only for the
5255 * information of a scheduled harq proc, which is cmnUe->proc.
5256 * Reset should not be done for non-scheduled hqP */
5257 if((cmnUe->proc == hqP) || (cmnUe->proc == NULLP))
5259 cmnUe->proc = NULLP;
5260 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, hqP->hqE->cell);
/* Save laaCb across the memset below — it must survive the per-TTI reset */
5262 tmpCb = allocInfo->laaCb;
5264 memset(allocInfo, 0, sizeof(RgSchDlRbAlloc));
/* Re-seed the fields that are valid across TTIs */
5265 allocInfo->rnti = ue->ueId;
5267 allocInfo->laaCb = tmpCb;
5269 /* Fix: syed moving this to a common function for both scheduled
5270 * and non-scheduled UEs */
5271 cmnUe->outStndAlloc = 0;
/* Also unlink the HARQ proc from the request/scheduled lists */
5273 rgSCHCmnDlHqPResetTemp(hqP);
5276 } /* rgSCHCmnDlUeResetTemp */
5278 /***********************************************************
5280 * Func : rgSCHCmnUlUeResetTemp
5282 * Desc : Reset whatever variables were temporarily used
5283 * during UE scheduling.
5291 **********************************************************/
/* Reset the per-TTI UL allocation state of a UE: zero the UE's common
 * UL allocation info for the given cell. */
5293 Void rgSCHCmnUlUeResetTemp
5299 Void rgSCHCmnUlUeResetTemp(cell, ue)
5304 RgSchCmnUlUe *cmnUlUe = RG_SCH_CMN_GET_UL_UE(ue,cell);
5307 memset(&cmnUlUe->alloc, 0, sizeof(cmnUlUe->alloc));
5310 } /* rgSCHCmnUlUeResetTemp */
5315 * @brief This function fills the PDCCH information from dlProc.
5319 * Function: rgSCHCmnFillPdcch
5320 * Purpose: This function fills in the PDCCH information
5321 * obtained from the RgSchDlRbAlloc
5322 * during common channel scheduling(P, SI, RA - RNTI's).
5324 * Invoked by: Downlink Scheduler
5326 * @param[out] RgSchPdcch* pdcch
5327 * @param[in] RgSchDlRbAlloc* rbAllocInfo
/* Fill a PDCCH structure for COMMON channel scheduling (P/SI/RA-RNTI)
 * from the RB allocation decided by the DL scheduler. Supported formats
 * here: B1/B2 (under RG_5GTF), 1A (RA type 2, localized) and 1 (RA type 0).
 * NOTE(review): the RLOG string below contains typos ("icorrect",
 * "dciForamt"); left untouched as it is runtime text. */
5332 Void rgSCHCmnFillPdcch
5336 RgSchDlRbAlloc *rbAllocInfo
5339 Void rgSCHCmnFillPdcch(cell, pdcch, rbAllocInfo)
5342 RgSchDlRbAlloc *rbAllocInfo;
5347 /* common channel pdcch filling,
5348 * only 1A and Local is supported */
5349 pdcch->rnti = rbAllocInfo->rnti;
5350 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
5351 switch(rbAllocInfo->dciFormat)
5353 #ifdef RG_5GTF /* ANOOP: ToDo: DCI format B1/B2 filling */
5354 case TFU_DCI_FORMAT_B1:
/* 5GTF format B1: most fields are fixed/hard-coded for common channels;
 * hqProcId/ndi/RV come from the common grant, not a UE HARQ proc */
5357 pdcch->dci.u.formatB1Info.formatType = 0;
5358 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].cmnGrnt.xPDSCHRange;
5359 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].cmnGrnt.rbAssign;
5360 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = 0;
5361 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5362 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = 0;
5363 //pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].ndi;
5364 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].cmnGrnt.rv;
5365 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5366 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5367 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5368 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5369 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5370 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5371 //TODO_SID: Need to update
5372 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5373 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5374 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5375 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5376 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5377 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5378 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].cmnGrnt.SCID;
5379 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5380 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5381 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5383 break; /* case TFU_DCI_FORMAT_B1: */
5386 case TFU_DCI_FORMAT_B2:
/* B2 filling not implemented for common channels yet */
5388 //printf(" RG_5GTF:: Pdcch filling with DCI format B2\n");
5390 break; /* case TFU_DCI_FORMAT_B2: */
5393 case TFU_DCI_FORMAT_1A:
5394 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
5396 /*Nprb indication at PHY for common Ch
5397 *setting least significant bit of tpc field to 1 if
5398 nPrb=3 and 0 otherwise. */
5399 if (rbAllocInfo->nPrb == 3)
5401 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 1;
5405 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 0;
/* Localized RA type 2; ndi/rv fixed to 0 for common channel Tx */
5407 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
5408 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
5409 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
5410 rbAllocInfo->tbInfo[0].imcs;
5411 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = 0;
5412 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = 0;
/* Encode start/length as a Resource Indication Value (RIV) */
5414 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
5416 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5417 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5418 rbAllocInfo->allocInfo.raType2.rbStart,
5419 rbAllocInfo->allocInfo.raType2.numRb);
5422 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = \
5425 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5426 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5429 break; /* case TFU_DCI_FORMAT_1A: */
5430 case TFU_DCI_FORMAT_1:
5431 pdcch->dci.u.format1Info.tpcCmd = 0;
5432 /* Avoiding this check,as we dont support Type1 RA */
5434 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Unpack the 32-bit RA type 0 RBG bitmask MSB-first into the 4-byte map */
5437 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5438 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5439 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5441 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5442 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5444 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5445 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5447 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5448 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
5452 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5453 pdcch->dci.u.format1Info.allocInfo.ndi = 0;
5454 pdcch->dci.u.format1Info.allocInfo.mcs = rbAllocInfo->tbInfo[0].imcs;
5455 pdcch->dci.u.format1Info.allocInfo.rv = 0;
5457 pdcch->dci.u.format1Info.dai = 1;
5461 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Allocator's icorrect "
5462 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
5470 * @brief This function finds whether the subframe is special subframe or not.
5474 * Function: rgSCHCmnIsSplSubfrm
5475 * Purpose: This function finds the subframe index of the special subframe
5476 * and finds whether the current DL index matches it or not.
5478 * Invoked by: Scheduler
5480 * @param[in] uint8_t splfrmCnt
5481 * @param[in] uint8_t curSubfrmIdx
5482 * @param[in] uint8_t periodicity
5483 * @param[in] RgSchTddSubfrmInfo *subfrmInfo
/* TDD helper: determine whether curSubfrmIdx is the special subframe.
 * Derives the expected special-subframe index from the special-frame
 * count, the DL-subframe counts per half frame and the periodicity,
 * then compares it against the current DL subframe index.
 * Returns TRUE when they match (return statements not visible here). */
5488 static Bool rgSCHCmnIsSplSubfrm
5491 uint8_t curSubfrmIdx,
5492 uint8_t periodicity,
5493 RgSchTddSubfrmInfo *subfrmInfo
5496 static Bool rgSCHCmnIsSplSubfrm(splfrmCnt, curSubfrmIdx, periodicity, subfrmInfo)
5498 uint8_t curSubfrmIdx;
5499 uint8_t periodicity;
5500 RgSchTddSubfrmInfo *subfrmInfo;
5503 uint8_t dlSfCnt = 0;
5504 uint8_t splfrmIdx = 0;
/* 5 ms switch-point periodicity: two special subframes per radio frame,
 * so count DL subframes per half-frame pair */
5509 if(periodicity == RG_SCH_CMN_5_MS_PRD)
/* Odd special-frame count: add the first half-frame's DL subframes */
5513 dlSfCnt = ((splfrmCnt-1)/2) *\
5514 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5515 dlSfCnt = dlSfCnt + subfrmInfo->numFrmHf1;
5519 dlSfCnt = (splfrmCnt/2) * \
5520 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
/* 10 ms periodicity: one special subframe per radio frame */
5525 dlSfCnt = splfrmCnt * subfrmInfo->numFrmHf1;
5527 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1 +\
5528 (periodicity*splfrmCnt - dlSfCnt);
5532 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1;
5535 if(splfrmIdx == curSubfrmIdx)
5544 * @brief This function updates DAI or UL index.
5548 * Function: rgSCHCmnUpdHqAndDai
5549 * Purpose: Updates the DAI based on UL-DL Configuration
5550 * index and UE. It also updates the HARQ feedback
5551 * time and 'm' index.
5555 * @param[in] RgDlHqProcCb *hqP
5556 * @param[in] RgSchDlSf *subFrm
5557 * @param[in] RgSchDlHqTbCb *tbCb
5558 * @param[in] uint8_t tbAllocIdx
/* TDD: record the HARQ feedback occasion (sfn/subframe/m) for a TB and,
 * for the first TB of a UE allocation, update the DL/UL DAI counters via
 * rgSCHCmnUpdDai() and propagate them into the PDCCH and HARQ proc. */
5563 static Void rgSCHCmnUpdHqAndDai
5565 RgSchDlHqProcCb *hqP,
5567 RgSchDlHqTbCb *tbCb,
5571 static Void rgSCHCmnUpdHqAndDai(hqP, subFrm, tbCb,tbAllocIdx)
5572 RgSchDlHqProcCb *hqP;
5574 RgSchDlHqTbCb *tbCb;
5578 RgSchUeCb *ue = hqP->hqE->ue;
5583 /* set the time at which UE shall send the feedback
5584 * for this process */
5585 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5586 subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5587 tbCb->fdbkTime.subframe = subFrm->dlFdbkInfo.subframe;
5588 tbCb->m = subFrm->dlFdbkInfo.m;
/* Alternate path (subFrm not supplied — branch head not visible):
 * derive feedback time from the proc's own subframe */
5592 /* set the time at which UE shall send the feedback
5593 * for this process */
5594 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5595 hqP->subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5596 tbCb->fdbkTime.subframe = hqP->subFrm->dlFdbkInfo.subframe;
5597 tbCb->m = hqP->subFrm->dlFdbkInfo.m;
5600 /* ccpu00132340-MOD- DAI need to be updated for first TB only*/
5601 if(ue && !tbAllocIdx)
5603 Bool havePdcch = (tbCb->hqP->pdcch ? TRUE : FALSE);
5606 dlDai = rgSCHCmnUpdDai(ue, &tbCb->fdbkTime, tbCb->m, havePdcch,tbCb->hqP,
5609 {/* Non SPS occasions */
5610 tbCb->hqP->pdcch->dlDai = dlDai;
5611 /* hqP->ulDai is used for N1 resource filling
5612 * when SPS occaions present in a bundle */
5613 tbCb->hqP->ulDai = tbCb->dai;
5614 tbCb->hqP->dlDai = dlDai;
5618 /* Updating pucchFdbkIdx for both PUCCH or PUSCH
5620 tbCb->pucchFdbkIdx = tbCb->hqP->ulDai;
5627 * @brief This function updates DAI or UL index.
5631 * Function: rgSCHCmnUpdDai
5632 * Purpose: Updates the DAI in the ack-nack info, a valid
5633 * ue should be passed
5637 * @param[in] RgDlHqProcCb *hqP
5638 * @param[in] RgSchDlSf *subFrm
5639 * @param[in] RgSchDlHqTbCb *tbCb
5640 * @return uint8_t dlDai
/* TDD: update the Downlink Assignment Index bookkeeping for a UE.
 * Locates (or allocates from the circular anInfo pool) the ACK/NACK
 * feedback record for the given feedback time and serving cell,
 * increments the DL/UL DAI counters and returns the DL DAI.
 * The optional ulDai out-parameter receives the UL DAI (skipped for
 * release PDCCH). Caller must pass a valid ue. */
5644 uint8_t rgSCHCmnUpdDai
5647 CmLteTimingInfo *fdbkTime,
5650 RgSchDlHqProcCb *hqP,
5654 uint8_t rgSCHCmnUpdDai(ue, fdbkTime, m, havePdcch,tbCb,servCellId,hqP,ulDai)
5656 CmLteTimingInfo *fdbkTime;
5659 RgSchDlHqProcCb *hqP;
5663 RgSchTddANInfo *anInfo;
5664 uint8_t servCellIdx;
5665 uint8_t ackNackFdbkArrSize;
/* Resolve the serving-cell index for this HARQ proc's cell */
5672 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5673 hqP->hqE->cell->cellId,
5676 servCellIdx = RGSCH_PCELL_INDEX;
5678 ackNackFdbkArrSize = hqP->hqE->cell->ackNackFdbkArrSize;
5680 {/* SPS on primary cell */
5681 servCellIdx = RGSCH_PCELL_INDEX;
5682 ackNackFdbkArrSize = ue->cell->ackNackFdbkArrSize;
5686 anInfo = rgSCHUtlGetUeANFdbkInfo(ue, fdbkTime,servCellIdx);
5688 /* If no ACK/NACK feedback already present, create a new one */
5691 anInfo = &ue->cellInfo[servCellIdx]->anInfo[ue->cellInfo[servCellIdx]->nextFreeANIdx];
5692 anInfo->sfn = fdbkTime->sfn;
5693 anInfo->subframe = fdbkTime->subframe;
5694 anInfo->latestMIdx = m;
5695 /* Fixing DAI value - ccpu00109162 */
5696 /* Handle TDD case as in MIMO definition of the function */
5702 anInfo->isSpsOccasion = FALSE;
5703 /* set the free Index to store Ack/Nack Information*/
5704 ue->cellInfo[servCellIdx]->nextFreeANIdx = (ue->cellInfo[servCellIdx]->nextFreeANIdx + 1) %
/* Existing record: bump the 'm' index and DAI counters */
5710 anInfo->latestMIdx = m;
5711 /* Fixing DAI value - ccpu00109162 */
5712 /* Handle TDD case as in MIMO definition of the function */
5713 anInfo->ulDai = anInfo->ulDai + 1;
5716 anInfo->dlDai = anInfo->dlDai + 1;
5720 /* ignoring the Scell check,
5721 * for primary cell this field is unused*/
5724 anInfo->n1ResTpcIdx = hqP->tpc;
5728 {/* As this not required for release pdcch */
5729 *ulDai = anInfo->ulDai;
5732 return (anInfo->dlDai);
5735 #endif /* ifdef LTE_TDD */
/* Debug counters: [rv][tbIdx] retransmission tallies per redundancy
 * version (incremented in rgSCHCmnFillHqPTb) and UL grant rate. */
5737 uint32_t rgHqRvRetxCnt[4][2];
5738 uint32_t rgUlrate_grant;
5741 * @brief This function fills the HqP TB with rbAllocInfo.
5745 * Function: rgSCHCmnFillHqPTb
5746 * Purpose: This function fills in the HqP TB with rbAllocInfo.
5748 * Invoked by: rgSCHCmnFillHqPTb
5750 * @param[in] RgSchCellCb* cell
5751 * @param[in] RgSchDlRbAlloc *rbAllocInfo,
5752 * @param[in] uint8_t tbAllocIdx
5753 * @param[in] RgSchPdcch *pdcch
/* Fill a HARQ-proc TB from the RB allocation for (re)transmission.
 * Three paths: disabled TB (mcs=0, rv=1 per 36.213 7.1.7.2 for DCI 2/2A),
 * retransmission (advance rv, count retx), and new transmission
 * (rv=rv[0], record TB size and timing). If the TB is enabled it is
 * attached to the subframe Tx list with the assigned PDCCH.
 * Note: two signatures exist under #ifdef LTEMAC_SPS (non-static/static). */
5759 Void rgSCHCmnFillHqPTb
5762 RgSchDlRbAlloc *rbAllocInfo,
5767 Void rgSCHCmnFillHqPTb(cell, rbAllocInfo, tbAllocIdx, pdcch)
5769 RgSchDlRbAlloc *rbAllocInfo;
5775 static Void rgSCHCmnFillHqPTb
5778 RgSchDlRbAlloc *rbAllocInfo,
5783 static Void rgSCHCmnFillHqPTb(cell, rbAllocInfo, tbAllocIdx, pdcch)
5785 RgSchDlRbAlloc *rbAllocInfo;
5789 #endif /* LTEMAC_SPS */
5791 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
5792 RgSchDlTbAllocInfo *tbAllocInfo = &rbAllocInfo->tbInfo[tbAllocIdx];
5793 RgSchDlHqTbCb *tbInfo = tbAllocInfo->tbCb;
5794 RgSchDlHqProcCb *hqP = tbInfo->hqP;
5797 /*ccpu00120365-ADD-if tb is disabled, set mcs=0,rv=1.
5798 * Relevant for DCI format 2 & 2A as per 36.213-7.1.7.2
5800 if ( tbAllocInfo->isDisabled)
5803 tbInfo->dlGrnt.iMcs = 0;
5804 tbInfo->dlGrnt.rv = 1;
5806 /* Fill for TB retransmission */
5807 else if (tbInfo->txCntr > 0)
5810 tbInfo->timingInfo = cmnCellDl->time;
/* On DTX keep the same MCS for the retransmission */
5812 if ((tbInfo->isAckNackDtx == TFU_HQFDB_DTX))
5814 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5815 rgHqRvRetxCnt[tbInfo->dlGrnt.rv][tbInfo->tbIdx]++;
/* Advance the redundancy version cyclically (rvIdx mod 4) */
5819 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[++(tbInfo->ccchSchdInfo.rvIdx) & 0x03];
5822 /* fill the scheduler information of hqProc */
5823 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5824 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx,hqP->tbInfo,tbInfo->tbIdx );
5825 rgSCHDhmHqTbRetx(hqP->hqE, tbInfo->timingInfo, hqP, tbInfo->tbIdx);
5827 /* Fill for TB transmission */
5830 /* Fill the HqProc */
5831 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5832 tbInfo->tbSz = tbAllocInfo->bytesAlloc;
5833 tbInfo->timingInfo = cmnCellDl->time;
5835 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[0];
5836 /* fill the scheduler information of hqProc */
5837 tbInfo->ccchSchdInfo.rvIdx = 0;
5838 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5839 /* DwPts Scheduling Changes Start */
5840 /* DwPts Scheduling Changes End */
5841 cell->measurements.dlBytesCnt += tbAllocInfo->bytesAlloc;
5844 /*ccpu00120365:-ADD-only add to subFrm list if tb is not disabled */
5845 if ( tbAllocInfo->isDisabled == FALSE )
5847 /* Set the number of transmitting SM layers for this TB */
5848 tbInfo->numLyrs = tbAllocInfo->noLyr;
5849 /* Set the TB state as WAITING to indicate TB has been
5850 * considered for transmission */
5851 tbInfo->state = HQ_TB_WAITING;
5852 hqP->subFrm = rbAllocInfo->dlSf;
5853 tbInfo->hqP->pdcch = pdcch;
5854 //tbInfo->dlGrnt.numRb = rbAllocInfo->rbsAlloc;
5855 rgSCHUtlDlHqPTbAddToTx(hqP->subFrm, hqP, tbInfo->tbIdx);
5861 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
5865 * Function: rgSCHCmnFillHqPPdcchDciFrmtB1B2
5866 * Purpose: This function fills in the PDCCH information
5867 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5868 * for dedicated service scheduling. It also
5869 * obtains TPC to be filled in from the power module.
5870 * Assign the PDCCH to HQProc.
5872 * Invoked by: Downlink Scheduler
5874 * @param[in] RgSchCellCb* cell
5875 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5876 * @param[in] RgDlHqProc* hqP
5877 * @param[out] RgSchPdcch *pdcch
5878 * @param[in] uint8_t tpc
/* 5GTF: fill PDCCH DCI format B1 or B2 for a dedicated DL grant.
 * First fills TB 0 of the HARQ proc (rgSCHCmnFillHqPTb), then populates
 * the chosen format from the TB's DL grant. Many fields are hard-coded
 * placeholders (see TODO_SID comments); tpcCmd is fixed to 1 ("no
 * change") instead of the supplied tpc. B2 differs from B1 only in
 * formatType=1 and AntPorts_numLayers=4 (2 layers). */
5883 static Void rgSCHCmnFillHqPPdcchDciFrmtB1B2
5886 RgSchDlRbAlloc *rbAllocInfo,
5887 RgSchDlHqProcCb *hqP,
5892 static Void rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, pdcch, tpc)
5894 RgSchDlRbAlloc *rbAllocInfo;
5895 RgSchDlHqProcCb *hqP;
5902 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5903 //Currently hardcoding values here.
5904 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
5905 switch(rbAllocInfo->dciFormat)
5907 case TFU_DCI_FORMAT_B1:
5909 pdcch->dci.u.formatB1Info.formatType = 0;
5910 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5911 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5912 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5913 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5914 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5915 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5916 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5917 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5918 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5919 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5920 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5921 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5922 //TODO_SID: Need to update
5923 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5924 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5925 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5926 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5927 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5928 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5929 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5930 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5931 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5932 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5935 case TFU_DCI_FORMAT_B2:
5937 pdcch->dci.u.formatB2Info.formatType = 1;
5938 pdcch->dci.u.formatB2Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5939 pdcch->dci.u.formatB2Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5940 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5941 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5942 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5943 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5944 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5945 pdcch->dci.u.formatB2Info.CSI_BSI_BRI_Req = 0;
5946 pdcch->dci.u.formatB2Info.CSIRS_BRRS_TxTiming = 0;
5947 pdcch->dci.u.formatB2Info.CSIRS_BRRS_SymbIdx = 0;
5948 pdcch->dci.u.formatB2Info.CSIRS_BRRS_ProcInd = 0;
5949 pdcch->dci.u.formatB2Info.xPUCCH_TxTiming = 0;
5950 //TODO_SID: Need to update
5951 pdcch->dci.u.formatB2Info.freqResIdx_xPUCCH = 0;
5952 pdcch->dci.u.formatB2Info.beamSwitch = 0;
5953 pdcch->dci.u.formatB2Info.SRS_Config = 0;
5954 pdcch->dci.u.formatB2Info.SRS_Symbol = 0;
5955 //TODO_SID: Need to check.Currently setting 4(2 layer, ports(8,9) w/o OCC).
5956 pdcch->dci.u.formatB2Info.AntPorts_numLayers = 4;
5957 pdcch->dci.u.formatB2Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5958 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5959 pdcch->dci.u.formatB2Info.tpcCmd = 1; //tpc;
5960 pdcch->dci.u.formatB2Info.DL_PCRS = 0;
5964 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId," 5GTF_ERROR Allocator's icorrect "
5965 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
/* Debug throughput accumulators (bits) used by rgSCHCmnFillHqPPdcch:
 * combined PCell+SCell total, and SCell-only totals per codeword. */
5972 uint32_t totPcellSCell;
5973 uint32_t addedForScell;
5974 uint32_t addedForScell1;
5975 uint32_t addedForScell2;
5977 * @brief This function fills the PDCCH information from dlProc.
5981 * Function: rgSCHCmnFillHqPPdcch
5982 * Purpose: This function fills in the PDCCH information
5983 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5984 * for dedicated service scheduling. It also
5985 * obtains TPC to be filled in from the power module.
5986 * Assign the PDCCH to HQProc.
5988 * Invoked by: Downlink Scheduler
5990 * @param[in] RgSchCellCb* cell
5991 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5992 * @param[in] RgDlHqProc* hqP
/* Fill the PDCCH for a dedicated DL HARQ allocation: obtain the PUCCH
 * TPC from the power module, update PRB/iTbs/throughput statistics
 * (per-UE and per-cell, with CA/SCell debug accounting), then dispatch
 * to the format-specific fill routine based on rbAllocInfo->dciFormat
 * (only B1/B2 dispatch is visible in this listing). */
5997 Void rgSCHCmnFillHqPPdcch
6000 RgSchDlRbAlloc *rbAllocInfo,
6001 RgSchDlHqProcCb *hqP
6004 Void rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP)
6006 RgSchDlRbAlloc *rbAllocInfo;
6007 RgSchDlHqProcCb *hqP;
6010 RgSchCmnDlCell *cmnCell = RG_SCH_CMN_GET_DL_CELL(cell);
6011 RgSchPdcch *pdcch = rbAllocInfo->pdcch;
6018 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6025 tpc = rgSCHPwrPucchTpcForUe(cell, hqP->hqE->ue);
6027 /* Fix: syed moving this to a common function for both scheduled
6028 * and non-scheduled UEs */
6030 pdcch->ue = hqP->hqE->ue;
/* CSG (closed subscriber group) PRB accounting */
6031 if (hqP->hqE->ue->csgMmbrSta == FALSE)
6033 cmnCell->ncsgPrbCnt += rbAllocInfo->rbsAlloc;
6035 cmnCell->totPrbCnt += rbAllocInfo->rbsAlloc;
/* Per-UE scheduler stats: PRBs, codeword-0 iTbs and bits (bytes << 3) */
6038 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlPrbUsg +=
6039 rbAllocInfo->rbsAlloc;
6040 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw0iTbs +=
6041 rbAllocInfo->tbInfo[0].iTbs;
6042 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw0iTbs ++;
6043 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
6044 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6047 totPcellSCell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6048 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6050 addedForScell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6051 addedForScell1 += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6053 printf (" Hqp %d cell %d addedForScell %lu addedForScell1 %lu sfn:sf %d:%d \n",
6055 hqP->hqE->cell->cellId,
6059 cell->crntTime.slot);
/* Per-cell scheduler stats for codeword 0 */
6063 hqP->hqE->cell->tenbStats->sch.dlPrbUsage[0] +=
6064 rbAllocInfo->rbsAlloc;
6065 hqP->hqE->cell->tenbStats->sch.dlSumCw0iTbs +=
6066 rbAllocInfo->tbInfo[0].iTbs;
6067 hqP->hqE->cell->tenbStats->sch.dlNumCw0iTbs ++;
6068 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
6069 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
/* Second codeword stats, only when TB 1 is actually scheduled */
6070 if (rbAllocInfo->tbInfo[1].schdlngForTb)
6072 hqP->hqE->cell->tenbStats->sch.dlSumCw1iTbs +=
6073 rbAllocInfo->tbInfo[1].iTbs;
6074 hqP->hqE->cell->tenbStats->sch.dlNumCw1iTbs ++;
6075 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw1iTbs +=
6076 rbAllocInfo->tbInfo[1].iTbs;
6077 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw1iTbs ++;
6078 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
6079 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6083 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6085 addedForScell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6086 addedForScell2 += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6088 printf (" Hqp %d cell %d addedForScell %lu addedForScell2 %lu \n",
6090 hqP->hqE->cell->cellId,
6095 totPcellSCell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6099 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
6100 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6103 printf ("add DL TPT is %lu sfn:sf %d:%d \n", hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt ,
6105 cell->crntTime.slot);
6111 pdcch->rnti = rbAllocInfo->rnti;
6112 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
6113 /* Update subframe and pdcch info in HqTb control block */
6114 switch(rbAllocInfo->dciFormat)
6117 case TFU_DCI_FORMAT_B1:
6118 case TFU_DCI_FORMAT_B2:
6120 // printf(" RG_5GTF:: Pdcch filling with DCI format B1/B2\n");
6121 rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, \
6127 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6128 "Allocator's incorrect dciForamt Fill for RNTI:%d",rbAllocInfo->rnti);
6135 * @brief This function fills the PDCCH DCI format 1 information from dlProc.
6139 * Function: rgSCHCmnFillHqPPdcchDciFrmt1
6140 * Purpose: This function fills in the PDCCH information
6141 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6142 * for dedicated service scheduling. It also
6143 * obtains TPC to be filled in from the power module.
6144 * Assign the PDCCH to HQProc.
6146 * Invoked by: Downlink Scheduler
6148 * @param[in] RgSchCellCb* cell
6149 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6150 * @param[in] RgDlHqProc* hqP
6151 * @param[out] RgSchPdcch *pdcch
6152 * @param[in] uint8_t tpc
/* Fill PDCCH DCI format 1 for a dedicated DL grant: TB info, TPC,
 * RA type 0 resource map, HARQ proc id (forced to 0 on SPS DL
 * activation/reactivation of a fresh proc), ndi/mcs/rv from TB 0,
 * and the TDD DAI (from the UE's ACK/NACK feedback record, or
 * RG_SCH_MAX_DAI_IDX when no record exists; fixed 1 for RACH/no-UE). */
6158 static Void rgSCHCmnFillHqPPdcchDciFrmt1
6161 RgSchDlRbAlloc *rbAllocInfo,
6162 RgSchDlHqProcCb *hqP,
6167 static Void rgSCHCmnFillHqPPdcchDciFrmt1(cell, rbAllocInfo, hqP, pdcch, tpc)
6169 RgSchDlRbAlloc *rbAllocInfo;
6170 RgSchDlHqProcCb *hqP;
6177 RgSchTddANInfo *anInfo;
6181 /* For activation or reactivation,
6182 * Harq ProcId should be 0 */
6183 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6187 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6188 pdcch->dci.u.format1Info.tpcCmd = tpc;
6189 /* Avoiding this check,as we dont support Type1 RA */
6191 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Unpack the 32-bit RA type 0 RBG bitmask MSB-first into 4 bytes */
6194 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
6195 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
6196 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6198 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
6199 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6201 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
6202 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6204 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
6205 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* New transmission of an SPS-activating/reactivating proc -> procId 0 */
6210 if ((!(hqP->tbInfo[0].txCntr)) &&
6211 (cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6212 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6213 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV)))
6216 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
6220 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
6223 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
6226 pdcch->dci.u.format1Info.allocInfo.ndi =
6227 rbAllocInfo->tbInfo[0].tbCb->ndi;
6228 pdcch->dci.u.format1Info.allocInfo.mcs =
6229 rbAllocInfo->tbInfo[0].imcs;
6230 pdcch->dci.u.format1Info.allocInfo.rv =
6231 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6233 if(hqP->hqE->ue != NULLP)
6236 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6237 hqP->hqE->cell->cellId,
6240 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6241 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6243 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6244 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6249 pdcch->dci.u.format1Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6253 /* Fixing DAI value - ccpu00109162 */
6254 pdcch->dci.u.format1Info.dai = RG_SCH_MAX_DAI_IDX;
6260 /* always 0 for RACH */
6261 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
6263 /* Fixing DAI value - ccpu00109162 */
6264 pdcch->dci.u.format1Info.dai = 1;
6273 * @brief This function fills the PDCCH DCI format 1A information from dlProc.
6277 * Function: rgSCHCmnFillHqPPdcchDciFrmt1A
6278 * Purpose: This function fills in the PDCCH information
6279 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6280 * for dedicated service scheduling. It also
6281 * obtains TPC to be filled in from the power module.
6282 * Assign the PDCCH to HQProc.
6284 * Invoked by: Downlink Scheduler
6286 * @param[in] RgSchCellCb* cell
6287 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6288 * @param[in] RgDlHqProc* hqP
6289 * @param[out] RgSchPdcch *pdcch
6290 * @param[in] uint8_t tpc
/* Fill PDCCH DCI format 1A for a dedicated DL grant: TB info, TPC,
 * mcs/ndi/rv from TB 0, localized RA type 2 allocation encoded as a
 * RIV, HARQ proc id (0 on SPS activation/reactivation of a fresh
 * proc), and TDD DAI as in format 1 (logs an error if the ACK/NACK
 * record is missing; fixed 1 for RACH/no-UE). */
6295 static Void rgSCHCmnFillHqPPdcchDciFrmt1A
6298 RgSchDlRbAlloc *rbAllocInfo,
6299 RgSchDlHqProcCb *hqP,
6304 static Void rgSCHCmnFillHqPPdcchDciFrmt1A(cell, rbAllocInfo, hqP, pdcch, tpc)
6306 RgSchDlRbAlloc *rbAllocInfo;
6307 RgSchDlHqProcCb *hqP;
6314 RgSchTddANInfo *anInfo;
6318 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6322 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6323 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
6324 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = tpc;
6325 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
6326 rbAllocInfo->tbInfo[0].imcs;
6327 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = TRUE;
/* New transmission of an SPS-activating/reactivating proc -> procId 0 */
6329 if ((!(hqP->tbInfo[0].txCntr)) &&
6330 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6331 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6332 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6335 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val = 0;
6339 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val
6343 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val =
6346 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = \
6347 rbAllocInfo->tbInfo[0].tbCb->ndi;
6348 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = \
6349 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6350 /* As of now, we do not support Distributed allocations */
6351 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
6352 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
6353 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
/* Encode start/length as a Resource Indication Value (RIV) */
6355 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
6356 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
6357 rbAllocInfo->allocInfo.raType2.rbStart,
6358 rbAllocInfo->allocInfo.raType2.numRb);
6360 if(hqP->hqE->ue != NULLP)
6363 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6364 hqP->hqE->cell->cellId,
6366 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6367 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6369 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6370 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6373 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
6376 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val =
6377 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6381 /* Fixing DAI value - ccpu00109162 */
6382 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = RG_SCH_MAX_DAI_IDX;
6383 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6384 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6391 /* always 0 for RACH */
6392 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres
6395 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
6396 /* Fixing DAI value - ccpu00109162 */
6397 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
6405 * @brief This function fills the PDCCH DCI format 1B information from dlProc.
6409 * Function: rgSCHCmnFillHqPPdcchDciFrmt1B
6410 * Purpose: This function fills in the PDCCH information
6411 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6412 * for dedicated service scheduling. It also
6413 * obtains TPC to be filled in from the power module.
6414 * Assign the PDCCH to HQProc.
6416 * Invoked by: Downlink Scheduler
6418 * @param[in] RgSchCellCb* cell
6419 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6420 * @param[in] RgDlHqProc* hqP
6421 * @param[out] RgSchPdcch *pdcch
6422 * @param[in] uint8_t tpc
/* Fill PDCCH DCI format 1B for a dedicated DL grant: like 1A (TPC,
 * mcs/ndi/rv, localized RA type 2 RIV, SPS-aware HARQ proc id, TDD
 * DAI) plus the closed-loop precoding fields pmiCfm/tPmi unpacked
 * from mimoAllocInfo.precIdxInfo (high nibble = confirmation,
 * low nibble = TPMI). */
6427 static Void rgSCHCmnFillHqPPdcchDciFrmt1B
6430 RgSchDlRbAlloc *rbAllocInfo,
6431 RgSchDlHqProcCb *hqP,
6436 static Void rgSCHCmnFillHqPPdcchDciFrmt1B(cell, rbAllocInfo, hqP, pdcch, tpc)
6438 RgSchDlRbAlloc *rbAllocInfo;
6439 RgSchDlHqProcCb *hqP;
6446 RgSchTddANInfo *anInfo;
6450 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6454 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6455 pdcch->dci.u.format1bInfo.tpcCmd = tpc;
6456 pdcch->dci.u.format1bInfo.allocInfo.mcs = \
6457 rbAllocInfo->tbInfo[0].imcs;
/* New transmission of an SPS-activating/reactivating proc -> procId 0 */
6459 if ((!(hqP->tbInfo[0].txCntr)) &&
6460 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6461 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6462 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6465 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = 0;
6469 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
6472 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
6474 pdcch->dci.u.format1bInfo.allocInfo.ndi = \
6475 rbAllocInfo->tbInfo[0].tbCb->ndi;
6476 pdcch->dci.u.format1bInfo.allocInfo.rv = \
6477 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6478 /* As of now, we do not support Distributed allocations */
6479 pdcch->dci.u.format1bInfo.allocInfo.isLocal = TRUE;
6480 pdcch->dci.u.format1bInfo.allocInfo.nGap2.pres = NOTPRSNT;
6481 pdcch->dci.u.format1bInfo.allocInfo.alloc.type =
6483 pdcch->dci.u.format1bInfo.allocInfo.alloc.u.riv =
6484 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
6485 rbAllocInfo->allocInfo.raType2.rbStart,
6486 rbAllocInfo->allocInfo.raType2.numRb);
6487 /* Fill precoding Info */
6488 pdcch->dci.u.format1bInfo.allocInfo.pmiCfm = \
6489 rbAllocInfo->mimoAllocInfo.precIdxInfo >> 4;
6490 pdcch->dci.u.format1bInfo.allocInfo.tPmi = \
6491 rbAllocInfo->mimoAllocInfo.precIdxInfo & 0x0F;
6493 if(hqP->hqE->ue != NULLP)
6496 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6497 hqP->hqE->cell->cellId,
6499 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6500 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6502 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6503 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6508 pdcch->dci.u.format1bInfo.dai =
6509 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6513 pdcch->dci.u.format1bInfo.dai = RG_SCH_MAX_DAI_IDX;
6514 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6515 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6526 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
6530 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
6531 * Purpose: This function fills in the PDCCH information
6532 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6533 * for dedicated service scheduling. It also
6534 * obtains TPC to be filled in from the power module.
6535 * Assign the PDCCH to HQProc.
6537 * Invoked by: Downlink Scheduler
6539 * @param[in] RgSchCellCb* cell
6540 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6541 * @param[in] RgDlHqProc* hqP
6542 * @param[out] RgSchPdcch *pdcch
6543 * @param[in] uint8_t tpc
6548 static Void rgSCHCmnFillHqPPdcchDciFrmt2
6551 RgSchDlRbAlloc *rbAllocInfo,
6552 RgSchDlHqProcCb *hqP,
/* K&R-style definition (non-ANSI build variant) */
6557 static Void rgSCHCmnFillHqPPdcchDciFrmt2(cell, rbAllocInfo, hqP, pdcch, tpc)
6559 RgSchDlRbAlloc *rbAllocInfo;
6560 RgSchDlHqProcCb *hqP;
6567 RgSchTddANInfo *anInfo;
6571 /* ccpu00119023-ADD-For activation or reactivation,
6572 * Harq ProcId should be 0 */
6573 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Fill TB0 HARQ info; TB1 is filled too when scheduled OR disabled,
 * since format 2 always signals both transport blocks. */
6577 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6578 /*ccpu00120365:-ADD-call also if tb is disabled */
6579 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6580 rbAllocInfo->tbInfo[1].isDisabled)
6582 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6584 pdcch->dci.u.format2Info.tpcCmd = tpc;
6585 /* Avoiding this check,as we dont support Type1 RA */
6587 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Type-0 RA: serialize the 32-bit RBG bitmask big-endian into 4 bytes */
6590 pdcch->dci.u.format2Info.allocInfo.isAllocType0 = TRUE;
6591 pdcch->dci.u.format2Info.allocInfo.resAllocMap[0] =
6592 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6594 pdcch->dci.u.format2Info.allocInfo.resAllocMap[1] =
6595 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6597 pdcch->dci.u.format2Info.allocInfo.resAllocMap[2] =
6598 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6600 pdcch->dci.u.format2Info.allocInfo.resAllocMap[3] =
6601 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/re-activation on first Tx signals HARQ proc id 0 */
6606 if ((!(hqP->tbInfo[0].txCntr)) &&
6607 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6608 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6609 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6612 pdcch->dci.u.format2Info.allocInfo.harqProcId = 0;
6616 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
/* (alternate build branch - same assignment when SPS support is compiled out) */
6619 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6621 /* Initialize the TB info for both the TBs */
/* mcs=0, rv=1 is the "TB disabled" signalling default per 36.213 */
6622 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].mcs = 0;
6623 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].rv = 1;
6624 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].mcs = 0;
6625 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].rv = 1;
6626 /* Fill tbInfo for scheduled TBs */
/* Note: indexing by tbCb->tbIdx handles a possible TB swap */
6627 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6628 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6629 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6630 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6631 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6632 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6633 /* If we reach this function. It is safely assumed that
6634 * rbAllocInfo->tbInfo[0] always has non default valid values.
6635 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6636 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6638 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6639 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6640 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6641 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6642 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6643 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
/* MIMO: codeword-to-layer swap flag and precoding index from allocation */
6645 pdcch->dci.u.format2Info.allocInfo.transSwap =
6646 rbAllocInfo->mimoAllocInfo.swpFlg;
6647 pdcch->dci.u.format2Info.allocInfo.precoding =
6648 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD only: derive DAI from the UE's ACK/NACK feedback bookkeeping */
6650 if(hqP->hqE->ue != NULLP)
6654 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6655 hqP->hqE->cell->cellId,
6657 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6658 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6660 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6661 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6666 pdcch->dci.u.format2Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
/* No A/N info found: fall back to max DAI and log the anomaly */
6670 pdcch->dci.u.format2Info.dai = RG_SCH_MAX_DAI_IDX;
6671 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6672 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6682 * @brief This function fills the PDCCH DCI format 2A information from dlProc.
6686 * Function: rgSCHCmnFillHqPPdcchDciFrmt2A
6687 * Purpose: This function fills in the PDCCH information
6688 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6689 * for dedicated service scheduling. It also
6690 * obtains TPC to be filled in from the power module.
6691 * Assign the PDCCH to HQProc.
6693 * Invoked by: Downlink Scheduler
6695 * @param[in] RgSchCellCb* cell
6696 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6697 * @param[in] RgDlHqProc* hqP
6698 * @param[out] RgSchPdcch *pdcch
6699 * @param[in] uint8_t tpc
6704 static Void rgSCHCmnFillHqPPdcchDciFrmt2A
6707 RgSchDlRbAlloc *rbAllocInfo,
6708 RgSchDlHqProcCb *hqP,
/* K&R-style definition (non-ANSI build variant) */
6713 static Void rgSCHCmnFillHqPPdcchDciFrmt2A(cell, rbAllocInfo, hqP, pdcch, tpc)
6715 RgSchDlRbAlloc *rbAllocInfo;
6716 RgSchDlHqProcCb *hqP;
6722 RgSchTddANInfo *anInfo;
6726 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Fill TB0 HARQ info; TB1 is filled too when scheduled OR disabled,
 * since format 2A always signals both transport blocks. */
6730 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6731 /*ccpu00120365:-ADD-call also if tb is disabled */
6732 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6733 rbAllocInfo->tbInfo[1].isDisabled)
6736 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6739 pdcch->dci.u.format2AInfo.tpcCmd = tpc;
6740 /* Avoiding this check,as we dont support Type1 RA */
6742 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Type-0 RA: serialize the 32-bit RBG bitmask big-endian into 4 bytes */
6745 pdcch->dci.u.format2AInfo.allocInfo.isAllocType0 = TRUE;
6746 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[0] =
6747 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6749 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[1] =
6750 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6752 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[2] =
6753 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6755 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[3] =
6756 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/re-activation on first Tx signals HARQ proc id 0 */
6761 if ((!(hqP->tbInfo[0].txCntr)) &&
6762 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6763 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6764 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6767 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = 0;
6771 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
/* (alternate build branch - same assignment when SPS support is compiled out) */
6774 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6776 /* Initialize the TB info for both the TBs */
/* mcs=0, rv=1 is the "TB disabled" signalling default per 36.213 */
6777 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].mcs = 0;
6778 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].rv = 1;
6779 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].mcs = 0;
6780 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].rv = 1;
6781 /* Fill tbInfo for scheduled TBs */
/* Note: indexing by tbCb->tbIdx handles a possible TB swap */
6782 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6783 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6784 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6785 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6786 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6787 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6788 /* If we reach this function. It is safely assumed that
6789 * rbAllocInfo->tbInfo[0] always has non default valid values.
6790 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6792 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6794 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6795 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6796 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6797 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6798 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6799 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
/* MIMO: codeword-to-layer swap flag and precoding index from allocation */
6802 pdcch->dci.u.format2AInfo.allocInfo.transSwap =
6803 rbAllocInfo->mimoAllocInfo.swpFlg;
6804 pdcch->dci.u.format2AInfo.allocInfo.precoding =
6805 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD only: derive DAI from the UE's ACK/NACK feedback bookkeeping */
6807 if(hqP->hqE->ue != NULLP)
6810 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6811 hqP->hqE->cell->cellId,
6813 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6814 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6816 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6817 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6822 pdcch->dci.u.format2AInfo.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
/* No A/N info found: fall back to max DAI and log the anomaly */
6826 pdcch->dci.u.format2AInfo.dai = RG_SCH_MAX_DAI_IDX;
6827 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6828 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6840 * @brief init of Sch vars.
6844 * Function: rgSCHCmnInitVars
6845 Purpose: Initialization of various UL subframe indices
 * All per-cell UL scheduling indices are set to RGSCH_INVALID_INFO
 * so that rgSCHCmnUpdVars() computes fresh values on the first TTI.
6847 * @param[in] RgSchCellCb *cell
 * @return Void
6852 static Void rgSCHCmnInitVars
/* K&R-style definition (non-ANSI build variant) */
6857 static Void rgSCHCmnInitVars(cell)
6861 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6864 cellUl->idx = RGSCH_INVALID_INFO;
6865 cellUl->schdIdx = RGSCH_INVALID_INFO;
6866 cellUl->schdHqProcIdx = RGSCH_INVALID_INFO;
6867 cellUl->msg3SchdIdx = RGSCH_INVALID_INFO;
/* eMTC build only: separate msg3 index for eMTC UEs */
6869 cellUl->emtcMsg3SchdIdx = RGSCH_INVALID_INFO;
6871 cellUl->msg3SchdHqProcIdx = RGSCH_INVALID_INFO;
6872 cellUl->rcpReqIdx = RGSCH_INVALID_INFO;
6873 cellUl->hqFdbkIdx[0] = RGSCH_INVALID_INFO;
6874 cellUl->hqFdbkIdx[1] = RGSCH_INVALID_INFO;
6875 cellUl->reTxIdx[0] = RGSCH_INVALID_INFO;
6876 cellUl->reTxIdx[1] = RGSCH_INVALID_INFO;
6877 /* Stack Crash problem for TRACE5 Changes. Added the return below */
6884 * @brief Updation of Sch vars per TTI.
6888 * Function: rgSCHCmnUpdVars
6889 * Purpose: Updation of Sch vars per TTI.
 * Recomputes, for the current (sfn, slot), the circular UL subframe
 * indices used by the scheduler: PUSCH scheduling index, msg3 index,
 * reception-request index, HARQ feedback index and retransmission index.
6891 * @param[in] RgSchCellCb *cell
 * @return Void
6896 Void rgSCHCmnUpdVars
/* K&R-style definition (non-ANSI build variant) */
6901 Void rgSCHCmnUpdVars(cell)
6905 CmLteTimingInfo timeInfo;
6906 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Absolute subframe number within the UL index window */
6910 idx = (cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot);
6911 cellUl->idx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
/* NOTE(review): debug printf in a per-TTI path - consider removing/log-gating */
6913 printf("idx %d cellUl->idx %d RGSCH_NUM_SUB_FRAMES_5G %d time(%d %d) \n",idx,cellUl->idx ,RGSCH_NUM_SUB_FRAMES_5G,cell->crntTime.sfn,cell->crntTime.slot);
6915 /* Need to scheduler for after SCHED_DELTA */
6916 /* UL allocation has been advanced by 1 subframe
6917 * so that we do not wrap around and send feedback
6918 * before the data is even received by the PHY */
6919 /* Introduced timing delta for UL control */
6920 idx = (cellUl->idx + TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA);
6921 cellUl->schdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6923 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6924 TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA)
6925 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6927 /* ccpu00127193 filling schdTime for logging and enhancement purpose*/
6928 cellUl->schdTime = timeInfo;
6930 /* msg3 scheduling two subframes after general scheduling */
6931 idx = (cellUl->idx + RG_SCH_CMN_DL_DELTA + RGSCH_RARSP_MSG3_DELTA);
6932 cellUl->msg3SchdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6934 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6935 RG_SCH_CMN_DL_DELTA+ RGSCH_RARSP_MSG3_DELTA)
6936 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* Index at which PHY reception requests are issued */
6938 idx = (cellUl->idx + TFU_RECPREQ_DLDELTA);
6940 cellUl->rcpReqIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6942 /* Downlink harq feedback is sometime after data reception / harq failure */
6943 /* Since feedback happens prior to scheduling being called, we add 1 to */
6944 /* take care of getting the correct subframe for feedback */
/* + RG_SCH_CMN_UL_NUM_SF keeps the subtraction non-negative before mod */
6945 idx = (cellUl->idx - TFU_CRCIND_ULDELTA + RG_SCH_CMN_UL_NUM_SF);
6947 printf("Finally setting cellUl->hqFdbkIdx[0] = %d TFU_CRCIND_ULDELTA %d RG_SCH_CMN_UL_NUM_SF %d\n",idx,TFU_CRCIND_ULDELTA,RG_SCH_CMN_UL_NUM_SF);
6949 cellUl->hqFdbkIdx[0] = (idx % (RG_SCH_CMN_UL_NUM_SF));
/* Retransmissions are scheduled at the same index as new transmissions */
6951 idx = ((cellUl->schdIdx) % (RG_SCH_CMN_UL_NUM_SF));
6953 cellUl->reTxIdx[0] = (uint8_t) idx;
6955 printf("cellUl->hqFdbkIdx[0] %d cellUl->reTxIdx[0] %d \n",cellUl->hqFdbkIdx[0], cellUl->reTxIdx[0] );
6957 /* RACHO: update cmn sched specific RACH variables,
6958 * mainly the prachMaskIndex */
6959 rgSCHCmnUpdRachParam(cell);
6968 * @brief To get uplink subframe index associated with current PHICH
6973 * Function: rgSCHCmnGetPhichUlSfIdx
6974 * Purpose: Gets uplink subframe index associated with current PHICH
6975 * transmission based on SFN and subframe no
 * (TDD build) Maps a PHICH occasion back to the UL subframe it
 * acknowledges, using the per-DL-subframe PHICH offset table.
6977 * @param[in] CmLteTimingInfo *timeInfo
6978 * @param[in] RgSchCellCb *cell
 * @return uint8_t UL subframe index, or RGSCH_INVALID_INFO if the
 * DL subframe carries no PHICH offset info
6983 uint8_t rgSCHCmnGetPhichUlSfIdx
6985 CmLteTimingInfo *timeInfo,
/* K&R-style definition (non-ANSI build variant) */
6989 uint8_t rgSCHCmnGetPhichUlSfIdx(timeInfo, cell)
6990 CmLteTimingInfo *timeInfo;
6994 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6996 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
7003 dlsf = rgSCHUtlSubFrmGet(cell, *timeInfo);
/* No PHICH mapping recorded for this DL subframe */
7005 if(dlsf->phichOffInfo.sfnOffset == RGSCH_INVALID_INFO)
7007 return (RGSCH_INVALID_INFO);
7009 subframe = dlsf->phichOffInfo.subframe;
/* Step the SFN back by the recorded offset, wrapping at RGSCH_MAX_SFN */
7011 sfn = (RGSCH_MAX_SFN + timeInfo->sfn -
7012 dlsf->phichOffInfo.sfnOffset) % RGSCH_MAX_SFN;
7014 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
7015 * wrap case such that idx will be proper*/
/* Count UL subframes elapsed up to (sfn, subframe); -1 as UL sf count from 0 */
7016 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7017 numUlSf = ((numUlSf * sfn) + rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subframe]) - 1;
7018 idx = numUlSf % (cellUl->numUlSubfrms);
7024 * @brief To get uplink subframe index.
7029 * Function: rgSCHCmnGetUlSfIdx
7030 * Purpose: Gets uplink subframe index based on SFN and subframe number.
 * (TDD build) Converts (sfn, subframe) into a circular index over the
 * cell's UL subframe array using the UL/DL configuration tables.
7032 * @param[in] CmLteTimingInfo *timeInfo
7033 * @param[in] uint8_t ulDlCfgIdx
 * @return uint8_t circular UL subframe index
7038 uint8_t rgSCHCmnGetUlSfIdx
7040 CmLteTimingInfo *timeInfo,
/* K&R-style definition (non-ANSI build variant) */
7044 uint8_t rgSCHCmnGetUlSfIdx(timeInfo, cell)
7045 CmLteTimingInfo *timeInfo;
7049 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
7050 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
7055 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
7056 * wrap case such that idx will be proper*/
/* UL subframes per SFN times elapsed SFNs, plus UL sfs within this SFN;
 * -1 because UL subframes are counted from 0 */
7057 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7058 numUlSf = ((numUlSf * timeInfo->sfn) + \
7059 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->subframe]) - 1;
7060 idx = numUlSf % (cellUl->numUlSubfrms);
7068 * @brief To get uplink hq index.
7073 * Function: rgSCHCmnGetUlHqProcIdx
7074 * Purpose: Gets uplink subframe index based on SFN and subframe number.
 * Derives the UL HARQ process id for a given timing: FDD/5G path is a
 * simple modulo over the absolute subframe count; the TDD path also
 * tracks an SFN cycle so the id stays consistent across SFN wrap.
7076 * @param[in] CmLteTimingInfo *timeInfo
7077 * @param[in] uint8_t ulDlCfgIdx
 * @return uint8_t UL HARQ process id
7082 uint8_t rgSCHCmnGetUlHqProcIdx
7084 CmLteTimingInfo *timeInfo,
/* K&R-style definition (non-ANSI build variant) */
7088 uint8_t rgSCHCmnGetUlHqProcIdx(timeInfo, cell)
7089 CmLteTimingInfo *timeInfo;
/* Non-TDD path: absolute subframe modulo the number of UL HARQ procs */
7097 numUlSf = (timeInfo->sfn * RGSCH_NUM_SUB_FRAMES_5G + timeInfo->slot);
7098 procId = numUlSf % RGSCH_NUM_UL_HQ_PROC;
/* TDD path below */
7100 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
7101 /*ccpu00130639 - MOD - To get correct UL HARQ Proc IDs for all UL/DL Configs*/
7102 uint8_t numUlSfInSfn;
7103 S8 sfnCycle = cell->tddHqSfnCycle;
7104 uint8_t numUlHarq = rgSchTddUlNumHarqProcTbl[ulDlCfgIdx]
7106 /* TRACE 5 Changes */
7108 /* Calculate the number of UL SF in one SFN */
7109 numUlSfInSfn = RGSCH_NUM_SUB_FRAMES -
7110 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7112 /* Check for the SFN wrap around case */
7113 if(cell->crntTime.sfn == 1023 && timeInfo->sfn == 0)
7117 else if(cell->crntTime.sfn == 0 && timeInfo->sfn == 1023)
7119 /* sfnCycle decremented by 1 */
/* +numUlHarq-1 keeps the modulo result non-negative for S8 arithmetic */
7120 sfnCycle = (sfnCycle + numUlHarq-1) % numUlHarq;
7122 /* Calculate the total number of UL sf */
7123 /* -1 is done since uplink sf are counted from 0 */
7124 numUlSf = numUlSfInSfn * (timeInfo->sfn + (sfnCycle*1024)) +
7125 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->slot] - 1;
7127 procId = numUlSf % numUlHarq;
7133 /* UL_ALLOC_CHANGES */
7134 /***********************************************************
7136 * Func : rgSCHCmnUlFreeAlloc
7138 * Desc : Free an allocation - invokes UHM and releases
7139 * alloc for the scheduler
7140 * Doest need subframe as argument
 *
 * Special case: if the allocation belongs to a RACH msg3 whose
 * HARQ retransmissions are exhausted without a CRC pass, the
 * whole RA control block (and its RNTI) is torn down as well.
7148 **********************************************************/
7150 Void rgSCHCmnUlFreeAlloc
/* K&R-style definition (non-ANSI build variant) */
7156 Void rgSCHCmnUlFreeAlloc(cell, alloc)
7158 RgSchUlAlloc *alloc;
7161 RgSchUlHqProcCb *hqProc;
7165 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
7166 if ((alloc->hqProc->remTx == 0) &&
7167 (alloc->hqProc->rcvdCrcInd == FALSE) &&
7170 RgSchRaCb *raCb = alloc->raCb;
7171 rgSCHUhmFreeProc(alloc->hqProc, cell);
7172 rgSCHUtlUlAllocRelease(alloc);
7173 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: release the allocation first, then free the HARQ proc.
 * hqProc is cached because the release may invalidate 'alloc'. */
7178 hqProc = alloc->hqProc;
7179 rgSCHUtlUlAllocRelease(alloc);
7180 rgSCHUhmFreeProc(hqProc, cell);
7185 /***********************************************************
7187 * Func : rgSCHCmnUlFreeAllocation
7189 * Desc : Free an allocation - invokes UHM and releases
7190 * alloc for the scheduler
 *
 * Variant of rgSCHCmnUlFreeAlloc that releases the allocation
 * against an explicit UL subframe (sf) via rgSCHUtlUlAllocRls.
7198 **********************************************************/
7200 Void rgSCHCmnUlFreeAllocation
/* K&R-style definition (non-ANSI build variant) */
7207 Void rgSCHCmnUlFreeAllocation(cell, sf, alloc)
7210 RgSchUlAlloc *alloc;
7213 RgSchUlHqProcCb *hqProc;
7218 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
7219 if ((alloc->hqProc->remTx == 0) &&
7220 (alloc->hqProc->rcvdCrcInd == FALSE) &&
7223 RgSchRaCb *raCb = alloc->raCb;
7224 rgSCHUhmFreeProc(alloc->hqProc, cell);
7225 rgSCHUtlUlAllocRls(sf, alloc);
7226 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: free the HARQ proc, then release the allocation.
 * NOTE(review): ordering differs from rgSCHCmnUlFreeAlloc - confirm
 * rgSCHUtlUlAllocRls does not need hqProc state. */
7231 hqProc = alloc->hqProc;
7232 rgSCHUhmFreeProc(hqProc, cell);
7234 /* re-setting the PRB count while freeing the allocations */
7237 rgSCHUtlUlAllocRls(sf, alloc);
7243 * @brief This function implements PDCCH allocation for an UE
7244 * in the currently running subframe.
7248 * Function: rgSCHCmnPdcchAllocCrntSf
7249 * Purpose: This function determines current DL subframe
7250 * and UE DL CQI to call the actual pdcch allocator
7252 * Note that this function is called only
7253 * when PDCCH request needs to be made during
7254 * uplink scheduling.
7256 * Invoked by: Scheduler
7258 * @param[in] RgSchCellCb *cell
7259 * @param[in] RgSchUeCb *ue
7260 * @return RgSchPdcch *
7261 * -# NULLP when unsuccessful
7264 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf
/* K&R-style definition (non-ANSI build variant) */
7270 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf(cell, ue)
7275 CmLteTimingInfo frm = cell->crntTime;
7276 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7278 RgSchPdcch *pdcch = NULLP;
/* Advance to the DL subframe in which UL-grant PDCCHs are sent */
7280 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
7281 sf = rgSCHUtlSubFrmGet(cell, frm);
/* Until SCell is ready, DCI0/1A is taken from the common search space */
7284 if (ue->allocCmnUlPdcch)
7286 pdcch = rgSCHCmnCmnPdcchAlloc(cell, sf);
7287 /* Since CRNTI Scrambled */
7290 pdcch->dciNumOfBits = ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
/* UE-specific search space: 5GTF DCI format A1 is requested here */
7296 //pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, y, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_0, FALSE);
7297 pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_A1, FALSE);
7302 /***********************************************************
7304 * Func : rgSCHCmnUlAllocFillNdmrs
7306 * Desc : Determines and fills N_dmrs for a UE uplink
 * allocation from the per-subband cyclic-shift table,
 * indexed by the allocation's starting subband.
7311 * Notes: N_dmrs determination is straightforward, so
7312 * it is configured per subband
7316 **********************************************************/
7318 Void rgSCHCmnUlAllocFillNdmrs
7320 RgSchCmnUlCell *cellUl,
/* K&R-style definition (non-ANSI build variant) */
7324 Void rgSCHCmnUlAllocFillNdmrs(cellUl, alloc)
7325 RgSchCmnUlCell *cellUl;
7326 RgSchUlAlloc *alloc;
7329 alloc->grnt.nDmrs = cellUl->dmrsArr[alloc->sbStart];
7333 /***********************************************************
7335 * Func : rgSCHCmnUlAllocLnkHqProc
7337 * Desc : Links a new allocation for an UE with the
7338 * appropriate HARQ process of the UE.
 *
 * For a retransmission the allocation is adapted to the
 * proc's previous grant; for a new transmission the UHM is
 * told to start a fresh transmission with the UE's max-retx.
7346 **********************************************************/
7348 Void rgSCHCmnUlAllocLnkHqProc
7351 RgSchUlAlloc *alloc,
7352 RgSchUlHqProcCb *proc,
/* K&R-style definition (non-ANSI build variant) */
7356 Void rgSCHCmnUlAllocLnkHqProc(ue, alloc, proc, isRetx)
7358 RgSchUlAlloc *alloc;
7359 RgSchUlHqProcCb *proc;
/* Retransmission path: adapt the alloc to the existing HARQ proc */
7366 rgSCHCmnUlAdapRetx(alloc, proc);
7370 #ifdef LTE_L2_MEAS /* L2_COUNTERS */
/* New transmission: register with UHM using the HARQ entity's retx limit */
7373 rgSCHUhmNewTx(proc, (((RgUeUlHqCb*)proc->hqEnt)->maxHqRetx), alloc);
7379 * @brief This function releases a PDCCH in the subframe that is
7380 * currently being allocated for.
7384 * Function: rgSCHCmnPdcchRlsCrntSf
7385 * Purpose: This function determines current DL subframe
7386 * which is considered for PDCCH allocation,
7387 * and then calls the actual function that
7388 * releases a PDCCH in a specific subframe.
7389 * Note that this function is called only
7390 * when PDCCH release needs to be made during
7391 * uplink scheduling.
7393 * Invoked by: Scheduler
7395 * @param[in] RgSchCellCb *cell
7396 * @param[in] RgSchPdcch *pdcch
 * @return Void
7400 Void rgSCHCmnPdcchRlsCrntSf
/* K&R-style definition (non-ANSI build variant) */
7406 Void rgSCHCmnPdcchRlsCrntSf(cell, pdcch)
7411 CmLteTimingInfo frm = cell->crntTime;
/* Same TFU_ULCNTRL_DLDELTA offset used when the PDCCH was allocated */
7415 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
7416 sf = rgSCHUtlSubFrmGet(cell, frm);
7417 rgSCHUtlPdcchPut(cell, &sf->pdcchInfo, pdcch);
7420 /***********************************************************
7422 * Func : rgSCHCmnUlFillPdcchWithAlloc
7424 * Desc : Fills a PDCCH with format 0 information.
 *
 * 5GTF variant: fills DCI format A1 or A2 (per the grant's
 * dciFrmt) from the UL allocation/grant; several fields are
 * hard-coded to 0 pending full support.
7432 **********************************************************/
7434 Void rgSCHCmnUlFillPdcchWithAlloc
7437 RgSchUlAlloc *alloc,
/* K&R-style definition (non-ANSI build variant) */
7441 Void rgSCHCmnUlFillPdcchWithAlloc(pdcch, alloc, ue)
7443 RgSchUlAlloc *alloc;
7450 pdcch->rnti = alloc->rnti;
7451 //pdcch->dci.dciFormat = TFU_DCI_FORMAT_A2;
7452 pdcch->dci.dciFormat = alloc->grnt.dciFrmt;
7454 //Currently hardcoding values here.
7455 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
7456 switch(pdcch->dci.dciFormat)
7458 case TFU_DCI_FORMAT_A1:
/* formatType 0 identifies DCI A1 */
7460 pdcch->dci.u.formatA1Info.formatType = 0;
7461 pdcch->dci.u.formatA1Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
7462 pdcch->dci.u.formatA1Info.xPUSCH_TxTiming = 0;
7463 pdcch->dci.u.formatA1Info.RBAssign = alloc->grnt.rbAssign;
7464 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
7465 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
7466 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
7467 pdcch->dci.u.formatA1Info.CSI_BSI_BRI_Req = 0;
7468 pdcch->dci.u.formatA1Info.CSIRS_BRRS_TxTiming = 0;
7469 pdcch->dci.u.formatA1Info.CSIRS_BRRS_SymbIdx = 0;
7470 pdcch->dci.u.formatA1Info.CSIRS_BRRS_ProcInd = 0;
7471 pdcch->dci.u.formatA1Info.numBSI_Reports = 0;
7472 pdcch->dci.u.formatA1Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
7473 pdcch->dci.u.formatA1Info.beamSwitch = 0;
7474 pdcch->dci.u.formatA1Info.SRS_Config = 0;
7475 pdcch->dci.u.formatA1Info.SRS_Symbol = 0;
7476 pdcch->dci.u.formatA1Info.REMapIdx_DMRS_PCRS_numLayers = 0;
7477 pdcch->dci.u.formatA1Info.SCID = alloc->grnt.SCID;
7478 pdcch->dci.u.formatA1Info.PMI = alloc->grnt.PMI;
7479 pdcch->dci.u.formatA1Info.UL_PCRS = 0;
7480 pdcch->dci.u.formatA1Info.tpcCmd = alloc->grnt.tpc;
7483 case TFU_DCI_FORMAT_A2:
/* formatType 1 identifies DCI A2; field layout mirrors A1 */
7485 pdcch->dci.u.formatA2Info.formatType = 1;
7486 pdcch->dci.u.formatA2Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
7487 pdcch->dci.u.formatA2Info.xPUSCH_TxTiming = 0;
7488 pdcch->dci.u.formatA2Info.RBAssign = alloc->grnt.rbAssign;
7489 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
7490 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
7491 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
7492 pdcch->dci.u.formatA2Info.CSI_BSI_BRI_Req = 0;
7493 pdcch->dci.u.formatA2Info.CSIRS_BRRS_TxTiming = 0;
7494 pdcch->dci.u.formatA2Info.CSIRS_BRRS_SymbIdx = 0;
7495 pdcch->dci.u.formatA2Info.CSIRS_BRRS_ProcInd = 0;
7496 pdcch->dci.u.formatA2Info.numBSI_Reports = 0;
7497 pdcch->dci.u.formatA2Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
7498 pdcch->dci.u.formatA2Info.beamSwitch = 0;
7499 pdcch->dci.u.formatA2Info.SRS_Config = 0;
7500 pdcch->dci.u.formatA2Info.SRS_Symbol = 0;
7501 pdcch->dci.u.formatA2Info.REMapIdx_DMRS_PCRS_numLayers = 0;
7502 pdcch->dci.u.formatA2Info.SCID = alloc->grnt.SCID;
7503 pdcch->dci.u.formatA2Info.PMI = alloc->grnt.PMI;
7504 pdcch->dci.u.formatA2Info.UL_PCRS = 0;
7505 pdcch->dci.u.formatA2Info.tpcCmd = alloc->grnt.tpc;
/* default: unexpected DCI format - log and leave the PDCCH unfilled */
7509 RLOG1(L_ERROR," 5GTF_ERROR UL Allocator's icorrect "
7510 "dciForamt Fill RNTI:%d",alloc->rnti);
7518 /***********************************************************
7520 * Func : rgSCHCmnUlAllocFillTpc
7522 * Desc : Determines and fills TPC for an UE allocation.
 * Delegates the TPC decision to the power-control module.
7530 **********************************************************/
7532 Void rgSCHCmnUlAllocFillTpc
/* K&R-style definition (non-ANSI build variant) */
7539 Void rgSCHCmnUlAllocFillTpc(cell, ue, alloc)
7542 RgSchUlAlloc *alloc;
7545 alloc->grnt.tpc = rgSCHPwrPuschTpcForUe(cell, ue);
7550 /***********************************************************
7552 * Func : rgSCHCmnAddUeToRefreshQ
7554 * Desc : Adds a UE to refresh queue, so that the UE is
7555 * periodically triggered to refresh it's GBR and
 * AMBR token buckets ('wait' is the timer delay).
7564 **********************************************************/
7566 static Void rgSCHCmnAddUeToRefreshQ
/* K&R-style definition (non-ANSI build variant) */
7573 static Void rgSCHCmnAddUeToRefreshQ(cell, ue, wait)
7579 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
7581 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
/* Build the timer-queue registration argument from scratch */
7585 memset(&arg, 0, sizeof(arg));
7586 arg.tqCp = &sched->tmrTqCp;
7587 arg.tq = sched->tmrTq;
7588 arg.timers = &ueSchd->tmr;
7592 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
7599 * @brief Perform UE reset procedure.
7603 * Function : rgSCHCmnUlUeReset
7605 * This functions performs BSR resetting and
7606 * triggers UL specific scheduler
7607 * to Perform UE reset procedure.
 * Clears per-LCG buffer status, releases outstanding UL
 * allocations and forwards the reset to the UL scheduler API.
7609 * @param[in] RgSchCellCb *cell
7610 * @param[in] RgSchUeCb *ue
 * @return Void
7614 static Void rgSCHCmnUlUeReset
/* K&R-style definition (non-ANSI build variant) */
7620 static Void rgSCHCmnUlUeReset(cell, ue)
7625 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7626 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
7628 RgSchCmnLcg *lcgCmn;
7630 RgSchCmnAllocRecord *allRcd;
/* Reset aggregate BSR / AMBR accounting */
7632 ue->ul.minReqBytes = 0;
7633 ue->ul.totalBsr = 0;
7635 ue->ul.nonGbrLcgBs = 0;
7636 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk outstanding UL allocation records (freed via rgSCHCmnUlUeDelAllocs) */
7638 node = ueUl->ulAllocLst.first;
7641 allRcd = (RgSchCmnAllocRecord *)node->node;
/* Reset every LCG's reported BS and restore configured GBR/MBR */
7645 for(lcgCnt = 0; lcgCnt < RGSCH_MAX_LCG_PER_UE; lcgCnt++)
7647 lcgCmn = RG_SCH_CMN_GET_UL_LCG(&ue->ul.lcgArr[lcgCnt]);
7649 lcgCmn->reportedBs = 0;
7650 lcgCmn->effGbr = lcgCmn->cfgdGbr;
7651 lcgCmn->effDeltaMbr = lcgCmn->deltaMbr;
7653 rgSCHCmnUlUeDelAllocs(cell, ue);
7655 ue->isSrGrant = FALSE;
/* Delegate to the configured UL scheduler (RR/PFS/MaxCI) */
7657 cellSchd->apisUl->rgSCHUlUeReset(cell, ue);
7659 /* Stack Crash problem for TRACE5 changes. Added the return below */
7665 * @brief RESET UL CQI and DL CQI&RI to conservative values
7666 * for a reestablishing UE.
7670 * Function : rgSCHCmnResetRiCqi
7672 * RESET UL CQI and DL CQI&RI to conservative values
7673 * for a reestablishing UE
7675 * @param[in] RgSchCellCb *cell
7676 * @param[in] RgSchUeCb *ue
 * @return Void
7680 static Void rgSCHCmnResetRiCqi
/* K&R-style definition (non-ANSI build variant) */
7686 static Void rgSCHCmnResetRiCqi(cell, ue)
7691 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7692 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
7693 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7694 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
7697 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
7698 cell->isCpUlExtend);
/* Fall back to the CCCH CQI (conservative) and rank 1 */
7700 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
7701 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
7702 ueDl->mimoInfo.ri = 1;
/* Closed-loop TMs lose their PMI on reset: force transmit diversity */
7703 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
7704 (ue->mimoInfo.txMode == RGR_UE_TM_6))
7706 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
7708 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7710 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* Recompute allocation limits from the reset CQI/RI */
7713 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
7715 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
7719 /* Request for an early Aper CQI in case of reest */
7720 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
7721 if(acqiCb && acqiCb->aCqiCfg.pres)
7723 acqiCb->aCqiTrigWt = 0;
7731 * @brief Perform UE reset procedure.
7735 * Function : rgSCHCmnDlUeReset
7737 * This functions performs BO resetting and
7738 * triggers DL specific scheduler
7739 * to Perform UE reset procedure.
 * Also detaches the UE from the PDCCH-order RACH queue and the
 * timing-advance list before delegating to the DL scheduler API.
7741 * @param[in] RgSchCellCb *cell
7742 * @param[in] RgSchUeCb *ue
 * @return Void
7746 static Void rgSCHCmnDlUeReset
/* K&R-style definition (non-ANSI build variant) */
7752 static Void rgSCHCmnDlUeReset(cell, ue)
7757 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7758 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
7759 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Remove from the PDCCH-order queue if queued */
7762 if (ueDl->rachInfo.poLnk.node != NULLP)
7764 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
7767 /* Fix: syed Remove from TA List if this UE is there.
7768 * If TA Timer is running. Stop it */
7769 if (ue->dlTaLnk.node)
7771 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
7772 ue->dlTaLnk.node = (PTR)NULLP;
7774 else if (ue->taTmr.tmrEvnt != TMR_NONE)
7776 rgSCHTmrStopTmr(cell, ue->taTmr.tmrEvnt, ue);
7779 cellSchd->apisDl->rgSCHDlUeReset(cell, ue);
/* Carrier aggregation build: reset secondary-cell DL state too */
7783 rgSCHSCellDlUeReset(cell,ue);
7789 * @brief Perform UE reset procedure.
7793 * Function : rgSCHCmnUeReset
7795 * This functions triggers specific scheduler
7796 * to Perform UE reset procedure.
 * Top-level reset: clears RACH association, power control, UL and
 * DL scheduler state, resets CQI/RI on every configured cell, and
 * finally asks MAC to reset the UE's HARQ entity.
7798 * @param[in] RgSchCellCb *cell
7799 * @param[in] RgSchUeCb *ue
 * @return Void
7805 Void rgSCHCmnUeReset
/* K&R-style definition (non-ANSI build variant) */
7811 Void rgSCHCmnUeReset(cell, ue)
7818 RgInfResetHqEnt hqEntRstInfo;
7820 /* RACHO: remove UE from pdcch, handover and rapId assoc Qs */
7821 rgSCHCmnDelRachInfo(cell, ue);
7823 rgSCHPwrUeReset(cell, ue);
7825 rgSCHCmnUlUeReset(cell, ue);
7826 rgSCHCmnDlUeReset(cell, ue);
7829 /* Making allocCmnUlPdcch TRUE to allocate DCI0/1A from Common search space.
7830 As because multiple cells are added hence 2 bits CqiReq is there
7831 This flag will be set to FALSE once we will get Scell READY */
7832 ue->allocCmnUlPdcch = TRUE;
7835 /* Fix : syed RESET UL CQI and DL CQI&RI to conservative values
7836 * for a reestablishing UE */
7837 /*Reset Cqi Config for all the configured cells*/
7838 for (idx = 0;idx < CM_LTE_MAX_CELLS; idx++)
7840 if (ue->cellInfo[idx] != NULLP)
7842 rgSCHCmnResetRiCqi(ue->cellInfo[idx]->cell, ue);
7845 /*After Reset Trigger APCQI for Pcell*/
7846 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7847 if(pCellInfo->acqiCb.aCqiCfg.pres)
7849 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
7852 /* sending HqEnt reset to MAC */
7853 hqEntRstInfo.cellId = cell->cellId;
7854 hqEntRstInfo.crnti = ue->ueId;
7856 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
7857 RgSchMacRstHqEnt(&pst,&hqEntRstInfo);
7863 * @brief UE out of MeasGap or AckNackReptn.
7867 * Function : rgSCHCmnActvtUlUe
7869 * This functions triggers specific scheduler
7870 * to start considering it for scheduling.
 * Thin dispatch to the configured UL scheduler's activate hook.
7872 * @param[in] RgSchCellCb *cell
7873 * @param[in] RgSchUeCb *ue
 * @return Void
7879 Void rgSCHCmnActvtUlUe
/* K&R-style definition (non-ANSI build variant) */
7885 Void rgSCHCmnActvtUlUe(cell, ue)
7890 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7892 /* : take care of this in UL retransmission */
7893 cellSchd->apisUl->rgSCHUlActvtUe(cell, ue);
7898 * @brief UE out of MeasGap or AckNackReptn.
7902 * Function : rgSCHCmnActvtDlUe
7904 * This functions triggers specific scheduler
7905 * to start considering it for scheduling.
 * Thin dispatch to the configured DL scheduler's activate hook.
7907 * @param[in] RgSchCellCb *cell
7908 * @param[in] RgSchUeCb *ue
 * @return Void
7914 Void rgSCHCmnActvtDlUe
/* K&R-style definition (non-ANSI build variant) */
7920 Void rgSCHCmnActvtDlUe(cell, ue)
7925 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7927 cellSchd->apisDl->rgSCHDlActvtUe(cell, ue);
7932 * @brief This API is invoked to indicate scheduler of a CRC indication.
7936 * Function : rgSCHCmnHdlUlTransInd
7937 * This API is invoked to indicate scheduler of a CRC indication.
7939 * @param[in] RgSchCellCb *cell
7940 * @param[in] RgSchUeCb *ue
7941 * @param[in] CmLteTimingInfo timingInfo
7946 Void rgSCHCmnHdlUlTransInd
7950 CmLteTimingInfo timingInfo
7953 Void rgSCHCmnHdlUlTransInd(cell, ue, timingInfo)
7956 CmLteTimingInfo timingInfo;
/* Record the time of the latest UL data/signal transmission from this UE;
 * if the UE was marked inactive pending a PDCCH order, clear that
 * inactivity bit in both DL and UL masks so scheduling resumes. */
7960 /* Update the latest UL dat/sig transmission time */
7961 RGSCHCPYTIMEINFO(timingInfo, ue->ul.ulTransTime);
7962 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
7964 /* Some UL Transmission from this UE.
7965 * Activate this UE if it was inactive */
7966 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7967 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7975 * @brief Compute the minimum Rank based on Codebook subset
7976 * restriction configuration for 4 Tx Ports and Tx Mode 4.
7980 * Function : rgSCHCmnComp4TxMode4
7982 * Depending on BitMap set at CBSR during Configuration
7983 * - return the least possible Rank
7986 * @param[in] uint32_t *pmiBitMap
7987 * @return RgSchCmnRank
7990 static RgSchCmnRank rgSCHCmnComp4TxMode4
7995 static RgSchCmnRank rgSCHCmnComp4TxMode4(pmiBitMap)
7996 uint32_t *pmiBitMap;
7999 uint32_t bitMap0, bitMap1;
8000 bitMap0 = pmiBitMap[0];
8001 bitMap1 = pmiBitMap[1];
8002 if((bitMap1) & 0xFFFF)
8004 return (RG_SCH_CMN_RANK_1);
8006 else if((bitMap1>>16) & 0xFFFF)
8008 return (RG_SCH_CMN_RANK_2);
8010 else if((bitMap0) & 0xFFFF)
8012 return (RG_SCH_CMN_RANK_3);
8014 else if((bitMap0>>16) & 0xFFFF)
8016 return (RG_SCH_CMN_RANK_4);
8020 return (RG_SCH_CMN_RANK_1);
8026 * @brief Compute the minimum Rank based on Codebook subset
8027 * restriction configuration for 2 Tx Ports and Tx Mode 4.
8031 * Function : rgSCHCmnComp2TxMode4
8033 * Depending on BitMap set at CBSR during Configuration
8034 * - return the least possible Rank
8037 * @param[in] uint32_t *pmiBitMap
8038 * @return RgSchCmnRank
8041 static RgSchCmnRank rgSCHCmnComp2TxMode4
8046 static RgSchCmnRank rgSCHCmnComp2TxMode4(pmiBitMap)
8047 uint32_t *pmiBitMap;
8051 bitMap0 = pmiBitMap[0];
8052 if((bitMap0>>26)& 0x0F)
8054 return (RG_SCH_CMN_RANK_1);
8056 else if((bitMap0>>30) & 3)
8058 return (RG_SCH_CMN_RANK_2);
8062 return (RG_SCH_CMN_RANK_1);
8067 * @brief Compute the minimum Rank based on Codebook subset
8068 * restriction configuration for 4 Tx Ports and Tx Mode 3.
8072 * Function : rgSCHCmnComp4TxMode3
8074 * Depending on BitMap set at CBSR during Configuration
8075 * - return the least possible Rank
8078 * @param[in] uint32_t *pmiBitMap
8079 * @return RgSchCmnRank
8082 static RgSchCmnRank rgSCHCmnComp4TxMode3
8087 static RgSchCmnRank rgSCHCmnComp4TxMode3(pmiBitMap)
8088 uint32_t *pmiBitMap;
8092 bitMap0 = pmiBitMap[0];
8093 if((bitMap0>>28)& 1)
8095 return (RG_SCH_CMN_RANK_1);
8097 else if((bitMap0>>29) &1)
8099 return (RG_SCH_CMN_RANK_2);
8101 else if((bitMap0>>30) &1)
8103 return (RG_SCH_CMN_RANK_3);
8105 else if((bitMap0>>31) &1)
8107 return (RG_SCH_CMN_RANK_4);
8111 return (RG_SCH_CMN_RANK_1);
8116 * @brief Compute the minimum Rank based on Codebook subset
8117 * restriction configuration for 2 Tx Ports and Tx Mode 3.
8121 * Function : rgSCHCmnComp2TxMode3
8123 * Depending on BitMap set at CBSR during Configuration
8124 * - return the least possible Rank
8127 * @param[in] uint32_t *pmiBitMap
8128 * @return RgSchCmnRank
8131 static RgSchCmnRank rgSCHCmnComp2TxMode3
8136 static RgSchCmnRank rgSCHCmnComp2TxMode3(pmiBitMap)
8137 uint32_t *pmiBitMap;
8141 bitMap0 = pmiBitMap[0];
8142 if((bitMap0>>30)& 1)
8144 return (RG_SCH_CMN_RANK_1);
8146 else if((bitMap0>>31) &1)
8148 return (RG_SCH_CMN_RANK_2);
8152 return (RG_SCH_CMN_RANK_1);
8157 * @brief Compute the minimum Rank based on Codebook subset
8158 * restriction configuration.
8162 * Function : rgSCHCmnComputeRank
8164 * Depending on Num Tx Ports and Transmission mode
8165 * - return the least possible Rank
8168 * @param[in] RgrTxMode txMode
8169 * @param[in] uint32_t *pmiBitMap
8170 * @param[in] uint8_t numTxPorts
8171 * @return RgSchCmnRank
8174 static RgSchCmnRank rgSCHCmnComputeRank
8177 uint32_t *pmiBitMap,
8181 static RgSchCmnRank rgSCHCmnComputeRank(txMode, pmiBitMap, numTxPorts)
8183 uint32_t *pmiBitMap;
8188 if (numTxPorts ==2 && txMode == RGR_UE_TM_3)
8190 return (rgSCHCmnComp2TxMode3(pmiBitMap));
8192 else if (numTxPorts ==4 && txMode == RGR_UE_TM_3)
8194 return (rgSCHCmnComp4TxMode3(pmiBitMap));
8196 else if (numTxPorts ==2 && txMode == RGR_UE_TM_4)
8198 return (rgSCHCmnComp2TxMode4(pmiBitMap));
8200 else if (numTxPorts ==4 && txMode == RGR_UE_TM_4)
8202 return (rgSCHCmnComp4TxMode4(pmiBitMap));
8206 return (RG_SCH_CMN_RANK_1);
8213 * @brief Harq Entity Deinitialization for CMN SCH.
8217 * Function : rgSCHCmnDlDeInitHqEnt
8219 * Harq Entity Deinitialization for CMN SCH
8221 * @param[in] RgSchCellCb *cell
8222 * @param[in] RgSchDlHqEnt *hqE
8225 /*KWORK_FIX:Changed function return type to void */
8227 Void rgSCHCmnDlDeInitHqEnt
8233 Void rgSCHCmnDlDeInitHqEnt(cell, hqE)
/* Tear down the scheduler-specific part of a DL HARQ entity: let the
 * DL scheduler de-init first, then free the per-process common-sched
 * control blocks that were allocated, and finally the LAA state. */
8238 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8239 RgSchDlHqProcCb *hqP;
8244 ret = cellSchd->apisDl->rgSCHDlUeHqEntDeInit(cell, hqE);
8245 /* Free only If the Harq proc are created*/
8250 for(cnt = 0; cnt < hqE->numHqPrcs; cnt++)
8252 hqP = &hqE->procs[cnt];
8253 if ((RG_SCH_CMN_GET_DL_HQP(hqP)))
8255 rgSCHUtlFreeSBuf(cell->instIdx,
8256 (Data**)(&(hqP->sch)), (sizeof(RgSchCmnDlHqProc)));
8260 rgSCHLaaDeInitDlHqProcCb (cell, hqE);
8267 * @brief Harq Entity initialization for CMN SCH.
8271 * Function : rgSCHCmnDlInitHqEnt
8273 * Harq Entity initialization for CMN SCH
8275 * @param[in] RgSchCellCb *cell
8276 * @param[in] RgSchUeCb *ue
8282 S16 rgSCHCmnDlInitHqEnt
8288 S16 rgSCHCmnDlInitHqEnt(cell, hqEnt)
8290 RgSchDlHqEnt *hqEnt;
/* Allocate a common-sched control block per DL HARQ process, then let
 * the (EMTC or legacy) DL scheduler initialize its own part of the
 * HARQ entity.  Returns ROK on success, RFAILED otherwise (error paths
 * elided in this extraction). */
8294 RgSchDlHqProcCb *hqP;
8297 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8299 for(cnt = 0; cnt < hqEnt->numHqPrcs; cnt++)
8301 hqP = &hqEnt->procs[cnt];
8302 if (rgSCHUtlAllocSBuf(cell->instIdx,
8303 (Data**)&(hqP->sch), (sizeof(RgSchCmnDlHqProc))) != ROK)
/* eMTC UEs are initialized by the EMTC-specific DL scheduler API set */
8309 if((cell->emtcEnable) &&(hqEnt->ue->isEmtcUe))
8311 if(ROK != cellSchd->apisEmtcDl->rgSCHDlUeHqEntInit(cell, hqEnt))
8320 if(ROK != cellSchd->apisDl->rgSCHDlUeHqEntInit(cell, hqEnt))
8327 } /* rgSCHCmnDlInitHqEnt */
8330 * @brief This function computes distribution of refresh period
8334 * Function: rgSCHCmnGetRefreshDist
8335 * Purpose: This function computes distribution of refresh period
8336 * This is required to align set of UEs refresh
8337 * around the different consecutive subframe.
8339 * Invoked by: rgSCHCmnGetRefreshPerDist
8341 * @param[in] RgSchCellCb *cell
8342 * @param[in] RgSchUeCb *ue
8347 static uint8_t rgSCHCmnGetRefreshDist
8353 static uint8_t rgSCHCmnGetRefreshDist(cell, ue)
/* Pick the first refresh offset slot that still has room
 * (< RGSCH_MAX_REFRESH_GRPSZ UEs), book-keep the count and remember the
 * offset on the UE; returns the chosen offset. */
8360 Inst inst = cell->instIdx;
8363 for(refOffst = 0; refOffst < RGSCH_MAX_REFRESH_OFFSET; refOffst++)
8365 if(cell->refreshUeCnt[refOffst] < RGSCH_MAX_REFRESH_GRPSZ)
8367 cell->refreshUeCnt[refOffst]++;
8368 ue->refreshOffset = refOffst;
8369 /* printf("UE[%d] refresh offset[%d]. Cell refresh ue count[%d].\n", ue->ueId, refOffst, cell->refreshUeCnt[refOffst]); */
/* All offsets full: overload the last offset rather than failing.
 * NOTE(review): refOffst-1 underflows if RGSCH_MAX_REFRESH_OFFSET were 0 -
 * presumably it is always > 0; verify the constant's definition. */
8374 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Allocation of refresh distribution failed\n"));
8375 /* We should not enter here normally, but incase of failure, allocating from last offset*/
8376 cell->refreshUeCnt[refOffst-1]++;
8377 ue->refreshOffset = refOffst-1;
8379 return (refOffst-1);
8382 * @brief This function computes initial Refresh Wait Period.
8386 * Function: rgSCHCmnGetRefreshPer
8387 * Purpose: This function computes initial Refresh Wait Period.
8388 * This is required to align multiple UEs refresh
8389 * around the same time.
8391 * Invoked by: rgSCHCmnGetRefreshPer
8393 * @param[in] RgSchCellCb *cell
8394 * @param[in] RgSchUeCb *ue
8395 * @param[in] uint32_t *waitPer
8400 static Void rgSCHCmnGetRefreshPer
8407 static Void rgSCHCmnGetRefreshPer(cell, ue, waitPer)
8413 uint32_t refreshPer;
8414 uint32_t crntSubFrm;
/* Compute, in timer-resolution units, how long to wait so this UE's
 * first QoS refresh lands on the next refresh-period boundary, then
 * stagger it by the per-UE offset chosen in rgSCHCmnGetRefreshDist. */
8417 refreshPer = RG_SCH_CMN_REFRESH_TIME * RG_SCH_CMN_REFRESH_TIMERES;
8418 crntSubFrm = cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot;
8419 /* Fix: syed align multiple UEs to refresh at same time */
8420 *waitPer = refreshPer - (crntSubFrm % refreshPer);
8421 *waitPer = RGSCH_CEIL(*waitPer, RG_SCH_CMN_REFRESH_TIMERES);
8422 *waitPer = *waitPer + rgSCHCmnGetRefreshDist(cell, ue);
8430 * @brief UE initialisation for scheduler.
8434 * Function : rgSCHCmnRgrSCellUeCfg
8436 * This functions intialises UE specific scheduler
8437 * information for SCELL
8438 * 0. Perform basic validations
8439 * 1. Allocate common sched UE cntrl blk
8440 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
8442 * 4. Perform DLFS cfg
8444 * @param[in] RgSchCellCb *cell
8445 * @param[in] RgSchUeCb *ue
8446 * @param[out] RgSchErrInfo *err
8452 S16 rgSCHCmnRgrSCellUeCfg
8456 RgrUeSecCellCfg *sCellInfoCfg,
8460 S16 rgSCHCmnRgrSCellUeCfg(sCell, ue, sCellInfoCfg, err)
8463 RgrUeSecCellCfg *sCellInfoCfg;
8470 RgSchCmnAllocRecord *allRcd;
8471 RgSchDlRbAlloc *allocInfo;
8472 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
8474 RgSchCmnUlUe *ueUlPcell;
8475 RgSchCmnUe *pCellUeSchCmn;
8476 RgSchCmnUe *ueSchCmn;
8478 RgSchCmnDlUe *pCellUeDl;
8480 Inst inst = ue->cell->instIdx;
/* Map the SCell id to the UE's cellInfo index; masking with
 * CM_LTE_MAX_CELLS-1 keeps the index in table range. */
8482 uint32_t idx = (uint8_t)((sCell->cellId - rgSchCb[sCell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
8484 pCellUeSchCmn = RG_SCH_CMN_GET_UE(ue,ue->cell);
8485 pCellUeDl = &pCellUeSchCmn->dl;
8487 /* 1. Allocate Common sched control block */
8488 if((rgSCHUtlAllocSBuf(sCell->instIdx,
8489 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
8491 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Memory allocation FAILED\n"));
8492 err->errCause = RGSCHERR_SCH_CFG;
8495 ueSchCmn = RG_SCH_CMN_GET_UE(ue,sCell);
8497 /*2. Perform UEs downlink configuration */
8498 ueDl = &ueSchCmn->dl;
/* Inherit MIMO settings from the PCell context for the new SCell */
8501 ueDl->mimoInfo = pCellUeDl->mimoInfo;
8503 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
8504 (ue->mimoInfo.txMode == RGR_UE_TM_6))
8506 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_NO_PMI);
8508 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
8510 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_RI_1);
8512 RGSCH_ARRAY_BOUND_CHECK(sCell->instIdx, rgUeCatTbl, pCellUeSchCmn->cmn.ueCat);
8513 ueDl->maxTbBits = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlTbBits;
/* Cat-6/7 with 4 layers gets the larger per-TB limit (maxDlBits[1]) */
8516 ri = RGSCH_MIN(ri, sCell->numTxAntPorts);
8517 if(((CM_LTE_UE_CAT_6 == pCellUeSchCmn->cmn.ueCat )
8518 ||(CM_LTE_UE_CAT_7 == pCellUeSchCmn->cmn.ueCat))
8521 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[1];
8525 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[0];
8528 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer size per HARQ process: TDD path divides by the cfg-index
 * dependent process count, FDD by RGSCH_NUM_DL_HQ_PROC. */
8530 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
8531 rgSchTddDlNumHarqProcTbl[sCell->ulDlCfgIdx]);
8533 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
8534 RGSCH_NUM_DL_HQ_PROC);
8537 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, ue->isEmtcUe);
8539 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, FALSE);
8543 /* ambrCfgd config moved to ueCb.dl, as it's not needed for per cell wise*/
8545 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, sCell);
8546 allocInfo->rnti = ue->ueId;
8548 /* Initializing the lastCfi value to current cfi value */
8549 ueDl->lastCfi = cellSchd->dl.currCfi;
8551 if ((cellSchd->apisDl->rgSCHRgrSCellDlUeCfg(sCell, ue, err)) != ROK)
8553 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Spec Sched DL UE CFG FAILED\n"));
8557 /* TODO: enhance for DLFS RB Allocation for SCELLs in future dev */
8559 /* DLFS UE Config */
8560 if (cellSchd->dl.isDlFreqSel)
8562 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeCfg(sCell, ue, sCellInfoCfg, err)) != ROK)
8564 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS UE config FAILED\n"));
8569 /* TODO: Do UL SCELL CFG during UL CA dev */
8571 ueUl = RG_SCH_CMN_GET_UL_UE(ue, sCell);
8573 /* TODO_ULCA: SRS for SCELL needs to be handled in the below function call */
8574 rgSCHCmnUpdUeUlCqiInfo(sCell, ue, ueUl, ueSchCmn, cellSchd,
8575 sCell->isCpUlExtend);
8577 ret = rgSCHUhmHqEntInit(sCell, ue);
8580 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL UHM HARQ Ent Init "
8581 "Failed for CRNTI:%d", ue->ueId);
/* UL HARQ entity on the SCell mirrors the PCell's retransmission limit;
 * every process starts on the free list. */
8585 ueUlPcell = RG_SCH_CMN_GET_UL_UE(ue, ue->cell);
8586 /* Initialize uplink HARQ related information for UE */
8587 ueUl->hqEnt.maxHqRetx = ueUlPcell->hqEnt.maxHqRetx;
8588 cmLListInit(&ueUl->hqEnt.free);
8589 cmLListInit(&ueUl->hqEnt.inUse);
8590 for(i=0; i < ueUl->hqEnt.numHqPrcs; i++)
8592 ueUl->hqEnt.hqProcCb[i].hqEnt = (void*)(&ueUl->hqEnt);
8593 ueUl->hqEnt.hqProcCb[i].procId = i;
8594 ueUl->hqEnt.hqProcCb[i].ulSfIdx = RGSCH_INVALID_INFO;
8595 ueUl->hqEnt.hqProcCb[i].alloc = NULLP;
8597 /* ccpu00139513- Initializing SPS flags*/
8598 ueUl->hqEnt.hqProcCb[i].isSpsActvnHqP = FALSE;
8599 ueUl->hqEnt.hqProcCb[i].isSpsOccnHqP = FALSE;
8601 cmLListAdd2Tail(&ueUl->hqEnt.free, &ueUl->hqEnt.hqProcCb[i].lnk);
8602 ueUl->hqEnt.hqProcCb[i].lnk.node = (PTR)&ueUl->hqEnt.hqProcCb[i];
8605 /* Allocate UL BSR allocation tracking List */
8606 cmLListInit(&ueUl->ulAllocLst);
8608 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8610 if((rgSCHUtlAllocSBuf(sCell->instIdx,
8611 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8613 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL Memory allocation FAILED"
8614 "for CRNTI:%d",ue->ueId);
8615 err->errCause = RGSCHERR_SCH_CFG;
8618 allRcd->allocTime = sCell->crntTime;
8619 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8620 allRcd->lnk.node = (PTR)allRcd;
8623 /* After initialising UL part, do power related init */
8624 ret = rgSCHPwrUeSCellCfg(sCell, ue, sCellInfoCfg);
8627 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Could not do "
8628 "power config for UE CRNTI:%d",ue->ueId);
/* Hand the UE to the (EMTC or legacy) UL scheduler for the SCell */
8633 if(TRUE == ue->isEmtcUe)
8635 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
8637 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
8638 "for CRNTI:%d",ue->ueId);
8645 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
8647 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
8648 "for CRNTI:%d",ue->ueId);
8653 ue->ul.isUlCaEnabled = TRUE;
8657 } /* rgSCHCmnRgrSCellUeCfg */
8661 * @brief UE initialisation for scheduler.
8665 * Function : rgSCHCmnRgrSCellUeDel
8667 * This functions Delete UE specific scheduler
8668 * information for SCELL
8670 * @param[in] RgSchCellCb *cell
8671 * @param[in] RgSchUeCb *ue
8677 S16 rgSCHCmnRgrSCellUeDel
8679 RgSchUeCellInfo *sCellInfo,
8683 S16 rgSCHCmnRgrSCellUeDel(sCellInfo, ue)
8684 RgSchUeCellInfo *sCellInfo;
/* Undo rgSCHCmnRgrSCellUeCfg: release DL scheduler state, UL allocation
 * records, (EMTC or legacy) UL scheduler state, optional DLFS state, and
 * finally the common-sched UE control block for this SCell. */
8688 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
8689 Inst inst = ue->cell->instIdx;
8692 cellSchd->apisDl->rgSCHRgrSCellDlUeDel(sCellInfo, ue);
8695 rgSCHCmnUlUeDelAllocs(sCellInfo->cell, ue);
8698 if(TRUE == ue->isEmtcUe)
8700 cellSchd->apisEmtcUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
8705 cellSchd->apisUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
8708 /* DLFS UE Config */
8709 if (cellSchd->dl.isDlFreqSel)
8711 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeDel(sCellInfo->cell, ue)) != ROK)
8713 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS Scell del FAILED\n"));
8718 rgSCHUtlFreeSBuf(sCellInfo->cell->instIdx,
8719 (Data**)(&(sCellInfo->sch)), (sizeof(RgSchCmnUe)));
8723 } /* rgSCHCmnRgrSCellUeDel */
8729 * @brief Handles 5gtf configuration for a UE
8733 * Function : rgSCHCmn5gtfUeCfg
8739 * @param[in] RgSchCellCb *cell
8740 * @param[in] RgSchUeCb *ue
8741 * @param[in] RgrUeCfg *cfg
8747 S16 rgSCHCmn5gtfUeCfg
8754 S16 rgSCHCmn5gtfUeCfg(cell, ue, cfg)
/* Copy the 5GTF UE parameters (group, beam, CC count, MCS, max PRB) from
 * the RGR config into the UE control block, seed the CQI/RI reporting
 * occasion, and register the UE's beam in its scheduling group. */
8761 RgSchUeGrp *ue5gtfGrp;
8762 ue->ue5gtfCb.grpId = cfg->ue5gtfCfg.grpId;
8763 ue->ue5gtfCb.BeamId = cfg->ue5gtfCfg.BeamId;
8764 ue->ue5gtfCb.numCC = cfg->ue5gtfCfg.numCC;
8765 ue->ue5gtfCb.mcs = cfg->ue5gtfCfg.mcs;
8766 ue->ue5gtfCb.maxPrb = cfg->ue5gtfCfg.maxPrb;
8768 ue->ue5gtfCb.cqiRiPer = 100;
8769 /* 5gtf TODO: CQIs to start from (10,0)*/
8770 ue->ue5gtfCb.nxtCqiRiOccn.sfn = 10;
8771 ue->ue5gtfCb.nxtCqiRiOccn.slot = 0;
8772 ue->ue5gtfCb.rank = 1;
/* NOTE(review): debug printf left in a config path - candidate for
 * removal or demotion to the RLOG facility. */
8774 printf("\nschd cfg at mac,%u,%u,%u,%u,%u\n",ue->ue5gtfCb.grpId,ue->ue5gtfCb.BeamId,ue->ue5gtfCb.numCC,
8775 ue->ue5gtfCb.mcs,ue->ue5gtfCb.maxPrb);
8777 ue5gtfGrp = &(cell->cell5gtfCb.ueGrp5gConf[ue->ue5gtfCb.BeamId]);
8779 /* TODO_5GTF: Currently handling 1 group only. Need to update when multi group
8780 scheduling comes into picture */
8781 if(ue5gtfGrp->beamBitMask & (1 << ue->ue5gtfCb.BeamId))
8783 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8784 "5GTF_ERROR Invalid beam id CRNTI:%d",cfg->crnti);
8787 ue5gtfGrp->beamBitMask |= (1 << ue->ue5gtfCb.BeamId);
8794 * @brief UE initialisation for scheduler.
8798 * Function : rgSCHCmnRgrUeCfg
8800 * This functions intialises UE specific scheduler
8802 * 0. Perform basic validations
8803 * 1. Allocate common sched UE cntrl blk
8804 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
8806 * 4. Perform DLFS cfg
8808 * @param[in] RgSchCellCb *cell
8809 * @param[in] RgSchUeCb *ue
8810 * @param[int] RgrUeCfg *ueCfg
8811 * @param[out] RgSchErrInfo *err
8817 S16 rgSCHCmnRgrUeCfg
8825 S16 rgSCHCmnRgrUeCfg(cell, ue, ueCfg, err)
8832 RgSchDlRbAlloc *allocInfo;
8834 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8835 RgSchCmnUe *ueSchCmn;
8839 RgSchCmnAllocRecord *allRcd;
/* Map this cell's id into the UE's cellInfo index table; the mask keeps
 * the index within CM_LTE_MAX_CELLS. */
8841 uint32_t idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
8842 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
8845 /* 1. Allocate Common sched control block */
8846 if((rgSCHUtlAllocSBuf(cell->instIdx,
8847 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
8849 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8850 "Memory allocation FAILED for CRNTI:%d",ueCfg->crnti);
8851 err->errCause = RGSCHERR_SCH_CFG;
8854 ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
8855 ue->dl.ueDlCqiCfg = ueCfg->ueDlCqiCfg;
8856 pCellInfo->acqiCb.aCqiCfg = ueCfg->ueDlCqiCfg.aprdCqiCfg;
/* RGR UE category is 1-based; internal table index is 0-based */
8857 if(ueCfg->ueCatEnum > 0 )
8859 /*KWORK_FIX removed NULL chk for ueSchCmn*/
8860 ueSchCmn->cmn.ueCat = ueCfg->ueCatEnum - 1;
8864 ueSchCmn->cmn.ueCat = 0; /* Assuming enum values correctly set */
8866 cmInitTimers(&ueSchCmn->cmn.tmr, 1);
8868 /*2. Perform UEs downlink configuration */
8869 ueDl = &ueSchCmn->dl;
8870 /* RACHO : store the rapId assigned for HandOver UE.
8871 * Append UE to handover list of cmnCell */
8872 if (ueCfg->dedPreambleId.pres == PRSNT_NODEF)
8874 rgSCHCmnDelDedPreamble(cell, ueCfg->dedPreambleId.val);
8875 ueDl->rachInfo.hoRapId = ueCfg->dedPreambleId.val;
8876 cmLListAdd2Tail(&cellSchd->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
8877 ueDl->rachInfo.hoLnk.node = (PTR)ue;
8880 rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd);
/* Force transmit diversity where the TM requires it until feedback arrives */
8882 if (ueCfg->txMode.pres == TRUE)
8884 if ((ueCfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8885 (ueCfg->txMode.txModeEnum == RGR_UE_TM_6))
8887 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8889 if (ueCfg->txMode.txModeEnum == RGR_UE_TM_3)
8891 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8894 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
8895 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
/* Cat-6/7 with 4 layers gets the larger per-TB limit (maxDlBits[1]) */
8898 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
8899 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
8900 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8903 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8907 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
8910 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
8912 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8913 rgSchTddDlNumHarqProcTbl[cell->ulDlCfgIdx]);
8915 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8916 RGSCH_NUM_DL_HQ_PROC);
8919 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
8921 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
8923 /* if none of the DL and UL AMBR are configured then fail the configuration
8925 if((ueCfg->ueQosCfg.dlAmbr == 0) && (ueCfg->ueQosCfg.ueBr == 0))
8927 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"UL Ambr and DL Ambr are"
8928 "configured as 0 for CRNTI:%d",ueCfg->crnti);
8929 err->errCause = RGSCHERR_SCH_CFG;
/* AMBR is tracked per refresh period: scale configured rate accordingly */
8933 ue->dl.ambrCfgd = (ueCfg->ueQosCfg.dlAmbr * RG_SCH_CMN_REFRESH_TIME)/100;
8935 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
8936 allocInfo->rnti = ue->ueId;
8938 /* Initializing the lastCfi value to current cfi value */
8939 ueDl->lastCfi = cellSchd->dl.currCfi;
8941 if(cell->emtcEnable && ue->isEmtcUe)
8943 if ((cellSchd->apisEmtcDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8945 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8946 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8954 if ((cellSchd->apisDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8956 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8957 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8964 /* 3. Initialize ul part */
8965 ueUl = &ueSchCmn->ul;
8967 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
8968 cell->isCpUlExtend);
8970 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8971 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8973 ue->ul.cfgdAmbr = (ueCfg->ueQosCfg.ueBr * RG_SCH_CMN_REFRESH_TIME)/100;
8974 ue->ul.effAmbr = ue->ul.cfgdAmbr;
8975 RGSCHCPYTIMEINFO(cell->crntTime, ue->ul.ulTransTime);
8977 /* Allocate UL BSR allocation tracking List */
8978 cmLListInit(&ueUl->ulAllocLst);
8980 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8982 if((rgSCHUtlAllocSBuf(cell->instIdx,
8983 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8985 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation FAILED"
8986 "for CRNTI:%d",ueCfg->crnti);
8987 err->errCause = RGSCHERR_SCH_CFG;
8990 allRcd->allocTime = cell->crntTime;
8991 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8992 allRcd->lnk.node = (PTR)allRcd;
8994 /* Allocate common sch cntrl blocks for LCGs */
8995 for (cnt=0; cnt<RGSCH_MAX_LCG_PER_UE; cnt++)
8997 ret = rgSCHUtlAllocSBuf(cell->instIdx,
8998 (Data**)&(ue->ul.lcgArr[cnt].sch), (sizeof(RgSchCmnLcg)));
9001 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9002 "SCH struct alloc failed for CRNTI:%d",ueCfg->crnti);
9003 err->errCause = RGSCHERR_SCH_CFG;
9007 /* After initialising UL part, do power related init */
9008 ret = rgSCHPwrUeCfg(cell, ue, ueCfg);
9011 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
9012 "power config for UE CRNTI:%d",ueCfg->crnti);
9016 ret = rgSCHCmnSpsUeCfg(cell, ue, ueCfg, err);
9019 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
9020 "SPS config for CRNTI:%d",ueCfg->crnti);
9023 #endif /* LTEMAC_SPS */
/* Hand the UE to the (EMTC or legacy) UL scheduler */
9026 if(TRUE == ue->isEmtcUe)
9028 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
9030 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
9031 "for CRNTI:%d",ueCfg->crnti);
9038 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
9040 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
9041 "for CRNTI:%d",ueCfg->crnti);
9046 /* DLFS UE Config */
9047 if (cellSchd->dl.isDlFreqSel)
9049 if ((cellSchd->apisDlfs->rgSCHDlfsUeCfg(cell, ue, ueCfg, err)) != ROK)
9051 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "DLFS UE config FAILED"
9052 "for CRNTI:%d",ueCfg->crnti);
9057 /* Fix: syed align multiple UEs to refresh at same time */
9058 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
9059 /* Start UE Qos Refresh Timer */
9060 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
9062 rgSCHCmn5gtfUeCfg(cell, ue, ueCfg);
9066 } /* rgSCHCmnRgrUeCfg */
9069 * @brief UE TX mode reconfiguration handler.
9073 * Function : rgSCHCmnDlHdlTxModeRecfg
9075 * This functions updates UE specific scheduler
9076 * information upon UE reconfiguration.
9078 * @param[in] RgSchUeCb *ue
9079 * @param[in] RgrUeRecfg *ueRecfg
9084 static Void rgSCHCmnDlHdlTxModeRecfg
9088 RgrUeRecfg *ueRecfg,
9092 static Void rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, numTxPorts)
9095 RgrUeRecfg *ueRecfg;
/* NOTE(review): two definition variants are visible here - presumably one
 * per TFU_UPGRADE setting (with/without numTxPorts); the #if lines are
 * missing from this extraction. */
9100 static Void rgSCHCmnDlHdlTxModeRecfg
9107 static Void rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg)
9110 RgrUeRecfg *ueRecfg;
9114 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
9116 if (ueRecfg->txMode.pres != PRSNT_NODEF)
9120 /* ccpu00140894- Starting Timer for TxMode Transition Completion*/
9121 ue->txModeTransCmplt =FALSE;
9122 rgSCHTmrStartTmr (ue->cell, ue, RG_SCH_TMR_TXMODE_TRNSTN, RG_SCH_TXMODE_TRANS_TIMER);
/* Transition complete: drop the TXMODE-RECFG forced-TD mask, reset RI
 * to 1 and re-apply the TM-specific forced-TD bits. */
9123 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_CMPLT)
9125 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell,
9126 RG_SCH_CMN_TD_TXMODE_RECFG);
9127 /* MS_WORKAROUND for ccpu00123186 MIMO Fix Start: need to set FORCE TD bitmap based on TX mode */
9128 ueDl->mimoInfo.ri = 1;
9129 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
9130 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
9132 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
9134 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
9136 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
9138 /* MIMO Fix End: need to set FORCE TD bitmap based on TX mode */
/* Transition start: restart forced-TD masking and recompute the rank
 * from the (optional) codebook-subset-restriction bitmap. */
9141 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_START)
9143 /* start afresh forceTD masking */
9144 RG_SCH_CMN_INIT_FORCE_TD(ue, cell, 0);
9145 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_TXMODE_RECFG);
9146 /* Intialize MIMO related parameters of UE */
9149 if(ueRecfg->txMode.pres)
9151 if((ueRecfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
9152 (ueRecfg->txMode.txModeEnum ==RGR_UE_TM_4))
9154 if(ueRecfg->ueCodeBookRstRecfg.pres)
9157 rgSCHCmnComputeRank(ueRecfg->txMode.txModeEnum,
9158 ueRecfg->ueCodeBookRstRecfg.pmiBitMap, numTxPorts);
9162 ueDl->mimoInfo.ri = 1;
9167 ueDl->mimoInfo.ri = 1;
9172 ueDl->mimoInfo.ri = 1;
9175 ueDl->mimoInfo.ri = 1;
9176 #endif /* TFU_UPGRADE */
9177 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
9178 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
9180 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
9182 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
9184 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
9189 /***********************************************************
9191 * Func : rgSCHCmnUpdUeMimoInfo
9193 * Desc : Updates UL and DL Ue Information
9201 **********************************************************/
9203 static Void rgSCHCmnUpdUeMimoInfo
9208 RgSchCmnCell *cellSchd
9211 static Void rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd)
9215 RgSchCmnCell *cellSchd;
/* Derive the UE's initial DL rank: for TM3/TM4 with a CBSR bitmap the
 * rank comes from rgSCHCmnComputeRank, otherwise default to 1 (the
 * several ri=1 lines below are the else-branches of elided #if/#else
 * blocks in this extraction).  CW CQIs start at the CCCH CQI. */
9219 if(ueCfg->txMode.pres)
9221 if((ueCfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
9222 (ueCfg->txMode.txModeEnum ==RGR_UE_TM_4))
9224 if(ueCfg->ueCodeBookRstCfg.pres)
9227 rgSCHCmnComputeRank(ueCfg->txMode.txModeEnum,
9228 ueCfg->ueCodeBookRstCfg.pmiBitMap, cell->numTxAntPorts);
9232 ueDl->mimoInfo.ri = 1;
9237 ueDl->mimoInfo.ri = 1;
9242 ueDl->mimoInfo.ri = 1;
9246 ueDl->mimoInfo.ri = 1;
9247 #endif /*TFU_UPGRADE */
9248 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
9249 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
9253 /***********************************************************
9255 * Func : rgSCHCmnUpdUeUlCqiInfo
9257 * Desc : Updates UL and DL Ue Information
9265 **********************************************************/
9267 static Void rgSCHCmnUpdUeUlCqiInfo
9272 RgSchCmnUe *ueSchCmn,
9273 RgSchCmnCell *cellSchd,
9277 static Void rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd, isEcp)
9281 RgSchCmnUe *ueSchCmn;
9282 RgSchCmnCell *cellSchd;
/* Seed the UE's UL CQI from the cell default: with SRS configured the
 * CQI is tracked per selected antenna (when UL Tx antenna selection is
 * present), otherwise only index 0 is used.  The link-adaptation
 * iTbs baseline is derived from the CQI-to-TBS table (x100 fixed point). */
9289 if(ue->srsCb.srsCfg.type == RGR_SCH_SRS_SETUP)
9291 if(ue->ul.ulTxAntSel.pres)
9293 ueUl->crntUlCqi[ue->srsCb.selectedAnt] = cellSchd->ul.dfltUlCqi;
9294 ueUl->validUlCqi = ueUl->crntUlCqi[ue->srsCb.selectedAnt];
9298 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
9299 ueUl->validUlCqi = ueUl->crntUlCqi[0];
9301 ue->validTxAnt = ue->srsCb.selectedAnt;
9305 ueUl->validUlCqi = cellSchd->ul.dfltUlCqi;
9309 ueUl->ulLaCb.cqiBasediTbs = rgSchCmnUlCqiToTbsTbl[isEcp]
9310 [ueUl->validUlCqi] * 100;
9311 ueUl->ulLaCb.deltaiTbs = 0;
9315 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
9316 #endif /*TFU_UPGRADE */
/* Cap the UL CQI for categories without UL 64QAM support */
9317 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
9318 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
9320 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
9324 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
9329 /***********************************************************
9331 * Func : rgSCHCmnUpdUeCatCfg
9333 * Desc : Updates UL and DL Ue Information
9341 **********************************************************/
9343 static Void rgSCHCmnUpdUeCatCfg
9349 static Void rgSCHCmnUpdUeCatCfg(ue, cell)
/* Re-derive the category-dependent DL/UL limits after a UE-category
 * reconfiguration: max TB bits/size, soft-buffer size per HARQ process,
 * UL CQI cap (no 64QAM support => 16QAM cap) and max UL bytes per TTI. */
9354 RgSchDlHqEnt *hqE = NULLP;
9355 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
9356 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
9357 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
9358 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
9361 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
9363 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
/* Cat-6/7 with 4 layers gets the larger per-TB limit (maxDlBits[1]) */
9366 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
9367 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
9368 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
9369 && (RG_SCH_MAX_TX_LYRS_4 == ri))
9371 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
9375 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
9378 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
9380 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
9382 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
9386 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
9388 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
9389 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
9394 * @brief UE reconfiguration for scheduler.
9398 * Function : rgSChCmnRgrUeRecfg
9400 * This functions updates UE specific scheduler
9401 * information upon UE reconfiguration.
9403 * @param[in] RgSchCellCb *cell
9404 * @param[in] RgSchUeCb *ue
9405 * @param[int] RgrUeRecfg *ueRecfg
9406 * @param[out] RgSchErrInfo *err
9412 S16 rgSCHCmnRgrUeRecfg
9416 RgrUeRecfg *ueRecfg,
9420 S16 rgSCHCmnRgrUeRecfg(cell, ue, ueRecfg, err)
9423 RgrUeRecfg *ueRecfg;
/* Apply each requested reconfiguration type (bit-flags in ueRecfgTypes):
 * TX mode, CSG membership, UE category, aperiodic/periodic CQI, UL power,
 * QoS (AMBR + refresh realignment), then propagate to the UL/DL (or EMTC)
 * schedulers, DLFS and SPS modules. */
9427 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
9430 /* Basic validations */
9431 if (ueRecfg->ueRecfgTypes & RGR_UE_TXMODE_RECFG)
9434 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, cell->numTxAntPorts);
9436 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg);
9437 #endif /* TFU_UPGRADE */
9439 if(ueRecfg->ueRecfgTypes & RGR_UE_CSG_PARAM_RECFG)
9441 ue->csgMmbrSta = ueRecfg->csgMmbrSta;
9443 /* Changes for UE Category reconfiguration feature */
9444 if(ueRecfg->ueRecfgTypes & RGR_UE_UECAT_RECFG)
9446 rgSCHCmnUpdUeCatCfg(ue, cell);
9448 if (ueRecfg->ueRecfgTypes & RGR_UE_APRD_DLCQI_RECFG)
9450 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
9451 pCellInfo->acqiCb.aCqiCfg = ueRecfg->aprdDlCqiRecfg;
/* Only periodic CQI modes 1-0 and 2-0 are supported */
9454 if (ueRecfg->ueRecfgTypes & RGR_UE_PRD_DLCQI_RECFG)
9456 if ((ueRecfg->prdDlCqiRecfg.pres == TRUE)
9457 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD10)
9458 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD20))
9460 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unsupported periodic CQI "
9461 "reporting mode %d for old CRNIT:%d",
9462 (int)ueRecfg->prdDlCqiRecfg.prdModeEnum,ueRecfg->oldCrnti);
9463 err->errCause = RGSCHERR_SCH_CFG;
9466 ue->dl.ueDlCqiCfg.prdCqiCfg = ueRecfg->prdDlCqiRecfg;
9470 if (ueRecfg->ueRecfgTypes & RGR_UE_ULPWR_RECFG)
9472 if (rgSCHPwrUeRecfg(cell, ue, ueRecfg) != ROK)
9474 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9475 "Power Reconfiguration Failed for OLD CRNTI:%d",ueRecfg->oldCrnti);
9480 if (ueRecfg->ueRecfgTypes & RGR_UE_QOS_RECFG)
9482 /* Uplink Sched related Initialization */
9483 if ((ueRecfg->ueQosRecfg.dlAmbr == 0) && (ueRecfg->ueQosRecfg.ueBr == 0))
9485 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Ul Ambr and DL Ambr "
9486 "configured as 0 for OLD CRNTI:%d",ueRecfg->oldCrnti);
9487 err->errCause = RGSCHERR_SCH_CFG;
9490 ue->ul.cfgdAmbr = (ueRecfg->ueQosRecfg.ueBr * \
9491 RG_SCH_CMN_REFRESH_TIME)/100;
9492 /* Downlink Sched related Initialization */
9493 ue->dl.ambrCfgd = (ueRecfg->ueQosRecfg.dlAmbr * \
9494 RG_SCH_CMN_REFRESH_TIME)/100;
9495 /* Fix: syed Update the effAmbr and effUeBR fields w.r.t the
9496 * new QOS configuration */
9497 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
9498 /* Fix: syed align multiple UEs to refresh at same time */
9499 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
9500 rgSCHCmnApplyUeRefresh(cell, ue);
9501 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
9504 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
9506 if ((cellSchCmn->apisEmtcUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9508 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9509 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9512 if ((cellSchCmn->apisEmtcDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9514 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9515 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9522 if ((cellSchCmn->apisUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9524 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9525 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9528 if ((cellSchCmn->apisDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9530 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9531 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9535 /* DLFS UE Config */
9536 if (cellSchCmn->dl.isDlFreqSel)
9538 if ((cellSchCmn->apisDlfs->rgSCHDlfsUeRecfg(cell, ue, \
9539 ueRecfg, err)) != ROK)
9541 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9542 "DLFS UE re-config FAILED for CRNTI:%d",ue->ueId);
9548 /* Invoke re-configuration on SPS module */
9549 if (rgSCHCmnSpsUeRecfg(cell, ue, ueRecfg, err) != ROK)
9551 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9552 "DL SPS ReCFG FAILED for UE CRNTI:%d", ue->ueId);
9558 } /* rgSCHCmnRgrUeRecfg*/
9560 /***********************************************************
9562 * Func : rgSCHCmnUlUeDelAllocs
9564 * Desc : Deletion of all UE allocations.
9572 **********************************************************/
/* Releases every UL HARQ process allocation owned by the UE and,
 * under SPS, clears any dangling reference to the freed allocation
 * so it is not read after being returned to the subframe pool. */
9574 static Void rgSCHCmnUlUeDelAllocs
9580 static Void rgSCHCmnUlUeDelAllocs(cell, ue)
9585 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
9586 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
9589 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Walk every UL HARQ process of this UE */
9592 for (i = 0; i < ueUl->hqEnt.numHqPrcs; ++i)
9594 RgSchUlHqProcCb *proc = rgSCHUhmGetUlHqProc(cell, ue, i);
9597 /* proc can't be NULL here */
9605 /* Added Insure Fixes Of reading Dangling memory.NULLed crntAlloc */
9607 if(proc->alloc == ulSpsUe->ulSpsSchdInfo.crntAlloc)
9609 ulSpsUe->ulSpsSchdInfo.crntAlloc = NULLP;
9610 ulSpsUe->ulSpsSchdInfo.crntAllocSf = NULLP;
/* Return the allocation to the UL subframe it was scheduled in */
9614 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
9615 proc->alloc,ue->isEmtcUe);
9617 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
9620 /* PHY probably needn't be intimated since
9621 * whatever intimation it needs happens at the last minute
9624 /* Fix: syed Adaptive Msg3 Retx crash. Remove the harqProc
9625 * from adaptive retx List. */
9626 if (proc->reTxLnk.node)
9629 //TODO_SID: Need to take care
9630 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
9631 proc->reTxLnk.node = (PTR)NULLP;
9639 /***********************************************************
9641 * Func : rgSCHCmnDelUeFrmRefreshQ
9643 * Desc : Removes a UE from the refresh queue, so that the UE is
9644 * no longer periodically triggered to refresh its GBR and
9653 **********************************************************/
/* Removes the UE from the periodic GBR/AMBR refresh queue by
 * deregistering its refresh timer from the scheduler timer queue. */
9655 static Void rgSCHCmnDelUeFrmRefreshQ
9661 static Void rgSCHCmnDelUeFrmRefreshQ(cell, ue)
9666 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
9668 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
9671 #ifdef RGL_SPECIFIC_CHANGES
/* Keep the per-offset refresh UE counters in sync with the removal */
9672 if(ue->refreshOffset < RGSCH_MAX_REFRESH_GRPSZ)
9674 if(cell->refreshUeCnt[ue->refreshOffset])
9676 cell->refreshUeCnt[ue->refreshOffset]--;
/* Build the timer-deregistration argument for the UE refresh event */
9682 memset(&arg, 0, sizeof(arg));
9683 arg.tqCp = &sched->tmrTqCp;
9684 arg.tq = sched->tmrTq;
9685 arg.timers = &ueSchd->tmr;
9689 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
9695 /***********************************************************
9697 * Func : rgSCHCmnUeCcchSduDel
9699 * Desc : Clear CCCH SDU scheduling context.
9707 **********************************************************/
/* Clears any pending CCCH SDU scheduling context for the UE: unlinks
 * it from the CCCH SDU tx/retx lists and releases the HARQ process,
 * stale PDCCH and subframe resources still tied to the CCCH SDU. */
9709 static Void rgSCHCmnUeCcchSduDel
9715 static Void rgSCHCmnUeCcchSduDel(cell, ueCb)
9720 RgSchDlHqEnt *hqE = NULLP;
9721 RgSchDlHqProcCb *ccchSduHqP = NULLP;
9722 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
9725 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
9730 ccchSduHqP = hqE->ccchSduProc;
9731 if(ueCb->ccchSduLnk.node != NULLP)
9733 /* Remove the ccchSduProc if it is in the Tx list */
9734 cmLListDelFrm(&(cell->ccchSduUeLst), &(ueCb->ccchSduLnk));
9735 ueCb->ccchSduLnk.node = NULLP;
9737 else if(ccchSduHqP != NULLP)
9739 /* Fix for crash due to stale pdcch. Release ccch pdcch*/
9740 if(ccchSduHqP->pdcch)
9742 cmLListDelFrm(&ccchSduHqP->subFrm->pdcchInfo.pdcchs,
9743 &ccchSduHqP->pdcch->lnk);
9744 cmLListAdd2Tail(&cell->pdcchLst, &ccchSduHqP->pdcch->lnk);
9745 ccchSduHqP->pdcch = NULLP;
9747 if(ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node != NULLP)
9749 /* Remove the ccchSduProc if it is in the retx list */
9750 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
9751 &ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk);
9752 /* ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node = NULLP; */
9753 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
/* Otherwise the proc may still be queued for tx in a DL subframe */
9755 else if ((ccchSduHqP->subFrm != NULLP) &&
9756 (ccchSduHqP->hqPSfLnk.node != NULLP))
9758 rgSCHUtlDlHqPTbRmvFrmTx(ccchSduHqP->subFrm,
9759 ccchSduHqP, 0, FALSE);
9760 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
9770 * @brief UE deletion for scheduler.
9774 * Function : rgSCHCmnUeDel
9776 * This functions deletes all scheduler information
9777 * pertaining to an UE.
9779 * @param[in] RgSchCellCb *cell
9780 * @param[in] RgSchUeCb *ue
/* Deletes all common-scheduler state for a UE: CCCH SDU context,
 * refresh-queue entry, UL allocations, RACH info, spec-sched UL/DL
 * contexts, SCells, power/SPS/DLFS state, outstanding allocation
 * records, per-LCG memory and the per-cell scheduler control block. */
9790 Void rgSCHCmnUeDel(cell, ue)
9795 RgSchDlHqEnt *hqE = NULLP;
9796 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
9798 RgSchCmnAllocRecord *allRcd;
9800 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
9803 if (RG_SCH_CMN_GET_UE(ue,cell) == NULLP)
9805 /* Common scheduler config has not happened yet */
9808 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
9811 /* UE Free can be triggered before MSG4 done when dlHqE is not updated */
9815 rgSCHEmtcCmnUeCcchSduDel(cell, ue);
9820 rgSCHCmnUeCcchSduDel(cell, ue);
9823 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
9825 rgSCHCmnUlUeDelAllocs(cell, ue);
9827 rgSCHCmnDelRachInfo(cell, ue);
/* Free the UL spec-sched context (EMTC UEs use the EMTC APIs) */
9830 if(TRUE == ue->isEmtcUe)
9832 cellSchCmn->apisEmtcUl->rgSCHFreeUlUe(cell, ue);
9837 cellSchCmn->apisUl->rgSCHFreeUlUe(cell, ue);
/* Tear down configured secondary cells (index 0 is the PCell) */
9842 for(idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
9844 if(ue->cellInfo[idx] != NULLP)
9846 rgSCHSCellDelUeSCell(cell,ue,idx);
9853 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
9855 cellSchCmn->apisEmtcDl->rgSCHFreeDlUe(cell, ue);
9860 cellSchCmn->apisDl->rgSCHFreeDlUe(cell, ue);
9862 rgSCHPwrUeDel(cell, ue);
9865 rgSCHCmnSpsUeDel(cell, ue);
9866 #endif /* LTEMAC_SPS*/
9869 rgSchCmnDlSfHqDel(ue, cell);
9871 /* DLFS UE delete */
9872 if (cellSchCmn->dl.isDlFreqSel)
9874 cellSchCmn->apisDlfs->rgSCHDlfsUeDel(cell, ue);
/* Drain and free every outstanding UL allocation record */
9876 node = ueUl->ulAllocLst.first;
9878 /* ccpu00117052 - MOD - Passing double pointer in all the places of
9879 rgSCHUtlFreeSBuf function call for proper NULLP assignment*/
9882 allRcd = (RgSchCmnAllocRecord *)node->node;
9884 cmLListDelFrm(&ueUl->ulAllocLst, &allRcd->lnk);
9885 rgSCHUtlFreeSBuf(cell->instIdx,
9886 (Data**)(&allRcd), (sizeof(RgSchCmnAllocRecord)));
/* Release per-LCG scheduler memory */
9889 for(cnt = 0; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
9891 if (ue->ul.lcgArr[cnt].sch != NULLP)
9893 rgSCHUtlFreeSBuf(cell->instIdx,
9894 (Data**)(&(ue->ul.lcgArr[cnt].sch)), (sizeof(RgSchCmnLcg)));
9898 /* Fix : syed Moved hqEnt deinit to rgSCHCmnDlDeInitHqEnt */
9899 idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId) & (CM_LTE_MAX_CELLS - 1));
9900 rgSCHUtlFreeSBuf(cell->instIdx,
9901 (Data**)(&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch))), (sizeof(RgSchCmnUe)));
9903 } /* rgSCHCmnUeDel */
9907 * @brief This function handles the common code rate configurations
9908 * done as part of RgrCellCfg/RgrCellRecfg.
9912 * Function: rgSCHCmnDlCnsdrCmnRt
9913 * Purpose: This function handles the common code rate configurations
9914 * done as part of RgrCellCfg/RgrCellRecfg.
9916 * Invoked by: Scheduler
9918 * @param[in] RgSchCellCb *cell
9919 * @param[in] RgrDlCmnCodeRateCfg *dlCmnCodeRate
/* Derives cell-level common-channel parameters from the configured DL
 * common code rates: bits-per-RB used to size SI/P/RA-RNTI grants,
 * the ITbs values for 2-RB and 3-RB common allocations, the PDCCH
 * aggregation level for common DCIs, and the CCCH CQI. */
9924 static S16 rgSCHCmnDlCnsdrCmnRt
9927 RgrDlCmnCodeRateCfg *dlCmnCodeRate
9930 static S16 rgSCHCmnDlCnsdrCmnRt(cell, dlCmnCodeRate)
9932 RgrDlCmnCodeRateCfg *dlCmnCodeRate;
9935 RgSchCmnCell *cellDl = RG_SCH_CMN_GET_CELL(cell);
9937 uint32_t bitsPer2Rb;
9938 uint32_t bitsPer3Rb;
9943 /* code rate is bits per 1024 phy bits, since modl'n scheme is 2. it is
9944 * bits per 1024/2 REs */
9945 if (dlCmnCodeRate->bcchPchRaCodeRate != 0)
9947 bitsPerRb = ((dlCmnCodeRate->bcchPchRaCodeRate * 2) *
9948 cellDl->dl.noResPerRb[3])/1024;
9952 bitsPerRb = ((RG_SCH_CMN_DEF_BCCHPCCH_CODERATE * 2) *
9953 cellDl->dl.noResPerRb[3])/1024;
9955 /* Store bitsPerRb in cellDl->dl to use later to determine
9956 * Number of RBs for UEs with SI-RNTI, P-RNTI and RA-RNTI */
9957 cellDl->dl.bitsPerRb = bitsPerRb;
9958 /* ccpu00115595 end*/
9959 /* calculate the ITbs for 2 RBs. Initialize ITbs to MAX value */
9962 bitsPer2Rb = bitsPerRb * rbNum;
9963 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer2Rb))
9966 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs2Rbs = 0) :
9967 (cellDl->dl.cmnChITbs.iTbs2Rbs = i-1);
9969 /* calculate the ITbs for 3 RBs. Initialize ITbs to MAX value */
9972 bitsPer3Rb = bitsPerRb * rbNum;
9973 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer3Rb))
9976 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs3Rbs = 0) :
9977 (cellDl->dl.cmnChITbs.iTbs3Rbs = i-1);
/* Count the DCI payload bits for a common-channel PDCCH grant */
9980 pdcchBits = 1 + /* Flag for format0/format1a differentiation */
9981 1 + /* Localized/distributed VRB assignment flag */
9984 3 + /* Harq process Id */
9986 4 + /* Harq process Id */
9987 2 + /* UL Index or DAI */
9989 1 + /* New Data Indicator */
9992 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
9993 (cell->bwCfg.dlTotalBw + 1))/2);
9994 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
9995 Since VRB is local */
9996 /* For TDD consider DAI */
9998 /* Convert the pdcchBits to actual pdcchBits required for transmission */
9999 if (dlCmnCodeRate->pdcchCodeRate != 0)
10001 pdcchBits = (pdcchBits * 1024)/dlCmnCodeRate->pdcchCodeRate;
10002 if (pdcchBits <= 288) /* 288 : Num of pdcch bits for aggrLvl=4 */
10004 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
10006 else /* 576 : Num of pdcch bits for aggrLvl=8 */
10008 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL8;
10013 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
10015 if (dlCmnCodeRate->ccchCqi == 0)
10021 cellDl->dl.ccchCqi = dlCmnCodeRate->ccchCqi;
10028 * @brief This function handles the configuration of cell for the first
10029 * time by the scheduler.
10033 * Function: rgSCHCmnDlRgrCellCfg
10034 * Purpose: Configuration received is stored into the data structures
10035 * Also, update the scheduler with the number of frames of
10036 * RACH preamble transmission.
10038 * Invoked by: BO and Scheduler
10040 * @param[in] RgSchCellCb* cell
10041 * @param[in] RgrCellCfg* cfg
/* TDD variant: stores the DL cell configuration into the common
 * scheduler — RACH response frames, Msg4 HARQ delay, UE-per-SF
 * limits, special-subframe (DwPTS/UpPTS) handling, per-subframe CCE
 * counts, CQI->TBS/efficiency tables, per-CFI REs per RB, common
 * code rates, DLFS and power configuration. */
10046 static S16 rgSCHCmnDlRgrCellCfg
10053 static S16 rgSCHCmnDlRgrCellCfg(cell, cfg, err)
10059 RgSchCmnCell *cellSch;
10062 uint8_t numPdcchSym;
10063 uint8_t noSymPerSlot;
10064 uint8_t maxDlSubfrms = cell->numDlSubfrms;
10065 uint8_t splSubfrmIdx = cfg->spclSfCfgIdx;
10066 uint8_t swPtCnt = 0;
10068 RgSchTddSubfrmInfo subfrmInfo = rgSchTddMaxUlSubfrmTbl[cell->ulDlCfgIdx];
10071 uint8_t antPortIdx;
10081 cellSch = RG_SCH_CMN_GET_CELL(cell);
10082 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->\
10083 rachCfg.preambleFormat];
10084 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
10085 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
10087 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
10088 3 TTI (MAX L1+L2 processing delay at the UE) */
10089 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
10090 rgSchCmnHarqRtt[cell->ulDlCfgIdx] + 3;
10091 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10092 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10093 if (cfg->maxUePerDlSf == 0)
10095 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10097 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
10103 if (cell->bwCfg.dlTotalBw <= 10)
/* Decide whether DL data is allowed in the special subframe based
 * on the special-subframe config index and CP length (36.213 7.1.7) */
10113 /* DwPTS Scheduling Changes Start */
10114 cellSch->dl.splSfCfg = splSubfrmIdx;
10116 if (cfg->isCpDlExtend == TRUE)
10118 if((0 == splSubfrmIdx) || (4 == splSubfrmIdx) ||
10119 (7 == splSubfrmIdx) || (8 == splSubfrmIdx)
10122 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
10126 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
10131 /* Refer to 36.213 Section 7.1.7 */
10132 if((0 == splSubfrmIdx) || (5 == splSubfrmIdx))
10134 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
10138 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
10141 /* DwPTS Scheduling Changes End */
10143 splSfCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
10144 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
/* Classify each DL subframe (normal / SF0 / special with or without
 * data) and precompute its CCE count for the applicable CFI */
10146 for (sfCount = 0; sfCount < maxDlSubfrms; sfCount++)
10148 sf = cell->subFrms[sfCount];
10149 /* Sfcount matches the first special subframe occurs at Index 0
10150 * or subsequent special subframes */
10151 if(subfrmInfo.switchPoints == 1)
10153 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
10154 RG_SCH_CMN_10_MS_PRD, &subfrmInfo);
10158 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
10159 RG_SCH_CMN_5_MS_PRD, &subfrmInfo);
10161 if(isSplfrm == TRUE)
10164 /* DwPTS Scheduling Changes Start */
10165 if (cell->splSubfrmCfg.isDlDataAllowed == TRUE)
10167 sf->sfType = RG_SCH_SPL_SF_DATA;
10171 sf->sfType = RG_SCH_SPL_SF_NO_DATA;
10173 /* DwPTS Scheduling Changes End */
10177 /* DwPTS Scheduling Changes Start */
10178 if (sf->sfNum != 0)
10180 sf->sfType = RG_SCH_DL_SF;
10184 sf->sfType = RG_SCH_DL_SF_0;
10186 /* DwPTS Scheduling Changes End */
10189 /* Calculate the number of CCEs per subframe in the cell */
10190 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][sf->sfNum];
10191 if(cell->dynCfiCb.isDynCfiEnb == TRUE)
10193 /* In case if Dynamic CFI feature is enabled, default CFI
10194 * value 1 is used */
10195 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][1];
10199 if (sf->sfType == RG_SCH_SPL_SF_DATA)
10201 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
10205 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi)];
10210 /* Intialize the RACH response scheduling related infromation */
10211 if(rgSCHCmnDlRachInfoInit(cell) != ROK)
10216 /* Allocate PRACH preamble list */
10217 rgSCHCmnDlCreateRachPrmLst(cell);
10219 /* Initialize PHICH offset information */
10220 rgSCHCmnDlPhichOffsetInit(cell);
10222 /* Update the size of HARQ ACK/NACK feedback table */
10223 /* The array size is increased by 2 to have enough free indices, where other
10224 * indices are busy waiting for HARQ feedback */
10225 cell->ackNackFdbkArrSize = rgSchTddANFdbkMapTbl[cell->ulDlCfgIdx] + 2;
10227 /* Initialize expected HARQ ACK/NACK feedback time */
10228 rgSCHCmnDlANFdbkInit(cell);
10230 /* Initialize UL association set index */
10231 if(cell->ulDlCfgIdx != 0)
10233 rgSCHCmnDlKdashUlAscInit(cell);
/* DwPTS/UpPTS durations depend on DL/UL CP length and the
 * special-subframe configuration index */
10236 if (cfg->isCpDlExtend == TRUE)
10238 cp = RG_SCH_CMN_EXT_CP;
10240 cell->splSubfrmCfg.dwPts =
10241 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlDwPts;
10243 if ( cell->splSubfrmCfg.dwPts == 0 )
10245 cell->isDwPtsCnted = FALSE;
10249 cell->isDwPtsCnted = TRUE;
10252 if(cfg->isCpUlExtend == TRUE)
10254 cell->splSubfrmCfg.upPts =
10255 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlExtUpPts;
10259 cell->splSubfrmCfg.upPts =
10260 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlNorUpPts;
10265 cp = RG_SCH_CMN_NOR_CP;
10267 cell->splSubfrmCfg.dwPts =
10268 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlDwPts;
10269 cell->isDwPtsCnted = TRUE;
10271 if(cfg->isCpUlExtend == TRUE)
10273 cell->splSubfrmCfg.upPts =
10274 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlExtUpPts;
10278 cell->splSubfrmCfg.upPts =
10279 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlNorUpPts;
10283 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
10284 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++,cfiIdx++)
10286 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
10287 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
10288 [cell->numTxAntPorts]][cfiIdx];
10289 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
10290 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
10291 [cell->numTxAntPorts]][cfiIdx];
10294 /* Initializing the values of CFI parameters */
10295 if(cell->dynCfiCb.isDynCfiEnb)
10297 /* If DCFI is enabled, current CFI value will start from 1 */
10298 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
10302 /* If DCFI is disabled, current CFI value is set as default max allowed CFI value */
10303 cellSch->dl.currCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
10304 cellSch->dl.newCfi = cellSch->dl.currCfi;
10307 /* Include CRS REs while calculating Efficiency
10308 * The number of Resource Elements occupied by CRS depends on Number of
10309 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
10310 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
10311 * details of the same. Please note that PDCCH overlap symbols would not
10312 * considered in CRS REs deduction */
10313 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
10315 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
10316 - numPdcchSym) *RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
/* Precompute usable REs in the DwPTS region per CFI */
10319 /* DwPTS Scheduling Changes Start */
10320 antPortIdx = (cell->numTxAntPorts == 1)? 0:
10321 ((cell->numTxAntPorts == 2)? 1: 2);
10323 if (cp == RG_SCH_CMN_NOR_CP)
10325 splSfIdx = (splSubfrmIdx == 4)? 1: 0;
10329 splSfIdx = (splSubfrmIdx == 3)? 1: 0;
10332 numCrs = rgSchCmnDwptsCrs[splSfIdx][antPortIdx];
10334 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI-1; cfi++)
10336 /* If CFI is 2 and Ant Port is 4, don't consider the sym 1 CRS REs */
10337 if (antPortIdx == 2 && cfi == 2)
10341 cellSch->dl.numReDwPts[cfi] = ((cell->splSubfrmCfg.dwPts - cfi)*
10342 RB_SCH_CMN_NUM_SCS_PER_RB) - numCrs;
10344 /* DwPTS Scheduling Changes End */
/* Per-UE DL bandwidth limits fall back to defaults when unset */
10346 if (cfg->maxDlBwPerUe == 0)
10348 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
10352 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
10354 if (cfg->maxDlRetxBw == 0)
10356 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
10360 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
10362 /* Fix: MUE_PERTTI_DL*/
10363 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10364 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10365 if (cfg->maxUePerDlSf == 0)
10367 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10369 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
10370 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
10371 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
10373 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10374 "Invalid configuration !: "
10375 "maxCcchPerDlSf %u > maxUePerDlSf %u",
10376 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
10380 else if (!cfg->maxCcchPerDlSf)
10382 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
10383 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
10384 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
10385 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
10386 * FLE crash in PHY as PHY has limit of 16 max*/
10387 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
10391 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
/* Common code-rate derived parameters (bits/RB, aggregation level) */
10393 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
10398 /*ccpu00118273 - ADD - start */
10399 cmLListInit(&cellSch->dl.msg4RetxLst);
10401 cmLListInit(&cellSch->dl.ccchSduRetxLst);
10404 #ifdef RG_PHASE2_SCHED
10405 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
10407 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
10409 if (cfg->dlfsCfg.isDlFreqSel)
10411 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
10417 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
10420 /* Power related configuration */
10421 ret = rgSCHPwrCellCfg(cell, cfg);
10427 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
10428 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
10429 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
10430 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
10431 cellSch->dl.msg4pAVal = cfg->msg4pAVal;
10434 #else /* LTE_TDD */
10436 * @brief This function handles the configuration of cell for the first
10437 * time by the scheduler.
10441 * Function: rgSCHCmnDlRgrCellCfg
10442 * Purpose: Configuration received is stored into the data structures
10443 * Also, update the scheduler with the number of frames of
10444 * RACH preamble transmission.
10446 * Invoked by: BO and Scheduler
10448 * @param[in] RgSchCellCb* cell
10449 * @param[in] RgrCellCfg* cfg
10450 * @param[in] RgSchErrInfo* err
/* FDD variant: stores the DL cell configuration into the common
 * scheduler — RACH response frames, Msg4 HARQ delay, CQI->TBS and
 * efficiency tables, CFI defaults, per-CFI REs per RB, BW/UE limits,
 * common code rates, DLFS and power configuration. */
10455 static S16 rgSCHCmnDlRgrCellCfg
10462 static S16 rgSCHCmnDlRgrCellCfg(cell, cfg, err)
10469 RgSchCmnCell *cellSch;
10471 uint8_t numPdcchSym;
10472 uint8_t noSymPerSlot;
10477 cellSch = RG_SCH_CMN_GET_CELL(cell);
10479 /* Initialize the parameters with the ones received in the */
10480 /* configuration. */
10482 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA
10483 * sub-frames from preamble format */
10484 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->rachCfg.preambleFormat];
10486 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
10487 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
10489 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
10490 3 TTI (MAX L1+L2 processing delay at the UE) */
10491 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
10492 rgSchCmnHarqRtt[7] + 3;
10494 if (cell->bwCfg.dlTotalBw <= 10)
10505 if (cell->isCpDlExtend == TRUE)
10507 cp = RG_SCH_CMN_EXT_CP;
10512 cp = RG_SCH_CMN_NOR_CP;
10516 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
10517 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, cfiIdx++)
10519 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
10521 cellSch->dl.emtcCqiToTbsTbl[0][cfi] = rgSchEmtcCmnCqiToTbs[0][cp][cfiIdx];
10523 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
10524 [cell->numTxAntPorts]][cfiIdx];
10525 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
10527 cellSch->dl.emtcCqiToTbsTbl[1][cfi] = rgSchEmtcCmnCqiToTbs[1][cp][cfiIdx];
10529 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
10530 [cell->numTxAntPorts]][cfiIdx];
10533 /* Initializing the values of CFI parameters */
10534 if(cell->dynCfiCb.isDynCfiEnb)
10536 /* If DCFI is enabled, current CFI value will start from 1 */
10537 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
10541 /* If DCFI is disabled, current CFI value is set as default CFI value */
10542 cellSch->dl.currCfi = cellSch->cfiCfg.cfi;
10543 cellSch->dl.newCfi = cellSch->dl.currCfi;
10546 /* Include CRS REs while calculating Efficiency
10547 * The number of Resource Elements occupied by CRS depends on Number of
10548 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
10549 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
10550 * details of the same. Please note that PDCCH overlap symbols would not
10551 * considered in CRS REs deduction */
10552 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
10554 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
10555 - numPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
/* Per-UE DL bandwidth limits fall back to defaults when unset */
10558 if (cfg->maxDlBwPerUe == 0)
10560 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
10564 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
10566 if (cfg->maxDlRetxBw == 0)
10568 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
10572 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
10575 /* Fix: MUE_PERTTI_DL*/
10576 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10577 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10578 if (cfg->maxUePerDlSf == 0)
10580 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10582 /* Fix: MUE_PERTTI_DL syed validating Cell Configuration */
10583 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
10585 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10586 "FAILED MaxUePerDlSf(%u) < MaxDlUeNewTxPerTti(%u)",
10587 cellSch->dl.maxUePerDlSf,
10588 cellSch->dl.maxUeNewTxPerTti);
10591 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
10592 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
10594 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid configuration !: "
10595 "maxCcchPerDlSf %u > maxUePerDlSf %u",
10596 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
10600 else if (!cfg->maxCcchPerDlSf)
10602 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
10603 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
10604 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
10605 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
10606 * FLE crash in PHY as PHY has limit of 16 max*/
10607 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
10611 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
/* Common code-rate derived parameters (bits/RB, aggregation level) */
10615 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
10619 cmLListInit(&cellSch->dl.msg4RetxLst);
10621 cmLListInit(&cellSch->dl.ccchSduRetxLst);
10624 #ifdef RG_PHASE2_SCHED
10625 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
10627 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
10629 if (cfg->dlfsCfg.isDlFreqSel)
10631 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
10637 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
10640 /* Power related configuration */
10641 ret = rgSCHPwrCellCfg(cell, cfg);
10647 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
10648 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
10649 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
10650 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
10651 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
10654 #endif /* LTE_TDD */
10656 /***********************************************************
10658 * Func : rgSCHCmnUlCalcReqRbCeil
10660 * Desc : Calculate RB required to satisfy 'bytes' for
10662 * Returns number of RBs such that requirement
10663 * is necessarily satisfied (does a 'ceiling'
10666 * Ret : Required RBs (uint8_t)
10672 **********************************************************/
/* Returns the number of UL RBs needed to carry 'bytes' at CQI 'cqi':
 * converts bytes to REs via the CQI efficiency table, then REs to RBs,
 * rounding up at both steps (ceiling semantics). */
10674 uint8_t rgSCHCmnUlCalcReqRbCeil
10678 RgSchCmnUlCell *cellUl
10681 uint8_t rgSCHCmnUlCalcReqRbCeil(bytes, cqi, cellUl)
10684 RgSchCmnUlCell *cellUl;
10687 uint32_t numRe = RGSCH_CEIL((bytes * 8) * 1024, rgSchCmnUlCqiTbl[cqi].eff);
10688 return ((uint8_t)RGSCH_CEIL(numRe, RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl)));
10691 /***********************************************************
10693 * Func : rgSCHCmnPrecompMsg3Vars
10695 * Desc : Precomputes the following for msg3 allocation:
10696 * 1. numSb and Imcs for msg size A
10697 * 2. numSb and Imcs otherwise
10701 * Notes: The corresponding vars in cellUl struct is filled
10706 **********************************************************/
/* Precomputes msg3 grant parameters into cellUl->ra: number of
 * subbands and IMcs for preamble group A (minimum msg3 grant size)
 * and group B (message size A), with the CCCH CQI clamped to the
 * 16QAM limit and the MCS clamped to fit the 4-bit RAR field. */
10708 static S16 rgSCHCmnPrecompMsg3Vars
10710 RgSchCmnUlCell *cellUl,
10717 static S16 rgSCHCmnPrecompMsg3Vars(cellUl, ccchCqi, msgSzA, sbSize, isEcp)
10718 RgSchCmnUlCell *cellUl;
10730 uint16_t msg3GrntSz = 0;
10733 if (ccchCqi > cellUl->max16qamCqi)
10735 ccchCqi = cellUl->max16qamCqi;
10737 /* #ifndef RG_SCH_CMN_EXP_CP_SUP For ECP Pick the index 1 */
10739 ccchTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
10740 ccchMcs = rgSCHCmnUlGetIMcsFrmITbs(ccchTbs, CM_LTE_UE_CAT_1);
10742 /* MCS should fit in 4 bits in RAR */
10748 /* Limit the ccchMcs to 15 as it
10749 * can be inferred from 36.213, section 6.2 that msg3 imcs
10751 * Since, UE doesn't exist right now, we use CAT_1 for ue
10753 while((ccchMcs = (rgSCHCmnUlGetIMcsFrmITbs(
10754 rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi],CM_LTE_UE_CAT_1))
10756 RG_SCH_CMN_MAX_MSG3_IMCS)
10761 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
10763 if (msgSzA < RGSCH_MIN_MSG3_GRNT_SZ)
/* Size the grant for msgSzA, then grow numSb until the TB size fits
 * and numSb is a valid 2^a*3^b*5^c subband count */
10767 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(msgSzA, ccchCqi, cellUl), sbSize);
10769 numRb = numSb * sbSize;
10770 msg3GrntSz = 8 * msgSzA;
10772 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
10775 numRb = numSb * sbSize;
10777 while (rgSchCmnMult235Tbl[numSb].match != numSb)
10781 /* Reversed(Corrected) the assignment for preamble-GrpA
10782 * Refer- TG36.321- section- 5.1.2*/
10783 cellUl->ra.prmblBNumSb = numSb;
10784 cellUl->ra.prmblBIMcs = ccchMcs;
/* Repeat the sizing for the minimum msg3 grant (preamble group A) */
10785 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(RGSCH_MIN_MSG3_GRNT_SZ, \
10789 numRb = numSb * sbSize;
10790 msg3GrntSz = 8 * RGSCH_MIN_MSG3_GRNT_SZ;
10791 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
10794 numRb = numSb * sbSize;
10796 while (rgSchCmnMult235Tbl[numSb].match != numSb)
10800 /* Reversed(Corrected) the assignment for preamble-GrpA
10801 * Refer- TG36.321- section- 5.1.2*/
10802 cellUl->ra.prmblANumSb = numSb;
10803 cellUl->ra.prmblAIMcs = ccchMcs;
10807 uint32_t gPrntPucchDet=0;
10810 /***********************************************************
10812 * Func : rgSCHCmnUlCalcAvailBw
10814 * Desc : Calculates bandwidth available for PUSCH scheduling.
10816 * Ret : S16 (ROK/RFAILED)
10822 **********************************************************/
/* TDD variant: computes the PUSCH region for a given CFI. Derives the
 * PUCCH RBs to exclude (format 2 RBs, format 1/1a/1b RBs and the
 * mixed RB) following the resource mapping of 36.211 Sec 5.4.3, then
 * returns the PUSCH start RB and available bandwidth via the out
 * parameters. May lower dynCfiCb.maxCfi if PUCCH exceeds its cap. */
10824 static S16 rgSCHCmnUlCalcAvailBw
10827 RgrCellCfg *cellCfg,
10829 uint8_t *rbStartRef,
10830 uint8_t *bwAvailRef
10833 static S16 rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, rbStartRef, bwAvailRef)
10835 RgrCellCfg *cellCfg;
10837 uint8_t *rbStartRef;
10838 uint8_t *bwAvailRef;
10842 uint8_t ulBw = cell->bwCfg.ulTotalBw;
10843 uint8_t n2Rb = cell->pucchCfg.resourceSize;
10844 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
10845 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
10846 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
10853 uint8_t exclRb; /* RBs to exclude */
10855 uint8_t puschRbStart;
10856 /* To avoid PUCCH and PUSCH collision issue */
10860 /* Maximum value of M as per Table 10.1-1 */
10861 uint8_t M[RGSCH_MAX_TDD_UL_DL_CFG] = {1, 2, 4, 3, 4, 9, 1};
10864 if (cell->isCpUlExtend)
10869 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
10871 /* Considering the max no. of CCEs for PUSCH BW calculation
10872 * based on min mi value */
10873 if (cell->ulDlCfgIdx == 0 || cell->ulDlCfgIdx == 6)
10882 totalCce = cell->dynCfiCb.cfi2NCceTbl[mi][cfi];
/* Worst-case n1 PUCCH index across the M DL subframes of this UL-DL cfg */
10884 P = rgSCHCmnGetPValFrmCCE(cell, totalCce-1);
10885 n1PlusOne = cell->rgSchTddNpValTbl[P + 1];
10886 n1Max = (M[cell->ulDlCfgIdx] - 1)*n1PlusOne + (totalCce-1) + n1Pucch;
10888 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
10890 n1RbPart = (c*n1Cs)/pucchDeltaShft;
10891 n1Rb = (n1Max - n1RbPart)/ n1PerRb;
10892 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
10894 /* get the total Number of RB's to be excluded for PUSCH */
10896 if(n1Pucch < n1RbPart)
10902 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
10904 puschRbStart = exclRb/2 + 1;
10906 /* Num of PUCCH RBs = puschRbStart*2 */
10907 if (puschRbStart * 2 >= ulBw)
10909 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH")
10913 *rbStartRef = puschRbStart;
10914 *bwAvailRef = ulBw - puschRbStart * 2;
/* If PUCCH exceeds its configured RB cap at this CFI, lower max CFI */
10916 if(cell->pucchCfg.maxPucchRb !=0 &&
10917 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
10919 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
10926 /***********************************************************
10928 * Func : rgSCHCmnUlCalcAvailBw
10930 * Desc : Calculates bandwidth available for PUSCH scheduling.
10932 * Ret : S16 (ROK/RFAILED)
10938 **********************************************************/
10940 static S16 rgSCHCmnUlCalcAvailBw
10943 RgrCellCfg *cellCfg,
10945 uint8_t *rbStartRef,
10946 uint8_t *bwAvailRef
10949 static S16 rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, rbStartRef, bwAvailRef)
10951 RgrCellCfg *cellCfg;
10953 uint8_t *rbStartRef;
10954 uint8_t *bwAvailRef;
10958 uint8_t ulBw = cell->bwCfg.ulTotalBw;
10959 uint8_t n2Rb = cell->pucchCfg.resourceSize;
10960 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
10961 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
10962 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
10968 uint8_t exclRb; /* RBs to exclude */
10970 uint8_t puschRbStart;
10972 uint16_t numOfN3PucchRb;
10973 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10977 if (cell->isCpUlExtend)
10982 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
10984 totalCce = cell->dynCfiCb.cfi2NCceTbl[0][cfi];
10986 n1Max = n1Pucch + totalCce-1;
10988 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
10990 n1RbPart = (c*n1Cs)/pucchDeltaShft;
10991 n1Rb = (uint8_t)((n1Max - n1RbPart) / n1PerRb);
10992 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
10994 /* get the total Number of RB's to be excluded for PUSCH */
10996 if(n1Pucch < n1RbPart)
11002 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
11004 /*Support for PUCCH Format 3*/
11006 if (cell->isPucchFormat3Sptd)
11008 numOfN3PucchRb = RGSCH_CEIL(cellSch->dl.maxUePerDlSf,5);
11009 exclRb = exclRb + numOfN3PucchRb;
11012 puschRbStart = exclRb/2 + 1;
11016 #ifndef ALIGN_64BIT
11017 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%ld:%d:%d:%d:%d:%d]\n",
11018 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
11020 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%d:%d:%d:%d:%d:%d]\n",
11021 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
11025 if (puschRbStart*2 >= ulBw)
11027 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
11031 *rbStartRef = puschRbStart;
11032 *bwAvailRef = ulBw - puschRbStart * 2;
11034 if(cell->pucchCfg.maxPucchRb !=0 &&
11035 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
11037 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
11046 /***********************************************************
11048 * Func : rgSCHCmnUlCellInit
11050 * Desc : Uplink scheduler initialisation for cell.
11058 **********************************************************/
/* One-time UL scheduler setup for a cell at RGR cell configuration:
 * validates/normalises per-SF UE and Msg3 limits, allocates per-subframe
 * UL allocation info, derives subband and CQI parameters, precomputes
 * Msg3 values, builds the UL subframe array (and, for TDD, the UL->DL
 * subframe map), computes per-CFI available PUSCH bandwidth and the DMRS
 * array, then initialises each UL subframe with rollback on failure.
 * NOTE(review): excerpt is elided — braces, #ifdef guards (TDD/EMTC/5GTF)
 * and several statements are not visible here. */
11060 static S16 rgSCHCmnUlCellInit
11063 RgrCellCfg *cellCfg
/* K&R form of the same signature. */
11066 static S16 rgSCHCmnUlCellInit(cell, cellCfg)
11068 RgrCellCfg *cellCfg;
11072 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11073 uint8_t maxUePerUlSf = cellCfg->maxUePerUlSf;
11075 /* Added configuration for maximum number of MSG3s */
11076 uint8_t maxMsg3PerUlSf = cellCfg->maxMsg3PerUlSf;
11078 uint8_t maxUlBwPerUe = cellCfg->maxUlBwPerUe;
11079 uint8_t sbSize = cellCfg->puschSubBand.size;
11084 uint8_t maxSbPerUe;
/* TDD-only locals: UL/DL configuration index and subframe bookkeeping. */
11087 uint16_t ulDlCfgIdx = cell->ulDlCfgIdx;
11088 /* [ccpu00127294]-MOD-Change the max Ul subfrms size in TDD */
11089 uint8_t maxSubfrms = 2 * rgSchTddNumUlSf[ulDlCfgIdx];
11090 uint8_t ulToDlMap[12] = {0}; /* maximum 6 Subframes in UL * 2 */
11091 uint8_t maxUlsubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
11092 [RGSCH_NUM_SUB_FRAMES-1];
/* FDD counterpart (guard elided): fixed number of UL subframes. */
11096 uint8_t maxSubfrms = RG_SCH_CMN_UL_NUM_SF;
11102 #if (defined(LTE_L2_MEAS) )
11103 Inst inst = cell->instIdx;
11104 #endif /* #if (defined(LTE_L2_MEAS) || defined(DEBUGP) */
11105 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
11108 cellUl->maxUeNewTxPerTti = cellCfg->maxUlUeNewTxPerTti;
/* Zero-configured limits fall back to scheduler defaults. */
11109 if (maxUePerUlSf == 0)
11111 maxUePerUlSf = RG_SCH_CMN_MAX_UE_PER_UL_SF;
11114 if (maxMsg3PerUlSf == 0)
11116 maxMsg3PerUlSf = RG_SCH_CMN_MAX_MSG3_PER_UL_SF;
11118 /* fixed the problem while sending raRsp
11119 * if maxMsg3PerUlSf is greater than
11120 * RGSCH_MAX_RNTI_PER_RARNTI
/* Clamp Msg3 count to what a single RA response can carry, and to the
 * overall per-SF UE limit. */
11122 if(maxMsg3PerUlSf > RGSCH_MAX_RNTI_PER_RARNTI)
11124 maxMsg3PerUlSf = RGSCH_MAX_RNTI_PER_RARNTI;
11127 if(maxMsg3PerUlSf > maxUePerUlSf)
11129 maxMsg3PerUlSf = maxUePerUlSf;
11132 /*cellUl->maxAllocPerUlSf = maxUePerUlSf + maxMsg3PerUlSf;*/
11133 /*Max MSG3 should be a subset of Max UEs*/
11134 cellUl->maxAllocPerUlSf = maxUePerUlSf;
11135 cellUl->maxMsg3PerUlSf = maxMsg3PerUlSf;
/* NOTE(review): maxAllocPerUlSf is assigned the same value twice here —
 * presumably the two assignments sit in different #ifdef branches in the
 * full source; confirm against the unelided file. */
11137 cellUl->maxAllocPerUlSf = maxUePerUlSf;
11139 /* Fix: MUE_PERTTI_UL syed validating Cell Configuration */
11140 if (cellUl->maxAllocPerUlSf < cellUl->maxUeNewTxPerTti)
11142 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
11143 "FAILED: MaxUePerUlSf(%u) < MaxUlUeNewTxPerTti(%u)",
11144 cellUl->maxAllocPerUlSf,
11145 cellUl->maxUeNewTxPerTti);
/* Allocate UL allocation-report storage for every subframe slot (array
 * bound differs between the two build variants). */
11151 for(idx = 0; idx < RGSCH_SF_ALLOC_SIZE; idx++)
11153 for(idx = 0; idx < RGSCH_NUM_SUB_FRAMES; idx++)
11157 ret = rgSCHUtlAllocSBuf(inst, (Data **)&(cell->sfAllocArr[idx].
11158 ulUeInfo.ulAllocInfo), (cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc)));
11161 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation failed ");
11166 if (maxUlBwPerUe == 0)
11168 /* ccpu00139362- Setting to configured UL BW instead of MAX BW(100)*/
11169 maxUlBwPerUe = cell->bwCfg.ulTotalBw;
11171 cellUl->maxUlBwPerUe = maxUlBwPerUe;
11173 /* FOR RG_SCH_CMN_EXT_CP_SUP */
/* REs per RB: 12 subcarriers x (symbols per subframe minus those taken
 * by DMRS/SRS); normal CP has 14 symbols, extended CP 12. */
11174 if (!cellCfg->isCpUlExtend)
11176 cellUl->ulNumRePerRb = 12 * (14 - RGSCH_UL_SYM_DMRS_SRS);
11180 cellUl->ulNumRePerRb = 12 * (12 - RGSCH_UL_SYM_DMRS_SRS);
/* PUSCH subband size must be a product of 2s, 3s and 5s (DFT-friendly). */
11183 if (sbSize != rgSchCmnMult235Tbl[sbSize].match)
11185 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Invalid subband size %d", sbSize);
11188 //Setting the subband size to 4 which is size of VRBG in 5GTF
11190 sbSize = MAX_5GTF_VRBG_SIZE;
11193 maxSbPerUe = maxUlBwPerUe / sbSize;
11194 if (maxSbPerUe == 0)
11196 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnUlCellInit(): "
11197 "maxUlBwPerUe/sbSize is zero");
/* Round down to the nearest 2/3/5-composite subband count. */
11200 cellUl->maxSbPerUe = rgSchCmnMult235Tbl[maxSbPerUe].prvMatch;
11202 /* CQI related updations */
11203 if ((!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->ulCmnCodeRate.ccchCqi))
11204 || (!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->trgUlCqi.trgCqi)))
11206 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnUlCellInit(): "
11210 cellUl->dfltUlCqi = cellCfg->ulCmnCodeRate.ccchCqi;
11212 /* Changed the logic to determine maxUlCqi.
11213 * For a 16qam UE, maxUlCqi is the CQI Index at which
11214 * efficiency is as close as possible to RG_SCH_MAX_CODE_RATE_16QAM
11215 * Refer to 36.213-8.6.1 */
11216 for (i = RG_SCH_CMN_UL_NUM_CQI - 1;i > 0; --i)
11218 RLOG_ARG2(L_INFO,DBG_CELLID,cell->cellId,
11221 rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i]);
11222 #ifdef MAC_SCH_STATS
11223 /* ccpu00128489 ADD Update mcs in hqFailStats here instead of at CRC
11224 * since CQI to MCS mapping does not change. The only exception is for
11225 * ITBS = 19 where the MCS can be 20 or 21 based on the UE cat. We
11226 * choose 20, instead of 21, ie UE_CAT_3 */
11227 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
11228 RG_SCH_CMN_UL_TBS_TO_MCS(iTbs, hqFailStats.ulCqiStat[i - 1].mcs);
/* Walk CQI indices downward to find the highest CQI whose ITBS still
 * maps to 16QAM; record it as max16qamCqi. */
11231 for (i = RG_SCH_CMN_UL_NUM_CQI - 1; i != 0; --i)
11233 /* Fix for ccpu00123912*/
11234 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
11235 if (iTbs <= RGSCH_UL_16QAM_MAX_ITBS) /* corresponds to 16QAM */
11237 RLOG_ARG1(L_INFO,DBG_CELLID,cell->cellId,
11238 "16 QAM CQI %u", i);
11239 cellUl->max16qamCqi = i;
/* EMTC and legacy variants of the Msg3 precompute (guards elided). */
11245 /* Precompute useful values for RA msg3 */
11246 ret = rgSCHCmnPrecompEmtcMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
11247 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
11254 /* Precompute useful values for RA msg3 */
11255 ret = rgSCHCmnPrecompMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
11256 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
11262 cellUl->sbSize = sbSize;
11265 cellUl->numUlSubfrms = maxSubfrms;
11267 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->ulSfArr,
11268 cellUl->numUlSubfrms * sizeof(RgSchUlSf));
/* On allocation failure, reset the count so deinit won't walk the
 * never-allocated array. */
11272 cellUl->numUlSubfrms = 0;
11276 /* store the DL subframe corresponding to the PUSCH offset
11277 * in their respective UL subframe */
/* TDD: for each DL subframe carrying a UL grant (non-zero k in the
 * PUSCH Tx-k table), record which DL subframe feeds each UL subframe. */
11278 for(i=0; i < RGSCH_NUM_SUB_FRAMES; i++)
11280 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][i] != 0)
11282 subfrm = (i + rgSchTddPuschTxKTbl[ulDlCfgIdx][i]) % \
11283 RGSCH_NUM_SUB_FRAMES;
11284 subfrm = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subfrm]-1;
11285 dlIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][i]-1;
11286 RGSCH_ARRAY_BOUND_CHECK( cell->instIdx, ulToDlMap, subfrm);
11287 ulToDlMap[subfrm] = dlIdx;
11290 /* Copy the information in the remaining UL subframes based
11291 * on number of HARQ processes */
11292 for(i=maxUlsubfrms; i < maxSubfrms; i++)
11294 subfrm = i-maxUlsubfrms;
11295 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, i);
11296 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, subfrm)
11297 ulToDlMap[i] = ulToDlMap[subfrm];
/* Compute PUSCH start RB and subband count for every candidate CFI
 * (the two identical calls presumably sit in different #ifdef branches
 * — confirm against the full source). */
11301 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++)
11304 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
11306 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
11315 cell->ulAvailBw = bwAvail;
11318 numSb = bwAvail/sbSize;
11320 cell->dynCfiCb.bwInfo[cfi].startRb = rbStart;
11321 cell->dynCfiCb.bwInfo[cfi].numSb = numSb;
/* No CFI yielded usable PUSCH bandwidth: configuration is inconsistent. */
11324 if(0 == cell->dynCfiCb.maxCfi)
11326 RLOG_ARG3(L_ERROR,DBG_CELLID,cell->cellId,
11327 "Incorrect Default CFI(%u), maxCfi(%u), maxPucchRb(%d)",
11328 cellSch->cfiCfg.cfi, cell->dynCfiCb.maxCfi,
11329 cell->pucchCfg.maxPucchRb);
/* DMRS config per subband, sized from the CFI=1 (largest) subband count. */
11335 cellUl->dmrsArrSize = cell->dynCfiCb.bwInfo[1].numSb;
11336 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->dmrsArr,
11337 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11342 for (i = 0; i < cellUl->dmrsArrSize; ++i)
11344 cellUl->dmrsArr[i] = cellCfg->puschSubBand.dmrs[i];
11347 /* Init subframes */
11348 for (i = 0; i < maxSubfrms; ++i)
11350 ret = rgSCHUtlUlSfInit(cell, &cellUl->ulSfArr[i], i,
11351 cellUl->maxAllocPerUlSf);
/* Rollback on mid-loop failure: deinit the subframes already set up,
 * then free the DMRS and UL subframe arrays allocated above. */
11354 for (; i != 0; --i)
11356 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[i-1]);
11358 /* ccpu00117052 - MOD - Passing double pointer
11359 for proper NULLP assignment*/
11360 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)(&(cellUl->dmrsArr)),
11361 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11363 /* ccpu00117052 - MOD - Passing double pointer
11364 for proper NULLP assignment*/
11365 rgSCHUtlFreeSBuf(cell->instIdx,
11366 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
/* Reset home-CSG UL PRB usage counters for the fresh cell. */
11371 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cellUl);
11376 * @brief Scheduler processing on cell configuration.
11380 * Function : rgSCHCmnRgrCellCfg
11382 * This function does requisite initialisation
11383 * and setup for scheduler1 when a cell is
11386 * @param[in] RgSchCellCb *cell
11387 * @param[in] RgrCellCfg *cellCfg
11388 * @param[out] RgSchErrInfo *err
/* Entry point for RGR cell configuration into the common scheduler:
 * allocates the per-cell scheduler control block, seeds CFI/CQI targets
 * and refresh-timer queues, initialises RACH dedicated preambles and
 * HARQ Np values, runs UL and DL cell init, sizes DCI formats, binds the
 * UL/DL (and EMTC, when enabled) scheduler API tables and runs their cfg
 * hooks, then SPS init and scratch-variable init. On any failure sets
 * err->errCause = RGSCHERR_SCH_CFG.
 * NOTE(review): excerpt is elided — error-return statements and #ifdef
 * guards between the duplicated calls are not visible. */
11394 S16 rgSCHCmnRgrCellCfg
11397 RgrCellCfg *cellCfg,
/* K&R form of the same signature. */
11401 S16 rgSCHCmnRgrCellCfg(cell, cellCfg, err)
11403 RgrCellCfg *cellCfg;
11408 RgSchCmnCell *cellSch;
11410 /* As part of RGR cell configuration, validate the CRGCellCfg
11411 * There is no trigger for crgCellCfg from SC1 */
11412 /* Removed failure check for Extended CP */
/* Allocate the scheduler-private cell control block hung off cell->sc. */
11414 if (((ret = rgSCHUtlAllocSBuf(cell->instIdx,
11415 (Data**)&(cell->sc.sch), (sizeof(RgSchCmnCell)))) != ROK))
11417 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
11418 "Memory allocation FAILED");
11419 err->errCause = RGSCHERR_SCH_CFG;
11422 cellSch = (RgSchCmnCell *)(cell->sc.sch);
11423 cellSch->cfiCfg = cellCfg->cfiCfg;
11424 cellSch->trgUlCqi.trgCqi = cellCfg->trgUlCqi.trgCqi;
11425 /* Initialize the scheduler refresh timer queues */
11426 cellSch->tmrTqCp.nxtEnt = 0;
11427 cellSch->tmrTqCp.tmrLen = RG_SCH_CMN_NUM_REFRESH_Q;
11429 /* RACHO Intialize the RACH ded Preamble Information */
11430 rgSCHCmnCfgRachDedPrm(cell);
11432 /* Initialize 'Np' value for each 'p' used for
11433 * HARQ ACK/NACK reception */
11434 rgSCHCmnDlNpValInit(cell);
/* Duplicate Np init — presumably the two calls live in different build
 * (TDD/FDD) branches in the full source; confirm. */
11437 /* Initialize 'Np' value for each 'p' used for
11438 * HARQ ACK/NACK reception */
11440 rgSCHCmnDlNpValInit(cell);
11443 /* Now perform uplink related initializations */
11444 ret = rgSCHCmnUlCellInit(cell, cellCfg);
11447 /* There is no downlink deinit to be performed */
11448 err->errCause = RGSCHERR_SCH_CFG;
11451 ret = rgSCHCmnDlRgrCellCfg(cell, cellCfg, err);
11454 err->errCause = RGSCHERR_SCH_CFG;
11457 /* DL scheduler has no initializations to make */
11458 /* As of now DL scheduler always returns ROK */
/* Precompute DCI format sizes and CQI->aggregation-level mappings
 * (legacy and, under EMTC_ENABLE, the EMTC variants). */
11460 rgSCHCmnGetDciFrmtSizes(cell);
11461 rgSCHCmnGetCqiDciFrmt2AggrLvl(cell);
11463 rgSCHCmnGetEmtcDciFrmtSizes(cell);
11464 rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl(cell);
11465 #endif /* EMTC_ENABLE */
/* Bind the UL scheduler API table: EMTC table when enabled, otherwise
 * the table picked by the configured UL scheduler type. */
11468 if(TRUE == cellCfg->emtcEnable)
11470 cellSch->apisEmtcUl = &rgSchEmtcUlSchdTbl[0];
11471 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
11478 cellSch->apisUl = &rgSchUlSchdTbl[RG_SCH_CMN_GET_UL_SCHED_TYPE(cell)];
11479 ret = cellSch->apisUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
/* Same binding for the DL scheduler API table. */
11485 if(TRUE == cellCfg->emtcEnable)
11487 cellSch->apisEmtcDl = &rgSchEmtcDlSchdTbl[0];
11488 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
11495 cellSch->apisDl = &rgSchDlSchdTbl[RG_SCH_CMN_GET_DL_SCHED_TYPE(cell)];
11497 /* Perform SPS specific initialization for the cell */
11498 ret = rgSCHCmnSpsCellCfg(cell, cellCfg, err);
11504 ret = cellSch->apisDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
/* Reset the common scheduler's per-cell scratch variables. */
11509 rgSCHCmnInitVars(cell);
11512 } /* rgSCHCmnRgrCellCfg*/
11516 * @brief This function handles the reconfiguration of cell.
11520 * Function: rgSCHCmnRgrCellRecfg
11521 * Purpose: Update the reconfiguration parameters.
11523 * Invoked by: Scheduler
11525 * @param[in] RgSchCellCb* cell
/* Applies an RGR cell reconfiguration. Each change is gated by a bit in
 * recfg->recfgTypes: UL common code rate (with rollback of dfltUlCqi on
 * Msg3-precompute failure), DL common code rate, UL/DL scheduler-specific
 * recfg (EMTC vs legacy API tables), DLFS, and power reconfiguration.
 * NOTE(review): excerpt is elided — return statements and some branch
 * structure are not visible. */
11530 S16 rgSCHCmnRgrCellRecfg
11533 RgrCellRecfg *recfg,
/* K&R form of the same signature. */
11537 S16 rgSCHCmnRgrCellRecfg(cell, recfg, err)
11539 RgrCellRecfg *recfg;
11544 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11545 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11548 if (recfg->recfgTypes & RGR_CELL_UL_CMNRATE_RECFG)
11550 uint8_t oldCqi = cellUl->dfltUlCqi;
11551 if (!RG_SCH_CMN_UL_IS_CQI_VALID(recfg->ulCmnCodeRate.ccchCqi))
11553 err->errCause = RGSCHERR_SCH_CFG;
11554 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnRgrCellRecfg(): "
/* Adopt the new CCCH CQI and re-derive the Msg3 precomputed values. */
11558 cellUl->dfltUlCqi = recfg->ulCmnCodeRate.ccchCqi;
11559 ret = rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
11560 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
/* Rollback path on precompute failure.
 * NOTE(review): dfltUlCqi is restored to oldCqi, yet the recompute below
 * still passes the NEW recfg->ulCmnCodeRate.ccchCqi — this looks
 * inconsistent (oldCqi expected); confirm against the full source. */
11563 cellUl->dfltUlCqi = oldCqi;
11564 rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
11565 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
11570 if (recfg->recfgTypes & RGR_CELL_DL_CMNRATE_RECFG)
11572 if (rgSCHCmnDlCnsdrCmnRt(cell, &recfg->dlCmnCodeRate) != ROK)
11574 err->errCause = RGSCHERR_SCH_CFG;
/* Scheduler-specific recfg: EMTC API table when the cell is EMTC,
 * legacy UL/DL tables otherwise. */
11580 if(TRUE == cell->emtcEnable)
11582 /* Invoke UL sched for cell Recfg */
11583 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
11589 /* Invoke DL sched for cell Recfg */
11590 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
11599 /* Invoke UL sched for cell Recfg */
11600 ret = cellSch->apisUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
11606 /* Invoke DL sched for cell Recfg */
11607 ret = cellSch->apisDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
/* DL frequency-selective scheduling reconfiguration. */
11614 if (recfg->recfgTypes & RGR_CELL_DLFS_RECFG)
11616 ret = cellSch->apisDlfs->rgSCHDlfsCellRecfg(cell, recfg, err);
11621 cellSch->dl.isDlFreqSel = recfg->dlfsRecfg.isDlFreqSel;
11624 if (recfg->recfgTypes & RGR_CELL_PWR_RECFG)
11626 ret = rgSCHPwrCellRecfg(cell, recfg);
11636 /***********************************************************
11638 * Func : rgSCHCmnUlCellDeinit
11640 * Desc : Uplink scheduler de-initialisation for cell.
11648 **********************************************************/
/* Releases everything rgSCHCmnUlCellInit allocated: per-subframe UL
 * allocation info, (under LTE_L2_MEAS) outstanding L2 measurement control
 * blocks, the DMRS array, each UL subframe's internals, and finally the
 * UL subframe array itself.
 * NOTE(review): excerpt is elided — braces and #ifdef guards around the
 * alternate loop bounds are not visible. */
11650 static Void rgSCHCmnUlCellDeinit
/* K&R form of the same signature. */
11655 static Void rgSCHCmnUlCellDeinit(cell)
11659 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11662 uint8_t maxSubfrms = cellUl->numUlSubfrms;
11665 CmLList *lnk = NULLP;
11666 RgSchL2MeasCb *measCb;
/* Free the per-subframe UL allocation report buffers (two loop-bound
 * variants, guard elided). */
11670 for(ulSfIdx = 0; ulSfIdx < RGSCH_SF_ALLOC_SIZE; ulSfIdx++)
11672 for(ulSfIdx = 0; ulSfIdx < RGSCH_NUM_SUB_FRAMES; ulSfIdx++)
11675 if(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo != NULLP)
11677 /* ccpu00117052 - MOD - Passing double pointer
11678 for proper NULLP assignment*/
11679 rgSCHUtlFreeSBuf(cell->instIdx,
11680 (Data **)(&(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo)),
11681 cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc));
11683 /* ccpu00117052 - DEL - removed explicit NULLP assignment
11684 as it is done in above utility function */
11687 /* Free the memory allocated to measCb */
/* Drain and free every pending L2 measurement control block. */
11688 lnk = cell->l2mList.first;
11689 while(lnk != NULLP)
11691 measCb = (RgSchL2MeasCb *)lnk->node;
11692 cmLListDelFrm(&cell->l2mList, lnk);
11694 /* ccpu00117052 - MOD - Passing double pointer
11695 for proper NULLP assignment*/
11696 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&measCb,\
11697 sizeof(RgSchL2MeasCb));
11700 if (cellUl->dmrsArr != NULLP)
11702 /* ccpu00117052 - MOD - Passing double pointer
11703 for proper NULLP assignment*/
11704 rgSCHUtlFreeSBuf(cell->instIdx,(Data **)(&(cellUl->dmrsArr)),
11705 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11707 /* De-init subframes */
/* Two loop-bound variants (TDD uses numUlSubfrms, FDD the fixed count). */
11709 for (ulSfIdx = 0; ulSfIdx < maxSubfrms; ++ulSfIdx)
11711 for (ulSfIdx = 0; ulSfIdx < RG_SCH_CMN_UL_NUM_SF; ++ulSfIdx)
11714 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[ulSfIdx]);
11718 if (cellUl->ulSfArr != NULLP)
11720 /* ccpu00117052 - MOD - Passing double pointer
11721 for proper NULLP assignment*/
11722 rgSCHUtlFreeSBuf(cell->instIdx,
11723 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
11731 * @brief Scheduler processing for cell delete.
11735 * Function : rgSCHCmnCellDel
11737 * This functions de-initialises and frees memory
11738 * taken up by scheduler1 for the entire cell.
11740 * @param[in] RgSchCellCb *cell
/* Tears down the common scheduler's per-cell state: UL deinit, then the
 * UL/DL scheduler-specific frees (EMTC or legacy API table as bound at
 * cfg time), DLFS, power and SPS teardown, and finally the scheduler
 * control block itself. A NULL cellSch means cfg never completed, so
 * there is nothing to free. */
11744 Void rgSCHCmnCellDel
/* K&R form of the same signature. */
11749 Void rgSCHCmnCellDel(cell)
11753 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11758 if (cellSch == NULLP)
11762 /* Perform the deinit for the UL scheduler */
11763 rgSCHCmnUlCellDeinit(cell);
11765 if(TRUE == cell->emtcEnable)
11767 if (cellSch->apisEmtcUl)
11769 cellSch->apisEmtcUl->rgSCHFreeUlCell(cell);
11773 if (cellSch->apisUl)
11775 /* api pointer checks added (here and below in
11776 * this function). pl check. - antriksh */
11777 cellSch->apisUl->rgSCHFreeUlCell(cell);
11780 /* Perform the deinit for the DL scheduler */
/* Re-init the TA list head before freeing DL state (drops any links). */
11781 cmLListInit(&cellSch->dl.taLst);
11782 if (cellSch->apisDl)
11784 cellSch->apisDl->rgSCHFreeDlCell(cell);
11787 if (cellSch->apisEmtcDl)
11789 rgSCHEmtcInitTaLst(&cellSch->dl);
11791 cellSch->apisEmtcDl->rgSCHFreeDlCell(cell);
11795 /* DLFS de-initialization */
11796 if (cellSch->dl.isDlFreqSel && cellSch->apisDlfs)
11798 cellSch->apisDlfs->rgSCHDlfsCellDel(cell);
11801 rgSCHPwrCellDel(cell);
11803 rgSCHCmnSpsCellDel(cell);
11806 /* ccpu00117052 - MOD - Passing double pointer
11807 for proper NULLP assignment*/
/* Finally release the scheduler control block allocated in CellCfg. */
11808 rgSCHUtlFreeSBuf(cell->instIdx,
11809 (Data**)(&(cell->sc.sch)), (sizeof(RgSchCmnCell)));
11811 } /* rgSCHCmnCellDel */
11815 * @brief This function validates QOS parameters for DL.
11819 * Function: rgSCHCmnValidateDlQos
11820 * Purpose: This function validates QOS parameters for DL.
11822 * Invoked by: Scheduler
11824 * @param[in] CrgLchQosCfg *dlQos
/* Validates a DL logical channel's QoS: QCI must be within the supported
 * range, and for GBR-class QCIs the MBR must be non-zero and >= GBR.
 * NOTE(review): the failure/success return statements are elided from
 * this excerpt. */
11829 static S16 rgSCHCmnValidateDlQos
11831 RgrLchQosCfg *dlQos
/* K&R form of the same signature. */
11834 static S16 rgSCHCmnValidateDlQos(dlQos)
11835 RgrLchQosCfg *dlQos;
11838 uint8_t qci = dlQos->qci;
11841 if ( qci < RG_SCH_CMN_MIN_QCI || qci > RG_SCH_CMN_MAX_QCI )
/* GBR-class QCIs require a sane MBR/GBR pair. */
11846 if ((qci >= RG_SCH_CMN_GBR_QCI_START) &&
11847 (qci <= RG_SCH_CMN_GBR_QCI_END))
11849 if ((dlQos->mbr == 0) || (dlQos->mbr < dlQos->gbr))
11858 * @brief Scheduler invocation on logical channel addition.
11862 * Function : rgSCHCmnRgrLchCfg
11864 * This functions does required processing when a new
11865 * (dedicated) logical channel is added. Assumes lcg
11866 * pointer in ulLc is set.
11868 * @param[in] RgSchCellCb *cell
11869 * @param[in] RgSchUeCb *ue
11870 * @param[in] RgSchDlLcCb *dlLc
11871 * @param[int] RgrLchCfg *lcCfg
11872 * @param[out] RgSchErrInfo *err
/* Adds a dedicated logical channel: allocates the per-LC DL scheduler
 * struct, validates QoS (except for DCCH, which gets fixed top priority),
 * caches qci/prio and GBR/MBR scaled to the scheduler refresh period,
 * then invokes the DL and UL scheduler LC-cfg hooks (EMTC variants for
 * EMTC UEs), SCell LC setup and, if enabled, DL SPS configuration.
 * NOTE(review): excerpt is elided — braces, returns and #ifdef guards
 * are not all visible. */
11878 S16 rgSCHCmnRgrLchCfg
/* K&R form of the same signature. */
11887 S16 rgSCHCmnRgrLchCfg(cell, ue, dlLc, lcCfg, err)
11897 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11900 ret = rgSCHUtlAllocSBuf(cell->instIdx,
11901 (Data**)&((dlLc)->sch), (sizeof(RgSchCmnDlSvc)));
11904 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRgrLchCfg(): "
11905 "SCH struct alloc failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11906 err->errCause = RGSCHERR_SCH_CFG;
/* QoS applies only to non-DCCH channels; DCCH is handled below. */
11909 if(lcCfg->lcType != CM_LTE_LCH_DCCH)
11911 ret = rgSCHCmnValidateDlQos(&lcCfg->dlInfo.dlQos);
11914 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSchCmnCrgLcCfg(): "
11915 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11916 err->errCause = RGSCHERR_SCH_CFG;
11919 /* Perform DL service activation in the scheduler */
/* Priority comes from the QCI->priority table; GBR/MBR are rescaled
 * from per-100ms(?) config units to the scheduler refresh period —
 * TODO confirm the unit against RG_SCH_CMN_REFRESH_TIME's definition. */
11920 ((RgSchCmnDlSvc *)(dlLc->sch))->qci = lcCfg->dlInfo.dlQos.qci;
11921 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = rgSchCmnDlQciPrio[lcCfg->dlInfo.dlQos.qci - 1];
11922 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcCfg->dlInfo.dlQos.gbr * \
11923 RG_SCH_CMN_REFRESH_TIME)/100;
11924 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcCfg->dlInfo.dlQos.mbr * \
11925 RG_SCH_CMN_REFRESH_TIME)/100;
11929 /*assigning highest priority to DCCH */
11930 ((RgSchCmnDlSvc *)(dlLc->sch))->prio=RG_SCH_CMN_DCCH_PRIO;
11933 dlLc->lcType=lcCfg->lcType;
/* DL scheduler LC-cfg hook: EMTC table for EMTC UEs, legacy otherwise. */
11936 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
11938 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcCfg(cell, ue,dlLc ,lcCfg, err);
11947 ret = cellSch->apisDl->rgSCHRgrDlLcCfg(cell, ue, dlLc, lcCfg, err);
/* UL scheduler LC-cfg hook, same EMTC/legacy split. */
11955 if(TRUE == ue->isEmtcUe)
11957 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
11966 ret = cellSch->apisUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
11976 rgSCHSCellDlLcCfg(cell, ue, dlLc);
/* DL SPS configuration for this LC, if enabled in the cfg. */
11982 if(lcCfg->dlInfo.dlSpsCfg.isSpsEnabled)
11984 /* Invoke SPS module if SPS is enabled for the service */
11985 ret = rgSCHCmnSpsDlLcCfg(cell, ue, dlLc, lcCfg, err);
11988 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "rgSchCmnRgrLchCfg(): "
11989 "SPS configuration failed for DL LC for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11990 err->errCause = RGSCHERR_SCH_CFG;
12000 * @brief Scheduler invocation on logical channel addition.
12004 * Function : rgSCHCmnRgrLchRecfg
12006 * This functions does required processing when an existing
12007 * (dedicated) logical channel is reconfigured. Assumes lcg
12008 * pointer in ulLc is set to the old value.
12009 * Independent of whether new LCG is meant to be configured,
12010 * the new LCG scheduler information is accessed and possibly modified.
12012 * @param[in] RgSchCellCb *cell
12013 * @param[in] RgSchUeCb *ue
12014 * @param[in] RgSchDlLcCb *dlLc
12015 * @param[int] RgrLchRecfg *lcRecfg
12016 * @param[out] RgSchErrInfo *err
/* Reconfigures an existing dedicated LC: re-validates QoS for non-DCCH
 * channels, rejects any QCI (hence priority) change, refreshes the
 * scaled GBR/MBR, then runs the DL/UL scheduler recfg hooks (EMTC vs
 * legacy) and, when requested, DL SPS reconfiguration.
 * NOTE(review): excerpt is elided — braces and returns not all visible. */
12022 S16 rgSCHCmnRgrLchRecfg
12027 RgrLchRecfg *lcRecfg,
/* K&R form of the same signature. */
12031 S16 rgSCHCmnRgrLchRecfg(cell, ue, dlLc, lcRecfg, err)
12035 RgrLchRecfg *lcRecfg;
12040 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12043 if(dlLc->lcType != CM_LTE_LCH_DCCH)
12045 ret = rgSCHCmnValidateDlQos(&lcRecfg->dlRecfg.dlQos);
12049 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
12050 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12051 err->errCause = RGSCHERR_SCH_CFG;
/* QCI change would change the LC's priority: explicitly unsupported. */
12054 if (((RgSchCmnDlSvc *)(dlLc->sch))->qci != lcRecfg->dlRecfg.dlQos.qci)
12056 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Qci, hence lc Priority change "
12057 "not supported for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12058 err->errCause = RGSCHERR_SCH_CFG;
/* Refresh GBR/MBR scaled to the scheduler refresh period (same scaling
 * as in rgSCHCmnRgrLchCfg). */
12061 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcRecfg->dlRecfg.dlQos.gbr * \
12062 RG_SCH_CMN_REFRESH_TIME)/100;
12063 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcRecfg->dlRecfg.dlQos.mbr * \
12064 RG_SCH_CMN_REFRESH_TIME)/100;
12068 /*assigning highest priority to DCCH */
12069 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = RG_SCH_CMN_DCCH_PRIO;
/* Scheduler recfg hooks: EMTC tables for EMTC UEs, legacy otherwise. */
12073 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
12075 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12080 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
12089 ret = cellSch->apisDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12094 ret = cellSch->apisUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
/* DL SPS recfg is only attempted when the recfg type requests it; the
 * error log notes SPS re-configuration is not supported and ignored. */
12102 if (lcRecfg->recfgTypes & RGR_DL_LC_SPS_RECFG)
12104 /* Invoke SPS module if SPS is enabled for the service */
12105 if(lcRecfg->dlRecfg.dlSpsRecfg.isSpsEnabled)
12107 ret = rgSCHCmnSpsDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12110 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"SPS re-configuration not "
12111 "supported for dlLC Ignore this CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12122 * @brief Scheduler invocation on logical channel addition.
12126 * Function : rgSCHCmnRgrLcgCfg
12128 * This functions does required processing when a new
12129 * (dedicated) logical channel is added. Assumes lcg
12130 * pointer in ulLc is set.
12132 * @param[in] RgSchCellCb *cell,
12133 * @param[in] RgSchUeCb *ue,
12134 * @param[in] RgSchLcgCb *lcg,
12135 * @param[in] RgrLcgCfg *lcgCfg,
12136 * @param[out] RgSchErrInfo *err
/* Configures an UL logical channel group: stores GBR and the MBR-over-GBR
 * delta scaled to the scheduler refresh period, invokes the UL scheduler
 * LCG-cfg hook (EMTC vs legacy), and registers the LCG with MAC when it
 * is a GBR bearer. */
12142 S16 rgSCHCmnRgrLcgCfg
/* K&R form of the same signature. */
12151 S16 rgSCHCmnRgrLcgCfg(cell, ue, lcg, lcgCfg, err)
12160 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12161 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCfg->ulInfo.lcgId].sch));
/* effGbr/effDeltaMbr start at the configured values; deltaMbr tracks
 * the headroom above GBR (mbr - gbr). */
12164 ulLcg->cfgdGbr = (lcgCfg->ulInfo.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
12165 ulLcg->effGbr = ulLcg->cfgdGbr;
12166 ulLcg->deltaMbr = ((lcgCfg->ulInfo.mbr - lcgCfg->ulInfo.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
12167 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
12170 if(TRUE == ue->isEmtcUe)
12172 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
12181 ret = cellSch->apisUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
12187 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
12189 /* Indicate MAC that this LCG is GBR LCG */
12190 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcgCfg->ulInfo.lcgId, TRUE);
12196 * @brief Scheduler invocation on logical channel addition.
12200 * Function : rgSCHCmnRgrLcgRecfg
12202 * This functions does required processing when a new
12203 * (dedicated) logical channel is added. Assumes lcg
12204 * pointer in ulLc is set.
12206 * @param[in] RgSchCellCb *cell,
12207 * @param[in] RgSchUeCb *ue,
12208 * @param[in] RgSchLcgCb *lcg,
12209 * @param[in] RgrLcgRecfg *reCfg,
12210 * @param[out] RgSchErrInfo *err
/* Reconfigures an UL LCG: refreshes the scaled GBR and MBR-over-GBR
 * delta, invokes the UL scheduler LCG-recfg hook (EMTC vs legacy), then
 * (re)registers the LCG with MAC as GBR, or deregisters it (FALSE) when
 * the reconfigured rates make it non-GBR — the RAB-modification case. */
12216 S16 rgSCHCmnRgrLcgRecfg
12221 RgrLcgRecfg *reCfg,
/* K&R form of the same signature. */
12225 S16 rgSCHCmnRgrLcgRecfg(cell, ue, lcg, reCfg, err)
12229 RgrLcgRecfg *reCfg;
12234 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12235 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[reCfg->ulRecfg.lcgId].sch));
12238 ulLcg->cfgdGbr = (reCfg->ulRecfg.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
12239 ulLcg->effGbr = ulLcg->cfgdGbr;
12240 ulLcg->deltaMbr = ((reCfg->ulRecfg.mbr - reCfg->ulRecfg.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
12241 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
12244 if(TRUE == ue->isEmtcUe)
12246 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
12255 ret = cellSch->apisUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
12261 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
12263 /* Indicate MAC that this LCG is GBR LCG */
12264 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, TRUE);
12268 /* In case of RAB modification */
12269 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, FALSE);
12274 /***********************************************************
12276 * Func : rgSCHCmnRgrLchDel
12278 * Desc : Scheduler handling for a (dedicated)
12279 * uplink logical channel being deleted.
12286 **********************************************************/
/* Thin dispatcher: forwards the UL LCH delete to the EMTC or legacy UL
 * scheduler API table depending on the UE type. */
12288 S16 rgSCHCmnRgrLchDel
/* K&R form of the same signature. */
12296 S16 rgSCHCmnRgrLchDel(cell, ue, lcId, lcgId)
12303 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12305 if(TRUE == ue->isEmtcUe)
12307 cellSch->apisEmtcUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
12312 cellSch->apisUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
12317 /***********************************************************
12319 * Func : rgSCHCmnLcgDel
12321 * Desc : Scheduler handling for a (dedicated)
12322 * uplink logical channel being deleted.
12330 **********************************************************/
/* Deletes an UL LCG's scheduler state: deregisters a GBR LCG from MAC,
 * runs UL-SPS LCG teardown when UL SPS is enabled for the UE, zeroes the
 * rate/BSR bookkeeping (the control block itself is freed at UE delete),
 * and invokes the scheduler-specific free hook (EMTC vs legacy). */
12332 Void rgSCHCmnLcgDel
/* K&R form of the same signature. */
12339 Void rgSCHCmnLcgDel(cell, ue, lcg)
12345 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12346 RgSchCmnLcg *lcgCmn = RG_SCH_CMN_GET_UL_LCG(lcg);
12348 if (lcgCmn == NULLP)
12353 if (RGSCH_IS_GBR_BEARER(lcgCmn->cfgdGbr))
12355 /* Indicate MAC that this LCG is GBR LCG */
12356 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcg->lcgId, FALSE);
12360 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
12362 rgSCHCmnSpsUlLcgDel(cell, ue, lcg);
12364 #endif /* LTEMAC_SPS */
12366 lcgCmn->effGbr = 0;
12367 lcgCmn->reportedBs = 0;
12368 lcgCmn->cfgdGbr = 0;
12369 /* set lcg bs to 0. Deletion of control block happens
12370 * at the time of UE deletion. */
12373 if(TRUE == ue->isEmtcUe)
12375 cellSch->apisEmtcUl->rgSCHFreeUlLcg(cell, ue, lcg);
12380 cellSch->apisUl->rgSCHFreeUlLcg(cell, ue, lcg);
12387 * @brief This function deletes a service from scheduler.
12391 * Function: rgSCHCmnFreeDlLc
12392 * Purpose: This function is made available through a FP for
12393 * making scheduler aware of a service being deleted from UE.
12395 * Invoked by: BO and Scheduler
12397 * @param[in] RgSchCellCb* cell
12398 * @param[in] RgSchUeCb* ue
12399 * @param[in] RgSchDlLcCb* svc
/* Frees a DL logical channel's scheduler state: no-op if the per-LC
 * struct was never allocated; otherwise delegates to the DL scheduler
 * free hook (EMTC vs legacy), runs SCell and (if enabled) DL-SPS LC
 * teardown, frees the per-LC struct, and deinits the LAA LCH block. */
12404 Void rgSCHCmnFreeDlLc
/* K&R form of the same signature. */
12411 Void rgSCHCmnFreeDlLc(cell, ue, svc)
12417 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12418 if (svc->sch == NULLP)
12423 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
12425 cellSch->apisEmtcDl->rgSCHFreeDlLc(cell, ue, svc);
12430 cellSch->apisDl->rgSCHFreeDlLc(cell, ue, svc);
12436 rgSCHSCellDlLcDel(cell, ue, svc);
12441 /* If SPS service, invoke SPS module */
12442 if (svc->dlLcSpsCfg.isSpsEnabled)
12444 rgSCHCmnSpsDlLcDel(cell, ue, svc);
12448 /* ccpu00117052 - MOD - Passing double pointer
12449 for proper NULLP assignment*/
12450 rgSCHUtlFreeSBuf(cell->instIdx,
12451 (Data**)(&(svc->sch)), (sizeof(RgSchCmnDlSvc)));
12454 rgSCHLaaDeInitDlLchCb(cell, svc);
12463 * @brief This function Processes the Final Allocations
12464 * made by the RB Allocator against the requested
12465 * CCCH SDURetx Allocations.
12469 * Function: rgSCHCmnDlCcchSduRetxFnlz
12470 * Purpose: This function Processes the Final Allocations
12471 * made by the RB Allocator against the requested
12472 * CCCH Retx Allocations.
12473 * Scans through the scheduled list of ccchSdu retrans
12474 * fills the corresponding pdcch, adds the hqProc to
12475 * the corresponding SubFrm and removes the hqP from
12478 * Invoked by: Common Scheduler
12480 * @param[in] RgSchCellCb *cell
12481 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Finalises CCCH-SDU retransmission allocations: for each scheduled HARQ
 * process, fills its PDCCH, unlinks it from the cell's ccchSduRetxLst and
 * resets the UE's temporary allocation state; for each non-scheduled one,
 * only the temp-state reset is done (it stays queued for a later SF).
 * NOTE(review): excerpt is elided — loop braces, the 'ue' extraction and
 * node-advance statements are not visible here. */
12486 static Void rgSCHCmnDlCcchSduRetxFnlz
12489 RgSchCmnDlRbAllocInfo *allocInfo
/* K&R form of the same signature. */
12492 static Void rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo)
12494 RgSchCmnDlRbAllocInfo *allocInfo;
12498 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12499 RgSchDlRbAlloc *rbAllocInfo;
12500 RgSchDlHqProcCb *hqP;
12503 /* Traverse through the Scheduled Retx List */
12504 node = allocInfo->ccchSduAlloc.schdCcchSduRetxLst.first;
12507 hqP = (RgSchDlHqProcCb *)(node->node);
12509 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
12511 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12513 /* Remove the HqP from cell's ccchSduRetxLst */
12514 cmLListDelFrm(&cmnCellDl->ccchSduRetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
12515 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
12517 /* Fix: syed dlAllocCb reset should be performed.
12518 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12519 rgSCHCmnDlUeResetTemp(ue, hqP);
12521 /* Fix: syed dlAllocCb reset should be performed.
12522 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled retx list: only clear the per-UE temp allocation info. */
12523 node = allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst.first;
12526 hqP = (RgSchDlHqProcCb *)(node->node);
12529 /* reset the UE allocation Information */
12530 rgSCHCmnDlUeResetTemp(ue, hqP);
12536 * @brief This function Processes the Final Allocations
12537 * made by the RB Allocator against the requested
12538 * CCCH Retx Allocations.
12542 * Function: rgSCHCmnDlCcchRetxFnlz
12543 * Purpose: This function Processes the Final Allocations
12544 * made by the RB Allocator against the requested
12545 * CCCH Retx Allocations.
12546 * Scans through the scheduled list of msg4 retrans
12547 * fills the corresponding pdcch, adds the hqProc to
12548 * the corresponding SubFrm and removes the hqP from
12551 * Invoked by: Common Scheduler
12553 * @param[in] RgSchCellCb *cell
12554 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlCcchRetxFnlz: finalizes the RB allocator's decisions for Msg4
 * (CCCH) retransmissions. Scheduled procs get PDCCH/HARQ info filled and
 * are unlinked from the cell's msg4RetxLst; both scheduled and
 * non-scheduled procs have their raCb rbAllocInfo zeroed and the HARQ
 * proc's temporary state reset (guards against stale "zombie" alloc data).
 * NOTE(review): intermediate lines (loop headers, braces) are elided in
 * this listing; comments annotate visible statements only.
 */
12559 static Void rgSCHCmnDlCcchRetxFnlz
12562 RgSchCmnDlRbAllocInfo *allocInfo
12565 static Void rgSCHCmnDlCcchRetxFnlz(cell, allocInfo)
12567 RgSchCmnDlRbAllocInfo *allocInfo;
12571 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12572 RgSchDlRbAlloc *rbAllocInfo;
12573 RgSchDlHqProcCb *hqP;
12576 /* Traverse through the Scheduled Retx List */
12577 node = allocInfo->msg4Alloc.schdMsg4RetxLst.first;
12580 hqP = (RgSchDlHqProcCb *)(node->node);
12581 raCb = hqP->hqE->raCb;
12582 rbAllocInfo = &raCb->rbAllocInfo;
/* Fill PDCCH and HARQ process info for this scheduled Msg4 retx */
12584 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12586 /* Remove the HqP from cell's msg4RetxLst */
12587 cmLListDelFrm(&cmnCellDl->msg4RetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
12588 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
12589 /* Fix: syed dlAllocCb reset should be performed.
12590 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12591 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
12592 rgSCHCmnDlHqPResetTemp(hqP);
12594 /* Fix: syed dlAllocCb reset should be performed.
12595 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Second pass: non-scheduled Msg4 retx list - clear alloc info only */
12596 node = allocInfo->msg4Alloc.nonSchdMsg4RetxLst.first;
12599 hqP = (RgSchDlHqProcCb *)(node->node);
12600 raCb = hqP->hqE->raCb;
12602 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
12603 rgSCHCmnDlHqPResetTemp(hqP);
12610 * @brief This function Processes the Final Allocations
12611 * made by the RB Allocator against the requested
12612 * CCCH SDU tx Allocations.
12616 * Function: rgSCHCmnDlCcchSduTxFnlz
12617 * Purpose: This function Processes the Final Allocations
12618 * made by the RB Allocator against the requested
12619 * CCCH tx Allocations.
12620 * Scans through the scheduled list of CCCH SDU trans
12621 * fills the corresponding pdcch, adds the hqProc to
12622 * the corresponding SubFrm and removes the hqP from
12625 * Invoked by: Common Scheduler
12627 * @param[in] RgSchCellCb *cell
12628 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlCcchSduTxFnlz: finalizes the RB allocator's decisions for
 * fresh CCCH SDU transmissions. Scheduled UEs get PDCCH/HARQ filled, are
 * unlinked from the cell's ccchSduUeLst, and the CCCH logical-channel data
 * is reported to DHM; non-scheduled UEs have their HARQ TB0 released and
 * temporary allocation state reset.
 * NOTE(review): intermediate lines (loop headers, braces) are elided in
 * this listing; comments annotate visible statements only.
 */
12633 static Void rgSCHCmnDlCcchSduTxFnlz
12636 RgSchCmnDlRbAllocInfo *allocInfo
12639 static Void rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo)
12641 RgSchCmnDlRbAllocInfo *allocInfo;
12646 RgSchDlRbAlloc *rbAllocInfo;
12647 RgSchDlHqProcCb *hqP;
12648 RgSchLchAllocInfo lchSchdData;
12650 /* Traverse through the Scheduled Retx List */
12651 node = allocInfo->ccchSduAlloc.schdCcchSduTxLst.first;
12654 hqP = (RgSchDlHqProcCb *)(node->node);
12655 ueCb = hqP->hqE->ue;
12657 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
12659 /* fill the pdcch and HqProc */
12660 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12662 /* Remove the raCb from cell's toBeSchdLst */
12663 cmLListDelFrm(&cell->ccchSduUeLst, &ueCb->ccchSduLnk);
12664 ueCb->ccchSduLnk.node = (PTR)NULLP;
12666 /* Fix : Resetting this required to avoid complication
12667 * in reestablishment case */
12668 ueCb->dlCcchInfo.bo = 0;
12670 /* Indicate DHM of the CCCH LC scheduling */
/* No contention-resolution CE for plain CCCH SDU tx (unlike Msg4) */
12671 hqP->tbInfo[0].contResCe = NOTPRSNT;
12672 lchSchdData.lcId = 0;
/* Scheduled bytes exclude the MAC header overhead */
12673 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
12674 (RGSCH_MSG4_HDRSIZE);
12675 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
12677 /* Fix: syed dlAllocCb reset should be performed.
12678 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12679 rgSCHCmnDlUeResetTemp(ueCb, hqP);
12681 /* Fix: syed dlAllocCb reset should be performed.
12682 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Second pass: non-scheduled CCCH SDU tx list - release HARQ and reset */
12683 node = allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst.first;
12686 hqP = (RgSchDlHqProcCb *)(node->node);
12687 ueCb = hqP->hqE->ue;
12689 /* Release HqProc */
12690 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
12691 /*Fix: Removing releasing of TB1 as it will not exist for CCCH SDU and hence caused a crash*/
12692 /*rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
12693 /* reset the UE allocation Information */
12694 rgSCHCmnDlUeResetTemp(ueCb, hqP);
12701 * @brief This function Processes the Final Allocations
12702 * made by the RB Allocator against the requested
12703 * CCCH tx Allocations.
12707 * Function: rgSCHCmnDlCcchTxFnlz
12708 * Purpose: This function Processes the Final Allocations
12709 * made by the RB Allocator against the requested
12710 * CCCH tx Allocations.
12711 * Scans through the scheduled list of msg4 trans
12712 * fills the corresponding pdcch, adds the hqProc to
12713 * the corresponding SubFrm and removes the hqP from
12716 * Invoked by: Common Scheduler
12718 * @param[in] RgSchCellCb *cell
12719 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlCcchTxFnlz: finalizes the RB allocator's decisions for Msg4
 * (CCCH) fresh transmissions. Scheduled raCbs get PDCCH/HARQ filled, are
 * removed from the RA-info scheduling list, and the contention-resolution
 * CE (plus CCCH data when BO is pending) is indicated to DHM; non-scheduled
 * raCbs have HARQ TB0 released and their allocation info zeroed.
 * NOTE(review): intermediate lines (loop headers, braces) are elided in
 * this listing; comments annotate visible statements only.
 */
12724 static Void rgSCHCmnDlCcchTxFnlz
12727 RgSchCmnDlRbAllocInfo *allocInfo
12730 static Void rgSCHCmnDlCcchTxFnlz(cell, allocInfo)
12732 RgSchCmnDlRbAllocInfo *allocInfo;
12737 RgSchDlRbAlloc *rbAllocInfo;
12738 RgSchDlHqProcCb *hqP;
12739 RgSchLchAllocInfo lchSchdData;
12741 /* Traverse through the Scheduled Retx List */
12742 node = allocInfo->msg4Alloc.schdMsg4TxLst.first;
12745 hqP = (RgSchDlHqProcCb *)(node->node);
12746 raCb = hqP->hqE->raCb;
12748 rbAllocInfo = &raCb->rbAllocInfo;
12750 /* fill the pdcch and HqProc */
12751 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12752 /* MSG4 Fix Start */
12754 rgSCHRamRmvFrmRaInfoSchdLst(cell, raCb);
12757 /* Indicate DHM of the CCCH LC scheduling */
12758 lchSchdData.lcId = 0;
/* Msg4 payload excludes MAC header and contention-resolution CE sizes */
12759 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
12760 (RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE);
12761 /* TRansmitting presence of cont Res CE across MAC-SCH interface to
12762 * identify CCCH SDU transmissions which need to be done
12764 * contention resolution CE*/
12765 hqP->tbInfo[0].contResCe = PRSNT_NODEF;
12766 /*Dont add lc if only cont res CE is being transmitted*/
12767 if(raCb->dlCcchInfo.bo)
12769 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
12774 /* Fix: syed dlAllocCb reset should be performed.
12775 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12776 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
12777 rgSCHCmnDlHqPResetTemp(hqP);
/* Second pass: non-scheduled Msg4 tx list - release HARQ and clear alloc */
12779 node = allocInfo->msg4Alloc.nonSchdMsg4TxLst.first;
12782 hqP = (RgSchDlHqProcCb *)(node->node);
12783 raCb = hqP->hqE->raCb;
12785 rbAllocInfo = &raCb->rbAllocInfo;
12786 /* Release HqProc */
12787 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
12788 /*Fix: Removing releasing of TB1 as it will not exist for MSG4 and hence caused a crash*/
12789 /* rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
12790 /* reset the UE allocation Information */
12791 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
12792 rgSCHCmnDlHqPResetTemp(hqP);
12799 * @brief This function calculates the BI Index to be sent in the Bi header
12803 * Function: rgSCHCmnGetBiIndex
12804 * Purpose: This function uses the previously calculated BI time value
12805 * and the difference between the last BI transmission time and the
12806 * current time to calculate the latest BI index. It also considers how
12807 * many UEs remain unserved in this subframe.
12809 * Invoked by: Common Scheduler
12811 * @param[in] RgSchCellCb *cell
12812 * @param[in] uint32_t ueCount
/*
 * rgSCHCmnGetBiIndex: computes the Backoff Indicator (BI) index to be sent
 * in the RAR BI subheader. Derives a residual backoff from the previously
 * signalled BI value minus the elapsed time, scales it by the count of
 * unserved UEs, then maps the millisecond value to a table index in
 * rgSchCmnBiTbl (Backoff Parameter values, 3GPP TS 36.321 Table 7.2-1).
 * Returns: BI index into rgSchCmnBiTbl.
 * NOTE(review): intermediate lines (braces, do/if bodies, idx updates) are
 * elided in this listing; comments annotate visible statements only.
 */
12817 uint8_t rgSCHCmnGetBiIndex
12823 uint8_t rgSCHCmnGetBiIndex(cell, ueCount)
12828 S16 prevVal = 0; /* To Store Intermediate Value */
12829 uint16_t newBiVal = 0; /* To store Bi Value in millisecond */
12831 uint16_t timeDiff = 0;
/* Only subtract elapsed time if a BI was previously signalled */
12834 if (cell->biInfo.prevBiTime != 0)
12837 if(cell->emtcEnable == TRUE)
12839 timeDiff =(RGSCH_CALC_SF_DIFF_EMTC(cell->crntTime, cell->biInfo.biTime));
12844 timeDiff =(RGSCH_CALC_SF_DIFF(cell->crntTime, cell->biInfo.biTime));
/* Residual backoff still pending from the last signalled BI */
12847 prevVal = cell->biInfo.prevBiTime - timeDiff;
12853 newBiVal = RG_SCH_CMN_GET_BI_VAL(prevVal,ueCount);
12854 /* To be used next time when BI is calculated */
12856 if(cell->emtcEnable == TRUE)
12858 RGSCHCPYTIMEINFO_EMTC(cell->crntTime, cell->biInfo.biTime)
12863 RGSCHCPYTIMEINFO(cell->crntTime, cell->biInfo.biTime)
12866 /* Search the actual BI Index from table Backoff Parameters Value and
12867 * return that Index */
12870 if (rgSchCmnBiTbl[idx] > newBiVal)
12875 }while(idx < RG_SCH_CMN_NUM_BI_VAL-1);
12876 cell->biInfo.prevBiTime = rgSchCmnBiTbl[idx];
12877 /* For 16 Entries in Table 7.2.1 36.321.880 - 3 reserved so total 13 Entries */
12878 return (idx); /* Returning reserved value from table UE treats it has 960 ms */
12879 } /* rgSCHCmnGetBiIndex */
12883 * @brief This function Processes the Final Allocations
12884 * made by the RB Allocator against the requested
12885 * RAR allocations. Assumption: The requested
12886 * allocations are always satisfied completely.
12887 * Hence no roll back.
12891 * Function: rgSCHCmnDlRaRspFnlz
12892 * Purpose: This function Processes the Final Allocations
12893 * made by the RB Allocator against the requested.
12894 * Takes care of PDCCH filling.
12896 * Invoked by: Common Scheduler
12898 * @param[in] RgSchCellCb *cell
12899 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlRaRspFnlz: finalizes RB-allocator results for RAR (Random
 * Access Response) allocations. For each common-PDCCH RAR slot it walks
 * the pending RA requests for the RA-RNTI, creates raCbs, requests Msg3
 * UL grants, fills subframe/PDCCH data, and optionally attaches a Backoff
 * Indicator; empty RARs return their grabbed PDCCH.
 * NOTE(review): intermediate lines (braces, continue/break statements,
 * #ifdef branches) are elided in this listing; comments annotate visible
 * statements only.
 */
12904 static Void rgSCHCmnDlRaRspFnlz
12907 RgSchCmnDlRbAllocInfo *allocInfo
12910 static Void rgSCHCmnDlRaRspFnlz(cell, allocInfo)
12912 RgSchCmnDlRbAllocInfo *allocInfo;
12915 uint32_t rarCnt = 0;
12916 RgSchDlRbAlloc *raRspAlloc;
12917 RgSchDlSf *subFrm = NULLP;
12921 RgSchRaReqInfo *raReq;
12923 RgSchUlAlloc *ulAllocRef=NULLP;
12924 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12925 uint8_t allocRapidCnt = 0;
12927 uint32_t msg3SchdIdx = 0;
12928 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
12929 uint8_t msg3Subfrm;
12933 for (rarCnt=0; rarCnt<RG_SCH_CMN_MAX_CMN_PDCCH; rarCnt++)
12935 raRspAlloc = &allocInfo->raRspAlloc[rarCnt];
12936 /* Having likely condition first for optimization */
/* Slot without a PDCCH grabbed: nothing to finalize for this RAR */
12937 if (!raRspAlloc->pdcch)
12943 subFrm = raRspAlloc->dlSf;
12944 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
12945 /* Corrected RACH handling for multiple RAPIDs per RARNTI */
12946 allocRapidCnt = raRspAlloc->numRapids;
12947 while (allocRapidCnt)
12949 raReq = (RgSchRaReqInfo *)(reqLst->first->node);
12950 /* RACHO: If dedicated preamble, then allocate UL Grant
12951 * (consequence of handover/pdcchOrder) and continue */
12952 if (RGSCH_IS_DEDPRM(cell, raReq->raReq.rapId))
12954 rgSCHCmnHdlHoPo(cell, &subFrm->raRsp[rarCnt].contFreeUeLst,
12956 cmLListDelFrm(reqLst, reqLst->first);
12958 /* ccpu00117052 - MOD - Passing double pointer
12959 for proper NULLP assignment*/
12960 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
12961 sizeof(RgSchRaReqInfo));
12965 if(cell->overLoadBackOffEnab)
12966 {/* rach Overload control is triggered, Skipping this rach */
12967 cmLListDelFrm(reqLst, reqLst->first);
12969 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
12970 sizeof(RgSchRaReqInfo));
12973 /* Attempt to include each RA request into the RSP */
12974 /* Any failure in the procedure is considered to */
12975 /* affect futher allocations in the same TTI. When */
12976 /* a failure happens, we break out and complete */
12977 /* the processing for random access */
12978 if (rgSCHRamCreateRaCb(cell, &raCb, &err) != ROK)
12982 /* Msg3 allocation request to USM */
/* preamGrpA presumably set from this group-A membership test - TODO
 * confirm against full source */
12983 if (raReq->raReq.rapId < cell->rachCfg.sizeRaPreambleGrpA)
12987 /*ccpu00128820 - MOD - Msg3 alloc double delete issue*/
12988 rgSCHCmnMsg3GrntReq(cell, raCb->tmpCrnti, preamGrpA, \
12989 &(raCb->msg3HqProc), &ulAllocRef, &raCb->msg3HqProcId);
12990 if (ulAllocRef == NULLP)
/* No Msg3 grant available: roll back the raCb just created */
12992 rgSCHRamDelRaCb(cell, raCb, TRUE);
12995 if (raReq->raReq.cqiPres)
12997 raCb->ccchCqi = raReq->raReq.cqiIdx;
13001 raCb->ccchCqi = cellDl->ccchCqi;
13003 raCb->rapId = raReq->raReq.rapId;
13004 raCb->ta.pres = TRUE;
13005 raCb->ta.val = raReq->raReq.ta;
13006 raCb->msg3Grnt = ulAllocRef->grnt;
13007 /* Populating the tpc value received */
13008 raCb->msg3Grnt.tpc = raReq->raReq.tpc;
13009 /* PHR handling for MSG3 */
13010 ulAllocRef->raCb = raCb;
13012 /* To the crntTime, add the MIN time at which UE will
13013 * actually send MSG3 i.e DL_DELTA+6 */
13014 raCb->msg3AllocTime = cell->crntTime;
13015 RGSCH_INCR_SUB_FRAME(raCb->msg3AllocTime, RG_SCH_CMN_MIN_MSG3_RECP_INTRVL);
/* TDD-specific Msg3 timing derived from the UL/DL configuration tables */
13017 msg3SchdIdx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) %
13018 RGSCH_NUM_SUB_FRAMES;
13019 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
13020 special subframe */
13021 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][msg3SchdIdx] !=
13022 RG_SCH_TDD_UL_SUBFRAME)
13024 RGSCHCMNADDTOCRNTTIME(cell->crntTime,raCb->msg3AllocTime,
13025 RG_SCH_CMN_DL_DELTA)
13026 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][
13027 raCb->msg3AllocTime.slot];
13028 RGSCHCMNADDTOCRNTTIME(raCb->msg3AllocTime, raCb->msg3AllocTime,
/* raCb accepted: queue it on this RAR occasion and free the request */
13032 cmLListAdd2Tail(&subFrm->raRsp[rarCnt].raRspLst, &raCb->rspLnk);
13033 raCb->rspLnk.node = (PTR)raCb;
13034 cmLListDelFrm(reqLst, reqLst->first);
13036 /* ccpu00117052 - MOD - Passing double pointer
13037 for proper NULLP assignment*/
13038 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
13039 sizeof(RgSchRaReqInfo));
13041 /* SR_RACH_STATS : RAR scheduled */
13046 /* Fill subframe data members */
13047 subFrm->raRsp[rarCnt].raRnti = raRspAlloc->rnti;
13048 subFrm->raRsp[rarCnt].pdcch = raRspAlloc->pdcch;
13049 subFrm->raRsp[rarCnt].tbSz = raRspAlloc->tbInfo[0].bytesAlloc;
13050 /* Fill PDCCH data members */
13051 rgSCHCmnFillPdcch(cell, subFrm->raRsp[rarCnt].pdcch, raRspAlloc);
13054 if(cell->overLoadBackOffEnab)
13055 {/* rach Overload control is triggered, Skipping this rach */
/* Overload: signal the configured backoff value in this RAR */
13056 subFrm->raRsp[rarCnt].backOffInd.pres = PRSNT_NODEF;
13057 subFrm->raRsp[rarCnt].backOffInd.val = cell->overLoadBackOffval;
13062 subFrm->raRsp[rarCnt].backOffInd.pres = NOTPRSNT;
13065 /*[ccpu00125212] Avoiding sending of empty RAR in case of RAR window
13066 is short and UE is sending unauthorised preamble.*/
13067 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
13068 if ((raRspAlloc->biEstmt) && (reqLst->count))
13070 subFrm->raRsp[0].backOffInd.pres = PRSNT_NODEF;
13071 /* Added as part of Upgrade */
13072 subFrm->raRsp[0].backOffInd.val =
13073 rgSCHCmnGetBiIndex(cell, reqLst->count);
13075 /* SR_RACH_STATS : Back Off Inds */
13079 else if ((subFrm->raRsp[rarCnt].raRspLst.first == NULLP) &&
13080 (subFrm->raRsp[rarCnt].contFreeUeLst.first == NULLP))
13082 /* Return the grabbed PDCCH */
13083 rgSCHUtlPdcchPut(cell, &subFrm->pdcchInfo, raRspAlloc->pdcch);
13084 subFrm->raRsp[rarCnt].pdcch = NULLP;
13085 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRaRspAlloc(): "
13086 "Not even one RaReq.");
13090 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId,
13091 "RNTI:%d Scheduled RAR @ (%u,%u) ",
13093 cell->crntTime.sfn,
13094 cell->crntTime.slot);
13100 * @brief This function computes rv.
13104 * Function: rgSCHCmnDlCalcRvForBcch
13105 * Purpose: This function computes rv.
13107 * Invoked by: Common Scheduler
13109 * @param[in] RgSchCellCb *cell
13110 * @param[in] Bool si
13111 * @param[in] uint16_t i
/*
 * rgSCHCmnDlCalcRvForBcch: computes the redundancy version (RV) for
 * BCCH-on-DLSCH transmissions. The visible formula rv = ceil(3k/2) mod 4
 * matches the SI RV rule of 3GPP TS 36.321, with k derived from the SFN
 * for SIB1 (frm.sfn/2 mod 4). The si/i parameters presumably select the
 * SI-message variant of k - TODO confirm against full source (branching
 * lines are elided in this listing).
 */
13116 static uint8_t rgSCHCmnDlCalcRvForBcch
13123 static uint8_t rgSCHCmnDlCalcRvForBcch(cell, si, i)
13130 CmLteTimingInfo frm;
/* Work on the transmission time, DL_DELTA subframes ahead of current */
13132 frm = cell->crntTime;
13133 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
13141 k = (frm.sfn/2) % 4;
13143 rv = RGSCH_CEIL(3*k, 2) % 4;
13148 * @brief This function Processes the Final Allocations
13149 * made by the RB Allocator against the requested
13150 * BCCH/PCCH allocations. Assumption: The requested
13151 * allocations are always satisfied completely.
13152 * Hence no roll back.
13156 * Function: rgSCHCmnDlBcchPcchFnlz
13157 * Purpose: This function Processes the Final Allocations
13158 * made by the RB Allocator against the requested.
13159 * Takes care of PDCCH filling.
13161 * Invoked by: Common Scheduler
13163 * @param[in] RgSchCellCb *cell
13164 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlBcchPcchFnlz: finalizes RB-allocator results for BCCH and PCCH
 * allocations. For a scheduled PCCH it consumes the BO report, fills
 * subframe/PDCCH data and the MAC-SCH interface info; for a scheduled BCCH
 * it copies the SIB1/SI PDU to the interface buffer, computes the RV, and
 * manages the SI retransmission count/BO lifetime.
 * NOTE(review): intermediate lines (braces, #ifdef/#else markers for TDD,
 * HDFDD, RGR_SI_SCH) are elided in this listing; comments annotate visible
 * statements only. The duplicate nextSfIdx declarations below belong to
 * mutually exclusive preprocessor branches.
 */
13169 static Void rgSCHCmnDlBcchPcchFnlz
13172 RgSchCmnDlRbAllocInfo *allocInfo
13175 static Void rgSCHCmnDlBcchPcchFnlz(cell, allocInfo)
13177 RgSchCmnDlRbAllocInfo *allocInfo;
13180 RgSchDlRbAlloc *rbAllocInfo;
13184 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
13186 #ifdef LTEMAC_HDFDD
13187 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13189 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13193 /* Moving variables to available scope for optimization */
13194 RgSchClcDlLcCb *pcch;
13197 RgSchClcDlLcCb *bcch;
13200 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* ---- PCCH finalization ---- */
13204 rbAllocInfo = &allocInfo->pcchAlloc;
13205 if (rbAllocInfo->pdcch)
13207 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
13209 /* Added sfIdx calculation for TDD as well */
13211 #ifdef LTEMAC_HDFDD
13212 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13214 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13217 subFrm = rbAllocInfo->dlSf;
13218 pcch = rgSCHDbmGetPcch(cell);
13221 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnDlBcchPcchFnlz( ): "
13222 "No Pcch Present");
13226 /* Added Dl TB count for paging message transmission*/
13228 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Consume and free the pending BO report for paging */
13230 bo = (RgSchClcBoRpt *)pcch->boLst.first->node;
13231 cmLListDelFrm(&pcch->boLst, &bo->boLstEnt);
13232 /* ccpu00117052 - MOD - Passing double pointer
13233 for proper NULLP assignment*/
13234 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
13235 /* Fill subframe data members */
13236 subFrm->pcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
13237 subFrm->pcch.pdcch = rbAllocInfo->pdcch;
13238 /* Fill PDCCH data members */
13239 rgSCHCmnFillPdcch(cell, subFrm->pcch.pdcch, rbAllocInfo);
13240 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, pcch->lcId, TRUE);
13241 /* ccpu00132314-ADD-Update the tx power allocation info
13242 TODO-Need to add a check for max tx power per symbol */
13243 subfrmAlloc->cmnLcInfo.pcchInfo.txPwrOffset = cellDl->pcchTxPwrOffset;
/* ---- BCCH finalization ---- */
13247 rbAllocInfo = &allocInfo->bcchAlloc;
13248 if (rbAllocInfo->pdcch)
13250 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
13252 #ifdef LTEMAC_HDFDD
13253 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13255 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13258 subFrm = rbAllocInfo->dlSf;
13260 /* Fill subframe data members */
13261 subFrm->bcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
13262 subFrm->bcch.pdcch = rbAllocInfo->pdcch;
13263 /* Fill PDCCH data members */
13264 rgSCHCmnFillPdcch(cell, subFrm->bcch.pdcch, rbAllocInfo);
/* schdFirst: SIB1 path; else: SI-message path (below) */
13266 if(rbAllocInfo->schdFirst)
13269 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
13270 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
13272 /*Copy the SIB1 msg buff into interface buffer */
13273 SCpyMsgMsg(cell->siCb.crntSiInfo.sib1Info.sib1,
13274 rgSchCb[cell->instIdx].rgSchInit.region,
13275 rgSchCb[cell->instIdx].rgSchInit.pool,
13276 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
13277 #endif/*RGR_SI_SCH*/
13278 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
13279 rgSCHCmnDlCalcRvForBcch(cell, FALSE, 0);
13287 i = cell->siCb.siCtx.i;
13288 /*Decrement the retransmission count */
13289 cell->siCb.siCtx.retxCntRem--;
13291 /*Copy the SI msg buff into interface buffer */
13292 if(cell->siCb.siCtx.warningSiFlag == FALSE)
13294 SCpyMsgMsg(cell->siCb.siArray[cell->siCb.siCtx.siId-1].si,
13295 rgSchCb[cell->instIdx].rgSchInit.region,
13296 rgSchCb[cell->instIdx].rgSchInit.pool,
13297 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
/* Warning SI (e.g. ETWS/CMAS-style) uses a separately managed PDU */
13301 pdu = rgSCHUtlGetWarningSiPdu(cell);
13302 RGSCH_NULL_CHECK(cell->instIdx, pdu);
13304 rgSchCb[cell->instIdx].rgSchInit.region,
13305 rgSchCb[cell->instIdx].rgSchInit.pool,
13306 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
13307 if(cell->siCb.siCtx.retxCntRem == 0)
13309 rgSCHUtlFreeWarningSiPdu(cell);
13310 cell->siCb.siCtx.warningSiFlag = FALSE;
13315 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
13316 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
13318 if(bo->retxCnt != cell->siCfg.retxCnt-1)
13323 #endif/*RGR_SI_SCH*/
13324 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
13325 rgSCHCmnDlCalcRvForBcch(cell, TRUE, i);
13328 /* Added Dl TB count for SIB1 and SI messages transmission.
13329 * This counter will be incremented only for the first transmission
13330 * (with RV 0) of these messages*/
13332 if(subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv == 0)
13334 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* BO report is freed only once all retransmissions are exhausted */
13338 if(bo->retxCnt == 0)
13340 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
13341 /* ccpu00117052 - MOD - Passing double pointer
13342 for proper NULLP assignment*/
13343 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
13345 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, bcch->lcId, sendInd);
13347 /*Fill the interface info */
13348 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, NULLD, NULLD);
13350 /* ccpu00132314-ADD-Update the tx power allocation info
13351 TODO-Need to add a check for max tx power per symbol */
13352 subfrmAlloc->cmnLcInfo.bcchInfo.txPwrOffset = cellDl->bcchTxPwrOffset;
13354 /*mBuf has been already copied above */
13355 #endif/*RGR_SI_SCH*/
13368 * Function: rgSCHCmnUlSetAllUnSched
13371 * Invoked by: Common Scheduler
13373 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
/*
 * rgSCHCmnUlSetAllUnSched: moves every UE queued for UL allocation
 * (contention-resolution list, retx list, and new-tx list) to the
 * corresponding non-scheduled list. Used when no UL scheduling can be
 * performed this TTI. Re-reading '.first' after each move is intentional:
 * the move unlinks the node, so the head advances each iteration.
 * NOTE(review): loop constructs around the re-reads are elided in this
 * listing; comments annotate visible statements only.
 */
13378 static Void rgSCHCmnUlSetAllUnSched
13380 RgSchCmnUlRbAllocInfo *allocInfo
13383 static Void rgSCHCmnUlSetAllUnSched(allocInfo)
13384 RgSchCmnUlRbAllocInfo *allocInfo;
/* Drain contention-resolution UE list */
13390 node = allocInfo->contResLst.first;
13393 rgSCHCmnUlMov2NonSchdCntResLst(allocInfo, (RgSchUeCb *)node->node);
13394 node = allocInfo->contResLst.first;
/* Drain retransmission UE list */
13397 node = allocInfo->retxUeLst.first;
13400 rgSCHCmnUlMov2NonSchdRetxUeLst(allocInfo, (RgSchUeCb *)node->node);
13401 node = allocInfo->retxUeLst.first;
/* Drain new-transmission UE list */
13404 node = allocInfo->ueLst.first;
13407 rgSCHCmnUlMov2NonSchdUeLst(allocInfo, (RgSchUeCb *)node->node);
13408 node = allocInfo->ueLst.first;
13420 * Function: rgSCHCmnUlAdd2CntResLst
13423 * Invoked by: Common Scheduler
13425 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13426 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnUlAdd2CntResLst: appends a UE to the UL allocation-info
 * contention-resolution list, linking via the UE's per-cell UL alloc
 * control block and storing the UE pointer in the list node.
 */
13431 Void rgSCHCmnUlAdd2CntResLst
13433 RgSchCmnUlRbAllocInfo *allocInfo,
13437 Void rgSCHCmnUlAdd2CntResLst(allocInfo, ue)
13438 RgSchCmnUlRbAllocInfo *allocInfo;
13442 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,ue->cell))->alloc);
13443 cmLListAdd2Tail(&allocInfo->contResLst, &ulAllocInfo->reqLnk);
13444 ulAllocInfo->reqLnk.node = (PTR)ue;
13453 * Function: rgSCHCmnUlAdd2UeLst
13456 * Invoked by: Common Scheduler
13458 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13459 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnUlAdd2UeLst: appends a UE to the UL allocation-info new-tx UE
 * list. The reqLnk.node NULLP check guards against double-insertion when
 * the UE is already queued on a request list.
 */
13464 Void rgSCHCmnUlAdd2UeLst
13467 RgSchCmnUlRbAllocInfo *allocInfo,
13471 Void rgSCHCmnUlAdd2UeLst(cell, allocInfo, ue)
13473 RgSchCmnUlRbAllocInfo *allocInfo;
13477 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,cell))->alloc);
/* Only enqueue if not already linked on a request list */
13478 if (ulAllocInfo->reqLnk.node == NULLP)
13480 cmLListAdd2Tail(&allocInfo->ueLst, &ulAllocInfo->reqLnk);
13481 ulAllocInfo->reqLnk.node = (PTR)ue;
13491 * Function: rgSCHCmnAllocUlRb
13492 * Purpose: To do RB allocations for uplink
13494 * Invoked by: Common Scheduler
13496 * @param[in] RgSchCellCb *cell
13497 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
/*
 * rgSCHCmnAllocUlRb: entry point for uplink RB allocation. Delegates the
 * new-transmission UE list to rgSCHCmnUlRbAllocForLst, which partitions
 * UEs into scheduled/non-scheduled lists against the UL subframe.
 */
13501 Void rgSCHCmnAllocUlRb
13504 RgSchCmnUlRbAllocInfo *allocInfo
13507 Void rgSCHCmnAllocUlRb(cell, allocInfo)
13509 RgSchCmnUlRbAllocInfo *allocInfo;
13512 RgSchUlSf *sf = allocInfo->sf;
13514 /* Schedule for new transmissions */
13515 rgSCHCmnUlRbAllocForLst(cell, sf, allocInfo->ueLst.count,
13516 &allocInfo->ueLst, &allocInfo->schdUeLst,
13517 &allocInfo->nonSchdUeLst, (Bool)TRUE);
13521 /***********************************************************
13523 * Func : rgSCHCmnUlRbAllocForLst
13525 * Desc : Allocate for a list in cmn rb alloc information passed
13534 **********************************************************/
/*
 * rgSCHCmnUlRbAllocForLst: performs UL RB allocation for a request list.
 * Initializes the scheduled/non-scheduled output lists, records per-UE
 * result info (RNTI, PRB count) in the subframe alloc array for new
 * transmissions, and attempts per-UE allocation until UL holes run out;
 * remaining UEs fall through to the non-scheduled list.
 * NOTE(review): intermediate lines (#ifdef TDD/FDD markers, braces, break
 * statements) are elided in this listing; comments annotate visible
 * statements only.
 */
13536 static Void rgSCHCmnUlRbAllocForLst
13542 CmLListCp *schdLst,
13543 CmLListCp *nonSchdLst,
13547 static Void rgSCHCmnUlRbAllocForLst(cell, sf, count, reqLst, schdLst,
13548 nonSchdLst, isNewTx)
13553 CmLListCp *schdLst;
13554 CmLListCp *nonSchdLst;
13563 CmLteTimingInfo timeInfo;
/* Lazily initialize output lists if not already populated */
13567 if(schdLst->count == 0)
13569 cmLListInit(schdLst);
13572 cmLListInit(nonSchdLst);
13574 if(isNewTx == TRUE)
13576 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.numUes = (uint8_t) count;
/* TDD branch: PUSCH timing from the k-table; FDD branch: fixed delta */
13578 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime, timeInfo, TFU_ULCNTRL_DLDELTA);
13579 k = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][timeInfo.subframe];
13580 RG_SCH_ADD_TO_CRNT_TIME(timeInfo,
13581 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo, k);
13583 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime,cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo,
13584 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
/* Main allocation loop over the request list */
13589 for (lnk = reqLst->first; count; lnk = lnk->next, --count)
13591 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
13592 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
/* No free UL hole left: stop allocating, rest go non-scheduled below */
13597 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
13602 ueUl->subbandShare = ueUl->subbandRequired;
13603 if(isNewTx == TRUE)
13605 maxRb = RGSCH_MIN((ueUl->subbandRequired * MAX_5GTF_VRBG_SIZE), ue->ue5gtfCb.maxPrb);
13607 ret = rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole);
13610 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, schdLst);
13611 rgSCHCmnUlUeFillAllocInfo(cell, ue);
13615 gUl5gtfRbAllocFail++;
13616 #if defined (TENB_STATS) && defined (RG_5GTF)
13617 cell->tenbStats->sch.ul5gtfRbAllocFail++;
13619 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
13620 ue->isMsg4PdcchWithCrnti = FALSE;
13621 ue->isSrGrant = FALSE;
13624 if(isNewTx == TRUE)
13626 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
13627 ulAllocInfo[count - 1].rnti = ue->ueId;
13628 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
13629 ulAllocInfo[count - 1].numPrb = ue->ul.nPrb;
13632 ueUl->subbandShare = 0; /* This reset will take care of
13633 * all scheduler types */
/* Leftover UEs (loop exited early): mark all as non-scheduled */
13635 for (; count; lnk = lnk->next, --count)
13637 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
13638 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
13639 ue->isMsg4PdcchWithCrnti = FALSE;
13646 /***********************************************************
13648 * Func : rgSCHCmnUlMdfyGrntForCqi
13650 * Desc : Modify UL Grant to consider presence of
13651 * CQI along with PUSCH Data.
13656 * - Scale down iTbs based on betaOffset and
13657 * size of Acqi Size.
13658 * - Optionally attempt to increase numSb by 1
13659 * if input payload size does not fit in due
13660 * to reduced tbSz as a result of iTbsNew.
13664 **********************************************************/
/*
 * rgSCHCmnUlMdfyGrntForCqi: adjusts an UL grant so that aperiodic CQI/RI
 * and HARQ feedback multiplexed on PUSCH still leave the data within the
 * target spectral efficiency. Iteratively alternates between adding a
 * subband (up to maxRb) and stepping iTbs down by stepDownItbs until the
 * effective bits-per-RE/modulation-order ratio meets effTgt; returns
 * failure when neither adjustment can satisfy it.
 * NOTE(review): intermediate lines (braces, RETVALUE/return statements,
 * out-parameter write-back) are elided in this listing; comments annotate
 * visible statements only.
 */
13666 static S16 rgSCHCmnUlMdfyGrntForCqi
13674 uint32_t stepDownItbs,
13678 static S16 rgSCHCmnUlMdfyGrntForCqi(cell, ue, maxRb, numSb, iTbs, hqSz, stepDownItbs, effTgt)
13685 uint32_t stepDownItbs;
13689 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(ue->cell);
13694 uint32_t remREsForPusch;
13695 uint32_t bitsPerRe;
13697 uint32_t betaOffVal = ue->ul.betaOffstVal;
13698 uint32_t cqiRiRptSz = ue->ul.cqiRiSz;
13699 uint32_t betaOffHqVal = rgSchCmnBetaHqOffstTbl[ue->ul.betaHqOffst];
13700 uint32_t resNumSb = *numSb;
13701 uint32_t puschEff = 1000;
13704 Bool mdfyiTbsFlg = FALSE;
13705 uint8_t resiTbs = *iTbs;
/* Derive modulation order from iTbs, capped per UE category */
13711 iMcs = rgSCHCmnUlGetIMcsFrmITbs(resiTbs, RG_SCH_CMN_GET_UE_CTGY(ue));
13712 RG_SCH_UL_MCS_TO_MODODR(iMcs, modOdr);
13713 if (RG_SCH_CMN_GET_UE_CTGY(ue) != CM_LTE_UE_CAT_5)
13715 modOdr = RGSCH_MIN(RGSCH_QM_QPSK, modOdr);
13719 modOdr = RGSCH_MIN(RGSCH_QM_64QAM, modOdr);
13721 nPrb = resNumSb * cellUl->sbSize;
13722 /* Restricting the minumum iTbs requried to modify to 10 */
13723 if ((nPrb >= maxRb) && (resiTbs <= 10))
13725 /* Could not accomodate ACQI */
13728 totREs = nPrb * RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
13729 tbSz = rgTbSzTbl[0][resiTbs][nPrb-1];
13730 /* totalREs/tbSz = num of bits perRE. */
13731 cqiRiREs = (totREs * betaOffVal * cqiRiRptSz)/(1000 * tbSz); /* betaOffVal is represented
13732 as parts per 1000 */
13733 hqREs = (totREs * betaOffHqVal * hqSz)/(1000 * tbSz);
/* Only compute PUSCH efficiency if control REs leave room for data */
13734 if ((cqiRiREs + hqREs) < totREs)
13736 remREsForPusch = totREs - cqiRiREs - hqREs;
13737 bitsPerRe = (tbSz * 1000)/remREsForPusch; /* Multiplying by 1000 for Interger Oper */
13738 puschEff = bitsPerRe/modOdr;
13740 if (puschEff < effTgt)
13742 /* ensure resultant efficiency for PUSCH Data is within 0.93*/
13747 /* Alternate between increasing SB or decreasing iTbs until eff is met */
13748 if (mdfyiTbsFlg == FALSE)
13752 resNumSb = resNumSb + 1;
13754 mdfyiTbsFlg = TRUE;
13760 resiTbs-= stepDownItbs;
13762 mdfyiTbsFlg = FALSE;
13765 }while (1); /* Loop breaks if efficency is met
13766 or returns RFAILED if not able to meet the efficiency */
13775 /***********************************************************
13777 * Func : rgSCHCmnUlRbAllocForUe
13779 * Desc : Do uplink RB allocation for an UE.
13783 * Notes: Note that as of now, for retx, maxRb
13784 * is not considered. Alternatives, such
13785 * as dropping retx if it crosses maxRb
13786 * could be considered.
13790 **********************************************************/
/*
 * rgSCHCmnUlRbAllocForUe: allocates uplink resources for one UE (5GTF
 * path). Obtains a HARQ process and a PDCCH, carves VRBGs out of the UL
 * subframe's per-beam pool, and fills the grant (RIV, MCS, modulation
 * order, HARQ proc id, allocated bytes). Returns failure (elided lines)
 * when HARQ proc, PDCCH, or subband allocation is unavailable; the PDCCH
 * is released on subband-allocation failure to avoid leaking it.
 * NOTE(review): intermediate lines (braces, RETVALUE/return statements,
 * #ifdef markers) are elided in this listing; comments annotate visible
 * statements only.
 */
13792 static S16 rgSCHCmnUlRbAllocForUe
13801 static S16 rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole)
13809 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
13810 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
13811 RgSchUlAlloc *alloc = NULLP;
13817 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->schdHqProcIdx];
13819 RgSchUlHqProcCb *proc = NULLP;
13823 uint8_t numVrbgTemp;
13825 TfuDciFormat dciFrmt;
/* Acquire an available UL HARQ process; elided branch bails out if none */
13830 rgSCHUhmGetAvlHqProc(cell, ue, &proc);
13833 //printf("UE [%d] HQ Proc unavailable\n", ue->ueId);
/* DCI format selection: A2 for rank-2, A1 otherwise */
13838 if (ue->ue5gtfCb.rank == 2)
13840 dciFrmt = TFU_DCI_FORMAT_A2;
13845 dciFrmt = TFU_DCI_FORMAT_A1;
13848 /* 5gtf TODO : To pass dci frmt to this function */
13849 pdcch = rgSCHCmnPdcchAllocCrntSf(cell, ue);
13852 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
13853 "rgSCHCmnUlRbAllocForUe(): Could not get PDCCH for CRNTI:%d",ue->ueId);
13856 gUl5gtfPdcchSchd++;
13857 #if defined (TENB_STATS) && defined (RG_5GTF)
13858 cell->tenbStats->sch.ul5gtfPdcchSchd++;
13861 //TODO_SID using configured prb as of now
13862 nPrb = ue->ue5gtfCb.maxPrb;
13863 reqVrbg = nPrb/MAX_5GTF_VRBG_SIZE;
13864 iMcs = ue->ue5gtfCb.mcs; //gSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Sanity check on the per-beam VRBG bookkeeping */
13868 if((sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart > MAX_5GTF_VRBG)
13869 || (sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated > MAX_5GTF_VRBG))
13871 printf("5GTF_ERROR vrbg > 25 valstart = %d valalloc %d\n", sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart
13872 , sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated);
13877 /*TODO_SID: Workaround for alloc. Currently alloc is ulsf based. To handle multiple beams, we need a different
13878 design. Now alloc are formed based on MAX_5GTF_UE_SCH macro. */
13879 numVrbgTemp = MAX_5GTF_VRBG/MAX_5GTF_UE_SCH;
13882 alloc = rgSCHCmnUlSbAlloc(sf, numVrbgTemp,\
13885 if (alloc == NULLP)
13887 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
13888 "rgSCHCmnUlRbAllocForUe(): Could not get UlAlloc %d CRNTI:%d",numVrbg,ue->ueId);
/* Release the PDCCH grabbed above so it is not leaked on failure */
13889 rgSCHCmnPdcchRlsCrntSf(cell, pdcch);
13892 gUl5gtfAllocAllocated++;
13893 #if defined (TENB_STATS) && defined (RG_5GTF)
13894 cell->tenbStats->sch.ul5gtfAllocAllocated++;
/* Record the grant's VRBG span and advance the per-beam cursor */
13896 alloc->grnt.vrbgStart = sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart;
13897 alloc->grnt.numVrbg = numVrbg;
13898 alloc->grnt.numLyr = numLyr;
13899 alloc->grnt.dciFrmt = dciFrmt;
13901 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart += numVrbg;
13902 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated += numVrbg;
13904 //rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
13906 sf->totPrb += alloc->grnt.numRb;
13907 ue->ul.nPrb = alloc->grnt.numRb;
13909 if (ue->csgMmbrSta != TRUE)
13911 cellUl->ncsgPrbCnt += alloc->grnt.numRb;
13913 cellUl->totPrbCnt += (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
13914 alloc->pdcch = pdcch;
13915 alloc->grnt.iMcs = iMcs;
13916 alloc->grnt.iMcsCrnt = iMcsCrnt;
13917 alloc->grnt.hop = 0;
13918 /* Initial Num RBs support for UCI on PUSCH */
13920 ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
13922 alloc->forMsg3 = FALSE;
13923 //RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTb5gtfSzTbl[0], (iTbs));
13925 //ueUl->alloc.allocdBytes = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
13926 /* TODO_SID Allocating based on configured MCS as of now.
13927 Currently for format A2. When doing multi grp per tti, need to update this. */
13928 ueUl->alloc.allocdBytes = (rgSch5gtfTbSzTbl[iMcs]/8) * ue->ue5gtfCb.rank;
13930 alloc->grnt.datSz = ueUl->alloc.allocdBytes;
13931 //TODO_SID Need to check mod order.
13932 RG_SCH_CMN_TBS_TO_MODODR(iMcs, alloc->grnt.modOdr);
13933 //alloc->grnt.modOdr = 6;
13934 alloc->grnt.isRtx = FALSE;
13936 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
13937 alloc->grnt.SCID = 0;
13938 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
13939 alloc->grnt.PMI = 0;
13940 alloc->grnt.uciOnxPUSCH = 0;
13941 alloc->grnt.hqProcId = proc->procId;
13943 alloc->hqProc = proc;
13944 alloc->hqProc->ulSfIdx = cellUl->schdIdx;
13946 /*commenting to retain the rnti used for transmission SPS/c-rnti */
13947 alloc->rnti = ue->ueId;
13948 ueUl->alloc.alloc = alloc;
13949 /*rntiwari-Adding the debug for generating the graph.*/
13950 /* No grant attr recorded now */
13954 /***********************************************************
13956 * Func : rgSCHCmnUlRbAllocAddUeToLst
13958 * Desc : Add UE to list (scheduled/non-scheduled list)
13959 * for UL RB allocation information.
13967 **********************************************************/
/* NOTE(review): embedded line numbers jump — lines (params, braces, #endif,
 * return) are elided from this chunk; code kept byte-identical. */
13969 Void rgSCHCmnUlRbAllocAddUeToLst
13976 Void rgSCHCmnUlRbAllocAddUeToLst(cell, ue, lst)
/* Fetch the common-scheduler UL UE control block for this cell. */
13982 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Global 5GTF debug counter; per-cell stats only under TENB_STATS+RG_5GTF. */
13985 gUl5gtfUeRbAllocDone++;
13986 #if defined (TENB_STATS) && defined (RG_5GTF)
13987 cell->tenbStats->sch.ul5gtfUeRbAllocDone++;
/* Append the UE's alloc link to the caller-supplied list and back-point
 * the link node at the UE so list walkers can recover the UE. */
13989 cmLListAdd2Tail(lst, &ueUl->alloc.schdLstLnk);
13990 ueUl->alloc.schdLstLnk.node = (PTR)ue;
13995 * @brief This function Processes the Final Allocations
13996 * made by the RB Allocator against the requested.
14000 * Function: rgSCHCmnUlAllocFnlz
14001 * Purpose: This function Processes the Final Allocations
14002 * made by the RB Allocator against the requested.
14004 * Invoked by: Common Scheduler
14006 * @param[in] RgSchCellCb *cell
14007 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Thin dispatcher: delegates UL alloc
 * finalization to the cell's configured specific UL scheduler. */
14012 static Void rgSCHCmnUlAllocFnlz
14015 RgSchCmnUlRbAllocInfo *allocInfo
14018 static Void rgSCHCmnUlAllocFnlz(cell, allocInfo)
14020 RgSchCmnUlRbAllocInfo *allocInfo;
14023 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14025 /* call scheduler specific Finalization */
14026 cellSch->apisUl->rgSCHUlAllocFnlz(cell, allocInfo);
14032 * @brief This function Processes the Final Allocations
14033 * made by the RB Allocator against the requested.
14037 * Function: rgSCHCmnDlAllocFnlz
14038 * Purpose: This function Processes the Final Allocations
14039 * made by the RB Allocator against the requested.
14041 * Invoked by: Common Scheduler
14043 * @param[in] RgSchCellCb *cell
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Finalizes DL allocations in a fixed
 * order: CCCH retx/tx, CCCH-SDU retx/tx, RA response, then the specific
 * DL scheduler's own finalization hook. */
14048 Void rgSCHCmnDlAllocFnlz
14053 Void rgSCHCmnDlAllocFnlz(cell)
14057 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* allocInfo lives inside the common-scheduler cell block, not on the stack. */
14058 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
14061 rgSCHCmnDlCcchRetxFnlz(cell, allocInfo);
14062 rgSCHCmnDlCcchTxFnlz(cell, allocInfo);
14064 /* Added below functions for handling CCCH SDU transmission received
14066 * * guard timer expiry*/
14067 rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo);
14068 rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo);
14070 rgSCHCmnDlRaRspFnlz(cell, allocInfo);
14071 /* call scheduler specific Finalization */
14072 cellSch->apisDl->rgSCHDlAllocFnlz(cell, allocInfo);
14074 /* Stack Crash problem for TRACE5 Changes. Added the return below */
14081 * @brief Update an uplink subframe.
14085 * Function : rgSCHCmnUlUpdSf
14087 * For each allocation
14088 * - if no more tx needed
14089 * - Release allocation
14091 * - Perform retransmission
14093 * @param[in] RgSchUlSf *sf
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Drains sf->allocs: allocs whose HARQ
 * proc got CRC or exhausted retx are (per the elided branch) done;
 * otherwise the alloc is queued for adaptive retx. Every alloc is then
 * released and the SF reset for fresh allocations. */
14097 static Void rgSCHCmnUlUpdSf
14100 RgSchCmnUlRbAllocInfo *allocInfo,
14104 static Void rgSCHCmnUlUpdSf(cell, allocInfo, sf)
14106 RgSchCmnUlRbAllocInfo *allocInfo;
/* Loop until the alloc list is empty; rgSCHCmnUlRlsUlAlloc below unlinks
 * the head each iteration, so this terminates. */
14112 while ((lnk = sf->allocs.first))
14114 RgSchUlAlloc *alloc = (RgSchUlAlloc *)lnk->node;
/* CRC received or no retransmissions left => transmission lifecycle over. */
14117 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
14122 /* If need to handle all retx together, run another loop separately */
14123 rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc);
14125 rgSCHCmnUlRlsUlAlloc(cell, sf, alloc);
14128 /* By this time, all allocs would have been cleared and
14129 * SF is reset to be made ready for new allocations. */
14130 rgSCHCmnUlSfReset(cell, sf);
14131 /* In case there are timing problems due to msg3
14132 * allocations being done in advance, (which will
14133 * probably happen with the current FDD code that
14134 * handles 8 subframes) one solution
14135 * could be to hold the (recent) msg3 allocs in a separate
14136 * list, and then possibly add that to the actual
14137 * list later. So at this time while allocations are
14138 * traversed, the recent msg3 ones are not seen. Anytime after
14139 * this (a good time is when the usual allocations
14140 * are made), msg3 allocations could be transferred to the
14141 * normal list. Not doing this now as it is assumed
14142 * that incorporation of TDD shall take care of this.
14150 * @brief Handle uplink allocation for retransmission.
14154 * Function : rgSCHCmnUlHndlAllocRetx
14156 * Processing Steps:
14157 * - Add to queue for retx.
14158 * - Do not release here, release happends as part
14159 * of the loop that calls this function.
14161 * @param[in] RgSchCellCb *cell
14162 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
14163 * @param[in] RgSchUlSf *sf
14164 * @param[in] RgSchUlAlloc *alloc
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Two paths: normal UE allocs are queued
 * for adaptive retx scheduling; Msg3 allocs are retransmitted in place by
 * cloning the grant into a freshly obtained alloc on the same SF. */
14168 static Void rgSCHCmnUlHndlAllocRetx
14171 RgSchCmnUlRbAllocInfo *allocInfo,
14173 RgSchUlAlloc *alloc
14176 static Void rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc)
14178 RgSchCmnUlRbAllocInfo *allocInfo;
14180 RgSchUlAlloc *alloc;
14184 RgSchCmnUlUe *ueUl;
/* TB size (bytes) of the original grant, recomputed from iMcs/numRb.
 * NOTE(review): the assignment target of this expression is on an elided
 * line (presumably `bytes =`) — confirm against full source. */
14186 rgTbSzTbl[0][rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs)]\
14187 [alloc->grnt.numRb-1]/8;
14188 if (!alloc->forMsg3)
14190 ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue);
14191 ueUl->alloc.reqBytes = bytes;
14192 rgSCHUhmRetx(alloc->hqProc);
/* Adaptive retx: defer actual RB allocation to the retx UE list pass. */
14193 rgSCHCmnUlAdd2RetxUeLst(allocInfo, alloc->ue);
14197 /* RACHO msg3 retx handling. Part of RACH procedure changes. */
14198 retxAlloc = rgSCHCmnUlGetUlAlloc(cell, sf, alloc->numSb);
14199 if (retxAlloc == NULLP)
14201 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
14202 "rgSCHCmnUlRbAllocForUe():Could not get UlAlloc for msg3Retx RNTI:%d",
/* Clone grant parameters; iMcsCrnt is derived from the HARQ RV index so
 * the retx signals the correct redundancy version. */
14206 retxAlloc->grnt.iMcs = alloc->grnt.iMcs;
14207 retxAlloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl\
14208 [alloc->hqProc->rvIdx];
14209 retxAlloc->grnt.nDmrs = 0;
14210 retxAlloc->grnt.hop = 0;
14211 retxAlloc->grnt.delayBit = 0;
14212 retxAlloc->rnti = alloc->rnti;
/* Msg3 retx is non-adaptive: no UE ctx, no PDCCH needed. */
14213 retxAlloc->ue = NULLP;
14214 retxAlloc->pdcch = FALSE;
14215 retxAlloc->forMsg3 = TRUE;
14216 retxAlloc->raCb = alloc->raCb;
14217 retxAlloc->hqProc = alloc->hqProc;
14218 rgSCHUhmRetx(retxAlloc->hqProc);
14225 * @brief Uplink Scheduling Handler.
14229 * Function: rgSCHCmnUlAlloc
14230 * Purpose: This function Handles Uplink Scheduling.
14232 * Invoked by: Common Scheduler
14234 * @param[in] RgSchCellCb *cell
/* NOTE(review): embedded line numbers jump — lines (braces, #ifdefs,
 * returns) are elided from this chunk; code kept byte-identical.
 * Per-TTI UL pipeline: init alloc info -> pick UL SF -> SPS -> hole-DB
 * sanity/re-init on fresh SF -> adaptive retx -> release failed retx
 * procs -> specific-scheduler UE selection -> RB allocation -> PUSCH
 * group power control -> DRX inactivity -> finalization. */
14237 /* ccpu00132653- The definition of this function made common for TDD and FDD*/
14239 static Void rgSCHCmnUlAlloc
14244 static Void rgSCHCmnUlAlloc(cell)
14248 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14249 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
14250 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
14251 RgSchCmnUlRbAllocInfo allocInfo;
14252 RgSchCmnUlRbAllocInfo *allocInfoRef = &allocInfo;
14259 /* Initializing RgSchCmnUlRbAllocInfo structure */
14260 rgSCHCmnInitUlRbAllocInfo(allocInfoRef);
14262 /* Get Uplink Subframe */
14263 allocInfoRef->sf = &cellUl->ulSfArr[cellUl->schdIdx];
14265 /* initializing the UL PRB count */
14266 allocInfoRef->sf->totPrb = 0;
14270 rgSCHCmnSpsUlTti(cell, allocInfoRef);
/* Fresh (empty) SF: re-seed the hole DB / subband count for the current
 * CFI before any allocation. */
14273 if(*allocInfoRef->sf->allocCountRef == 0)
14277 if ((hole = rgSCHUtlUlHoleFirst(allocInfoRef->sf)) != NULLP)
14279 /* Sanity check of holeDb */
14280 if (allocInfoRef->sf->holeDb->count == 1 && hole->start == 0)
14282 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
14283 /* Re-Initialize available subbands because of CFI change*/
14284 allocInfoRef->sf->availSubbands = cell->dynCfiCb.\
14285 bwInfo[cellDl->currCfi].numSb;
14286 /*Currently initializing 5gtf ulsf specific initialization here.
14287 need to do at proper place */
14289 allocInfoRef->sf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
14290 allocInfoRef->sf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
/* Reset per-beam VRBG bookkeeping for all 5GTF beams. */
14291 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
14293 allocInfoRef->sf->sfBeamInfo[idx].totVrbgAllocated = 0;
14294 allocInfoRef->sf->sfBeamInfo[idx].totVrbgRequired = 0;
14295 allocInfoRef->sf->sfBeamInfo[idx].vrbgStart = 0;
14301 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
14302 "Error! holeDb sanity check failed");
14307 /* Fix: Adaptive re-transmissions prioritised over other transmissions */
14308 /* perform adaptive retransmissions */
14309 rgSCHCmnUlSfReTxAllocs(cell, allocInfoRef->sf);
14313 /* Fix: syed Adaptive Msg3 Retx crash. Release all
14314 Harq processes for which adap Retx failed, to avoid
14315 blocking. This step should be done before New TX
14316 scheduling to make hqProc available. Right now we
14317 dont check if proc is in adap Retx list for considering
14318 it to be available. But now with this release that
14319 functionality would be correct. */
14321 rgSCHCmnUlSfRlsRetxProcs(cell, allocInfoRef->sf);
14324 /* Specific UL scheduler to perform UE scheduling */
14325 cellSch->apisUl->rgSCHUlSched(cell, allocInfoRef);
14327 /* Call UL RB allocator module */
14328 rgSCHCmnAllocUlRb(cell, allocInfoRef);
14330 /* Do group power control for PUSCH */
14331 rgSCHCmnGrpPwrCntrlPusch(cell, allocInfoRef->sf);
14333 cell->sc.apis->rgSCHDrxStrtInActvTmrInUl(cell);
14335 rgSCHCmnUlAllocFnlz(cell, allocInfoRef);
/* 5GTF debug: zero the scheduling counters every 5000 TTIs. */
14336 if(5000 == g5gtfTtiCnt)
14338 ul5gtfsidDlAlreadyMarkUl = 0;
14339 ul5gtfsidDlSchdPass = 0;
14340 ul5gtfsidUlMarkUl = 0;
14341 ul5gtfTotSchdCnt = 0;
14349 * @brief send Subframe Allocations.
14353 * Function: rgSCHCmnSndCnsldtInfo
14354 * Purpose: Send the scheduled
14355 * allocations to MAC for StaInd generation to Higher layers and
14356 * for MUXing. PST's RgInfSfAlloc to MAC instance.
14358 * Invoked by: Common Scheduler
14360 * @param[in] RgSchCellCb *cell
/* NOTE(review): embedded line numbers jump — lines (#ifdef branches,
 * braces) are elided from this chunk; code kept byte-identical.
 * Posts the current SF's consolidated allocation to MAC only when at
 * least one of the listed allocation categories is non-empty; then
 * advances the circular SF-alloc index (two variants appear — which one
 * is active depends on elided preprocessor guards). */
14364 Void rgSCHCmnSndCnsldtInfo
14369 Void rgSCHCmnSndCnsldtInfo(cell)
14373 RgInfSfAlloc *subfrmAlloc;
14375 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14378 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
14380 /* Send the allocations to MAC for MUXing */
14381 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
14382 subfrmAlloc->cellId = cell->cellId;
14383 /* Populate the List of UEs needing PDB-based Flow control */
14384 cellSch->apisDl->rgSCHDlFillFlwCtrlInfo(cell, subfrmAlloc);
/* Two near-identical guard conditions; the second omits ulUeInfo —
 * selection between them is by elided #ifdef (likely EMTC build). */
14386 if((subfrmAlloc->rarInfo.numRaRntis) ||
14388 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
14389 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
14390 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
14392 (subfrmAlloc->ueInfo.numUes) ||
14393 (subfrmAlloc->cmnLcInfo.bitMask) ||
14394 (subfrmAlloc->ulUeInfo.numUes) ||
14395 (subfrmAlloc->flowCntrlInfo.numUes))
14397 if((subfrmAlloc->rarInfo.numRaRntis) ||
14399 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
14400 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
14401 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
14403 (subfrmAlloc->ueInfo.numUes) ||
14404 (subfrmAlloc->cmnLcInfo.bitMask) ||
14405 (subfrmAlloc->flowCntrlInfo.numUes))
14408 RgSchMacSfAlloc(&pst, subfrmAlloc);
14411 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_NUM_SUB_FRAMES;
14413 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_SF_ALLOC_SIZE;
14419 * @brief Consolidate Subframe Allocations.
14423 * Function: rgSCHCmnCnsldtSfAlloc
14424 * Purpose: Consolidate Subframe Allocations.
14426 * Invoked by: Common Scheduler
14428 * @param[in] RgSchCellCb *cell
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Fills the RgInfSfAlloc for the DL SF
 * at crntTime + RG_SCH_CMN_DL_DELTA (RAR info + UE info), collects
 * DRX-inactive UE lists as a side effect, marks those UEs inactive in
 * the specific schedulers, and (re)starts their DRX inactivity timers. */
14432 Void rgSCHCmnCnsldtSfAlloc
14437 Void rgSCHCmnCnsldtSfAlloc(cell)
14441 RgInfSfAlloc *subfrmAlloc;
14442 CmLteTimingInfo frm;
14444 CmLListCp dlDrxInactvTmrLst;
14445 CmLListCp dlInActvLst;
14446 CmLListCp ulInActvLst;
14447 RgSchCmnCell *cellSch = NULLP;
/* Scratch lists populated by rgSCHUtlFillRgInfUeInfo below. */
14450 cmLListInit(&dlDrxInactvTmrLst);
14451 cmLListInit(&dlInActvLst);
14452 cmLListInit(&ulInActvLst);
14454 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
14456 /* Get Downlink Subframe */
14457 frm = cell->crntTime;
14458 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
14459 dlSf = rgSCHUtlSubFrmGet(cell, frm);
14461 /* Fill the allocation Info */
14462 rgSCHUtlFillRgInfRarInfo(dlSf, subfrmAlloc, cell);
14465 rgSCHUtlFillRgInfUeInfo(dlSf, cell, &dlDrxInactvTmrLst,
14466 &dlInActvLst, &ulInActvLst);
14467 #ifdef RG_PFS_STATS
14468 cell->totalPrb += dlSf->bwAssigned;
14470 /* Mark the following Ues inactive for UL*/
14471 cellSch = RG_SCH_CMN_GET_CELL(cell);
14473 /* Calling Scheduler specific function with DRX inactive UE list*/
14474 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInActvLst);
14475 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInActvLst);
14478 /*re/start DRX inactivity timer for the UEs*/
14479 (Void)rgSCHDrxStrtInActvTmr(cell,&dlDrxInactvTmrLst,RG_SCH_DRX_DL);
14485 * @brief Initialize the DL Allocation Information Structure.
14489 * Function: rgSCHCmnInitDlRbAllocInfo
14490 * Purpose: Initialize the DL Allocation Information Structure.
14492 * Invoked by: Common Scheduler
14494 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Pure initialization: zeroes the
 * PCCH/BCCH/RA-response alloc records and initializes every Msg4,
 * CCCH-SDU, dedicated, TX+RETX, SPS, LAA and error-indication list. */
14498 static Void rgSCHCmnInitDlRbAllocInfo
14500 RgSchCmnDlRbAllocInfo *allocInfo
14503 static Void rgSCHCmnInitDlRbAllocInfo(allocInfo)
14504 RgSchCmnDlRbAllocInfo *allocInfo;
14507 memset(&allocInfo->pcchAlloc, 0, sizeof(RgSchDlRbAlloc));
14508 memset(&allocInfo->bcchAlloc, 0, sizeof(RgSchDlRbAlloc));
14509 memset(allocInfo->raRspAlloc, 0, RG_SCH_CMN_MAX_CMN_PDCCH*sizeof(RgSchDlRbAlloc));
/* Msg4 (contention resolution) scheduling lists. */
14511 allocInfo->msg4Alloc.msg4DlSf = NULLP;
14512 cmLListInit(&allocInfo->msg4Alloc.msg4TxLst);
14513 cmLListInit(&allocInfo->msg4Alloc.msg4RetxLst);
14514 cmLListInit(&allocInfo->msg4Alloc.schdMsg4TxLst);
14515 cmLListInit(&allocInfo->msg4Alloc.schdMsg4RetxLst);
14516 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4TxLst);
14517 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4RetxLst);
/* CCCH SDU scheduling lists. */
14519 allocInfo->ccchSduAlloc.ccchSduDlSf = NULLP;
14520 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduTxLst);
14521 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduRetxLst);
14522 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduTxLst);
14523 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduRetxLst);
14524 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst);
14525 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst);
/* Dedicated (per-UE HARQ proc) scheduling lists. */
14528 allocInfo->dedAlloc.dedDlSf = NULLP;
14529 cmLListInit(&allocInfo->dedAlloc.txHqPLst);
14530 cmLListInit(&allocInfo->dedAlloc.retxHqPLst);
14531 cmLListInit(&allocInfo->dedAlloc.schdTxHqPLst);
14532 cmLListInit(&allocInfo->dedAlloc.schdRetxHqPLst);
14533 cmLListInit(&allocInfo->dedAlloc.nonSchdTxHqPLst);
14534 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxHqPLst);
14536 cmLListInit(&allocInfo->dedAlloc.txRetxHqPLst);
14537 cmLListInit(&allocInfo->dedAlloc.schdTxRetxHqPLst);
14538 cmLListInit(&allocInfo->dedAlloc.nonSchdTxRetxHqPLst);
/* SPS HARQ proc lists (under elided SPS build guard, presumably). */
14540 cmLListInit(&allocInfo->dedAlloc.txSpsHqPLst);
14541 cmLListInit(&allocInfo->dedAlloc.retxSpsHqPLst);
14542 cmLListInit(&allocInfo->dedAlloc.schdTxSpsHqPLst);
14543 cmLListInit(&allocInfo->dedAlloc.schdRetxSpsHqPLst);
14544 cmLListInit(&allocInfo->dedAlloc.nonSchdTxSpsHqPLst);
14545 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxSpsHqPLst);
14549 rgSCHLaaCmnInitDlRbAllocInfo (allocInfo);
14552 cmLListInit(&allocInfo->dedAlloc.errIndTxHqPLst);
14553 cmLListInit(&allocInfo->dedAlloc.schdErrIndTxHqPLst);
14554 cmLListInit(&allocInfo->dedAlloc.nonSchdErrIndTxHqPLst);
14559 * @brief Initialize the UL Allocation Information Structure.
14563 * Function: rgSCHCmnInitUlRbAllocInfo
14564 * Purpose: Initialize the UL Allocation Information Structure.
14566 * Invoked by: Common Scheduler
14568 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Pure init: clears the SF pointer and
 * initializes the contention-resolution and UE scheduling lists. */
14572 Void rgSCHCmnInitUlRbAllocInfo
14574 RgSchCmnUlRbAllocInfo *allocInfo
14577 Void rgSCHCmnInitUlRbAllocInfo(allocInfo)
14578 RgSchCmnUlRbAllocInfo *allocInfo;
14581 allocInfo->sf = NULLP;
14582 cmLListInit(&allocInfo->contResLst);
14583 cmLListInit(&allocInfo->schdContResLst);
14584 cmLListInit(&allocInfo->nonSchdContResLst);
14585 cmLListInit(&allocInfo->ueLst);
14586 cmLListInit(&allocInfo->schdUeLst);
14587 cmLListInit(&allocInfo->nonSchdUeLst);
14593 * @brief Scheduling for PUCCH group power control.
14597 * Function: rgSCHCmnGrpPwrCntrlPucch
14598 * Purpose: This function does group power control for PUCCH
14599 * corresponding to the subframe for which DL UE allocations
14602 * Invoked by: Common Scheduler
14604 * @param[in] RgSchCellCb *cell
/* NOTE(review): lines elided from this chunk; code kept byte-identical.
 * Thin wrapper over the power module's PUCCH group power control. */
14608 static Void rgSCHCmnGrpPwrCntrlPucch
14614 static Void rgSCHCmnGrpPwrCntrlPucch(cell, dlSf)
14620 rgSCHPwrGrpCntrlPucch(cell, dlSf);
14626 * @brief Scheduling for PUSCH group power control.
14630 * Function: rgSCHCmnGrpPwrCntrlPusch
14631 * Purpose: This function does group power control, for
14632 * the subframe for which UL allocation has (just) happened.
14634 * Invoked by: Common Scheduler
14636 * @param[in] RgSchCellCb *cell
14637 * @param[in] RgSchUlSf *ulSf
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Derives the DL SF paired with the
 * given UL SF via the DL-control delta (EMTC vs normal increment is
 * selected at an elided branch) and delegates to the power module. */
14641 static Void rgSCHCmnGrpPwrCntrlPusch
14647 static Void rgSCHCmnGrpPwrCntrlPusch(cell, ulSf)
14652 /*removed unused variable *cellSch*/
14653 CmLteTimingInfo frm;
14657 /* Got to pass DL SF corresponding to UL SF, so get that first.
14658 * There is no easy way of getting dlSf by having the RgSchUlSf*,
14659 * so use the UL delta from current time to get the DL SF. */
14660 frm = cell->crntTime;
14663 if(cell->emtcEnable == TRUE)
14665 RGSCH_INCR_SUB_FRAME_EMTC(frm, TFU_DLCNTRL_DLDELTA);
14670 RGSCH_INCR_SUB_FRAME(frm, TFU_DLCNTRL_DLDELTA);
14672 /* Del filling of dl.time */
14673 dlSf = rgSCHUtlSubFrmGet(cell, frm);
14675 rgSCHPwrGrpCntrlPusch(cell, dlSf, ulSf);
14680 /* Fix: syed align multiple UEs to refresh at same time */
14681 /***********************************************************
14683 * Func : rgSCHCmnApplyUeRefresh
14685 * Desc : Apply UE refresh in CMN and Specific
14686 * schedulers. Data rates and corresponding
14687 * scratchpad variables are updated.
14695 **********************************************************/
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Periodic (refresh-timer) rate reset:
 * restores effective AMBR/GBR budgets from configured values, recomputes
 * per-LCG buffer status clamped to those budgets, and aggregates the
 * UE's effective BSR (LCG0 + non-LCG0), then notifies both specific
 * schedulers. */
14697 static S16 rgSCHCmnApplyUeRefresh
14703 static S16 rgSCHCmnApplyUeRefresh(cell, ue)
14708 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14709 uint32_t effGbrBsr = 0;
14710 uint32_t effNonGbrBsr = 0;
14714 /* Reset the refresh cycle variableCAP */
14715 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* LCG0 is excluded here; its BS is added separately at the end. */
14717 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
14719 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
14721 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
14723 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
14725 cmnLcg->effGbr = cmnLcg->cfgdGbr;
14726 cmnLcg->effDeltaMbr = cmnLcg->deltaMbr;
/* GBR LCG: buffer status is capped at GBR + delta-MBR for this cycle. */
14727 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
14728 /* Considering GBR LCG will be prioritised by UE */
14729 effGbrBsr += cmnLcg->bs;
14730 }/* Else no remaing BS so nonLcg0 will be updated when BSR will be received */
/* Non-GBR LCG: capped at the UE's effective AMBR. */
14733 effNonGbrBsr += cmnLcg->reportedBs;
14734 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
14738 effNonGbrBsr = RGSCH_MIN(effNonGbrBsr,ue->ul.effAmbr);
14739 ue->ul.nonGbrLcgBs = effNonGbrBsr;
14741 ue->ul.nonLcg0Bs = effGbrBsr + effNonGbrBsr;
14742 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
14743 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
14746 /* call scheduler specific event handlers
14747 * for refresh timer expiry */
14748 cellSch->apisUl->rgSCHUlUeRefresh(cell, ue);
14749 cellSch->apisDl->rgSCHDlUeRefresh(cell, ue);
14754 /***********************************************************
14756 * Func : rgSCHCmnTmrExpiry
14758 * Desc : Adds an UE to refresh queue, so that the UE is
14759 * periodically triggered to refresh it's GBR and
14768 **********************************************************/
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Refresh-timer callback: validates the
 * event (debug builds), applies the UE refresh, and re-arms the UE on
 * the refresh queue for the next cycle. */
14770 static S16 rgSCHCmnTmrExpiry
14772 PTR cb, /* Pointer to timer control block */
14773 S16 tmrEvnt /* Timer Event */
14776 static S16 rgSCHCmnTmrExpiry(cb, tmrEvnt)
14777 PTR cb; /* Pointer to timer control block */
14778 S16 tmrEvnt; /* Timer Event */
14781 RgSchUeCb *ue = (RgSchUeCb *)cb;
14782 RgSchCellCb *cell = ue->cell;
14783 #if (ERRCLASS & ERRCLS_DEBUG)
14787 #if (ERRCLASS & ERRCLS_DEBUG)
14788 if (tmrEvnt != RG_SCH_CMN_EVNT_UE_REFRESH)
14790 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnTmrExpiry(): Invalid "
14791 "timer event CRNTI:%d",ue->ueId);
14798 rgSCHCmnApplyUeRefresh(cell, ue);
/* Re-queue so refresh repeats every RG_SCH_CMN_REFRESH_TIME. */
14800 rgSCHCmnAddUeToRefreshQ(cell, ue, RG_SCH_CMN_REFRESH_TIME);
14805 /***********************************************************
14807 * Func : rgSCHCmnTmrProc
14809 * Desc : Timer entry point per cell. Timer
14810 * processing is triggered at every frame boundary
14819 **********************************************************/
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. At frame boundaries this resets the
 * CSG PRB counters and, on the overload-control refresh period, updates
 * cell throughput EWMAs (95/5 weighting) and adjusts the iTBS cap; every
 * invocation drives the common refresh timer queue. */
14821 static S16 rgSCHCmnTmrProc
14826 static S16 rgSCHCmnTmrProc(cell)
14830 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
14831 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
14832 /* Moving the assignment of scheduler pointer
14833 to available scope for optimization */
/* Frame boundary check (slot 0 of the radio frame). */
14835 if ((cell->crntTime.slot % RGSCH_NUM_SUB_FRAMES_5G) == 0)
14837 /* Reset the counters periodically */
14838 if ((cell->crntTime.sfn % RG_SCH_CMN_CSG_REFRESH_TIME) == 0)
14840 RG_SCH_RESET_HCSG_DL_PRB_CNTR(cmnDlCell);
14841 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cmnUlCell);
14843 if ((cell->crntTime.sfn % RG_SCH_CMN_OVRLDCTRL_REFRESH_TIME) == 0)
/* Exponentially weighted moving average: 95% history, 5% new sample. */
14846 cell->measurements.ulTpt = ((cell->measurements.ulTpt * 95) + ( cell->measurements.ulBytesCnt * 5))/100;
14847 cell->measurements.dlTpt = ((cell->measurements.dlTpt * 95) + ( cell->measurements.dlBytesCnt * 5))/100;
14849 rgSCHUtlCpuOvrLdAdjItbsCap(cell);
14850 /* reset cell level tpt measurements for next cycle */
14851 cell->measurements.ulBytesCnt = 0;
14852 cell->measurements.dlBytesCnt = 0;
14854 /* Comparing with Zero instead of % is being done for efficiency.
14855 * If Timer resolution changes then accordingly update the
14856 * macro RG_SCH_CMN_REFRESH_TIMERES */
14857 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
/* Drive the refresh timer queue; expiries land in rgSCHCmnTmrExpiry. */
14858 cmPrcTmr(&sched->tmrTqCp, sched->tmrTq, (PFV)rgSCHCmnTmrExpiry);
14865 /***********************************************************
14867 * Func : rgSchCmnUpdCfiVal
14869 * Desc : Update the CFI value if CFI switch was done
14877 **********************************************************/
/* NOTE(review): embedded line numbers jump — lines (#ifdef TDD/FDD
 * branches, braces) are elided from this chunk; code kept byte-identical.
 * Applies a pending dynamic-CFI switch: when the current DL SF index
 * matches the recorded pdcchSfIdx, promotes newCfi to currCfi,
 * recomputes nCce (TDD walks all DL SFs incl. special-SF CFI; FDD uses
 * the single-table path), and starts the ACK switch-over window. */
14879 static Void rgSchCmnUpdCfiVal
14885 static Void rgSchCmnUpdCfiVal(cell, delta)
14891 CmLteTimingInfo pdsch;
14892 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
14898 uint8_t splSfCfi = 0;
14902 pdsch = cell->crntTime;
14903 RGSCH_INCR_SUB_FRAME(pdsch, delta);
14904 dlSf = rgSCHUtlSubFrmGet(cell, pdsch);
14905 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
14906 *change happens in that SF then UL PDCCH allocation happens with old CFI
14907 *but CFI in control Req goes updated one since it was stored in the CELL
14909 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* 0xFF sentinel = no CFI switch pending. */
14910 if(cell->dynCfiCb.pdcchSfIdx != 0xFF)
14913 dlIdx = rgSCHUtlGetDlSfIdx(cell, &pdsch);
14915 dlIdx = (((pdsch.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (pdsch.slot % RGSCH_NUM_SUB_FRAMES));
14916 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
14918 /* If current downlink subframe index is same as pdcch SF index,
14919 * perform the switching of CFI in this subframe */
14920 if(cell->dynCfiCb.pdcchSfIdx == dlIdx)
14922 cellCmnDl->currCfi = cellCmnDl->newCfi;
14923 cell->dynCfiCb.pdcchSfIdx = 0xFF;
14925 /* Updating the nCce value based on the new CFI */
14927 splSfCfi = cellCmnDl->newCfi;
14928 for(idx = 0; idx < cell->numDlSubfrms; idx++)
14930 tddSf = cell->subFrms[idx];
14932 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][tddSf->sfNum];
/* Special subframes carrying data use a capped CFI (bandwidth-based). */
14934 if(tddSf->sfType == RG_SCH_SPL_SF_DATA)
14936 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
14938 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
14942 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][cellCmnDl->currCfi];
14945 /* Setting the switch over window length based on config index.
14946 * During switch over period all the UL trnsmissions are Acked
14948 cell->dynCfiCb.switchOvrWinLen =
14949 rgSchCfiSwitchOvrWinLen[cell->ulDlCfgIdx];
14951 cell->nCce = cell->dynCfiCb.cfi2NCceTbl[0][cellCmnDl->currCfi];
14952 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
14953 *change happens in that SF then UL PDCCH allocation happens with old CFI
14954 *but CFI in control Req goes updated one since it was stored in the CELL
14956 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* Index 7 appears to be the FDD entry of the switch-over table — confirm. */
14957 cell->dynCfiCb.switchOvrWinLen = rgSchCfiSwitchOvrWinLen[7];
14965 /***********************************************************
14967 * Func : rgSchCmnUpdtPdcchSfIdx
14969 * Desc : Update the switch over window length
14977 **********************************************************/
/* NOTE(review): embedded line numbers jump — two signatures appear (with
 * and without sfNum), selected by elided TDD/FDD preprocessor guards;
 * code kept byte-identical. Resets the dynamic-CFI sampling state and
 * records the future SF index at which the new CFI takes effect. */
14980 static Void rgSchCmnUpdtPdcchSfIdx
14987 static Void rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, sfNum)
14994 static Void rgSchCmnUpdtPdcchSfIdx
15000 static Void rgSchCmnUpdtPdcchSfIdx(cell, dlIdx)
15009 /* Resetting the parameters on CFI switching */
15010 cell->dynCfiCb.cceUsed = 0;
15011 cell->dynCfiCb.lowCceCnt = 0;
15013 cell->dynCfiCb.cceFailSum = 0;
15014 cell->dynCfiCb.cceFailCnt = 0;
15015 cell->dynCfiCb.prevCceFailIdx = 0;
15017 cell->dynCfiCb.switchOvrInProgress = TRUE;
15019 for(idx = 0; idx < cell->dynCfiCb.numFailSamples; idx++)
15021 cell->dynCfiCb.cceFailSamples[idx] = 0;
15024 cell->dynCfiCb.ttiCnt = 0;
15026 cell->dynCfiCb.cfiSwitches++;
15027 cfiSwitchCnt = cell->dynCfiCb.cfiSwitches;
/* TDD: apply delay from per-config table; FDD: fixed CFI apply delta. */
15030 cell->dynCfiCb.pdcchSfIdx = (dlIdx +
15031 rgSchTddPdcchSfIncTbl[cell->ulDlCfgIdx][sfNum]) % cell->numDlSubfrms;
15033 cell->dynCfiCb.pdcchSfIdx = (dlIdx + RG_SCH_CFI_APPLY_DELTA) % \
15034 RGSCH_NUM_DL_slotS;
15038 /***********************************************************
15040 * Func : rgSchCmnUpdCfiDb
15042 * Desc : Update the counters related to dynamic
15043 * CFI feature in cellCb.
15051 **********************************************************/
/* NOTE(review): embedded line numbers jump — lines (TDD/FDD #ifdefs,
 * braces) are elided from this chunk; code kept byte-identical.
 * Dynamic CFI controller. Non-dynamic mode: converge currCfi toward the
 * configured CFI. Dynamic mode: step CFI up on accumulated CCE-allocation
 * failures, step down when CCE usage stays below the next-lower CFI's
 * capacity (with a percentage margin) over the monitoring interval; then
 * maintain the sliding failure-sample window and the TTI counter. On any
 * decided change, schedule the switch via rgSchCmnUpdtPdcchSfIdx. */
15053 Void rgSchCmnUpdCfiDb
15059 Void rgSchCmnUpdCfiDb(cell, delta)
15064 CmLteTimingInfo frm;
15070 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15071 uint8_t nCceLowerCfi = 0;
15073 uint8_t cceFailIdx;
15079 /* Get Downlink Subframe */
15080 frm = cell->crntTime;
15081 RGSCH_INCR_SUB_FRAME(frm, delta);
15084 dlIdx = rgSCHUtlGetDlSfIdx(cell, &frm);
15085 dlSf = cell->subFrms[dlIdx];
/* TDD only: whether this DL SF carries HI/DCI0 for an UL SF. */
15086 isHiDci0 = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][dlSf->sfNum];
15088 /* Changing the idexing
15089 so that proper subframe is selected */
15090 dlIdx = (((frm.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (frm.slot % RGSCH_NUM_SUB_FRAMES));
15091 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
15092 dlSf = cell->subFrms[dlIdx];
15095 currCfi = cellSch->dl.currCfi;
/* No adjustments while a previous switch-over window is still active. */
15097 if(!cell->dynCfiCb.switchOvrInProgress)
15100 if(!cell->dynCfiCb.isDynCfiEnb)
/* Static CFI: walk currCfi one step toward the configured value. */
15102 if(currCfi != cellSch->cfiCfg.cfi)
15104 if(currCfi < cellSch->cfiCfg.cfi)
15106 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
15107 cfiIncr = cell->dynCfiCb.cfiIncr;
15111 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
15112 cfiDecr = cell->dynCfiCb.cfiDecr;
15119 /* Setting ttiMod to 0 for ttiCnt > 1000 in case if this
15120 * function was not called in UL subframe*/
15121 if(cell->dynCfiCb.ttiCnt > RGSCH_CFI_TTI_MON_INTRVL)
15128 ttiMod = cell->dynCfiCb.ttiCnt % RGSCH_CFI_TTI_MON_INTRVL;
15131 dlSf->dlUlBothCmplt++;
/* Evaluate only once both DL and UL processing hit this SF (==2), or
 * when this SF carries no HI/DCI0 and UL won't come. */
15133 if((dlSf->dlUlBothCmplt == 2) || (!isHiDci0))
15135 if(dlSf->dlUlBothCmplt == 2)
15138 /********************STEP UP CRITERIA********************/
15139 /* Updating the CCE failure count parameter */
15140 cell->dynCfiCb.cceFailCnt += dlSf->isCceFailure;
15141 cell->dynCfiCb.cceFailSum += dlSf->isCceFailure;
15143 /* Check if cfi step up can be performed */
15144 if(currCfi < cell->dynCfiCb.maxCfi)
15146 if(cell->dynCfiCb.cceFailSum >= cell->dynCfiCb.cfiStepUpTtiCnt)
15148 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
15149 cfiIncr = cell->dynCfiCb.cfiIncr;
15154 /********************STEP DOWN CRITERIA********************/
15156 /* Updating the no. of CCE used in this dl subframe */
15157 cell->dynCfiCb.cceUsed += dlSf->cceCnt;
15159 if(currCfi > RGSCH_MIN_CFI_VAL)
15161 /* calculating the number of CCE for next lower CFI */
15163 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][dlSf->sfNum];
15164 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[mPhich][currCfi-1];
15166 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[0][currCfi-1];
15168 if(dlSf->cceCnt < nCceLowerCfi)
15170 /* Updating the count of TTIs in which no. of CCEs
15171 * used were less than the CCEs of next lower CFI */
15172 cell->dynCfiCb.lowCceCnt++;
/* Step-down budget: lower-CFI capacity x TTI count x percentage margin. */
15177 totalCce = (nCceLowerCfi * cell->dynCfiCb.cfiStepDownTtiCnt *
15178 RGSCH_CFI_CCE_PERCNTG)/100;
15180 if((!cell->dynCfiCb.cceFailSum) &&
15181 (cell->dynCfiCb.lowCceCnt >=
15182 cell->dynCfiCb.cfiStepDownTtiCnt) &&
15183 (cell->dynCfiCb.cceUsed < totalCce))
15185 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
15186 cfiDecr = cell->dynCfiCb.cfiDecr;
/* Sliding-window maintenance of the CCE-failure samples. */
15192 cceFailIdx = ttiMod/cell->dynCfiCb.failSamplePrd;
15194 if(cceFailIdx != cell->dynCfiCb.prevCceFailIdx)
15196 /* New sample period has started. Subtract the old count
15197 * from the new sample period */
15198 cell->dynCfiCb.cceFailSum -= cell->dynCfiCb.cceFailSamples[cceFailIdx];
15200 /* Store the previous sample period data */
15201 cell->dynCfiCb.cceFailSamples[cell->dynCfiCb.prevCceFailIdx]
15202 = cell->dynCfiCb.cceFailCnt;
15204 cell->dynCfiCb.prevCceFailIdx = cceFailIdx;
15206 /* Resetting the CCE failure count as zero for next sample period */
15207 cell->dynCfiCb.cceFailCnt = 0;
15212 /* Restting the parametrs after Monitoring Interval expired */
15213 cell->dynCfiCb.cceUsed = 0;
15214 cell->dynCfiCb.lowCceCnt = 0;
15215 cell->dynCfiCb.ttiCnt = 0;
15218 cell->dynCfiCb.ttiCnt++;
/* A decided CFI change is applied at a future SF, not immediately. */
15222 if(cellSch->dl.newCfi != cellSch->dl.currCfi)
15225 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, dlSf->sfNum);
15227 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx);
15234 * @brief Dl Scheduler for Broadcast and Common channel scheduling.
15238 * Function: rgSCHCmnDlCommonChSch
15239 * Purpose: This function schedules DL Common channels for LTE.
15240 * Invoked by TTI processing in TOM. Scheduling is done for
15241 * BCCH, PCCH, Msg4, CCCH SDU, RAR in that order
15243 * Invoked by: TOM (TTI processing)
15245 * @param[in] RgSchCellCb *cell
/* NOTE(review): embedded line numbers jump — lines are elided from this
 * chunk; code kept byte-identical. Per-TTI common-channel pass: PDB
 * tracking tick, CFI update, DL-inactive UE handling, refresh-timer
 * tick, then BCCH/PCCH and CCCH/RAR allocation gated by the cell's
 * DL-data-allowed and stop flags; SI window counter decays each TTI. */
15249 Void rgSCHCmnDlCommonChSch
15254 Void rgSCHCmnDlCommonChSch(cell)
15258 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15261 cellSch->apisDl->rgSCHDlTickForPdbTrkng(cell);
15262 rgSchCmnUpdCfiVal(cell, RG_SCH_CMN_DL_DELTA);
15264 /* handle Inactive UEs for DL */
15265 rgSCHCmnHdlDlInactUes(cell);
15267 /* Send a Tick to Refresh Timer */
15268 rgSCHCmnTmrProc(cell);
15270 if (cell->isDlDataAllwd && (cell->stopSiSch == FALSE))
15272 rgSCHCmnInitRbAlloc(cell);
15273 /* Perform DL scheduling of BCCH, PCCH */
15274 rgSCHCmnDlBcchPcchAlloc(cell);
15278 if(cell->siCb.inWindow != 0)
15280 cell->siCb.inWindow--;
15283 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
15285 rgSCHCmnDlCcchRarAlloc(cell);
15291 * @brief Scheduler invocation per TTI.
15295 * Function: rgSCHCmnUlSch
15296 * Purpose: This function implements UL scheduler alone. This is to
15297 * be able to perform scheduling with more flexibility.
15299 * Invoked by: TOM (TTI processing)
15301 * @param[in] RgSchCellCb *cell
/* NOTE(review): embedded line numbers jump — lines (#ifdefs, braces,
 * returns) are elided from this chunk; code kept byte-identical.
 * Per-TTI UL entry: skipped when LAA SCell is enabled; when a valid UL
 * schedule index exists, updates CFI, handles UL-inactive UEs and runs
 * rgSCHCmnUlAlloc; then drives the dynamic-CFI database and counts down
 * the CFI switch-over window, applying any pending dyn-CFI reconfig
 * when the window closes. Otherwise only the SPS UL tick runs. */
15310 Void rgSCHCmnUlSch(cell)
15314 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15319 if(TRUE == rgSCHLaaSCellEnabled(cell))
15325 if(cellSch->ul.schdIdx != RGSCH_INVALID_INFO)
15327 rgSchCmnUpdCfiVal(cell, TFU_ULCNTRL_DLDELTA);
15329 /* Handle Inactive UEs for UL */
15330 rgSCHCmnHdlUlInactUes(cell);
15331 /* Perform UL Scheduling EVERY TTI */
15332 rgSCHCmnUlAlloc(cell);
15334 /* Calling function to update CFI parameters*/
15335 rgSchCmnUpdCfiDb(cell, TFU_ULCNTRL_DLDELTA);
15337 if(cell->dynCfiCb.switchOvrWinLen > 0)
15339 /* Decrementing the switchover window length */
15340 cell->dynCfiCb.switchOvrWinLen--;
15342 if(!cell->dynCfiCb.switchOvrWinLen)
15344 if(cell->dynCfiCb.dynCfiRecfgPend)
15346 /* Toggling the Dynamic CFI enabling */
15347 cell->dynCfiCb.isDynCfiEnb ^= 1;
15348 rgSCHDynCfiReCfg(cell, cell->dynCfiCb.isDynCfiEnb);
15349 cell->dynCfiCb.dynCfiRecfgPend = FALSE;
15351 cell->dynCfiCb.switchOvrInProgress = FALSE;
15359 rgSCHCmnSpsUlTti(cell, NULLP);
/* NOTE(review): sampled listing — braces/returns and #ifdef markers between the
 * numbered lines are elided; code left byte-identical, comments only. */
15369 * @brief This function updates the scheduler with service for an UE.
15373 * Function: rgSCHCmnDlDedBoUpd
15374 * Purpose: This function should be called whenever there is a
15375 * change BO for a service.
15377 * Invoked by: BO and Scheduler
15379 * @param[in] RgSchCellCb* cell
15380 * @param[in] RgSchUeCb* ue
15381 * @param[in] RgSchDlLcCb* svc
15386 Void rgSCHCmnDlDedBoUpd
15393 Void rgSCHCmnDlDedBoUpd(cell, ue, svc)
15399 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15401 /* RACHO : if UEs idle time exceeded and a BO update
15402 * is received, then add UE to the pdcch Order Q */
15403 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
15405 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue, cell);
15406 /* If PDCCH order is already triggered and we are waiting for
15407 * RACH from UE then do not add to PdcchOdrQ. */
15408 if (ueDl->rachInfo.rapIdLnk.node == NULLP)
15410 rgSCHCmnDlAdd2PdcchOdrQ(cell, ue);
15416 /* If SPS service, invoke SPS module */
15417 if (svc->dlLcSpsCfg.isSpsEnabled)
15419 rgSCHCmnSpsDlDedBoUpd(cell, ue, svc);
15420 /* Note: Return from here, no update needed in other schedulers */
/* Non-SPS path: dispatch the BO update to the EMTC or regular DL
 * scheduler API table, then to the SCell handler. */
15425 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
15427 cellSch->apisEmtcDl->rgSCHDlDedBoUpd(cell, ue, svc);
15428 //printf("rgSCHEMTCDlDedBoUpd\n");
15433 cellSch->apisDl->rgSCHDlDedBoUpd(cell, ue, svc);
15438 rgSCHSCellDlDedBoUpd(cell, ue, svc);
15446 * @brief Removes an UE from Cell's TA List.
15450 * Function: rgSCHCmnRmvFrmTaLst
15451 * Purpose: Removes an UE from Cell's TA List.
15453 * Invoked by: Specific Scheduler
15455 * @param[in] RgSchCellCb* cell
15456 * @param[in] RgSchUeCb* ue
15461 Void rgSCHCmnRmvFrmTaLst
15467 Void rgSCHCmnRmvFrmTaLst(cell, ue)
15472 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
15475 if(cell->emtcEnable && ue->isEmtcUe)
15477 rgSCHEmtcRmvFrmTaLst(cellCmnDl,ue);
15482 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
15483 ue->dlTaLnk.node = (PTR)NULLP;
15488 /* Fix: syed Remove the msg4Proc from cell
15489 * msg4Retx Queue. I have used CMN scheduler function
15490 * directly. Please define a new API and call this
15491 * function through that. */
15494 * @brief This function removes MSG4 HARQ process from cell RETX Queues.
15498 * Function: rgSCHCmnDlMsg4ProcRmvFrmRetx
15499 * Purpose: This function removes MSG4 HARQ process from cell RETX Queues.
15501 * Invoked by: UE/RACB deletion.
15503 * @param[in] RgSchCellCb* cell
15504 * @param[in] RgSchDlHqProc* hqP
15509 Void rgSCHCmnDlMsg4ProcRmvFrmRetx
15512 RgSchDlHqProcCb *hqP
15515 Void rgSCHCmnDlMsg4ProcRmvFrmRetx(cell, hqP)
15517 RgSchDlHqProcCb *hqP;
15520 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15522 if (hqP->tbInfo[0].ccchSchdInfo.retxLnk.node)
15524 if (hqP->hqE->msg4Proc == hqP)
15526 cmLListDelFrm(&cellSch->dl.msg4RetxLst, \
15527 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15528 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
15531 else if(hqP->hqE->ccchSduProc == hqP)
15533 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
15534 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15535 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
/* NOTE(review): sampled listing — braces/breaks and the LTEMAC_SPS/EMTC
 * conditional structure are partially elided; code left byte-identical. */
15544 * @brief This function adds a HARQ process for retx.
15548 * Function: rgSCHCmnDlProcAddToRetx
15549 * Purpose: This function adds a HARQ process to retransmission
15550 * queue. This may be performed when a HARQ ack is
15553 * Invoked by: HARQ feedback processing
15555 * @param[in] RgSchCellCb* cell
15556 * @param[in] RgSchDlHqProc* hqP
15561 Void rgSCHCmnDlProcAddToRetx
15564 RgSchDlHqProcCb *hqP
15567 Void rgSCHCmnDlProcAddToRetx(cell, hqP)
15569 RgSchDlHqProcCb *hqP;
15572 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15574 if (hqP->hqE->msg4Proc == hqP) /* indicating msg4 transmission */
15576 cmLListAdd2Tail(&cellSch->dl.msg4RetxLst, \
15577 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15578 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
15581 else if(hqP->hqE->ccchSduProc == hqP)
15583 /*If CCCH SDU being transmitted without cont res CE*/
15584 cmLListAdd2Tail(&cellSch->dl.ccchSduRetxLst,
15585 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15586 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
/* SPS HARQ processes are re-queued by the SPS module (LTEMAC_SPS build). */
15592 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
15594 /* Invoke SPS module for SPS HARQ proc re-transmission handling */
15595 rgSCHCmnSpsDlProcAddToRetx(cell, hqP);
15598 #endif /* LTEMAC_SPS */
/* Dedicated (non-msg4/CCCH/SPS) processes go to the EMTC or regular
 * DL scheduler API, chosen per-UE. */
15600 if((TRUE == cell->emtcEnable)
15601 && (TRUE == hqP->hqE->ue->isEmtcUe))
15603 cellSch->apisEmtcDl->rgSCHDlProcAddToRetx(cell, hqP);
15608 cellSch->apisDl->rgSCHDlProcAddToRetx(cell, hqP);
/* NOTE(review): sampled listing — braces/returns and the TENB_STATS/periodic
 * conditional markers are elided; code left byte-identical, comments only. */
15616 * @brief This function performs RI validation and
15617 * updates it to the ueCb.
15621 * Function: rgSCHCmnDlSetUeRi
15622 * Purpose: This function performs RI validation and
15623 * updates it to the ueCb.
15625 * Invoked by: rgSCHCmnDlCqiInd
15627 * @param[in] RgSchCellCb *cell
15628 * @param[in] RgSchUeCb *ue
15629 * @param[in] uint8_t ri
15630 * @param[in] Bool isPeriodic
15635 static Void rgSCHCmnDlSetUeRi
15643 static Void rgSCHCmnDlSetUeRi(cell, ue, ri, isPer)
15650 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15651 RgSchCmnUeInfo *ueSchCmn = RG_SCH_CMN_GET_CMN_UE(ue);
15654 RgSchUePCqiCb *cqiCb = RG_SCH_GET_UE_CELL_CQI_CB(ue,cell);
15659 /* FIX for RRC Reconfiguration issue */
15660 /* ccpu00140894- During Tx Mode transition RI report will not entertained for
15661 * specific during which SCH expecting UE can complete TX mode transition*/
15662 if (ue->txModeTransCmplt == FALSE)
15667 /* Restrict the Number of TX layers to cell->numTxAntPorts.
15668 * Protection from invalid RI values. */
15669 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
15671 /* Special case of converting PMI to sane value when
15672 * there is a switch in RI from 1 to 2 and PMI reported
15673 * for RI=1 is invalid for RI=2 */
15674 if ((cell->numTxAntPorts == 2) && (ue->mimoInfo.txMode == RGR_UE_TM_4))
15676 if ((ri == 2) && ( ueDl->mimoInfo.ri == 1))
15678 ueDl->mimoInfo.pmi = (ueDl->mimoInfo.pmi < 2)? 1:2;
15682 /* Restrict the Number of TX layers according to the UE Category */
15683 ueDl->mimoInfo.ri = RGSCH_MIN(ri, rgUeCatTbl[ueSchCmn->ueCat].maxTxLyrs);
/* TeNB statistics: RI histogram bump (per-cell-index and aggregate
 * variants; selection between them is behind elided conditionals). */
15685 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].riCnt[ueDl->mimoInfo.ri-1]++;
15686 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
15690 ue->tenbStats->stats.nonPersistent.sch[0].riCnt[ueDl->mimoInfo.ri-1]++;
15691 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
15697 /* If RI is from Periodic CQI report */
15698 cqiCb->perRiVal = ueDl->mimoInfo.ri;
15699 /* Reset at every Periodic RI Reception */
15700 cqiCb->invalidateCqi = FALSE;
15704 /* If RI is from Aperiodic CQI report */
15705 if (cqiCb->perRiVal != ueDl->mimoInfo.ri)
15707 /* if this aperRI is different from last reported
15708 * perRI then invalidate all CQI reports till next
15710 cqiCb->invalidateCqi = TRUE;
15714 cqiCb->invalidateCqi = FALSE;
/* Force transmit diversity while RI==1 for TM3; lift it for RI>1. */
15719 if (ueDl->mimoInfo.ri > 1)
15721 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
15723 else if (ue->mimoInfo.txMode == RGR_UE_TM_3) /* ri == 1 */
15725 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* NOTE(review): sampled listing — braces and the ROK/RFAILED return lines are
 * elided; code left byte-identical, comments only. */
15733 * @brief This function performs PMI validation and
15734 * updates it to the ueCb.
15738 * Function: rgSCHCmnDlSetUePmi
15739 * Purpose: This function performs PMI validation and
15740 * updates it to the ueCb.
15742 * Invoked by: rgSCHCmnDlCqiInd
15744 * @param[in] RgSchCellCb *cell
15745 * @param[in] RgSchUeCb *ue
15746 * @param[in] uint8_t pmi
/* Returns S16 — presumably ROK on acceptance, RFAILED on invalid PMI or
 * pending TX-mode transition (return statements elided) — TODO confirm. */
15751 static S16 rgSCHCmnDlSetUePmi
15758 static S16 rgSCHCmnDlSetUePmi(cell, ue, pmi)
15764 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Ignore PMI reports until the UE completes its TX-mode transition. */
15766 if (ue->txModeTransCmplt == FALSE)
15771 if (cell->numTxAntPorts == 2)
15777 if (ueDl->mimoInfo.ri == 2)
15779 /*ccpu00118150 - MOD - changed pmi value validation from 0 to 2*/
15780 /* PMI 2 and 3 are invalid incase of 2 TxAnt and 2 Layered SM */
15781 if (pmi == 2 || pmi == 3)
/* With RI==2 the stored PMI is offset by one relative to the report. */
15785 ueDl->mimoInfo.pmi = pmi+1;
15789 ueDl->mimoInfo.pmi = pmi;
15792 else if (cell->numTxAntPorts == 4)
15798 ueDl->mimoInfo.pmi = pmi;
15800 /* Reset the No PMI Flag in forceTD */
15801 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
/* NOTE(review): sampled listing — braces/#endif markers are elided; the four
 * signature fragments below are the RGR_CQI_REPT / non-RGR_CQI_REPT ANSI and
 * K&R variants. Code left byte-identical, comments only. */
15806 * @brief This function Updates the DL CQI on PUCCH for the UE.
15810 * Function: rgSCHCmnDlProcCqiMode10
15812 * This function updates the DL CQI on PUCCH for the UE.
15814 * Invoked by: rgSCHCmnDlCqiOnPucchInd
15816 * Processing Steps:
15818 * @param[in] RgSchCellCb *cell
15819 * @param[in] RgSchUeCb *ue
15820 * @param[in] TfuDlCqiRpt *dlCqiRpt
15825 #ifdef RGR_CQI_REPT
15827 static inline Void rgSCHCmnDlProcCqiMode10
15831 TfuDlCqiPucch *pucchCqi,
15835 static inline Void rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail)
15838 TfuDlCqiPucch *pucchCqi;
15843 static inline Void rgSCHCmnDlProcCqiMode10
15847 TfuDlCqiPucch *pucchCqi
15850 static inline Void rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi)
15853 TfuDlCqiPucch *pucchCqi;
15857 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15859 if (pucchCqi->u.mode10Info.type == TFU_RPT_CQI)
15861 /*ccpu00109787 - ADD - Check for non-zero CQI*/
15862 /* Checking whether the decoded CQI is a value between 1 and 15*/
15863 if((pucchCqi->u.mode10Info.u.cqi) && (pucchCqi->u.mode10Info.u.cqi
15864 < RG_SCH_CMN_MAX_CQI))
15866 ueDl->cqiFlag = TRUE;
/* Mode 1-0 carries a single wideband CQI: mirror it onto both codewords. */
15867 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode10Info.u.cqi;
15868 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
15869 /* ccpu00117452 - MOD - Changed macro name from
15870 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
15871 #ifdef RGR_CQI_REPT
15872 *isCqiAvail = TRUE;
15880 else if (pucchCqi->u.mode10Info.type == TFU_RPT_RI)
15882 if ( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode10Info.u.ri) )
15884 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode10Info.u.ri,
15889 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
15890 pucchCqi->u.mode10Info.u.ri,ue->ueId);
/* NOTE(review): sampled listing — braces/#endif markers are elided; signature
 * fragments cover the RGR_CQI_REPT and non-RGR_CQI_REPT, ANSI and K&R
 * variants. Code left byte-identical, comments only. */
15897 * @brief This function Updates the DL CQI on PUCCH for the UE.
15901 * Function: rgSCHCmnDlProcCqiMode11
15903 * This function updates the DL CQI on PUCCH for the UE.
15905 * Invoked by: rgSCHCmnDlCqiOnPucchInd
15907 * Processing Steps:
15908 * Process CQI MODE 11
15909 * @param[in] RgSchCellCb *cell
15910 * @param[in] RgSchUeCb *ue
15911 * @param[in] TfuDlCqiRpt *dlCqiRpt
15916 #ifdef RGR_CQI_REPT
15918 static inline Void rgSCHCmnDlProcCqiMode11
15922 TfuDlCqiPucch *pucchCqi,
15924 Bool *is2ndCwCqiAvail
15927 static inline Void rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail, is2ndCwCqiAvail)
15930 TfuDlCqiPucch *pucchCqi;
15932 Bool *is2ndCwCqiAvail;
15936 static inline Void rgSCHCmnDlProcCqiMode11
15940 TfuDlCqiPucch *pucchCqi
15943 static inline Void rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi)
15946 TfuDlCqiPucch *pucchCqi;
15950 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15952 if (pucchCqi->u.mode11Info.type == TFU_RPT_CQI)
/* A PUCCH report supersedes any earlier PUSCH feedback. */
15954 ue->mimoInfo.puschFdbkVld = FALSE;
15955 /*ccpu00109787 - ADD - Check for non-zero CQI*/
15956 if((pucchCqi->u.mode11Info.u.cqi.cqi) &&
15957 (pucchCqi->u.mode11Info.u.cqi.cqi < RG_SCH_CMN_MAX_CQI))
15959 ueDl->cqiFlag = TRUE;
15960 /* ccpu00117452 - MOD - Changed macro name from
15961 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
15962 #ifdef RGR_CQI_REPT
15963 *isCqiAvail = TRUE;
15965 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode11Info.u.cqi.cqi;
/* Second-codeword CQI is derived from the wideband differential, when
 * present in the mode 1-1 report. */
15966 if (pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.pres)
15968 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
15969 ueDl->mimoInfo.cwInfo[1].cqi, \
15970 pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.val);
15971 #ifdef RGR_CQI_REPT
15972 /* ccpu00117259 - ADD - Considering second codeword CQI info
15973 incase of MIMO for CQI Reporting */
15974 *is2ndCwCqiAvail = TRUE;
15982 rgSCHCmnDlSetUePmi(cell, ue, \
15983 pucchCqi->u.mode11Info.u.cqi.pmi);
15985 else if (pucchCqi->u.mode11Info.type == TFU_RPT_RI)
15987 if( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode11Info.u.ri))
15989 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode11Info.u.ri,
15994 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
15995 pucchCqi->u.mode11Info.u.ri,ue->ueId);
/* NOTE(review): sampled listing — braces/#endif markers are elided; signature
 * fragments cover the RGR_CQI_REPT and non-RGR_CQI_REPT, ANSI and K&R
 * variants. Code left byte-identical, comments only. */
16002 * @brief This function Updates the DL CQI on PUCCH for the UE.
16006 * Function: rgSCHCmnDlProcCqiMode20
16008 * This function updates the DL CQI on PUCCH for the UE.
16010 * Invoked by: rgSCHCmnDlCqiOnPucchInd
16012 * Processing Steps:
16013 * Process CQI MODE 20
16014 * @param[in] RgSchCellCb *cell
16015 * @param[in] RgSchUeCb *ue
16016 * @param[in] TfuDlCqiRpt *dlCqiRpt
16021 #ifdef RGR_CQI_REPT
16023 static inline Void rgSCHCmnDlProcCqiMode20
16027 TfuDlCqiPucch *pucchCqi,
16031 static inline Void rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail )
16034 TfuDlCqiPucch *pucchCqi;
16039 static inline Void rgSCHCmnDlProcCqiMode20
16043 TfuDlCqiPucch *pucchCqi
16046 static inline Void rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi)
16049 TfuDlCqiPucch *pucchCqi;
16053 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16055 if (pucchCqi->u.mode20Info.type == TFU_RPT_CQI)
/* Mode 2-0 also carries subband reports; only the wideband part is
 * consumed here (subband branch elided from this listing). */
16057 if (pucchCqi->u.mode20Info.u.cqi.isWideband)
16059 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16060 if((pucchCqi->u.mode20Info.u.cqi.u.wideCqi) &&
16061 (pucchCqi->u.mode20Info.u.cqi.u.wideCqi < RG_SCH_CMN_MAX_CQI))
16063 ueDl->cqiFlag = TRUE;
16064 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode20Info.u.cqi.\
16066 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16067 /* ccpu00117452 - MOD - Changed macro name from
16068 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16069 #ifdef RGR_CQI_REPT
16070 *isCqiAvail = TRUE;
16079 else if (pucchCqi->u.mode20Info.type == TFU_RPT_RI)
16081 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode20Info.u.ri))
16083 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode20Info.u.ri,
16088 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
16089 pucchCqi->u.mode20Info.u.ri,ue->ueId);
/* NOTE(review): sampled listing — braces/#endif markers are elided; signature
 * fragments cover the RGR_CQI_REPT and non-RGR_CQI_REPT, ANSI and K&R
 * variants. Code left byte-identical, comments only. */
16097 * @brief This function Updates the DL CQI on PUCCH for the UE.
16101 * Function: rgSCHCmnDlProcCqiMode21
16103 * This function updates the DL CQI on PUCCH for the UE.
16105 * Invoked by: rgSCHCmnDlCqiOnPucchInd
16107 * Processing Steps:
16108 * Process CQI MODE 21
16109 * @param[in] RgSchCellCb *cell
16110 * @param[in] RgSchUeCb *ue
16111 * @param[in] TfuDlCqiRpt *dlCqiRpt
16116 #ifdef RGR_CQI_REPT
16118 static inline Void rgSCHCmnDlProcCqiMode21
16122 TfuDlCqiPucch *pucchCqi,
16124 Bool *is2ndCwCqiAvail
16127 static inline Void rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail, is2ndCwCqiAvail)
16130 TfuDlCqiPucch *pucchCqi;
16131 TfuDlCqiRpt *dlCqiRpt;
16133 Bool *is2ndCwCqiAvail;
16137 static inline Void rgSCHCmnDlProcCqiMode21
16141 TfuDlCqiPucch *pucchCqi
16144 static inline Void rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi)
16147 TfuDlCqiPucch *pucchCqi;
16151 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16153 if (pucchCqi->u.mode21Info.type == TFU_RPT_CQI)
/* A PUCCH report supersedes any earlier PUSCH feedback. */
16155 ue->mimoInfo.puschFdbkVld = FALSE;
16156 if (pucchCqi->u.mode21Info.u.cqi.isWideband)
16158 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16159 if((pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi) &&
16160 (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi < RG_SCH_CMN_MAX_CQI))
16162 ueDl->cqiFlag = TRUE;
16163 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode21Info.u.cqi.\
/* Second-codeword CQI comes from the wideband differential when present. */
16165 if (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.pres)
16167 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
16168 ueDl->mimoInfo.cwInfo[1].cqi, \
16169 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.val);
16170 #ifdef RGR_CQI_REPT
16171 /* ccpu00117259 - ADD - Considering second codeword CQI info
16172 incase of MIMO for CQI Reporting */
16173 *is2ndCwCqiAvail = TRUE;
16176 /* ccpu00117452 - MOD - Changed macro name from
16177 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16178 #ifdef RGR_CQI_REPT
16179 *isCqiAvail = TRUE;
16186 rgSCHCmnDlSetUePmi(cell, ue, \
16187 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.pmi);
16190 else if (pucchCqi->u.mode21Info.type == TFU_RPT_RI)
16192 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode21Info.u.ri))
16194 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode21Info.u.ri,
16199 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
16200 pucchCqi->u.mode21Info.u.ri,ue->ueId);
16208 * @brief This function Updates the DL CQI on PUCCH for the UE.
16212 * Function: rgSCHCmnDlCqiOnPucchInd
16214 * This function updates the DL CQI on PUCCH for the UE.
16216 * Invoked by: rgSCHCmnDlCqiInd
16218 * Processing Steps:
16219 * - Depending on the reporting mode of the PUCCH, the CQI/PMI/RI values
16220 * are updated and stored for each UE
16222 * @param[in] RgSchCellCb *cell
16223 * @param[in] RgSchUeCb *ue
16224 * @param[in] TfuDlCqiRpt *dlCqiRpt
16229 #ifdef RGR_CQI_REPT
16231 static Void rgSCHCmnDlCqiOnPucchInd
16235 TfuDlCqiPucch *pucchCqi,
16236 RgrUeCqiRept *ueCqiRept,
16238 Bool *is2ndCwCqiAvail
16241 static Void rgSCHCmnDlCqiOnPucchInd(cell, ue, pucchCqi, ueCqiRept, isCqiAvail, is2ndCwCqiAvail)
16244 TfuDlCqiPucch *pucchCqi;
16245 RgrUeCqiRept *ueCqiRept;
16247 Bool *is2ndCwCqiAvail;
16251 static Void rgSCHCmnDlCqiOnPucchInd
16255 TfuDlCqiPucch *pucchCqi
16258 static Void rgSCHCmnDlCqiOnPucchInd(cell, ue, pucchCqi)
16261 TfuDlCqiPucch *pucchCqi;
16265 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16267 /* ccpu00117452 - MOD - Changed
16268 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16269 #ifdef RGR_CQI_REPT
16270 /* Save CQI mode information in the report */
16271 ueCqiRept->cqiMode = pucchCqi->mode;
16274 switch(pucchCqi->mode)
16276 case TFU_PUCCH_CQI_MODE10:
16277 #ifdef RGR_CQI_REPT
16278 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail);
16280 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi);
16282 ueDl->cqiFlag = TRUE;
16284 case TFU_PUCCH_CQI_MODE11:
16285 #ifdef RGR_CQI_REPT
16286 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail,
16289 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi);
16291 ueDl->cqiFlag = TRUE;
16293 case TFU_PUCCH_CQI_MODE20:
16294 #ifdef RGR_CQI_REPT
16295 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail);
16297 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi);
16299 ueDl->cqiFlag = TRUE;
16301 case TFU_PUCCH_CQI_MODE21:
16302 #ifdef RGR_CQI_REPT
16303 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail,
16306 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi);
16308 ueDl->cqiFlag = TRUE;
16312 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unknown CQI Mode %d",
16313 pucchCqi->mode,ue->ueId);
16314 /* ccpu00117452 - MOD - Changed macro name from
16315 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16316 #ifdef RGR_CQI_REPT
16317 *isCqiAvail = FALSE;
16324 } /* rgSCHCmnDlCqiOnPucchInd */
/* NOTE(review): sampled listing — braces, breaks, returns and several #ifdef
 * markers are elided; code left byte-identical, comments only. */
16328 * @brief This function Updates the DL CQI on PUSCH for the UE.
16332 * Function: rgSCHCmnDlCqiOnPuschInd
16334 * This function updates the DL CQI on PUSCH for the UE.
16336 * Invoked by: rgSCHCmnDlCqiInd
16338 * Processing Steps:
16339 * - Depending on the reporting mode of the PUSCH, the CQI/PMI/RI values
16340 * are updated and stored for each UE
16342 * @param[in] RgSchCellCb *cell
16343 * @param[in] RgSchUeCb *ue
16344 * @param[in] TfuDlCqiRpt *dlCqiRpt
16349 #ifdef RGR_CQI_REPT
16351 static Void rgSCHCmnDlCqiOnPuschInd
16355 TfuDlCqiPusch *puschCqi,
16356 RgrUeCqiRept *ueCqiRept,
16358 Bool *is2ndCwCqiAvail
16361 static Void rgSCHCmnDlCqiOnPuschInd(cell, ue, puschCqi, ueCqiRept, isCqiAvail, is2ndCwCqiAvail)
16364 TfuDlCqiPusch *puschCqi;
16365 RgrUeCqiRept *ueCqiRept;
16367 Bool *is2ndCwCqiAvail;
16371 static Void rgSCHCmnDlCqiOnPuschInd
16375 TfuDlCqiPusch *puschCqi
16378 static Void rgSCHCmnDlCqiOnPuschInd(cell, ue, puschCqi)
16381 TfuDlCqiPusch *puschCqi;
16385 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16386 uint32_t prevRiVal = 0;
/* RI, when present, is validated and applied first; the previous RI is
 * remembered for TM4 so it can be restored if the PMI update fails. */
16387 if (puschCqi->ri.pres == PRSNT_NODEF)
16389 if (RG_SCH_CMN_IS_RI_VALID(puschCqi->ri.val))
16391 /* Saving the previous ri value to revert back
16392 in case PMI update failed */
16393 if (RGR_UE_TM_4 == ue->mimoInfo.txMode ) /* Checking for TM4. TM8 check later */
16395 prevRiVal = ueDl->mimoInfo.ri;
16397 rgSCHCmnDlSetUeRi(cell, ue, puschCqi->ri.val, FALSE);
16401 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
16402 puschCqi->ri.val,ue->ueId);
16406 ue->mimoInfo.puschFdbkVld = FALSE;
16407 /* ccpu00117452 - MOD - Changed macro name from
16408 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16409 #ifdef RGR_CQI_REPT
16410 /* Save CQI mode information in the report */
16411 ueCqiRept->cqiMode = puschCqi->mode;
16412 /* ccpu00117259 - DEL - removed default setting of isCqiAvail to TRUE */
/* Per-mode processing: wideband CQI is validated (1..15) before use. */
16415 switch(puschCqi->mode)
16417 case TFU_PUSCH_CQI_MODE_20:
16418 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16419 /* Checking whether the decoded CQI is a value between 1 and 15*/
16420 if((puschCqi->u.mode20Info.wideBandCqi) &&
16421 (puschCqi->u.mode20Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
16423 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode20Info.wideBandCqi;
16424 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16425 /* ccpu00117452 - MOD - Changed macro name from
16426 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16427 #ifdef RGR_CQI_REPT
16428 *isCqiAvail = TRUE;
16436 case TFU_PUSCH_CQI_MODE_30:
16437 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16438 if((puschCqi->u.mode30Info.wideBandCqi) &&
16439 (puschCqi->u.mode30Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
16441 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode30Info.wideBandCqi;
16442 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16443 /* ccpu00117452 - MOD - Changed macro name from
16444 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16445 #ifdef RGR_CQI_REPT
16446 *isCqiAvail = TRUE;
/* Aperiodic-CQI counter declaration — surrounding conditional elided. */
16450 uint32_t gACqiRcvdCount;
16461 case TFU_PUSCH_CQI_MODE_12:
16462 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16463 if((puschCqi->u.mode12Info.cqiIdx[0]) &&
16464 (puschCqi->u.mode12Info.cqiIdx[0] < RG_SCH_CMN_MAX_CQI))
16466 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode12Info.cqiIdx[0];
16467 /* ccpu00117452 - MOD - Changed macro name from
16468 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16469 #ifdef RGR_CQI_REPT
16470 *isCqiAvail = TRUE;
16477 if((puschCqi->u.mode12Info.cqiIdx[1]) &&
16478 (puschCqi->u.mode12Info.cqiIdx[1] < RG_SCH_CMN_MAX_CQI))
16480 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode12Info.cqiIdx[1];
16481 /* ccpu00117452 - MOD - Changed macro name from
16482 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16483 #ifdef RGR_CQI_REPT
16484 /* ccpu00117259 - ADD - Considering second codeword CQI info
16485 incase of MIMO for CQI Reporting */
16486 *is2ndCwCqiAvail = TRUE;
/* Modes 1-2/2-2 deliver per-subband PMI feedback — kept for later use. */
16493 ue->mimoInfo.puschFdbkVld = TRUE;
16494 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_12;
16495 ue->mimoInfo.puschPmiInfo.u.mode12Info = puschCqi->u.mode12Info;
16496 /* : resetting this is time based. Make use of CQI reporting
16497 * periodicity, DELTA's in determining the exact time at which this
16498 * need to be reset. */
16500 case TFU_PUSCH_CQI_MODE_22:
16501 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16502 if((puschCqi->u.mode22Info.wideBandCqi[0]) &&
16503 (puschCqi->u.mode22Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
16505 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode22Info.wideBandCqi[0];
16506 /* ccpu00117452 - MOD - Changed macro name from
16507 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16508 #ifdef RGR_CQI_REPT
16509 *isCqiAvail = TRUE;
16516 if((puschCqi->u.mode22Info.wideBandCqi[1]) &&
16517 (puschCqi->u.mode22Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
16519 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode22Info.wideBandCqi[1];
16520 /* ccpu00117452 - MOD - Changed macro name from
16521 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16522 #ifdef RGR_CQI_REPT
16523 /* ccpu00117259 - ADD - Considering second codeword CQI info
16524 incase of MIMO for CQI Reporting */
16525 *is2ndCwCqiAvail = TRUE;
16532 rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode22Info.wideBandPmi);
16533 ue->mimoInfo.puschFdbkVld = TRUE;
16534 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_22;
16535 ue->mimoInfo.puschPmiInfo.u.mode22Info = puschCqi->u.mode22Info;
16537 case TFU_PUSCH_CQI_MODE_31:
16538 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16539 if((puschCqi->u.mode31Info.wideBandCqi[0]) &&
16540 (puschCqi->u.mode31Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
16542 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode31Info.wideBandCqi[0];
16543 /* ccpu00117452 - MOD - Changed macro name from
16544 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16545 #ifdef RGR_CQI_REPT
16546 *isCqiAvail = TRUE;
16549 if (ueDl->mimoInfo.ri > 1)
16551 if((puschCqi->u.mode31Info.wideBandCqi[1]) &&
16552 (puschCqi->u.mode31Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
16554 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode31Info.wideBandCqi[1];
16555 /* ccpu00117452 - MOD - Changed macro name from
16556 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16557 #ifdef RGR_CQI_REPT
16558 /* ccpu00117259 - ADD - Considering second codeword CQI info
16559 incase of MIMO for CQI Reporting */
16560 *is2ndCwCqiAvail = TRUE;
/* If the PMI is rejected, roll the TM4 RI back to its previous value
 * to keep rank and PMI consistent. */
16564 if (rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode31Info.pmi) != ROK)
16566 /* To avoid Rank and PMI inconsistency */
16567 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
16568 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
16570 ueDl->mimoInfo.ri = prevRiVal;
16573 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_31;
16574 ue->mimoInfo.puschPmiInfo.u.mode31Info = puschCqi->u.mode31Info;
16578 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Unknown CQI Mode %d CRNTI:%d",
16579 puschCqi->mode,ue->ueId);
16580 /* CQI decoding failed revert the RI to previous value */
16581 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
16582 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
16584 ueDl->mimoInfo.ri = prevRiVal;
16586 /* ccpu00117452 - MOD - Changed macro name from
16587 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16588 #ifdef RGR_CQI_REPT
16589 *isCqiAvail = FALSE;
16590 /* ccpu00117259 - ADD - Considering second codeword CQI info
16591 incase of MIMO for CQI Reporting */
16592 *is2ndCwCqiAvail = FALSE;
16599 } /* rgSCHCmnDlCqiOnPuschInd */
/* NOTE(review): sampled listing — braces/returns and several #ifdef markers
 * (RGR_CQI_REPT, CQI_CONFBITMASK_DROP, EMTC, TENB_STATS, RG_PFS_STATS) are
 * elided; code left byte-identical, comments only. */
16603 * @brief This function Updates the DL CQI for the UE.
16607 * Function: rgSCHCmnDlCqiInd
16608 * Purpose: Updates the DL CQI for the UE
16612 * @param[in] RgSchCellCb *cell
16613 * @param[in] RgSchUeCb *ue
16614 * @param[in] TfuDlCqiRpt *dlCqi
16619 Void rgSCHCmnDlCqiInd
16625 CmLteTimingInfo timingInfo
16628 Void rgSCHCmnDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo)
16633 CmLteTimingInfo timingInfo;
16636 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16637 /* ccpu00117452 - MOD - Changed macro name from
16638 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16639 #ifdef RGR_CQI_REPT
16640 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16641 RgrUeCqiRept ueCqiRept = {{0}};
16642 Bool isCqiAvail = FALSE;
16643 /* ccpu00117259 - ADD - Considering second codeword CQI info
16644 incase of MIMO for CQI Reporting */
16645 Bool is2ndCwCqiAvail = FALSE;
/* Route the report to the PUCCH or PUSCH handler based on isPucchInfo. */
16649 #ifdef RGR_CQI_REPT
16652 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
16656 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
16661 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi);
16665 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi);
/* Optional CQI-confidence smoothing: clamp the step-down to 3 and keep a
 * floor (cqi-delta >= 6 region) — exact elided structure TODO confirm. */
16669 #ifdef CQI_CONFBITMASK_DROP
16670 if(!ue->cqiConfBitMask)
16672 if (ueDl->mimoInfo.cwInfo[0].cqi >15)
16674 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
16675 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
16677 else if ( ueDl->mimoInfo.cwInfo[0].cqi >= ue->prevCqi)
16679 ue->prevCqi = ueDl->mimoInfo.cwInfo[0].cqi;
16683 uint8_t dlCqiDeltaPrev = 0;
16684 dlCqiDeltaPrev = ue->prevCqi - ueDl->mimoInfo.cwInfo[0].cqi;
16685 if (dlCqiDeltaPrev > 3)
16686 dlCqiDeltaPrev = 3;
16687 if ((ue->prevCqi - dlCqiDeltaPrev) < 6)
16693 ue->prevCqi = ue->prevCqi - dlCqiDeltaPrev;
16695 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
16696 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
16702 /* ccpu00117452 - MOD - Changed macro name from
16703 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16704 #ifdef RGR_CQI_REPT
16705 /* ccpu00117259 - ADD - Considering second codeword CQI info
16706 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail\
16707 in 'if' condition*/
16708 if (RG_SCH_CQIR_IS_PUSHNCQI_ENBLE(ue) && (isCqiAvail || is2ndCwCqiAvail))
16710 ueCqiRept.cqi[0] = ueDl->mimoInfo.cwInfo[0].cqi;
16712 /* ccpu00117259 - ADD - Considering second codeword CQI info
16713 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail
16714 in 'if' condition*/
16715 ueCqiRept.cqi[1] = 0;
16716 if(is2ndCwCqiAvail)
16718 ueCqiRept.cqi[1] = ueDl->mimoInfo.cwInfo[1].cqi;
16720 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, &ueCqiRept);
/* Refresh link-adaptation allocation limits from the updated CQI. */
16725 rgSCHCmnDlSetUeAllocLmtLa(cell, ue);
16726 rgSCHCheckAndSetTxScheme(cell, ue);
16729 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), ue->isEmtcUe);
16731 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), FALSE);
/* Propagate the indication to frequency-selective, SPS and the specific
 * (EMTC or regular) DL scheduler. */
16735 if (cellSch->dl.isDlFreqSel)
16737 cellSch->apisDlfs->rgSCHDlfsDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo);
16740 /* Call SPS module to update CQI indication */
16741 rgSCHCmnSpsDlCqiIndHndlr(cell, ue, timingInfo);
16743 /* Call Specific scheduler to process on dlCqiInd */
16745 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
16747 cellSch->apisEmtcDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
16752 cellSch->apisDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
/* Statistics blocks (RG_PFS_STATS / averages / TENB_STATS). */
16755 #ifdef RG_PFS_STATS
16756 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].avgCqi +=
16757 ueDl->mimoInfo.cwInfo[0].cqi;
16758 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].totalCqiOcc++;
16762 ueDl->avgCqi += ueDl->mimoInfo.cwInfo[0].cqi;
16763 ueDl->numCqiOccns++;
16764 if (ueDl->mimoInfo.ri == 1)
16775 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
16776 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
16777 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw0Cqi ++;
16778 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw1Cqi ++;
16779 cell->tenbStats->sch.dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
16780 cell->tenbStats->sch.dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
16781 cell->tenbStats->sch.dlNumCw0Cqi ++;
16782 cell->tenbStats->sch.dlNumCw1Cqi ++;
/* NOTE(review): sampled listing — the per-branch `wideCqi = ...` assignment
 * lines and braces are elided from this extract; only the SNR thresholds
 * remain visible. Code left byte-identical, comments only. */
16789 * @brief This function calculates the wideband CQI from SNR
16790 * reported for each RB.
16794 * Function: rgSCHCmnCalcWcqiFrmSnr
16795 * Purpose: Wideband CQI calculation from SNR
16797 * Invoked by: RG SCH
16799 * @param[in] RgSchCellCb *cell
16800 * @param[in] TfuSrsRpt *srsRpt,
16801 * @return Wideband CQI
16805 static uint8_t rgSCHCmnCalcWcqiFrmSnr
16811 static uint8_t rgSCHCmnCalcWcqiFrmSnr(cell,srsRpt)
16816 uint8_t wideCqi=1; /*Calculated value from SNR*/
16817 /*Need to map a certain SNR with a WideCQI value.
16818 * The CQI calculation is still primitive. Further, need to
16819 * use a improvized method for calculating WideCQI from SNR*/
/* Step mapping over snr[0] in bands of 50; each band assigns a higher
 * wideCqi (assignments elided here) — TODO confirm against full source. */
16820 if (srsRpt->snr[0] <=50)
16824 else if (srsRpt->snr[0]>=51 && srsRpt->snr[0] <=100)
16828 else if (srsRpt->snr[0]>=101 && srsRpt->snr[0] <=150)
16832 else if (srsRpt->snr[0]>=151 && srsRpt->snr[0] <=200)
16836 else if (srsRpt->snr[0]>=201 && srsRpt->snr[0] <=250)
16845 }/*rgSCHCmnCalcWcqiFrmSnr*/
/* NOTE(review): some interior lines (braces, else keyword) are elided in
 * this extract; comments below cover only the visible statements. */
16849  * @brief This function Updates the SRS for the UE.
16853  *     Function: rgSCHCmnSrsInd
16854  *     Purpose:  Updates the UL SRS for the UE
16858  *  @param[in]  RgSchCellCb        *cell
16859  *  @param[in]  RgSchUeCb          *ue
16860  *  @param[in]  TfuSrsRpt        *srsRpt,
16865 Void rgSCHCmnSrsInd
16870 CmLteTimingInfo     timingInfo
16873 Void rgSCHCmnSrsInd(cell, ue, srsRpt, timingInfo)
16877 CmLteTimingInfo     timingInfo;
16880    uint8_t wideCqi; /*Calculated value from SNR*/
16881    uint32_t recReqTime; /*Received Time in TTI*/
/* Absolute reception time in TTIs: SFN scaled by slots-per-frame plus slot. */
16883    recReqTime = (timingInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) + timingInfo.slot;
/* Alternate the selected antenna (0/1) every SRS period elapsed. */
16884    ue->srsCb.selectedAnt = (recReqTime/ue->srsCb.peri)%2;
16885    if(srsRpt->wideCqiPres)
/* Use the UE-reported wideband CQI when present ... */
16887       wideCqi = srsRpt->wideCqi;
/* ... otherwise derive it from the reported SNR. */
16891       wideCqi = rgSCHCmnCalcWcqiFrmSnr(cell, srsRpt);
16893    rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi);
16895 }/*rgSCHCmnSrsInd*/
/* NOTE(review): several interior lines (braces, else branches, closing of the
 * error log call) are missing from this extract; comments are limited to the
 * visible code. */
16900  * @brief This function is a handler for TA report for an UE.
16904  *     Function: rgSCHCmnDlTARpt
16905  *     Purpose:  Determine based on UE_IDLE_TIME threshold,
16906  *     whether UE needs to be Linked to the scheduler's TA list OR
16907  *     if it needs a PDCCH Order.
16912  *  @param[in]  RgSchCellCb        *cell
16913  *  @param[in]  RgSchUeCb          *ue
16918 Void rgSCHCmnDlTARpt
16924 Void rgSCHCmnDlTARpt(cell, ue)
16929    RgSchCmnCell  *cellSch = RG_SCH_CMN_GET_CELL(cell);
16930    RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
16931    RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Temporary single-entry list used to notify schedulers of this UE's
 * PDCCH-order inactivity. */
16932    CmLListCp     poInactvLst;
16935    /* RACHO: If UE idle time is more than threshold, then
16936     * set its poInactv pdcch order inactivity */
16937    /* Fix : syed Ignore if TaTmr is not configured */
16938    if ((ue->dl.taCb.cfgTaTmr) && (rgSCHCmnUeIdleExdThrsld(cell, ue) == ROK))
/* Snapshot previous masks so the inactivity indication is sent only on the
 * 0 -> non-zero transition (see checks below). */
16940       uint32_t prevDlMsk = ue->dl.dlInactvMask;
16941       uint32_t prevUlMsk = ue->ul.ulInactvMask;
16942       ue->dl.dlInactvMask |= RG_PDCCHODR_INACTIVE;
16943       ue->ul.ulInactvMask |= RG_PDCCHODR_INACTIVE;
16944       /* Indicate Specific scheduler for this UEs inactivity */
16945       cmLListInit(&poInactvLst);
16946       cmLListAdd2Tail(&poInactvLst, &ueDl->rachInfo.inActUeLnk);
16947       ueDl->rachInfo.inActUeLnk.node = (PTR)ue;
16948       /* Send inactivate ind only if not already sent */
16949       if (prevDlMsk == 0)
16951          cellSch->apisDl->rgSCHDlInactvtUes(cell, &poInactvLst);
16953       if (prevUlMsk == 0)
16955          cellSch->apisUl->rgSCHUlInactvtUes(cell, &poInactvLst);
16960       /* Fix: ccpu00124009 Fix for loop in the linked list "cellDl->taLst" */
/* A NULL node pointer means the UE is not yet linked into the TA list;
 * guarding on it prevents a duplicate (list-corrupting) insertion. */
16961       if (!ue->dlTaLnk.node)
16964          if(cell->emtcEnable)
16968             rgSCHEmtcAddToTaLst(cellDl,ue);
16975             cmLListAdd2Tail(&cellDl->taLst, &ue->dlTaLnk);
16976             ue->dlTaLnk.node = (PTR)ue;
16981          RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
16982                "<TA>TA duplicate entry attempt failed: UEID:%u",
/* NOTE(review): most of the body is elided in this extract; only the final
 * assignment is visible. The antenna-comparison logic cannot be documented
 * from here. */
16991  * @brief Indication of UL CQI.
16995  *     Function : rgSCHCmnFindUlCqiUlTxAnt
16997  *     - Finds the Best Tx Antenna amongst the CQIs received
16998  *       from Two Tx Antennas.
17000  *  @param[in]  RgSchCellCb         *cell
17001  *  @param[in]  RgSchUeCb           *ue
17002  *  @param[in]  uint8_t                  wideCqi
17006 static Void rgSCHCmnFindUlCqiUlTxAnt
17013 static Void rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi)
/* Marks the UE's chosen TX antenna as valid for UL scheduling. */
17019    ue->validTxAnt = 1;
17021 }  /* rgSCHCmnFindUlCqiUlTxAnt */
/* NOTE(review): braces, #ifdef/#else/#endif guards and some statements are
 * elided in this extract; comments describe only visible lines. */
17025  * @brief Indication of UL CQI.
17029  *     Function : rgSCHCmnUlCqiInd
17031  *     - Updates uplink CQI information for the UE. Computes and
17032  *       stores the lowest CQI of CQIs reported in all subbands.
17034  *  @param[in]  RgSchCellCb         *cell
17035  *  @param[in]  RgSchUeCb           *ue
17036  *  @param[in]  TfuUlCqiRpt         *ulCqiInfo
17040 Void rgSCHCmnUlCqiInd
17044 TfuUlCqiRpt         *ulCqiInfo
17047 Void rgSCHCmnUlCqiInd(cell, ue, ulCqiInfo)
17050 TfuUlCqiRpt         *ulCqiInfo;
17053    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17054    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17059 #if (defined(SCH_STATS) || defined(TENB_STATS))
17060       CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
17063    /* consider inputs from SRS handlers about SRS occassions
17064     * in determining the UL TX Antenna selection */
17065    ueUl->crntUlCqi[0] = ulCqiInfo->wideCqi;
17067    ueUl->validUlCqi = ueUl->crntUlCqi[0];
/* Reported CQI supersedes the antenna-selection-derived CQI validity. */
17068    ue->validTxAnt = 0;
/* Link-adaptation bookkeeping: compare the iTBS implied by the new CQI with
 * the iTBS currently in use (cqiBasediTbs/deltaiTbs are scaled by 100). */
17070    iTbsNew = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][ueUl->validUlCqi];
17071    previTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
17073    if (RG_ITBS_DIFF(iTbsNew, previTbs) > 5)
17075       /* Ignore this iTBS report and mark that last iTBS report was */
17076       /* ignored so that subsequently we reset the LA algorithm */
17077       ueUl->ulLaCb.lastiTbsIgnored = TRUE;
17081       if (ueUl->ulLaCb.lastiTbsIgnored != TRUE)
/* Exponential smoothing: 20% new report, 80% history. */
17083          ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
17084                (80 * ueUl->ulLaCb.cqiBasediTbs))/100;
17088          /* Reset the LA as iTbs in use caught up with the value   */
17089          /* reported by UE.                                        */
17090          ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
17091                (80 * previTbs * 100))/100;
17092          ueUl->ulLaCb.deltaiTbs = 0;
17093          ueUl->ulLaCb.lastiTbsIgnored = FALSE;
17098    rgSCHPwrUlCqiInd(cell, ue);
17100    if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
17102       rgSCHCmnSpsUlCqiInd(cell, ue);
17105    /* Applicable to only some schedulers */
/* Dispatch to EMTC-specific or regular UL scheduler API per UE/cell flags. */
17107    if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
17109       cellSch->apisEmtcUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
17114       cellSch->apisUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
/* Statistics accumulation (guarded by stats #ifdefs not visible here). */
17118    ueUl->numCqiOccns++;
17119    ueUl->avgCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17124    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17125    ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulNumCqi ++;
17126    cell->tenbStats->sch.ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17127    cell->tenbStats->sch.ulNumCqi ++;
17132 }  /* rgSCHCmnUlCqiInd */
/* NOTE(review): the #ifdef/#else structure selecting between the two
 * *procRef assignments is elided in this extract. */
17135  * @brief Returns HARQ proc for which data expected now.
17139  *     Function: rgSCHCmnUlHqProcForUe
17140  *     Purpose:  This function returns the harq process for
17141  *               which data is expected in the current subframe.
17142  *               It does not validate that the HARQ process
17143  *               has an allocation.
17147  *  @param[in]  RgSchCellCb      *cell
17148  *  @param[in]  CmLteTimingInfo  frm
17149  *  @param[in]  RgSchUeCb        *ue
17150  *  @param[out] RgSchUlHqProcCb  **procRef
17154 Void rgSCHCmnUlHqProcForUe
17157 CmLteTimingInfo     frm,
17159 RgSchUlHqProcCb     **procRef
17162 Void rgSCHCmnUlHqProcForUe(cell, frm, ue, procRef)
17164 CmLteTimingInfo     frm;
17166 RgSchUlHqProcCb     **procRef;
/* Derive the HARQ process id from the timing info, then resolve it to the
 * UE's HARQ process control block. */
17170    uint8_t procId = rgSCHCmnGetUlHqProcIdx(&frm, cell);
17173    *procRef = rgSCHUhmGetUlHqProc(cell, ue, procId);
17175    *procRef = rgSCHUhmGetUlProcByTime(cell, ue, frm);
/* NOTE(review): braces, the error-branch body and the return statement are
 * elided in this extract. */
17182  * @brief Update harq process for allocation.
17186  *     Function : rgSCHCmnUpdUlHqProc
17188  *     This function is invoked when harq process
17189  *     control block is now in a new memory location
17190  *     thus requiring a pointer/reference update.
17192  *  @param[in]  RgSchCellCb      *cell
17193  *  @param[in]  RgSchUlHqProcCb  *curProc
17194  *  @param[in]  RgSchUlHqProcCb  *oldProc
17200 S16 rgSCHCmnUpdUlHqProc
17203 RgSchUlHqProcCb      *curProc,
17204 RgSchUlHqProcCb      *oldProc
17207 S16 rgSCHCmnUpdUlHqProc(cell, curProc, oldProc)
17209 RgSchUlHqProcCb      *curProc;
17210 RgSchUlHqProcCb      *oldProc;
/* Debug-build sanity check: the relocated process must carry an allocation. */
17216 #if (ERRCLASS & ERRCLS_DEBUG)
17217    if (curProc->alloc == NULLP)
/* Re-point the allocation back at the process's new memory location. */
17222    curProc->alloc->hqProc = curProc;
17224 }  /* rgSCHCmnUpdUlHqProc */
17227 /*MS_WORKAROUND  for CR FIXME */
/* NOTE(review): braces, #ifdef guards and the return are elided in this
 * extract. Typo "Hsndles" is in the original doxygen text. */
17229  * @brief Hsndles BSR timer expiry
17233  *     Function : rgSCHCmnBsrTmrExpry
17235  *     This function is invoked when periodic BSR timer expires for a UE.
17237  *  @param[in]  RgSchUeCb        *ue
17243 S16 rgSCHCmnBsrTmrExpry
17248 S16 rgSCHCmnBsrTmrExpry(ueCb)
17252    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ueCb->cell);
/* Treat the expiry like a scheduling request so the UE gets a grant to
 * report its buffer status. */
17255    ueCb->isSrGrant = TRUE;
17258    emtcStatsUlBsrTmrTxp++;
/* Dispatch to the EMTC or regular UL scheduler's SR handler. */
17262    if(ueCb->cell->emtcEnable)
17266       cellSch->apisEmtcUl->rgSCHSrRcvd(ueCb->cell, ueCb);
17273       cellSch->apisUl->rgSCHSrRcvd(ueCb->cell, ueCb);
/* NOTE(review): braces, several declarations, returns and #ifdef guards are
 * elided in this extract; comments cover only visible lines. */
17280  * @brief Short BSR update.
17284  *     Function : rgSCHCmnUpdBsrShort
17286  *     This functions does requisite updates to handle short BSR reporting.
17288  *  @param[in]  RgSchCellCb  *cell
17289  *  @param[in]  RgSchUeCb    *ue
17290  *  @param[in]  RgSchLcgCb   *ulLcg
17291  *  @param[in]  uint8_t      bsr
17292  *  @param[out] RgSchErrInfo *err
17298 S16 rgSCHCmnUpdBsrShort
17307 S16 rgSCHCmnUpdBsrShort(cell, ue, ulLcg, bsr, err)
17317    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17319    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17320    RgSchCmnLcg *cmnLcg = NULLP;
/* Reject a short BSR for an unconfigured LCG. */
17326    if (!RGSCH_LCG_ISCFGD(ulLcg))
17328       err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
17331    for (lcgCnt=0; lcgCnt<4; lcgCnt++)
17334       /* Set BS of all other LCGs to Zero.
17335          If Zero BSR is reported in Short BSR include this LCG too */
17336       if ((lcgCnt != ulLcg->lcgId) ||
17337             (!bsr && !ueUl->hqEnt.numBusyHqProcs))
17339          /* If old BO is zero do nothing */
17340          if(((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs != 0)
17342             for(idx = 0; idx < ue->ul.lcgArr[lcgCnt].numLch; idx++)
/* Decrement per-QCI active-UE count and clear the active-LC bit for each
 * logical channel whose LCG buffer is being zeroed. */
17344                if((ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount) &&
17345                      (ue->ulActiveLCs & (1 <<
17346                                          (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1))))
17349                   ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount--;
17350                   ue->ulActiveLCs &= ~(1 <<
17351                         (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1));
17357          if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgCnt]))
17359             ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs = 0;
17360             ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->reportedBs = 0;
/* New non-zero buffer on a non-LCG0 group whose buffer was empty: mark its
 * logical channels active and bump the per-QCI UE counts. */
17365    if(ulLcg->lcgId && bsr && (((RgSchCmnLcg *)(ulLcg->sch))->bs == 0))
17367       for(idx = 0; idx < ulLcg->numLch; idx++)
17370          if (!(ue->ulActiveLCs & (1 << (ulLcg->lcArray[idx]->qciCb->qci -1))))
17372             ulLcg->lcArray[idx]->qciCb->ulUeCount++;
17373             ue->ulActiveLCs |= (1 << (ulLcg->lcArray[idx]->qciCb->qci -1));
17378    /* Resetting the nonGbrLcgBs info here */
17379    ue->ul.nonGbrLcgBs = 0;
17380    ue->ul.nonLcg0Bs = 0;
17382    cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Translate the 6-bit BSR index to bytes via the (extended) BSR table. */
17384    if (TRUE == ue->ul.useExtBSRSizes)
17386       cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
17390       cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
17392    if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17394       /* TBD check for effGbr != 0 */
/* GBR bearer: cap the buffer status at the effective GBR + delta MBR. */
17395       cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17397    else if (0 == ulLcg->lcgId)
17399       /* This is added for handling LCG0 */
17400       cmnLcg->bs = cmnLcg->reportedBs;
17404       /* Update non GBR LCG's BS*/
/* Non-GBR: cap at the UE's effective AMBR. */
17405       ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
17406       cmnLcg->bs = ue->ul.nonGbrLcgBs;
17408    ue->ul.totalBsr = cmnLcg->bs;
/* A zero BSR makes the pending BSR timer redundant - stop it. */
17411    if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (bsr == 0))
17413       rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
17417    if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
17419       rgSCHCmnSpsBsrRpt(cell, ue, ulLcg);
17422    rgSCHCmnUpdUlCompEffBsr(ue);
17425    if(cell->emtcEnable)
17429       cellSch->apisEmtcUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
17436       cellSch->apisUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
/* UL carrier aggregation: propagate the BSR update to each active SCell. */
17440    if (ue->ul.isUlCaEnabled && ue->numSCells)
17442       for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
17444 #ifndef PAL_ENABLE_UL_CA
17445          if((ue->cellInfo[sCellIdx] != NULLP) &&
17446                (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
17448          if(ue->cellInfo[sCellIdx] != NULLP)
17451             cellSch->apisUl->rgSCHUpdBsrShort(ue->cellInfo[sCellIdx]->cell,
/* NOTE(review): braces, declarations, returns and #ifdef guards are elided
 * in this extract; comments cover only visible lines. */
17462  * @brief Truncated BSR update.
17466  *     Function : rgSCHCmnUpdBsrTrunc
17468  *     This functions does required updates to handle truncated BSR report.
17471  *  @param[in]  RgSchCellCb  *cell
17472  *  @param[in]  RgSchUeCb    *ue
17473  *  @param[in]  RgSchLcgCb   *ulLcg
17474  *  @param[in]  uint8_t      bsr
17475  *  @param[out] RgSchErrInfo *err
17481 S16 rgSCHCmnUpdBsrTrunc
17490 S16 rgSCHCmnUpdBsrTrunc(cell, ue, ulLcg, bsr, err)
17498    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17499    RgSchCmnLcg *cmnLcg = NULLP;
/* Reject a truncated BSR for an unconfigured LCG. */
17506    if (!RGSCH_LCG_ISCFGD(ulLcg))
17508       err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
17511    /* set all higher prio lcgs bs to 0 and update this lcgs bs and
17512       total bsr= sumofall lcgs bs */
/* Higher-priority LCGs (lower ids) than the reporting one: zero their
 * buffer state and deactivate their logical channels. */
17515    for (cnt = ulLcg->lcgId-1; cnt >= 0; cnt--)
17518       /* If Existing BO is zero the don't do anything */
17519       if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs != 0)
17521          for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
17524             if((ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount) &&
17525                   (ue->ulActiveLCs & (1 <<
17526                                       (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
17528                ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount--;
17529                ue->ulActiveLCs &= ~(1 <<
17530                      (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
17535          ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs = 0;
17536          ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->reportedBs = 0;
/* Reporting LCG and lower-priority ones: (re)activate their logical
 * channels where previously empty. */
17541    for (cnt = ulLcg->lcgId; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
17543       if (ulLcg->lcgId == 0)
17547       /* If Existing BO is zero the don't do anything */
17548       if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs == 0)
17550          for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
17553             if (!(ue->ulActiveLCs & (1 <<
17554                                      (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
17556                ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount++;
17557                ue->ulActiveLCs |= (1 <<
17558                      (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
17564    ue->ul.nonGbrLcgBs = 0;
17565    ue->ul.nonLcg0Bs = 0;
17566    cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Translate the BSR index to bytes (extended table when configured). */
17567    if (TRUE == ue->ul.useExtBSRSizes)
17569       cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
17573       cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
17575    if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17577       cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17579    else if(ulLcg->lcgId == 0)
17581       /* This is for handeling LCG0 */
17582       cmnLcg->bs = cmnLcg->reportedBs;
17586       ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
17587       cmnLcg->bs = ue->ul.nonGbrLcgBs;
17589    ue->ul.totalBsr = cmnLcg->bs;
17591    for (cnt = ulLcg->lcgId+1; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
17593       /* TODO: The bs for the other LCGs may be stale because some or all of
17594        * the part of bs may have been already scheduled/data received. Please
17595        * consider this when truncated BSR is tested/implemented */
17596       ue->ul.totalBsr += ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs;
17599    rgSCHCmnUpdUlCompEffBsr(ue);
17602    if(cell->emtcEnable)
17606       cellSch->apisEmtcUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
17613       cellSch->apisUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
/* UL carrier aggregation: propagate to each active SCell scheduler. */
17617    if (ue->ul.isUlCaEnabled && ue->numSCells)
17619       for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
17621 #ifndef PAL_ENABLE_UL_CA
17622          if((ue->cellInfo[sCellIdx] != NULLP) &&
17623                (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
17625          if(ue->cellInfo[sCellIdx] != NULLP)
17628             cellSch->apisUl->rgSCHUpdBsrTrunc(ue->cellInfo[sCellIdx]->cell, ue, ulLcg, bsr);
/* NOTE(review): braces, declarations, returns and #ifdef guards are elided
 * in this extract; comments cover only visible lines. */
17638  * @brief Long BSR update.
17642  *     Function : rgSCHCmnUpdBsrLong
17644  *     - Update BSRs for all configured LCGs.
17645  *     - Update priority of LCGs if needed.
17646  *     - Update UE's position within/across uplink scheduling queues.
17649  *  @param[in]  RgSchCellCb  *cell
17650  *  @param[in]  RgSchUeCb    *ue
17651  *  @param[in]  uint8_t      bsArr[]
17652  *  @param[out] RgSchErrInfo *err
17658 S16 rgSCHCmnUpdBsrLong
17666 S16 rgSCHCmnUpdBsrLong(cell, ue, bsArr, err)
17673    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Per-LCG byte counts derived from the four reported BSR indices. */
17674    uint32_t tmpBsArr[4] = {0, 0, 0, 0};
17675    uint32_t nonGbrBs = 0;
/* Activate logical channels of LCGs 1..3 that transition from empty. */
17684    for(idx1 = 1; idx1 < RGSCH_MAX_LCG_PER_UE; idx1++)
17686       /* If Old BO is non zero then do nothing */
17687       if ((((RgSchCmnLcg *)(ue->ul.lcgArr[idx1].sch))->bs == 0)
17690          for(idx2 = 0; idx2 < ue->ul.lcgArr[idx1].numLch; idx2++)
17693             if (!(ue->ulActiveLCs & (1 <<
17694                                      (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1))))
17696                ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->ulUeCount++;
17697                ue->ulActiveLCs |= (1 <<
17698                      (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1));
17704    ue->ul.nonGbrLcgBs = 0;
17705    ue->ul.nonLcg0Bs = 0;
/* LCG0 is taken at face value (no GBR/AMBR capping). */
17707    if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[0]))
17709       if (TRUE == ue->ul.useExtBSRSizes)
17711          ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnExtBsrTbl[bsArr[0]];
17712          ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnExtBsrTbl[bsArr[0]];
17713          tmpBsArr[0] = rgSchCmnExtBsrTbl[bsArr[0]];
17717          ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnBsrTbl[bsArr[0]];
17718          ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnBsrTbl[bsArr[0]];
17719          tmpBsArr[0] = rgSchCmnBsrTbl[bsArr[0]];
17722    for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
17724       if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
17726          RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
17728          if (TRUE == ue->ul.useExtBSRSizes)
17730             cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsArr[lcgId]];
17734             cmnLcg->reportedBs = rgSchCmnBsrTbl[bsArr[lcgId]];
17736          if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* GBR: cap at effective GBR + delta MBR. */
17738             cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17739             tmpBsArr[lcgId] = cmnLcg->bs;
/* Non-GBR: accumulate for the shared AMBR cap applied below. */
17743             nonGbrBs += cmnLcg->reportedBs;
17744             tmpBsArr[lcgId] = cmnLcg->reportedBs;
17745             cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
17749    ue->ul.nonGbrLcgBs = RGSCH_MIN(nonGbrBs,ue->ul.effAmbr);
17751    ue->ul.totalBsr = tmpBsArr[0] + tmpBsArr[1] + tmpBsArr[2] + tmpBsArr[3];
/* All-zero report: the pending BSR timer is redundant - stop it. */
17753    if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (ue->ul.totalBsr == 0))
17755       rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
17760    if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE) /* SPS_FIX */
17762       if(ue->ul.totalBsr - tmpBsArr[1] == 0)
17763       {/* Updaing the BSR to SPS only if LCG1 BS is present in sps active state */
17764          rgSCHCmnSpsBsrRpt(cell, ue, &ue->ul.lcgArr[1]);
17768    rgSCHCmnUpdUlCompEffBsr(ue);
17771    if(cell->emtcEnable)
17775       cellSch->apisEmtcUl->rgSCHUpdBsrLong(cell, ue, bsArr);
17782       cellSch->apisUl->rgSCHUpdBsrLong(cell, ue, bsArr);
/* UL carrier aggregation: propagate to each active SCell scheduler. */
17786    if (ue->ul.isUlCaEnabled && ue->numSCells)
17788       for(uint8_t idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
17790 #ifndef PAL_ENABLE_UL_CA
17791          if((ue->cellInfo[idx] != NULLP) &&
17792                (ue->cellInfo[idx]->sCellState == RG_SCH_SCELL_ACTIVE))
17794          if(ue->cellInfo[idx] != NULLP)
17797             cellSch->apisUl->rgSCHUpdBsrLong(ue->cellInfo[idx]->cell, ue, bsArr);
/* NOTE(review): loop structure, braces and return are elided in this
 * extract; comments cover only visible lines. */
17807  * @brief PHR update.
17811  *     Function : rgSCHCmnUpdExtPhr
17813  *     Updates extended power headroom information for an UE.
17815  *  @param[in]  RgSchCellCb  *cell
17816  *  @param[in]  RgSchUeCb    *ue
17817  *  @param[in]  uint8_t      phr
17818  *  @param[out] RgSchErrInfo *err
17824 S16 rgSCHCmnUpdExtPhr
17828 RgInfExtPhrCEInfo   *extPhr,
17832 S16 rgSCHCmnUpdExtPhr(cell, ue, extPhr, err)
17835 RgInfExtPhrCEInfo   *extPhr;
17839    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17840    RgSchCmnAllocRecord *allRcd;
/* Walk the UE's allocation history from the most recent entry. */
17841    CmLList *node = ueUl->ulAllocLst.last;
17844    RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
17851       allRcd = (RgSchCmnAllocRecord *)node->node;
/* Apply the PHR against the allocation that matches the MAC CE report time. */
17853       if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
17855          rgSCHPwrUpdExtPhr(cell, ue, extPhr, allRcd);
17860    if(ulSpsUe->isUlSpsActv)
17862       rgSCHCmnSpsPhrInd(cell,ue);
17867 }   /* rgSCHCmnUpdExtPhr */
/* NOTE(review): the ANSI prototype, loop structure and return are elided in
 * this extract; comments cover only visible lines. */
17873  * @brief PHR update.
17877  *     Function : rgSCHCmnUpdPhr
17879  *     Updates power headroom information for an UE.
17881  *  @param[in]  RgSchCellCb  *cell
17882  *  @param[in]  RgSchUeCb    *ue
17883  *  @param[in]  uint8_t      phr
17884  *  @param[out] RgSchErrInfo *err
17898 S16 rgSCHCmnUpdPhr(cell, ue, phr, err)
17905    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17906    RgSchCmnAllocRecord *allRcd;
/* Walk the UE's allocation history from the most recent entry. */
17907    CmLList *node = ueUl->ulAllocLst.last;
17910    RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
17917       allRcd = (RgSchCmnAllocRecord *)node->node;
/* Apply the PHR against the allocation matching the MAC CE report time,
 * using the configured max power reference. */
17919       if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
17921          rgSCHPwrUpdPhr(cell, ue, phr, allRcd, RG_SCH_CMN_PWR_USE_CFG_MAX_PWR);
17926    if(ulSpsUe->isUlSpsActv)
17928       rgSCHCmnSpsPhrInd(cell,ue);
17933 }   /* rgSCHCmnUpdPhr */
/* NOTE(review): braces, #ifdef guards and return are elided in this extract. */
17936  * @brief UL grant for contention resolution.
17940  *     Function : rgSCHCmnContResUlGrant
17942  *     Add UE to another queue specifically for CRNTI based contention
17946  *  @param[in]  RgSchUeCb  *ue
17947  *  @param[out] RgSchErrInfo *err
17953 S16 rgSCHCmnContResUlGrant
17960 S16 rgSCHCmnContResUlGrant(cell, ue, err)
17966    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Dispatch to the EMTC or regular UL scheduler's contention-resolution
 * grant handler. */
17969    if(cell->emtcEnable)
17973       cellSch->apisEmtcUl->rgSCHContResUlGrant(cell, ue);
17980       cellSch->apisUl->rgSCHContResUlGrant(cell, ue);
/* NOTE(review): the ANSI prototype, loop body, braces and return are elided
 * in this extract; comments cover only visible lines. */
17986  * @brief SR reception handling.
17990  *     Function : rgSCHCmnSrRcvd
17992  *     - Update UE's position within/across uplink scheduling queues
17993  *     - Update priority of LCGs if needed.
17995  *  @param[in]  RgSchCellCb  *cell
17996  *  @param[in]  RgSchUeCb    *ue
17997  *  @param[in]  CmLteTimingInfo frm
17998  *  @param[out] RgSchErrInfo *err
18008 CmLteTimingInfo frm,
18012 S16 rgSCHCmnSrRcvd(cell, ue, frm, err)
18015 CmLteTimingInfo frm;
18019    RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18020    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Most recent UL allocation, checked against the SR time below. */
18021    CmLList   *node = ueUl->ulAllocLst.last;
18025    emtcStatsUlTomSrInd++;
18028    RGSCH_INCR_SUB_FRAME(frm, 1); /* 1 TTI after the time SR was sent */
18031       RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)node->node;
18032       if (RGSCH_TIMEINFO_SAME(frm, allRcd->allocTime))
18038    //TODO_SID Need to check when it is getting triggered
18039    ue->isSrGrant = TRUE;
/* Dispatch to the EMTC or regular UL scheduler's SR handler. */
18041    if(cell->emtcEnable)
18045       cellSch->apisEmtcUl->rgSCHSrRcvd(cell, ue);
18052       cellSch->apisUl->rgSCHSrRcvd(cell, ue);
/* NOTE(review): braces and return are elided in this extract. */
18058  * @brief Returns first uplink allocation to send reception
18063  *     Function: rgSCHCmnFirstRcptnReq(cell)
18064  *     Purpose:  This function returns the first uplink allocation
18065  *               (or NULLP if there is none) in the subframe
18066  *               in which is expected to prepare and send reception
18071  *  @param[in]  RgSchCellCb      *cell
18072  *  @return  RgSchUlAlloc*
18075 RgSchUlAlloc *rgSCHCmnFirstRcptnReq
18080 RgSchUlAlloc *rgSCHCmnFirstRcptnReq(cell)
18084    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18086    RgSchUlAlloc* alloc = NULLP;
/* Only look up the subframe when a valid reception-request index exists. */
18089    if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
18091       RgSchUlSf* sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18092       alloc = rgSCHUtlUlAllocFirst(sf);
/* Skip an allocation with no HARQ process attached. */
18094       if (alloc && alloc->hqProc == NULLP)
18096          alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* NOTE(review): braces and return are elided in this extract. */
18104  * @brief Returns first uplink allocation to send reception
18109  *     Function: rgSCHCmnNextRcptnReq(cell)
18110  *     Purpose:  This function returns the next uplink allocation
18111  *               (or NULLP if there is none) in the subframe
18112  *               in which is expected to prepare and send reception
18117  *  @param[in]  RgSchCellCb      *cell
18118  *  @return  RgSchUlAlloc*
18121 RgSchUlAlloc *rgSCHCmnNextRcptnReq
18124 RgSchUlAlloc          *alloc
18127 RgSchUlAlloc *rgSCHCmnNextRcptnReq(cell, alloc)
18129 RgSchUlAlloc          *alloc;
18132    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18134    //RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18137    if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
18139       RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
/* Advance past the given allocation; skip one with no HARQ process. */
18141       alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18142       if (alloc && alloc->hqProc == NULLP)
18144          alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* NOTE(review): loop header, braces and return are elided in this extract. */
18155  * @brief Collates DRX enabled UE's scheduled in this SF
18159  *     Function: rgSCHCmnDrxStrtInActvTmrInUl(cell)
18160  *     Purpose:  This function collates the link
18161  *               of UE's scheduled in this SF who
18162  *               have drx enabled. It then calls
18163  *               DRX specific function to start/restart
18164  *               inactivity timer in Ul
18168  *  @param[in]  RgSchCellCb      *cell
18172 Void rgSCHCmnDrxStrtInActvTmrInUl
18177 Void rgSCHCmnDrxStrtInActvTmrInUl(cell)
18181    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18182    RgSchUlSf *sf = &(cellUl->ulSfArr[cellUl->schdIdx]);
18183    RgSchUlAlloc *alloc = rgSCHUtlUlAllocFirst(sf);
18188    cmLListInit(&ulUeLst);
/* Collect DRX-enabled UEs with fresh (non-retx, non-SR-grant) allocations. */
18196       if (!(alloc->grnt.isRtx) && ueCb->isDrxEnabled && !(ueCb->isSrGrant)
18198             /* ccpu00139513- DRX inactivity timer should not be started  for
18199              * UL SPS occasions */
18200             && (alloc->hqProc->isSpsOccnHqP == FALSE)
18204          cmLListAdd2Tail(&ulUeLst,&(ueCb->ulDrxInactvTmrLnk));
18205          ueCb->ulDrxInactvTmrLnk.node = (PTR)ueCb;
18209       alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* Start/restart the UL DRX inactivity timer for the collected UEs. */
18212    (Void)rgSCHDrxStrtInActvTmr(cell,&ulUeLst,RG_SCH_DRX_UL);
/* NOTE(review): braces and return are elided in this extract. */
18219  * @brief Returns first uplink allocation to send HARQ feedback
18224  *     Function: rgSCHCmnFirstHqFdbkAlloc
18225  *     Purpose:  This function returns the first uplink allocation
18226  *               (or NULLP if there is none) in the subframe
18227  *               for which it is expected to prepare and send HARQ
18232  *  @param[in]  RgSchCellCb      *cell
18233  *  @param[in]  uint8_t               idx
18234  *  @return  RgSchUlAlloc*
18237 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc
18243 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc(cell, idx)
18248    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18250    RgSchUlAlloc  *alloc = NULLP;
/* Only look up the subframe when a valid HARQ-feedback index exists. */
18253    if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
18255       RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
18256       alloc = rgSCHUtlUlAllocFirst(sf);
/* Skip all allocations with no HARQ process attached. */
18258       while (alloc && (alloc->hqProc == NULLP))
18260          alloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* NOTE(review): braces and return are elided in this extract. */
18268  * @brief Returns next allocation to send HARQ feedback for.
18272  *     Function: rgSCHCmnNextHqFdbkAlloc(cell)
18273  *     Purpose:  This function returns the next uplink allocation
18274  *               (or NULLP if there is none) in the subframe
18275  *               for which HARQ feedback needs to be sent.
18279  *  @param[in]  RgSchCellCb      *cell
18280  *  @return  RgSchUlAlloc*
18283 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc
18286 RgSchUlAlloc *alloc,
18290 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc(cell, alloc, idx)
18292 RgSchUlAlloc *alloc;
18296    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18298    if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
18300       RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
/* Advance past the given allocation; skip those with no HARQ process. */
18302       alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18303       while (alloc && (alloc->hqProc == NULLP))
18305          alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18315 /***********************************************************
18317 * Func : rgSCHCmnUlGetITbsFrmIMcs
18319 * Desc : Returns the Itbs that is mapped to an Imcs
18320 * for the case of uplink.
18328 **********************************************************/
18330 uint8_t rgSCHCmnUlGetITbsFrmIMcs
18335 uint8_t rgSCHCmnUlGetITbsFrmIMcs(iMcs)
18340 return (rgUlIMcsTbl[iMcs].iTbs);
18343 /***********************************************************
18345 * Func : rgSCHCmnUlGetIMcsFrmITbs
18347 * Desc : Returns the Imcs that is mapped to an Itbs
18348 * for the case of uplink.
18352 * Notes: For iTbs 19, iMcs is dependant on modulation order.
18353 * Refer to 36.213, Table 8.6.1-1 and 36.306 Table 4.1-2
18354 * for UE capability information
18358 **********************************************************/
18360 uint8_t rgSCHCmnUlGetIMcsFrmITbs
18363 CmLteUeCategory ueCtg
18366 uint8_t rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg)
18368 CmLteUeCategory ueCtg;
18377 /*a higher layer can force a 64QAM UE to transmit at 16QAM.
18378 * We currently do not support this. Once the support for such
18379 * is added, ueCtg should be replaced by current transmit
18380 * modulation configuration.Refer to 36.213 -8.6.1
18382 else if ( iTbs < 19 )
18386 else if ((iTbs == 19) && (ueCtg != CM_LTE_UE_CAT_5))
18396 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
18397 was seen when IMCS exceeds 20 on T2k TDD*/
18407 /***********************************************************
18409 * Func : rgSCHCmnUlMinTbBitsForITbs
18411 * Desc : Returns the minimum number of bits that can
18412 * be given as grant for a specific CQI.
18420 **********************************************************/
18422 uint32_t rgSCHCmnUlMinTbBitsForITbs
18424 RgSchCmnUlCell *cellUl,
18428 uint32_t rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs)
18429 RgSchCmnUlCell *cellUl;
18434 RGSCH_ARRAY_BOUND_CHECK(0, rgTbSzTbl[0], iTbs);
18436 return (rgTbSzTbl[0][iTbs][cellUl->sbSize-1]);
18439 /***********************************************************
18441 * Func : rgSCHCmnUlSbAlloc
18443 * Desc : Given a required 'number of subbands' and a hole,
18444 * returns a suitable alloc such that the subband
18445 * allocation size is valid
18449 * Notes: Does not assume either passed numSb or hole size
18450 * to be valid for allocation, and hence arrives at
18451 * an acceptable value.
18454 **********************************************************/
18456 RgSchUlAlloc *rgSCHCmnUlSbAlloc
18463 RgSchUlAlloc *rgSCHCmnUlSbAlloc(sf, numSb, hole)
18469 uint8_t holeSz; /* valid hole size */
18470 RgSchUlAlloc *alloc;
18472 if ((holeSz = rgSchCmnMult235Tbl[hole->num].prvMatch) == hole->num)
18474 numSb = rgSchCmnMult235Tbl[numSb].match;
18475 if (numSb >= holeSz)
18477 alloc = rgSCHUtlUlAllocGetCompHole(sf, hole);
18481 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
18486 if (numSb < holeSz)
18488 numSb = rgSchCmnMult235Tbl[numSb].match;
18492 numSb = rgSchCmnMult235Tbl[numSb].prvMatch;
18495 if ( numSb >= holeSz )
18499 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
/* NOTE(review): braces and the ANSI parameter list are elided in this
 * extract. */
18505  * @brief To fill the RgSchCmnUeUlAlloc structure of UeCb.
18509  *     Function: rgSCHCmnUlUeFillAllocInfo
18510  *     Purpose:  Specific scheduler to call this API to fill the alloc
18513  *     Invoked by: Scheduler
18515  *  @param[in]  RgSchCellCb      *cell
18516  *  @param[out] RgSchUeCb        *ue
18520 Void rgSCHCmnUlUeFillAllocInfo
18526 Void rgSCHCmnUlUeFillAllocInfo(cell, ue)
18531    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18532    RgSchCmnUeUlAlloc  *ulAllocInfo;
18533    RgSchCmnUlUe       *ueUl;
18536    ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18537    ulAllocInfo = &ueUl->alloc;
18539    /* Fill alloc structure */
/* Populate TPC, n-DMRS and HARQ linkage on the pending allocation, then
 * prepare the PDCCH carrying it. */
18540    rgSCHCmnUlAllocFillTpc(cell, ue, ulAllocInfo->alloc);
18541    rgSCHCmnUlAllocFillNdmrs(cellUl, ulAllocInfo->alloc);
18542    rgSCHCmnUlAllocLnkHqProc(ue, ulAllocInfo->alloc, ulAllocInfo->alloc->hqProc,
18543          ulAllocInfo->alloc->hqProc->isRetx);
18545    rgSCHCmnUlFillPdcchWithAlloc(ulAllocInfo->alloc->pdcch,
18546          ulAllocInfo->alloc, ue);
18547    /* Recording information about this allocation */
18548    rgSCHCmnUlRecordUeAlloc(cell, ue);
18550    /* Update the UE's outstanding allocation */
/* Retransmissions do not add new outstanding bytes. */
18551    if (!ulAllocInfo->alloc->hqProc->isRetx)
18553       rgSCHCmnUlUpdOutStndAlloc(cell, ue, ulAllocInfo->allocdBytes);
/* NOTE(review): loop headers, braces and some statements are elided in this
 * extract; comments cover only visible lines. */
18560  * @brief Update the UEs outstanding alloc based on the BSR report's timing.
18565  *     Function: rgSCHCmnUpdUlCompEffBsr
18566  *     Purpose:  Clear off all the allocations from outstanding allocation that
18567  *     are later than or equal to BSR timing information (stored in UEs datIndTime).
18569  *     Invoked by: Scheduler
18571  *  @param[in]  RgSchUeCb *ue
18575 static Void rgSCHCmnUpdUlCompEffBsr
18580 static Void rgSCHCmnUpdUlCompEffBsr(ue)
18584    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,ue->cell);
/* Scan the allocation history backwards from the most recent entry. */
18585    CmLList   *node = ueUl->ulAllocLst.last;
18586    RgSchCmnAllocRecord *allRcd;
18587    uint32_t outStndAlloc=0;
18588    uint32_t nonLcg0OutStndAllocBs=0;
18589    uint32_t nonLcg0Bsr=0;
18591    RgSchCmnLcg *cmnLcg = NULLP;
18595       allRcd = (RgSchCmnAllocRecord *)node->node;
/* Stop at the allocation matching the BSR's MAC CE report time. */
18596       if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
18605       allRcd = (RgSchCmnAllocRecord *)node->node;
/* Sum bytes granted at/after the report time - already covered by grants. */
18607       outStndAlloc += allRcd->alloc;
18610    cmnLcg = (RgSchCmnLcg *)(ue->ul.lcgArr[0].sch);
18611    /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
18612    if (cmnLcg->bs > outStndAlloc)
18614       cmnLcg->bs -= outStndAlloc;
18615       ue->ul.minReqBytes = cmnLcg->bs;
/* Outstanding grants exceed LCG0's buffer: the excess counts against the
 * non-LCG0 buffer status. */
18620       nonLcg0OutStndAllocBs = outStndAlloc - cmnLcg->bs;
18624    for(lcgId = 1;lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
18626       if(RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
18628          cmnLcg = ((RgSchCmnLcg *) (ue->ul.lcgArr[lcgId].sch));
18629          if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
18631             nonLcg0Bsr += cmnLcg->bs;
18635    nonLcg0Bsr += ue->ul.nonGbrLcgBs;
18636    if (nonLcg0OutStndAllocBs > nonLcg0Bsr)
18642       nonLcg0Bsr -= nonLcg0OutStndAllocBs;
18644    ue->ul.nonLcg0Bs = nonLcg0Bsr;
18645    /* Cap effBsr with nonLcg0Bsr and append lcg0 bs.
18646     * nonLcg0Bsr limit applies only to lcg1,2,3 */
18647    /* better be handled in individual scheduler */
18648    ue->ul.effBsr = nonLcg0Bsr +\
18649                    ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
/* NOTE(review): braces, #ifdef/#else guards around the allocTime update and
 * the return are elided in this extract; comments cover only visible lines. */
18654  * @brief Records information about the current allocation.
18658  *     Function: rgSCHCmnUlRecordUeAlloc
18659  *     Purpose:  Records information about the curent allocation.
18660  *               This includes the allocated bytes, as well
18661  *               as some power information.
18663  *     Invoked by: Scheduler
18665  *  @param[in]  RgSchCellCb      *cell
18666  *  @param[in]  RgSchUeCb        *ue
18670 Void rgSCHCmnUlRecordUeAlloc
18676 Void rgSCHCmnUlRecordUeAlloc(cell, ue)
18682    RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18684    RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18685    CmLListCp *lst = &ueUl->ulAllocLst;
/* Recycle the oldest record: unlink the head, refill it, re-add at tail. */
18686    CmLList *node = ueUl->ulAllocLst.first;
18687    RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
18688    RgSchCmnUeUlAlloc *ulAllocInfo = &ueUl->alloc;
18689    CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
18691    cmLListDelFrm(lst, &allRcd->lnk);
18693    /* To the crntTime, add the MIN time at which UE will
18694     * actually send the BSR i.e DELTA+4 */
18695    allRcd->allocTime = cell->crntTime;
18696    /*ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
18698    if(ue->isEmtcUe == TRUE)
18700       RGSCH_INCR_SUB_FRAME_EMTC(allRcd->allocTime,
18701             (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
18706       RGSCH_INCR_SUB_FRAME(allRcd->allocTime,
18707             (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
18710    allRcd->allocTime = cellUl->schdTime;
18712    cmLListAdd2Tail(lst, &allRcd->lnk);
18714    /* Filling in the parameters to be recorded */
18715    allRcd->alloc = ulAllocInfo->allocdBytes;
18716    //allRcd->numRb = ulAllocInfo->alloc->grnt.numRb;
/* RB count derived from VRB groups (5GTF grant layout). */
18717    allRcd->numRb = (ulAllocInfo->alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
18718    /*Recording the UL CQI derived from the maxUlCqi */
18719    allRcd->cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
18720    allRcd->tpc = ulAllocInfo->alloc->grnt.tpc;
18722    rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
/* Cell-level UL throughput measurement. */
18724    cell->measurements.ulBytesCnt += ulAllocInfo->allocdBytes;
18729 /** PHR handling for MSG3
18730 * @brief Records allocation information of msg3 in the the UE.
18734 * Function: rgSCHCmnUlRecMsg3Alloc
18735 * Purpose: Records information about msg3 allocation.
18736 * This includes the allocated bytes, as well
18737 * as some power information.
18739 * Invoked by: Scheduler
18741 * @param[in] RgSchCellCb *cell
18742 * @param[in] RgSchUeCb *ue
18743 * @param[in] RgSchRaCb *raCb
18747 Void rgSCHCmnUlRecMsg3Alloc
18754 Void rgSCHCmnUlRecMsg3Alloc(cell, ue, raCb)
/* Same recycling pattern as rgSCHCmnUlRecordUeAlloc: reuse the oldest
 * allocation record, stamp it with the msg3 grant, re-queue at tail. */
18760 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18761 CmLListCp *lst = &ueUl->ulAllocLst;
18762 CmLList *node = ueUl->ulAllocLst.first;
18763 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
18765 /* Stack Crash problem for TRACE5 changes */
18767 cmLListDelFrm(lst, node);
18768 allRcd->allocTime = raCb->msg3AllocTime;
18769 cmLListAdd2Tail(lst, node);
18771 /* Filling in the parameters to be recorded */
18772 allRcd->alloc = raCb->msg3Grnt.datSz;
18773 allRcd->numRb = raCb->msg3Grnt.numRb;
/* msg3 uses the CCCH CQI (no UE-reported CQI exists yet at RACH time). */
18774 allRcd->cqi = raCb->ccchCqi;
18775 allRcd->tpc = raCb->msg3Grnt.tpc;
18777 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
18782 * @brief Keeps track of the most recent RG_SCH_CMN_MAX_ALLOC_TRACK
18783 * allocations to track. Adds this allocation to the ueUl's ulAllocLst.
18788 * Function: rgSCHCmnUlUpdOutStndAlloc
18789 * Purpose: Recent Allocation shall be at First Pos'n.
18790 * Remove the last node, update the fields
18791 * with the new allocation and add at front.
18793 * Invoked by: Scheduler
18795 * @param[in] RgSchCellCb *cell
18796 * @param[in] RgSchUeCb *ue
18797 * @param[in] uint32_t alloc
18801 Void rgSCHCmnUlUpdOutStndAlloc
18808 Void rgSCHCmnUlUpdOutStndAlloc(cell, ue, alloc)
/* Debits 'alloc' bytes from the UE's buffer-status accounting: LCG0 first,
 * then the non-LCG0 aggregate, and recomputes effBsr. If effBsr reaches
 * zero, BSR timers are reset/restarted. */
18814 uint32_t nonLcg0Alloc=0;
18816 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
18817 if (((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs > alloc)
18819 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs -= alloc;
/* LCG0 exhausted: carry the remainder over to the non-LCG0 pool. */
18823 nonLcg0Alloc = alloc - ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
18824 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = 0;
18827 if (nonLcg0Alloc >= ue->ul.nonLcg0Bs)
18829 ue->ul.nonLcg0Bs = 0;
18833 ue->ul.nonLcg0Bs -= nonLcg0Alloc;
18835 /* Cap effBsr with effAmbr and append lcg0 bs.
18836 * effAmbr limit applies only to lcg1,2,3 non GBR LCG's*/
18837 /* better be handled in individual scheduler */
18838 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
18839 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
/* No pending data: stop any running BSR timer and, for non-SR grants with
 * periodic BSR configured, restart the periodic BSR timer. */
18841 if (ue->ul.effBsr == 0)
18843 if (ue->bsrTmr.tmrEvnt != TMR_NONE)
18845 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
18848 if (FALSE == ue->isSrGrant)
18850 if (ue->ul.bsrTmrCfg.isPrdBsrTmrPres)
18853 rgSCHTmrStartTmr(cell, ue, RG_SCH_TMR_BSR,
18854 ue->ul.bsrTmrCfg.prdBsrTmr);
18860 /* Resetting UEs lower Cap */
18861 ue->ul.minReqBytes = 0;
18868 * @brief Returns the "Itbs" for a given UE.
18872 * Function: rgSCHCmnUlGetITbs
18873 * Purpose: This function returns the "Itbs" for a given UE.
18875 * Invoked by: Scheduler
18877 * @param[in] RgSchUeCb *ue
/* @return uint8_t  iTbs index looked up from rgSchCmnUlCqiToTbsTbl
 * (row selected by isEcp: extended vs normal cyclic prefix). */
18881 uint8_t rgSCHCmnUlGetITbs
18888 uint8_t rgSCHCmnUlGetITbs(cell, ue, isEcp)
18894 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18895 /* CQI will be capped to maxUlCqi for 16qam UEs */
18896 CmLteUeCategory ueCtgy = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
18900 uint8_t maxiTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ueUl->maxUlCqi];
18904 /* #ifdef RG_SCH_CMN_EXT_CP_SUP For ECP pick index 1 */
/* Non-CAT5 UEs are clamped to the configured maxUlCqi; CAT5 may use the
 * reported CQI directly. */
18906 if ( (ueCtgy != CM_LTE_UE_CAT_5) &&
18907 (ueUl->validUlCqi > ueUl->maxUlCqi)
18910 cqi = ueUl->maxUlCqi;
18914 cqi = ueUl->validUlCqi;
/* Link-adaptation path: combine CQI-based iTbs with the LA delta
 * (both scaled by 100), then clamp to table max and cell threshold. */
18918 iTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
18920 RG_SCH_CHK_ITBS_RANGE(iTbs, maxiTbs);
18922 iTbs = RGSCH_MIN(iTbs, ue->cell->thresholds.maxUlItbs);
18925 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
18926 was seen when IMCS exceeds 20 on T2k TDD */
/* Fallback path (presumably when LA is disabled — lines elided here):
 * derive iTbs straight from the current wideband UL CQI. */
18935 if ( (ueCtgy != CM_LTE_UE_CAT_5) && (ueUl->crntUlCqi[0] > ueUl->maxUlCqi ))
18937 cqi = ueUl->maxUlCqi;
18941 cqi = ueUl->crntUlCqi[0];
18944 return (rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][cqi]);
18948 * @brief This function adds the UE to DLRbAllocInfo TX lst.
18952 * Function: rgSCHCmnDlRbInfoAddUeTx
18953 * Purpose: This function adds the UE to DLRbAllocInfo TX lst.
18955 * Invoked by: Common Scheduler
18957 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
18958 * @param[in] RgSchUeCb *ue
18959 * @param[in] RgSchDlHqProcCb *hqP
18964 static Void rgSCHCmnDlRbInfoAddUeTx
18967 RgSchCmnDlRbAllocInfo *allocInfo,
18969 RgSchDlHqProcCb *hqP
18972 static Void rgSCHCmnDlRbInfoAddUeTx(cell, allocInfo, ue, hqP)
18974 RgSchCmnDlRbAllocInfo *allocInfo;
18976 RgSchDlHqProcCb *hqP;
18979 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Guard: reqLnk.node == NULLP means the HARQ proc is not yet queued;
 * avoids double insertion into the TX list. */
18982 if (hqP->reqLnk.node == NULLP)
18984 if (cellSch->dl.isDlFreqSel)
/* Frequency-selective scheduling delegates list placement to the DLFS API. */
18986 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
18987 &allocInfo->dedAlloc.txHqPLst, hqP);
18992 cmLListAdd2Tail(&allocInfo->dedAlloc.txHqPLst, &hqP->reqLnk);
18994 hqP->reqLnk.node = (PTR)hqP;
19001 * @brief This function adds the UE to DLRbAllocInfo RETX lst.
19005 * Function: rgSCHCmnDlRbInfoAddUeRetx
19006 * Purpose: This function adds the UE to DLRbAllocInfo RETX lst.
19008 * Invoked by: Common Scheduler
19010 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19011 * @param[in] RgSchUeCb *ue
19012 * @param[in] RgSchDlHqProcCb *hqP
19017 static Void rgSCHCmnDlRbInfoAddUeRetx
19020 RgSchCmnDlRbAllocInfo *allocInfo,
19022 RgSchDlHqProcCb *hqP
19025 static Void rgSCHCmnDlRbInfoAddUeRetx(cell, allocInfo, ue, hqP)
19027 RgSchCmnDlRbAllocInfo *allocInfo;
19029 RgSchDlHqProcCb *hqP;
/* Unlike the TX variant, no reqLnk guard here — see comment below. */
19032 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
19035 if (cellSch->dl.isDlFreqSel)
19037 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
19038 &allocInfo->dedAlloc.retxHqPLst, hqP);
19042 /* checking UE's presence in this lst is unnecessary */
19043 cmLListAdd2Tail(&allocInfo->dedAlloc.retxHqPLst, &hqP->reqLnk);
19044 hqP->reqLnk.node = (PTR)hqP;
19050 * @brief This function adds the UE to DLRbAllocInfo TX-RETX lst.
19054 * Function: rgSCHCmnDlRbInfoAddUeRetxTx
19055 * Purpose: This adds the UE to DLRbAllocInfo TX-RETX lst.
19057 * Invoked by: Common Scheduler
19059 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19060 * @param[in] RgSchUeCb *ue
19061 * @param[in] RgSchDlHqProcCb *hqP
19066 static Void rgSCHCmnDlRbInfoAddUeRetxTx
19069 RgSchCmnDlRbAllocInfo *allocInfo,
19071 RgSchDlHqProcCb *hqP
19074 static Void rgSCHCmnDlRbInfoAddUeRetxTx(allocInfo, ue, hqP)
19076 RgSchCmnDlRbAllocInfo *allocInfo;
19078 RgSchDlHqProcCb *hqP;
/* Same placement pattern as the TX/RETX variants, targeting txRetxHqPLst.
 * NOTE(review): K&R variant takes no 'cell' but body uses 'cell' in the
 * DLFS call — likely elided parameter lines; confirm in full source. */
19081 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
19084 if (cellSch->dl.isDlFreqSel)
19086 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
19087 &allocInfo->dedAlloc.txRetxHqPLst, hqP);
19091 cmLListAdd2Tail(&allocInfo->dedAlloc.txRetxHqPLst, &hqP->reqLnk);
19092 hqP->reqLnk.node = (PTR)hqP;
19098 * @brief This function adds the UE to DLRbAllocInfo NonSchdRetxLst.
19102 * Function: rgSCHCmnDlAdd2NonSchdRetxLst
19103 * Purpose: During RB estimation for RETX, if allocation fails
19104 * then appending it to NonSchdRetxLst, the further
19105 * action is taken as part of Finalization in
19106 * respective schedulers.
19108 * Invoked by: Common Scheduler
19110 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19111 * @param[in] RgSchUeCb *ue
19112 * @param[in] RgSchDlHqProcCb *hqP
19117 static Void rgSCHCmnDlAdd2NonSchdRetxLst
19119 RgSchCmnDlRbAllocInfo *allocInfo,
19121 RgSchDlHqProcCb *hqP
19124 static Void rgSCHCmnDlAdd2NonSchdRetxLst(allocInfo, ue, hqP)
19125 RgSchCmnDlRbAllocInfo *allocInfo;
19127 RgSchDlHqProcCb *hqP;
19130 CmLList *schdLnkNode;
/* SPS DL HARQ procs are excluded from the non-scheduled RETX list
 * (handled by the SPS module instead). */
19134 if ( (hqP->sch != (RgSchCmnDlHqProc *)NULLP) &&
19135 (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP)))
19141 schdLnkNode = &hqP->schdLstLnk;
19142 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
19143 cmLListAdd2Tail(&allocInfo->dedAlloc.nonSchdRetxHqPLst, schdLnkNode);
19151 * @brief This function adds the UE to DLRbAllocInfo NonSchdTxRetxLst.
19155 * Function: rgSCHCmnDlAdd2NonSchdTxRetxLst
19156 * Purpose: During RB estimation for TXRETX, if allocation fails
19157 * then appending it to NonSchdTxRetxLst, the further
19158 * action is taken as part of Finalization in
19159 * respective schedulers.
19161 * Invoked by: Common Scheduler
19163 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19164 * @param[in] RgSchUeCb *ue
19165 * @param[in] RgSchDlHqProcCb *hqP
19171 * @brief This function handles the initialisation of DL HARQ/ACK feedback
19172 * timing information for each DL subframe.
19176 * Function: rgSCHCmnDlANFdbkInit
19177 * Purpose: Each DL subframe stores the sfn and subframe
19178 * information of UL subframe in which it expects
19179 * HARQ ACK/NACK feedback for this subframe.It
19180 * generates the information based on Downlink
19181 * Association Set Index table.
19183 * Invoked by: Scheduler
19185 * @param[in] RgSchCellCb* cell
/* @return S16 — presumably ROK on success; return statements are elided
 * in this sampled chunk. */
19190 static S16 rgSCHCmnDlANFdbkInit
19195 static S16 rgSCHCmnDlANFdbkInit(cell)
19200 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19201 uint8_t maxDlSubfrms = cell->numDlSubfrms;
19205 uint8_t calcSfnOffset;
19207 uint8_t ulSfCnt =0;
19208 RgSchTddSubfrmInfo ulSubfrmInfo;
19209 uint8_t maxUlSubfrms;
19212 ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19213 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19215 /* Generate HARQ ACK/NACK feedback information for each DL sf in a radio frame
19216 * Calculate this information based on DL Association set Index table */
19217 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Skip forward to the next UL subframe in the UL/DL configuration table. */
19219 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19220 RG_SCH_TDD_UL_SUBFRAME)
19222 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* For each 'k' in the DL association set of this UL subframe, find the DL
 * subframe (k subframes earlier) whose ACK/NACK arrives here. */
19226 for(idx=0; idx < rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
19227 numFdbkSubfrms; idx++)
19229 calcSfNum = sfNum - rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
19233 calcSfnOffset = RGSCH_CEIL(-calcSfNum, RGSCH_NUM_SUB_FRAMES);
19240 calcSfNum = ((RGSCH_NUM_SUB_FRAMES * calcSfnOffset) + calcSfNum)\
19241 % RGSCH_NUM_SUB_FRAMES;
/* Map the absolute subframe number to the cell's DL-subframe array index,
 * accounting for special subframes and TDD switch points. */
19243 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19247 else if((ulSubfrmInfo.switchPoints == 2) && (calcSfNum <= \
19248 RG_SCH_CMN_SPL_SUBFRM_6))
19250 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19254 dlIdx = calcSfNum - maxUlSubfrms;
19257 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = sfNum;
19258 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = calcSfnOffset;
19259 cell->subFrms[dlIdx]->dlFdbkInfo.m = idx;
19261 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19264 /* DL subframes in the subsequent radio frames are initialized
19265 * with the previous radio frames */
19266 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;\
19269 sfNum = dlIdx - rgSchTddNumDlSubfrmTbl[ulDlCfgIdx]\
19270 [RGSCH_NUM_SUB_FRAMES-1];
19271 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = \
19272 cell->subFrms[sfNum]->dlFdbkInfo.subframe;
19273 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = \
19274 cell->subFrms[sfNum]->dlFdbkInfo.sfnOffset;
19275 cell->subFrms[dlIdx]->dlFdbkInfo.m = cell->subFrms[sfNum]->dlFdbkInfo.m;
19281 * @brief This function handles the initialization of uplink association
19282 * set information for each DL subframe.
19287 * Function: rgSCHCmnDlKdashUlAscInit
19288 * Purpose: Each DL sf stores the sfn and sf information of UL sf
19289 * in which it expects HQ ACK/NACK trans. It generates the information
19290 * based on k` in UL association set index table.
19292 * Invoked by: Scheduler
19294 * @param[in] RgSchCellCb* cell
19299 static S16 rgSCHCmnDlKdashUlAscInit
19304 static S16 rgSCHCmnDlKdashUlAscInit(cell)
19309 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19310 uint8_t maxDlSubfrms = cell->numDlSubfrms;
19315 uint8_t ulSfCnt =0;
19316 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19317 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19318 [RGSCH_NUM_SUB_FRAMES-1];
/* dlPres is a per-DL-index bitmask marking which entries got ulAscInfo. */
19319 uint8_t dlPres = 0;
19322 /* Generate ACK/NACK offset information for each DL subframe in a radio frame
19323 * Calculate this information based on K` in UL Association Set table */
19324 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
19326 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19327 RG_SCH_TDD_UL_SUBFRAME)
19329 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Note: k' table is indexed with ulDlCfgIdx-1 (table has no entry for
 * config 0 — presumably config 0 never reaches here; confirm in full src). */
19333 calcSfNum = (sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum] + \
19334 RGSCH_NUM_SUB_FRAMES) % RGSCH_NUM_SUB_FRAMES;
19335 calcSfnOffset = sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum];
19336 if(calcSfnOffset < 0)
19338 calcSfnOffset = RGSCH_CEIL(-calcSfnOffset, RGSCH_NUM_SUB_FRAMES);
/* Map absolute subframe number to DL-subframe array index (same special
 * subframe / switch-point handling as rgSCHCmnDlANFdbkInit). */
19345 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19349 else if((ulSubfrmInfo.switchPoints == 2) &&
19350 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
19352 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19356 dlIdx = calcSfNum - maxUlSubfrms;
19359 cell->subFrms[dlIdx]->ulAscInfo.subframe = sfNum;
19360 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset = calcSfnOffset;
19362 /* set dlIdx for which ulAscInfo is updated */
19363 dlPres = dlPres | (1 << dlIdx);
19364 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19367 /* Set Invalid information for which ulAscInfo is not present */
19369 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19372 /* If dlPres is 0, ulAscInfo is not present in that DL index */
19373 if(! ((dlPres >> sfCount)&0x01))
19375 cell->subFrms[sfCount]->ulAscInfo.sfnOffset =
19376 RGSCH_INVALID_INFO;
19377 cell->subFrms[sfCount]->ulAscInfo.subframe =
19378 RGSCH_INVALID_INFO;
19382 /* DL subframes in the subsequent radio frames are initialized
19383 * with the previous radio frames */
19384 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;
19388 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19389 cell->subFrms[dlIdx]->ulAscInfo.subframe =
19390 cell->subFrms[sfNum]->ulAscInfo.subframe;
19391 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset =
19392 cell->subFrms[sfNum]->ulAscInfo.sfnOffset;
19399 * @brief This function initialises the 'Np' value for 'p'
19403 * Function: rgSCHCmnDlNpValInit
19404 * Purpose: To initialise the 'Np' value for each 'p'. It is used
19405 * to find the mapping between nCCE and 'p' and used in
19406 * HARQ ACK/NACK reception.
19408 * Invoked by: Scheduler
19410 * @param[in] RgSchCellCb* cell
19415 static S16 rgSCHCmnDlNpValInit
19420 static S16 rgSCHCmnDlNpValInit(cell)
19427 /* Always Np is 0 for p=0 */
19428 cell->rgSchTddNpValTbl[0] = 0;
/* Np(p) = floor(BW * (p * N_subcarriers - 4) / 36) per the TDD
 * ACK/NACK resource mapping; table consumed by rgSCHCmnGetPValFrmCCE. */
19430 for(idx=1; idx < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; idx++)
19432 np = cell->bwCfg.dlTotalBw * (idx * RG_SCH_CMN_NUM_SUBCAR - 4);
19433 cell->rgSchTddNpValTbl[idx] = (uint8_t) (np/36);
19440 * @brief This function handles the creation of RACH preamble
19441 * list to queue the preambles and process at the scheduled
19446 * Function: rgSCHCmnDlCreateRachPrmLst
19447 * Purpose: To create RACH preamble list based on RA window size.
19448 * It is used to queue the preambles and process it at the
19451 * Invoked by: Scheduler
19453 * @param[in] RgSchCellCb* cell
19458 static S16 rgSCHCmnDlCreateRachPrmLst
19463 static S16 rgSCHCmnDlCreateRachPrmLst(cell)
19472 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
/* One CmLListCp per (RA-response slot, RA-RNTI, subframe) combination. */
19474 lstSize = raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES;
19476 cell->raInfo.maxRaSize = raArrSz;
/* NOTE(review): allocation failure handling lines are elided here;
 * 'ret' is presumably checked before use — confirm in full source. */
19477 ret = rgSCHUtlAllocSBuf(cell->instIdx,
19478 (Data **)(&cell->raInfo.raReqLst), (Size)(lstSize * sizeof(CmLListCp)));
19484 cell->raInfo.lstSize = lstSize;
19491 * @brief This function handles the initialization of RACH Response
19492 * information at each DL subframe.
19496 * Function: rgSCHCmnDlRachInfoInit
19497 * Purpose: Each DL subframe stores the sfn and subframe information of
19498 * possible RACH response allowed for UL subframes. It generates
19499 * the information based on PRACH configuration.
19501 * Invoked by: Scheduler
19503 * @param[in] RgSchCellCb* cell
19508 static S16 rgSCHCmnDlRachInfoInit
19513 static S16 rgSCHCmnDlRachInfoInit(cell)
19518 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19520 uint8_t ulSfCnt =0;
19521 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19522 [RGSCH_NUM_SUB_FRAMES-1];
/* Scratch table: [sfn-within-RA-array][subframe] of candidate RACH
 * response occasions, copied into the cell at the end. */
19524 RgSchTddRachRspLst rachRspLst[3][RGSCH_NUM_SUB_FRAMES];
19529 uint8_t endSubfrmIdx;
19530 uint8_t startSubfrmIdx;
19532 RgSchTddRachDelInfo *delInfo;
19534 uint8_t numSubfrms;
19537 memset(rachRspLst, 0, sizeof(rachRspLst));
19539 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
19541 /* Include Special subframes */
19542 maxUlSubfrms = maxUlSubfrms + \
19543 rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx].switchPoints;
/* For every UL (or special) subframe, compute the RA response window
 * [startWin, endWin] in absolute subframe units. */
19544 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
19546 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] ==
19547 RG_SCH_TDD_DL_SUBFRAME)
19549 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19553 startWin = (sfNum + RG_SCH_CMN_RARSP_WAIT_PRD + \
19554 ((RgSchCmnCell *)cell->sc.sch)->dl.numRaSubFrms);
19555 endWin = (startWin + cell->rachCfg.raWinSize - 1);
19557 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][startWin%RGSCH_NUM_SUB_FRAMES];
19558 /* Find the next DL subframe starting from Subframe 0 */
19559 if((startSubfrmIdx % RGSCH_NUM_SUB_FRAMES) == 0)
19561 startWin = RGSCH_CEIL(startWin, RGSCH_NUM_SUB_FRAMES);
19562 startWin = startWin * RGSCH_NUM_SUB_FRAMES;
19566 rgSchTddLowDlSubfrmIdxTbl[ulDlCfgIdx][endWin%RGSCH_NUM_SUB_FRAMES];
19567 endWin = (endWin/RGSCH_NUM_SUB_FRAMES) * RGSCH_NUM_SUB_FRAMES \
19569 if(startWin > endWin)
19573 /* Find all the possible RACH Response transmission
19574 * time within the RA window size */
19575 startSubfrmIdx = startWin%RGSCH_NUM_SUB_FRAMES;
19576 for(sfnIdx = startWin/RGSCH_NUM_SUB_FRAMES;
19577 sfnIdx <= endWin/RGSCH_NUM_SUB_FRAMES; sfnIdx++)
19579 if(sfnIdx == endWin/RGSCH_NUM_SUB_FRAMES)
19581 endSubfrmIdx = endWin%RGSCH_NUM_SUB_FRAMES;
19585 endSubfrmIdx = RGSCH_NUM_SUB_FRAMES-1;
19588 /* Find all the possible RACH Response transmission
19589 * time within radio frame */
19590 for(subfrmIdx = startSubfrmIdx;
19591 subfrmIdx <= endSubfrmIdx; subfrmIdx++)
19593 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][subfrmIdx] ==
19594 RG_SCH_TDD_UL_SUBFRAME)
/* UL subframes cannot carry RAR; hop to the next DL subframe index. */
19598 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
19599 /* Find the next DL subframe starting from Subframe 0 */
19600 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
19604 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx], subfrmIdx);
19606 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
19607 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset = sfnIdx;
19608 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].subframe[numSubfrms]
19610 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms++;
19612 startSubfrmIdx = RG_SCH_CMN_SUBFRM_0;
19614 /* Update the subframes to be deleted at this subframe */
19615 /* Get the subframe after the end of RA window size */
19618 sfnOffset = endWin/RGSCH_NUM_SUB_FRAMES;
19621 sfnOffset += raArrSz;
19623 sfnIdx = (endWin/RGSCH_NUM_SUB_FRAMES) % raArrSz;
19625 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx],endSubfrmIdx-1);
19626 if((endSubfrmIdx == RGSCH_NUM_SUB_FRAMES) ||
19627 (rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx] ==
19628 RGSCH_NUM_SUB_FRAMES))
19631 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][RG_SCH_CMN_SUBFRM_0];
19635 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx];
/* Record, at the first DL subframe past the window, which preamble
 * subframes expire there (consumed when purging stale RA requests). */
19638 delInfo = &rachRspLst[sfnIdx][subfrmIdx].delInfo;
19639 delInfo->sfnOffset = sfnOffset;
19640 delInfo->subframe[delInfo->numSubfrms] = sfNum;
19641 delInfo->numSubfrms++;
19643 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Persist the computed scratch table into the cell control block. */
19646 ret = rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz);
19656 * @brief This function handles the initialization of PHICH information
19657 * for each DL subframe based on PHICH table.
19661 * Function: rgSCHCmnDlPhichOffsetInit
19662 * Purpose: Each DL subf stores the sfn and subf information of UL subframe
19663 * for which it trnsmts PHICH in this subframe. It generates the information
19664 * based on PHICH table.
19666 * Invoked by: Scheduler
19668 * @param[in] RgSchCellCb* cell
19673 static S16 rgSCHCmnDlPhichOffsetInit
19678 static S16 rgSCHCmnDlPhichOffsetInit(cell)
19683 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19684 uint8_t maxDlSubfrms = cell->numDlSubfrms;
/* dlPres is a per-DL-index bitmask marking entries that got PHICH info. */
19687 uint8_t dlPres = 0;
19688 uint8_t calcSfnOffset;
19690 uint8_t ulSfCnt =0;
19691 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19692 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19693 [RGSCH_NUM_SUB_FRAMES-1];
19696 /* Generate PHICH offset information for each DL subframe in a radio frame
19697 * Calculate this information based on K in PHICH table */
19698 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
19700 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19701 RG_SCH_TDD_UL_SUBFRAME)
19703 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* PHICH for UL subframe n is sent in DL subframe n + k (kPhich table);
 * split into subframe-within-frame and SFN offset. */
19707 calcSfNum = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) % \
19708 RGSCH_NUM_SUB_FRAMES;
19709 calcSfnOffset = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) / \
19710 RGSCH_NUM_SUB_FRAMES;
19712 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19716 else if((ulSubfrmInfo.switchPoints == 2) &&
19717 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
19719 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19723 dlIdx = calcSfNum - maxUlSubfrms;
19726 cell->subFrms[dlIdx]->phichOffInfo.subframe = sfNum;
19727 cell->subFrms[dlIdx]->phichOffInfo.numSubfrms = 1;
19729 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset = calcSfnOffset;
19731 /* set dlIdx for which phich offset is updated */
19732 dlPres = dlPres | (1 << dlIdx);
19733 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19736 /* Set Invalid information for which phich offset is not present */
19738 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19741 /* If dlPres is 0, phich offset is not present in that DL index */
19742 if(! ((dlPres >> sfCount)&0x01))
19744 cell->subFrms[sfCount]->phichOffInfo.sfnOffset =
19745 RGSCH_INVALID_INFO;
19746 cell->subFrms[sfCount]->phichOffInfo.subframe =
19747 RGSCH_INVALID_INFO;
19748 cell->subFrms[sfCount]->phichOffInfo.numSubfrms = 0;
19752 /* DL subframes in the subsequent radio frames are
19753 * initialized with the previous radio frames */
19754 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms;
19755 dlIdx < maxDlSubfrms; dlIdx++)
19758 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19760 cell->subFrms[dlIdx]->phichOffInfo.subframe =
19761 cell->subFrms[sfNum]->phichOffInfo.subframe;
19763 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset =
19764 cell->subFrms[sfNum]->phichOffInfo.sfnOffset;
19771 * @brief Per-TTI update of common scheduler timing variables.
19775 * Function: rgSCHCmnUpdVars
19776 * Purpose: Per-TTI update of the UL cell's scheduling indices
19777 * (schdIdx, msg3SchdIdx, PHICH feedback/retx indices,
19778 * reception-request index, SPS reservation index).
19778 * @param[in] RgSchCellCb *cell
19783 Void rgSCHCmnUpdVars
19788 Void rgSCHCmnUpdVars(cell)
19792 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19793 CmLteTimingInfo timeInfo;
19795 uint8_t ulSubframe;
19796 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19797 uint8_t msg3Subfrm;
19800 /* ccpu00132654-ADD- Initializing all the indices in every subframe*/
19801 rgSCHCmnInitVars(cell);
/* DL subframe (crntTime + DL-control delta) decides whether a PUSCH
 * grant can be scheduled now (non-zero k in the PUSCH Tx K table). */
19803 idx = (cell->crntTime.slot + TFU_ULCNTRL_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
19804 /* Calculate the UL scheduling subframe idx based on the
19806 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][idx] != 0)
19808 /* PUSCH transmission is based on offset from DL
19809 * PDCCH scheduling */
19810 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
19811 ulSubframe = rgSchTddPuschTxKTbl[ulDlCfgIdx][timeInfo.subframe];
19812 /* Add the DCI-0 to PUSCH time to get the time of UL subframe */
19813 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, ulSubframe);
19815 cellUl->schdTti = timeInfo.sfn * 10 + timeInfo.subframe;
19817 /* Fetch the corresponding UL subframe Idx in UL sf array */
19818 cellUl->schdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19819 /* Fetch the corresponding UL Harq Proc ID */
19820 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
19821 cellUl->schdTime = timeInfo;
/* Non-zero M value => PHICH must be sent this TTI for earlier PUSCH. */
19823 Mval = rgSchTddPhichMValTbl[ulDlCfgIdx][idx];
19826 /* Fetch the tx time for DL HIDCI-0 */
19827 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
19828 /* Fetch the corresponding n-k tx time of PUSCH */
19829 cellUl->hqFdbkIdx[0] = rgSCHCmnGetPhichUlSfIdx(&timeInfo, cell);
19830 /* Retx will happen according to the Pusch k table */
19831 cellUl->reTxIdx[0] = cellUl->schdIdx;
19833 if(ulDlCfgIdx == 0)
19835 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[0] */
19836 cellUl->reTxIdx[0] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
19837 cellUl->hqFdbkIdx[0]);
19840 /* At Idx 1 store the UL SF adjacent(left) to the UL SF
19842 cellUl->hqFdbkIdx[1] = (cellUl->hqFdbkIdx[0]-1 +
19843 cellUl->numUlSubfrms) % cellUl->numUlSubfrms;
19844 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[1] */
19845 cellUl->reTxIdx[1] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
19846 cellUl->hqFdbkIdx[1]);
/* Reception-request index: UL subframe for which PHY must be told to
 * receive (crntTime + reception-request delta). */
19851 idx = (cell->crntTime.slot + TFU_RECPREQ_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
19852 if (rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] == RG_SCH_TDD_UL_SUBFRAME)
19854 RGSCHCMNADDTOCRNTTIME(cell->crntTime, timeInfo, TFU_RECPREQ_DLDELTA)
19855 cellUl->rcpReqIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19857 idx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) % RGSCH_NUM_SUB_FRAMES;
19859 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
19860 special subframe */
19861 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] != RG_SCH_TDD_UL_SUBFRAME)
19863 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
19864 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
19865 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, msg3Subfrm);
19866 cellUl->msg3SchdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19867 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* SPS UL reservation: invalid when the table entry is zero for this sf. */
19870 if(!rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][idx])
19872 cellUl->spsUlRsrvIdx = RGSCH_INVALID_INFO;
19876 /* introduce some reuse with above code? */
19878 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
19879 //offst = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
19880 offst = rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][timeInfo.subframe];
19881 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, offst);
19882 cellUl->spsUlRsrvIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19883 /* The harq proc continues to be accessed and used the same delta before
19884 * actual data occurance, and hence use the same idx */
19885 cellUl->spsUlRsrvHqProcIdx = cellUl->schdHqProcIdx;
19889 /* RACHO: update cmn sched specific RACH variables,
19890 * mainly the prachMaskIndex */
19891 rgSCHCmnUpdRachParam(cell);
19897 * @brief To get 'p' value from nCCE.
19901 * Function: rgSCHCmnGetPValFrmCCE
19902 * Purpose: Gets 'p' value for HARQ ACK/NACK reception from CCE.
19904 * @param[in] RgSchCellCb *cell
19905 * @param[in] uint8_t cce
/* @return uint8_t — the smallest p whose Np bound exceeds 'cce', using
 * the table built by rgSCHCmnDlNpValInit. NOTE(review): the function's
 * return statements are elided from this sampled chunk. */
19910 uint8_t rgSCHCmnGetPValFrmCCE
19916 uint8_t rgSCHCmnGetPValFrmCCE(cell, cce)
19923 for(i=1; i < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; i++)
19925 if(cce < cell->rgSchTddNpValTbl[i])
19934 /***********************************************************
19936 * Func : rgSCHCmnUlAdapRetx
19938 * Desc : Adaptive retransmission for an allocation.
19946 **********************************************************/
19948 static Void rgSCHCmnUlAdapRetx
19950 RgSchUlAlloc *alloc,
19951 RgSchUlHqProcCb *proc
19954 static Void rgSCHCmnUlAdapRetx(alloc, proc)
19955 RgSchUlAlloc *alloc;
19956 RgSchUlHqProcCb *proc;
/* Advance the UL HARQ process for a retransmission, then pick the MCS:
 * non-zero RV indices use the RV-specific reserved iMcs values, RV0
 * retransmits with the original iMcs. */
19960 rgSCHUhmRetx(proc, alloc);
19962 if (proc->rvIdx != 0)
19964 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[proc->rvIdx];
19969 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
19975 * @brief Scheduler invocation per TTI.
19979 * Function: rgSCHCmnHdlUlInactUes
/* Purpose: collect UEs inactive for UL (measurement gap / ACK-NACK
 * repetition) and hand them to the UL-specific scheduler to deactivate. */
19982 * Invoked by: Common Scheduler
19984 * @param[in] RgSchCellCb *cell
19988 static Void rgSCHCmnHdlUlInactUes
19993 static Void rgSCHCmnHdlUlInactUes(cell)
19997 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
19998 CmLListCp ulInactvLst;
19999 /* Get a List of Inactv UEs for UL*/
20000 cmLListInit(&ulInactvLst);
20002 /* Trigger Spfc Schedulers with Inactive UEs */
20003 rgSCHMeasGapANRepGetUlInactvUe (cell, &ulInactvLst);
20004 /* take care of this in UL retransmission */
20005 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInactvLst);
20011 * @brief Scheduler invocation per TTI.
20015 * Function: rgSCHCmnHdlDlInactUes
/* Purpose: DL counterpart of rgSCHCmnHdlUlInactUes — collect DL-inactive
 * UEs and pass them to the DL-specific scheduler. */
20018 * Invoked by: Common Scheduler
20020 * @param[in] RgSchCellCb *cell
20024 static Void rgSCHCmnHdlDlInactUes
20029 static Void rgSCHCmnHdlDlInactUes(cell)
20033 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20034 CmLListCp dlInactvLst;
20035 /* Get a List of Inactv UEs for DL */
20036 cmLListInit(&dlInactvLst);
20038 /* Trigger Spfc Schedulers with Inactive UEs */
20039 rgSCHMeasGapANRepGetDlInactvUe (cell, &dlInactvLst);
20041 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInactvLst);
20045 /* RACHO: Rach handover functions start here */
20046 /***********************************************************
20048 * Func : rgSCHCmnUeIdleExdThrsld
20050 * Desc : RETURN ROK if UE has been idle more
20059 **********************************************************/
20061 static S16 rgSCHCmnUeIdleExdThrsld
20067 static S16 rgSCHCmnUeIdleExdThrsld(cell, ue)
20072 /* Time difference in subframes */
20073 uint32_t sfDiff = RGSCH_CALC_SF_DIFF(cell->crntTime, ue->ul.ulTransTime);
/* Compare elapsed subframes since the UE's last UL transmission against
 * the per-UE idle threshold; return value lines are elided here. */
20076 if (sfDiff > (uint32_t)RG_SCH_CMN_UE_IDLE_THRSLD(ue))
20088 * @brief Scheduler processing for Ded Preambles on cell configuration.
20092 * Function : rgSCHCmnCfgRachDedPrm
20094 * This function does requisite initialisation
20095 * for RACH Ded Preambles.
20098 * @param[in] RgSchCellCb *cell
20102 static Void rgSCHCmnCfgRachDedPrm
20107 static Void rgSCHCmnCfgRachDedPrm(cell)
20111 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20112 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
/* No dedicated preamble set configured: nothing to initialise. */
20116 if (cell->macPreambleSet.pres == NOTPRSNT)
20120 cellSch->rachCfg.numDedPrm = cell->macPreambleSet.size;
20121 cellSch->rachCfg.dedPrmStart = cell->macPreambleSet.start;
20122 /* Initialize handover List */
20123 cmLListInit(&cellSch->rachCfg.hoUeLst);
20124 /* Initialize pdcch Order List */
20125 cmLListInit(&cellSch->rachCfg.pdcchOdrLst);
20127 /* Intialize the rapId to UE mapping structure */
20128 for (cnt = 0; cnt<cellSch->rachCfg.numDedPrm; cnt++)
20130 cellSch->rachCfg.rapIdMap[cnt].rapId = cellSch->rachCfg.dedPrmStart + \
20132 cmLListInit(&cellSch->rachCfg.rapIdMap[cnt].assgndUes);
20134 /* Perform Prach Mask Idx, remDedPrm, applFrm initializations */
20135 /* Set remDedPrm as numDedPrm */
20136 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
20137 /* Initialize applFrm */
20138 cellSch->rachCfg.prachMskIndx = 0;
/* applFrm: the next SFN at which a PRACH occasion applies, aligned to the
 * configured even/odd/any SFN parity. */
20139 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_EVEN)
20141 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + \
20142 (cell->crntTime.sfn % 2)) % RGSCH_MAX_SFN;
20145 else if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ODD)
20147 if((cell->crntTime.sfn%2) == 0)
20149 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + 1)\
20156 cellSch->rachCfg.applFrm.sfn = cell->crntTime.sfn;
20158 /* Initialize cellSch->rachCfg.applFrm as >= crntTime.
20159 * This is because of RGSCH_CALC_SF_DIFF logic */
20160 if (cellSch->rachCfg.applFrm.sfn == cell->crntTime.sfn)
/* Pick the first configured PRACH subframe still ahead of crntTime in
 * this frame; if none, roll over to the next applicable SFN. */
20162 while (cellSch->rachCfg.prachMskIndx < cell->rachCfg.raOccasion.size)
20164 if (cell->crntTime.slot <\
20165 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx])
20169 cellSch->rachCfg.prachMskIndx++;
20171 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size)
20173 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
20175 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) %\
20180 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) %\
20183 cellSch->rachCfg.prachMskIndx = 0;
20185 cellSch->rachCfg.applFrm.slot = \
20186 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20190 cellSch->rachCfg.applFrm.slot = \
20191 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20194 /* Note first param to this macro should always be the latest in time */
20195 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
/* Keep advancing the PRACH mask index until the occasion is at least the
 * minimum gap ahead of the current time. */
20196 while (sfDiff <= gap)
20198 rgSCHCmnUpdNxtPrchMskIdx(cell);
20199 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
20206 * @brief Updates the PRACH MASK INDEX.
20210 * Function: rgSCHCmnUpdNxtPrchMskIdx
20211 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
20212 * CFG is always >= "n"+"DELTA", where "n" is the crntTime
20213 * of the cell. If not, applFrm is updated to the next avl
20214 * PRACH opportunity as per the PRACH Cfg Index configuration.
20217 * Invoked by: Common Scheduler
20219 * @param[in] RgSchCellCb *cell
/* Steps applFrm/prachMskIndx forward by exactly one PRACH opportunity:
 * either the next configured subframe of the same SFN, or the first
 * subframe of the next eligible SFN when the current SFN is exhausted. */
20223 static Void rgSCHCmnUpdNxtPrchMskIdx
20228 static Void rgSCHCmnUpdNxtPrchMskIdx(cell)
20232 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20234 /* Determine the next prach mask Index */
20235 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size - 1)
20237 /* PRACH within applFrm.sfn are done, go to next AVL sfn */
20238 cellSch->rachCfg.prachMskIndx = 0;
20239 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
20241 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) % \
20244 else/* RGR_SFN_EVEN or RGR_SFN_ODD */
/* +2 keeps the SFN parity required by the EVEN/ODD restriction */
20246 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) % \
20249 cellSch->rachCfg.applFrm.slot = cell->rachCfg.raOccasion.\
20252 else /* applFrm.sfn is still valid */
20254 cellSch->rachCfg.prachMskIndx += 1;
/* Bound check before indexing subFrameNum[] */
20255 if ( cellSch->rachCfg.prachMskIndx < RGR_MAX_SUBFRAME_NUM )
20257 cellSch->rachCfg.applFrm.slot = \
20258 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20265 * @brief Updates the Ded preamble RACH parameters
20270 * Function: rgSCHCmnUpdRachParam
20271 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
20272 * CFG is always >= "n"+"6"+"DELTA", where "n" is the crntTime
20273 * of the cell. If not, applFrm is updated to the next avl
20274 * PRACH opportunity as per the PRACH Cfg Index configuration,
20275 * accordingly the "remDedPrm" is reset to "numDedPrm" and
20276 * "prachMskIdx" field is updated as per "applFrm".
20279 * Invoked by: Common Scheduler
20281 * @param[in] RgSchCellCb *cell
/* Per-TTI refresh of the dedicated-preamble window: once applFrm is no
 * longer far enough in the future, move to the next PRACH opportunity
 * and replenish the pool of dedicated preambles. */
20285 static Void rgSCHCmnUpdRachParam
20290 static Void rgSCHCmnUpdRachParam(cell)
20295 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20296 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
/* No dedicated preambles configured => nothing to maintain
 * (early-exit body elided in this view). */
20299 if (cell->macPreambleSet.pres == NOTPRSNT)
20303 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, \
20307 /* applFrm is still a valid next Prach Opportunity */
20310 rgSCHCmnUpdNxtPrchMskIdx(cell);
20311 /* Reset remDedPrm as numDedPrm */
20312 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
20318 * @brief Dedicated Preamble allocation function.
20322 * Function: rgSCHCmnAllocPOParam
20323 * Purpose: Allocate pdcch, rapId and PrachMskIdx.
20324 * Set mapping of UE with the allocated rapId.
20326 * Invoked by: Common Scheduler
20328 * @param[in] RgSchCellCb *cell
20329 * @param[in] RgSchDlSf *dlSf
20330 * @param[in] RgSchUeCb *ue
20331 * @param[out] RgSchPdcch **pdcch
20332 * @param[out] uint8_t *rapId
20333 * @param[out] uint8_t *prachMskIdx
/* Returns ROK with *pdcch/*rapId/*prachMskIdx filled on success.
 * NOTE(review): failure returns (no preambles left, no PDCCH) are elided
 * here — presumably RFAILED; confirm against the full source. */
20337 static S16 rgSCHCmnAllocPOParam
20342 RgSchPdcch **pdcch,
20344 uint8_t *prachMskIdx
20347 static S16 rgSCHCmnAllocPOParam(cell, dlSf, ue, pdcch, rapId, prachMskIdx)
20351 RgSchPdcch **pdcch;
20353 uint8_t *prachMskIdx;
20357 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20358 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20361 if (cell->macPreambleSet.pres == PRSNT_NODEF)
/* All dedicated preambles for the current opportunity already handed out */
20363 if (cellSch->rachCfg.remDedPrm == 0)
20367 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
20368 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
20372 /* The stored prachMskIdx is the index of PRACH Opportunities in
20373 * raOccasions.subframes[].
20374 * Converting the same to the actual PRACHMskIdx to be transmitted. */
20375 *prachMskIdx = cellSch->rachCfg.prachMskIndx + 1;
20376 /* Distribution starts from dedPrmStart till dedPrmStart + numDedPrm */
20377 *rapId = cellSch->rachCfg.dedPrmStart +
20378 cellSch->rachCfg.numDedPrm - cellSch->rachCfg.remDedPrm;
20379 cellSch->rachCfg.remDedPrm--;
20380 /* Map UE with the allocated RapId */
/* Remember which PRACH opportunity this rapId was granted for, so the
 * RA request can later be matched back to this UE (see rgSCHCmnGetPoUe). */
20381 ueDl->rachInfo.asgnOppr = cellSch->rachCfg.applFrm;
20382 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, cellSch->rachCfg.rapIdMap, (*rapId - cellSch->rachCfg.dedPrmStart));
20383 cmLListAdd2Tail(&cellSch->rachCfg.rapIdMap[*rapId - cellSch->rachCfg.dedPrmStart].assgndUes,
20384 &ueDl->rachInfo.rapIdLnk);
20385 ueDl->rachInfo.rapIdLnk.node = (PTR)ue;
20386 ueDl->rachInfo.poRapId = *rapId;
20388 else /* if dedicated preambles not configured */
20390 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
/* PDCCH order will carry rapId=0/prachMskIdx=0 in this case (assignment
 * elided in this view) so the UE uses contention-based RACH. */
20391 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
20403 * @brief Downlink Scheduling Handler.
20407 * Function: rgSCHCmnGenPdcchOrder
20408 * Purpose: For each UE in PO Q, grab a PDCCH,
20409 * get an available ded RapId and fill PDCCH
20410 * with PO information.
20412 * Invoked by: Common Scheduler
20414 * @param[in] RgSchCellCb *cell
20415 * @param[in] RgSchDlSf *dlSf
/* Walks the PDCCH-order queue each TTI; UEs that are measuring / UL
 * inactive / DRX-inactive are skipped this subframe and retried later. */
20419 static Void rgSCHCmnGenPdcchOrder
20425 static Void rgSCHCmnGenPdcchOrder(cell, dlSf)
20430 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20431 CmLList *node = cellSch->rachCfg.pdcchOdrLst.first;
20434 uint8_t prachMskIdx;
20435 RgSchPdcch *pdcch = NULLP;
20440 ue = (RgSchUeCb *)node->node;
20442 /* Skip sending for this subframe is Measuring or inActive in UL due
20443 * to MeasGap or inactive due to DRX
20445 if ((ue->measGapCb.isMeasuring == TRUE) ||
20446 (ue->ul.ulInactvMask & RG_MEASGAP_INACTIVE) ||
20447 (ue->isDrxEnabled &&
20448 ue->dl.dlInactvMask & RG_DRX_INACTIVE)
20453 if (rgSCHCmnAllocPOParam(cell, dlSf, ue, &pdcch, &rapId,\
20454 &prachMskIdx) != ROK)
20456 /* No More rapIds left for the valid next avl Opportunity.
20457 * Unsatisfied UEs here would be given a chance, when the
20458 * prach Mask Index changes as per rachUpd every TTI */
20460 /* PDDCH can also be ordered with rapId=0, prachMskIdx=0
20461 * so that UE triggers a RACH procedure with non-dedicated preamble.
20462 * But the implementation here does not do this. Instead, the "break"
20463 * here implies, that PDCCH Odr always given with valid rapId!=0,
20464 * prachMskIdx!=0 if dedicated preambles are configured.
20465 * If not configured, then trigger a PO with rapId=0,prchMskIdx=0*/
20468 /* Fill pdcch with pdcch odr information */
20469 rgSCHCmnFillPdcchOdr2Sf(cell, ue, pdcch, rapId, prachMskIdx);
20470 /* Remove this UE from the PDCCH ORDER QUEUE */
20471 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
20472 /* Reset UE's power state */
20473 rgSCHPwrUeReset(cell, ue);
20480 * @brief This function adds UE to PdcchOdr Q if not already present.
20484 * Function: rgSCHCmnDlAdd2PdcchOdrQ
20487 * Invoked by: CMN Scheduler
20489 * @param[in] RgSchCellCb* cell
20490 * @param[in] RgSchUeCb* ue
/* Idempotent enqueue: poLnk.node doubles as the "already queued" flag
 * (NULLP when not on the list), so duplicate adds are harmless. */
20495 static Void rgSCHCmnDlAdd2PdcchOdrQ
20501 static Void rgSCHCmnDlAdd2PdcchOdrQ(cell, ue)
20506 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20507 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20510 if (ueDl->rachInfo.poLnk.node == NULLP)
20512 cmLListAdd2Tail(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
20513 ueDl->rachInfo.poLnk.node = (PTR)ue;
20520 * @brief This function removes UE from the PdcchOdr Q.
20524 * Function: rgSCHCmnDlRmvFrmPdcchOdrQ
20527 * Invoked by: CMN Scheduler
20529 * @param[in] RgSchCellCb* cell
20530 * @param[in] RgSchUeCb* ue
/* Counterpart of rgSCHCmnDlAdd2PdcchOdrQ: unlinks the UE and clears
 * poLnk.node so the UE reads as "not queued" again.
 * NOTE(review): no visible guard for a UE not on the list — callers
 * appear responsible for checking poLnk.node first (see
 * rgSCHCmnDelRachInfo). */
20535 static Void rgSCHCmnDlRmvFrmPdcchOdrQ
20541 static Void rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue)
20546 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20547 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20550 cmLListDelFrm(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
20551 ueDl->rachInfo.poLnk.node = NULLP;
20556 * @brief Fill pdcch with PDCCH order information.
20560 * Function: rgSCHCmnFillPdcchOdr2Sf
20561 * Purpose: Fill PDCCH with PDCCH order information,
20563 * Invoked by: Common Scheduler
20565 * @param[in] RgSchUeCb *ue
20566 * @param[in] RgSchPdcch *pdcch
20567 * @param[in] uint8_t rapId
20568 * @param[in] uint8_t prachMskIdx
/* Builds a DCI format 1A "PDCCH order" (preamble index + PRACH mask
 * index) addressed to the UE's C-RNTI.
 * NOTE(review): body references `cell` though it is absent from the
 * visible parameter lines — the cell parameter lines are elided in this
 * view; confirm against the full source. */
20572 static Void rgSCHCmnFillPdcchOdr2Sf
20578 uint8_t prachMskIdx
20581 static Void rgSCHCmnFillPdcchOdr2Sf(ue, pdcch, rapId, prachMskIdx)
20586 uint8_t prachMskIdx;
20589 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
20592 pdcch->rnti = ue->ueId;
20593 pdcch->dci.dciFormat = TFU_DCI_FORMAT_1A;
20594 pdcch->dci.u.format1aInfo.isPdcchOrder = TRUE;
20595 pdcch->dci.u.format1aInfo.t.pdcchOrder.preambleIdx = rapId;
20596 pdcch->dci.u.format1aInfo.t.pdcchOrder.prachMaskIdx = prachMskIdx;
20598 /* Request for APer CQI immediately after PDCCH Order */
20599 /* CR ccpu00144525 */
20601 if(ue->dl.ueDlCqiCfg.aprdCqiCfg.pres)
20603 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
/* Reset the aperiodic-CQI trigger weight so the request fires promptly */
20604 acqiCb->aCqiTrigWt = 0;
20613 * @brief UE deletion for scheduler.
20617 * Function : rgSCHCmnDelRachInfo
20619 * This function deletes all scheduler RACH information
20620 * pertaining to an UE.
20622 * @param[in] RgSchCellCb *cell
20623 * @param[in] RgSchUeCb *ue
/* Unlinks the UE from each RACH-related list it may be on: the PDCCH
 * order queue, the handover UE list, and the rapId->UE map. Each *.node
 * pointer doubles as the membership flag, so only populated links are
 * removed. */
20627 static Void rgSCHCmnDelRachInfo
20633 static Void rgSCHCmnDelRachInfo(cell, ue)
20638 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20639 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20643 if (ueDl->rachInfo.poLnk.node)
20645 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
20647 if (ueDl->rachInfo.hoLnk.node)
20649 cmLListDelFrm(&cellSch->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
20650 ueDl->rachInfo.hoLnk.node = NULLP;
20652 if (ueDl->rachInfo.rapIdLnk.node)
/* poRapId was recorded at allocation time (rgSCHCmnAllocPOParam); the
 * offset from dedPrmStart recovers the rapIdMap[] slot. */
20654 rapIdIdx = ueDl->rachInfo.poRapId - cellSch->rachCfg.dedPrmStart;
20655 cmLListDelFrm(&cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes,
20656 &ueDl->rachInfo.rapIdLnk);
20657 ueDl->rachInfo.rapIdLnk.node = NULLP;
20663 * @brief This function retrieves the ue which has sent this raReq
20664 * and it allocates grant for UEs undergoing (for which RAR
20665 * is being generated) HandOver/PdcchOrder.
20670 * Function: rgSCHCmnHdlHoPo
20671 * Purpose: This function retrieves the ue which has sent this raReq
20672 * and it allocates grant for UEs undergoing (for which RAR
20673 * is being generated) HandOver/PdcchOrder.
20675 * Invoked by: Common Scheduler
20677 * @param[in] RgSchCellCb *cell
20678 * @param[out] CmLListCp *raRspLst
20679 * @param[in] RgSchRaReqInfo *raReq
/* Thin dispatcher: notifies DRX (so the UE is kept awake for dedicated
 * RA, per rgSCHDrxDedRa) and then delegates the actual UL grant
 * allocation to rgSCHCmnAllocPoHoGrnt. */
20684 static Void rgSCHCmnHdlHoPo
20687 CmLListCp *raRspLst,
20688 RgSchRaReqInfo *raReq
20691 static Void rgSCHCmnHdlHoPo(cell, raRspLst, raReq)
20693 CmLListCp *raRspLst;
20694 RgSchRaReqInfo *raReq;
20697 RgSchUeCb *ue = raReq->ue;
20699 if ( ue->isDrxEnabled )
20701 rgSCHDrxDedRa(cell,ue);
20703 rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq);
20708 * @brief This function retrieves the UE which has sent this raReq
20709 * for handover case.
20714 * Function: rgSCHCmnGetHoUe
20715 * Purpose: This function retrieves the UE which has sent this raReq
20716 * for handover case.
20718 * Invoked by: Common Scheduler
20720 * @param[in] RgSchCellCb *cell
20721 * @param[in] RgSchRaReqInfo *raReq
20722 * @return RgSchUeCb*
/* Linear search of the handover UE list for the UE whose assigned
 * handover rapId matches the received one.
 * NOTE(review): return statements (matched UE / NULLP when not found)
 * are elided in this view — confirm against the full source. */
20726 RgSchUeCb* rgSCHCmnGetHoUe
20732 RgSchUeCb* rgSCHCmnGetHoUe(cell, rapId)
20737 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20741 RgSchCmnDlUe *ueDl;
20743 ueLst = &cellSch->rachCfg.hoUeLst;
20744 node = ueLst->first;
20747 ue = (RgSchUeCb *)node->node;
20749 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20750 if (ueDl->rachInfo.hoRapId == rapId)
20759 static Void rgSCHCmnDelDedPreamble
20765 static rgSCHCmnDelDedPreamble(cell, preambleId)
20767 uint8_t preambleId;
20770 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20774 RgSchCmnDlUe *ueDl;
20776 ueLst = &cellSch->rachCfg.hoUeLst;
20777 node = ueLst->first;
20780 ue = (RgSchUeCb *)node->node;
20782 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20783 if (ueDl->rachInfo.hoRapId == preambleId)
20785 cmLListDelFrm(ueLst, &ueDl->rachInfo.hoLnk);
20786 ueDl->rachInfo.hoLnk.node = (PTR)NULLP;
20792 * @brief This function retrieves the UE which has sent this raReq
20793 * for PDCCh Order case.
20798 * Function: rgSCHCmnGetPoUe
20799 * Purpose: This function retrieves the UE which has sent this raReq
20800 * for PDCCH Order case.
20802 * Invoked by: Common Scheduler
20804 * @param[in] RgSchCellCb *cell
20805 * @param[in] RgSchRaReqInfo *raReq
20806 * @return RgSchUeCb*
/* Looks up the rapIdMap[] slot for the received rapId and scans its UE
 * list. Every visited UE is unconditionally unlinked (stale mappings
 * from earlier opportunities are purged as a side effect); the UE whose
 * recorded PRACH opportunity (asgnOppr) matches timingInfo is the match.
 * NOTE(review): return statements are elided in this view — presumably
 * the matching UE or NULLP; confirm against the full source. */
20810 RgSchUeCb* rgSCHCmnGetPoUe
20814 CmLteTimingInfo timingInfo
20817 RgSchUeCb* rgSCHCmnGetPoUe(cell, rapId, timingInfo)
20820 CmLteTimingInfo timingInfo;
20823 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20827 RgSchCmnDlUe *ueDl;
20830 rapIdIdx = rapId -cellSch->rachCfg.dedPrmStart;
20831 ueLst = &cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes;
20832 node = ueLst->first;
20835 ue = (RgSchUeCb *)node->node;
20837 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20838 /* Remove UEs irrespective.
20839 * Old UE associations are removed.*/
20840 cmLListDelFrm(ueLst, &ueDl->rachInfo.rapIdLnk);
20841 ueDl->rachInfo.rapIdLnk.node = (PTR)NULLP;
20842 if (RGSCH_TIMEINFO_SAME(ueDl->rachInfo.asgnOppr, timingInfo))
20853 * @brief This function returns the valid UL cqi for a given UE.
20857 * Function: rgSCHCmnUlGetCqi
20858 * Purpose: This function returns the "valid UL cqi" for a given UE
20859 * based on UE category
20861 * Invoked by: Scheduler
20863 * @param[in] RgSchUeCb *ue
20864 * @param[in] uint8_t ueCtgy
/* Effectively: start from maxUlCqi, then take validUlCqi (or, in the
 * other compile-time branch, crntUlCqi[0]) unless the UE is below CAT-5
 * and the reported CQI exceeds the category cap — i.e. non-CAT-5 UEs are
 * clamped to maxUlCqi. The two if-blocks are alternatives under an
 * elided preprocessor conditional (likely TFU_UPGRADE) in this view. */
20868 uint8_t rgSCHCmnUlGetCqi
20872 CmLteUeCategory ueCtgy
20875 uint8_t rgSCHCmnUlGetCqi(cell, ue, ueCtgy)
20878 CmLteUeCategory ueCtgy;
20881 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
20885 cqi = ueUl->maxUlCqi;
20887 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
20888 (ueUl->validUlCqi > ueUl->maxUlCqi)))
20890 cqi = ueUl->validUlCqi;
20893 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
20894 (ueUl->crntUlCqi[0] > ueUl->maxUlCqi )))
20896 cqi = ueUl->crntUlCqi[0];
20900 }/* End of rgSCHCmnUlGetCqi */
20902 /***********************************************************
20904 * Func : rgSCHCmnUlRbAllocForPoHoUe
20906 * Desc : Do uplink RB allocation for a HO/PO UE.
20910 * Notes: Note that as of now, for retx, maxRb
20911 * is not considered. Alternatives, such
20912 * as dropping retx if it crosses maxRb
20913 * could be considered.
20917 **********************************************************/
/* Pipeline (as visible here): derive CQI -> iTbs -> iMcs (capped at the
 * max msg3 MCS by stepping CQI down), pick spectral efficiency for
 * normal/extended CP, size the allocation in subbands from the requested
 * bytes, grab subbands from the UL subframe, then fill the grant and
 * start a new msg3 HARQ transmission. Returns ROK on success; failure
 * paths (no hole, no alloc) presumably return RFAILED (elided). */
20919 static S16 rgSCHCmnUlRbAllocForPoHoUe
20927 static S16 rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, maxRb)
20934 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
20935 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
20936 uint8_t sbSize = cellUl->sbSize;
20937 uint32_t maxBits = ue->ul.maxBytesPerUePerTti*8;
20939 RgSchUlAlloc *alloc;
20949 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->msg3SchdHqProcIdx];
20950 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* No free hole in this UL subframe => cannot allocate */
20952 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
20956 /*MS_WORKAROUND for HO ccpu00121116*/
20957 cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
20958 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend], cqi);
20959 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
20960 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* msg3 MCS is capped; step the CQI down (decrement elided in this view)
 * until the derived iMcs fits under RG_SCH_CMN_MAX_MSG3_IMCS. */
20961 while(iMcs > RG_SCH_CMN_MAX_MSG3_IMCS)
20964 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
20965 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg);
20967 /* Filling the modorder in the grant structure*/
20968 RG_SCH_UL_MCS_TO_MODODR(iMcs,modOdr);
/* Spectral-efficiency table differs for normal vs extended UL CP */
20969 if (!cell->isCpUlExtend)
20971 eff = rgSchCmnNorUlEff[0][iTbs];
20975 eff = rgSchCmnExtUlEff[0][iTbs];
20978 bits = ueUl->alloc.reqBytes * 8;
20980 #if (ERRCLASS & ERRCLS_DEBUG)
/* Below the minimum TB size for this iTbs: fall back to a minimal
 * subband count (numSb assignment elided in this view). */
20987 if (bits < rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs))
20990 nPrb = numSb * sbSize;
/* Request exceeds the per-UE per-TTI cap: size from maxBits instead.
 * eff is in units of bits-per-RE scaled by 1024, hence the *1024. */
20994 if (bits > maxBits)
20997 nPrb = bits * 1024 / eff / RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
21002 numSb = nPrb / sbSize;
21006 /*ccpu00128775:MOD-Change to get upper threshold nPrb*/
21007 nPrb = RGSCH_CEIL((RGSCH_CEIL(bits * 1024, eff)),
21008 RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl));
21013 numSb = RGSCH_DIV_ROUND(nPrb, sbSize);
21018 alloc = rgSCHCmnUlSbAlloc(sf, (uint8_t)RGSCH_MIN(numSb, cellUl->maxSbPerUe),\
21020 if (alloc == NULLP)
21022 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
21023 "rgSCHCmnUlRbAllocForPoHoUe(): Could not get UlAlloc");
21026 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
21028 /* Filling the modorder in the grant structure start*/
21029 alloc->grnt.modOdr = (TfuModScheme) modOdr;
21030 alloc->grnt.iMcs = iMcs;
21031 alloc->grnt.iMcsCrnt = iMcsCrnt;
21032 alloc->grnt.hop = 0;
21033 /* Fix for ccpu00123915*/
21034 alloc->forMsg3 = TRUE;
21035 alloc->hqProc = proc;
21036 alloc->hqProc->ulSfIdx = cellUl->msg3SchdIdx;
21038 alloc->rnti = ue->ueId;
21039 /* updating initNumRbs in case of HO */
21041 ue->initNumRbs = alloc->grnt.numRb;
21043 ueUl->alloc.alloc = alloc;
21044 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
21045 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
/* rgTbSzTbl yields bits; /8 converts to bytes for datSz */
21046 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
21047 /* MS_WORKAROUND for HO ccpu00121124*/
21048 /*[Adi temp change] Need to fil modOdr */
21049 RG_SCH_UL_MCS_TO_MODODR(alloc->grnt.iMcsCrnt,alloc->grnt.modOdr);
/* Kick off a fresh HARQ transmission for this msg3 allocation */
21050 rgSCHUhmNewTx(proc, ueUl->hqEnt.maxHqRetx, alloc);
21051 /* No grant attr recorded now */
21056 * @brief This function allocates grant for UEs undergoing (for which RAR
21057 * is being generated) HandOver/PdcchOrder.
21062 * Function: rgSCHCmnAllocPoHoGrnt
21063 * Purpose: This function allocates grant for UEs undergoing (for which RAR
21064 * is being generated) HandOver/PdcchOrder.
21066 * Invoked by: Common Scheduler
21068 * @param[in] RgSchCellCb *cell
21069 * @param[out] CmLListCp *raRspLst,
21070 * @param[in] RgSchUeCb *ue
21071 * @param[in] RgSchRaReqInfo *raReq
/* Clears any stale UL allocations, performs the msg3-style RB allocation
 * via rgSCHCmnUlRbAllocForPoHoUe, copies the resulting grant into the
 * UE's RAR grant fields, optionally piggybacks an aperiodic CQI request,
 * and finally queues the UE on the RAR response list. */
21076 static Void rgSCHCmnAllocPoHoGrnt
21079 CmLListCp *raRspLst,
21081 RgSchRaReqInfo *raReq
21084 static Void rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq)
21086 CmLListCp *raRspLst;
21088 RgSchRaReqInfo *raReq;
21091 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21092 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
21094 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
21097 /* Clearing previous allocs if any*/
21098 rgSCHCmnUlUeDelAllocs(cell, ue);
21099 /* Fix : syed allocs are limited */
/* UL subframe already at its allocation cap: give up for this TTI */
21100 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
21104 ueUl->alloc.reqBytes = RG_SCH_MIN_GRNT_HOPO;
21105 if (rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, RGSCH_MAX_UL_RB) != ROK)
21110 /* Fill grant information */
21111 grnt = &ueUl->alloc.alloc->grnt;
21116 RLOG_ARG1(L_ERROR,DBG_INSTID,cell->instIdx, "Failed to get"
21117 "the grant for HO/PDCCH Order. CRNTI:%d",ue->ueId);
/* Copy the allocated grant into the UE's RAR-grant structure; the TA
 * measured from the RA preamble rides along in the RAR. */
21120 ue->ul.rarGrnt.rapId = raReq->raReq.rapId;
21121 ue->ul.rarGrnt.hop = grnt->hop;
21122 ue->ul.rarGrnt.rbStart = grnt->rbStart;
21123 ue->ul.rarGrnt.numRb = grnt->numRb;
21124 ue->ul.rarGrnt.tpc = grnt->tpc;
21125 ue->ul.rarGrnt.iMcsCrnt = grnt->iMcsCrnt;
21126 ue->ul.rarGrnt.ta.pres = TRUE;
21127 ue->ul.rarGrnt.ta.val = raReq->raReq.ta;
21128 ue->ul.rarGrnt.datSz = grnt->datSz;
/* Piggyback an aperiodic CQI request if one is pending and the per-SF
 * aperiodic-CQI budget is not exhausted. */
21129 if((sf->numACqiCount < RG_SCH_MAX_ACQI_PER_ULSF) && (RG_SCH_APCQI_NO != ue->dl.reqForCqi))
21133 /* Send two bits cqireq field if more than one cells are configured else one*/
21134 for (idx = 1;idx < CM_LTE_MAX_CELLS;idx++)
21136 if (ue->cellInfo[idx] != NULLP)
21138 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
/* Loop ran to completion => only the primary cell is configured */
21142 if (idx == CM_LTE_MAX_CELLS)
21145 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
21147 ue->dl.reqForCqi = RG_SCH_APCQI_NO;
21148 sf->numACqiCount++;
21152 ue->ul.rarGrnt.cqiReqBit = 0;
21154 /* Attach Ho/Po allocation to RAR Rsp cont free Lst */
21155 cmLListAdd2Tail(raRspLst, &ue->ul.rarGrnt.raRspLnk);
21156 ue->ul.rarGrnt.raRspLnk.node = (PTR)ue;
21162 * @brief This is a utility function to set the fields in
21163 * an UL harq proc which is identified for non-adaptive retx
21167 * Function: rgSCHCmnUlNonadapRetx
21168 * Purpose: Sets the fields in UL Harq proc for non-adaptive retx
21170 * @param[in] RgSchCmnUlCell *cellUl
21171 * @param[out] RgSchUlAlloc *alloc
21172 * @param[in] uint8_t idx
/* Non-adaptive retx reuses the same PRBs: no new PDCCH is sent (pdcch set
 * to NULLP) and the MCS field is repurposed to signal the RV when
 * rvIdx != 0, per the UL RV-in-MCS convention. */
21178 static Void rgSCHCmnUlNonadapRetx
21180 RgSchCmnUlCell *cellUl,
21181 RgSchUlAlloc *alloc,
21185 static Void rgSCHCmnUlNonadapRetx(cellUl, alloc, idx)
21186 RgSchCmnUlCell *cellUl;
21187 RgSchUlAlloc *alloc;
21191 rgSCHUhmRetx(alloc->hqProc, alloc);
21193 /* Update alloc to retx */
21194 alloc->hqProc->isRetx = TRUE;
21195 alloc->hqProc->ulSfIdx = cellUl->reTxIdx[idx];
21197 if (alloc->hqProc->rvIdx != 0)
21199 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[alloc->hqProc->rvIdx];
21203 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
21205 alloc->grnt.isRtx = TRUE;
21206 alloc->pdcch = NULLP;
21210 * @brief Check if 2 allocs overlap
21214 * Function : rgSCHCmnUlAllocsOvrLap
21216 * - Return TRUE if alloc1 and alloc2 overlap.
21218 * @param[in] RgSchUlAlloc *alloc1
21219 * @param[in] RgSchUlAlloc *alloc2
/* Symmetric interval-intersection test on [sbStart, sbStart+numSb-1]:
 * TRUE iff either allocation's start falls inside the other's subband
 * range (return statements elided in this view). */
21223 static Bool rgSCHCmnUlAllocsOvrLap
21225 RgSchUlAlloc *alloc1,
21226 RgSchUlAlloc *alloc2
21229 static Bool rgSCHCmnUlAllocsOvrLap(alloc1, alloc2)
21230 RgSchUlAlloc *alloc1;
21231 RgSchUlAlloc *alloc2;
21236 if (((alloc1->sbStart >= alloc2->sbStart) &&
21237 (alloc1->sbStart <= alloc2->sbStart + alloc2->numSb-1)) ||
21238 ((alloc2->sbStart >= alloc1->sbStart) &&
21239 (alloc2->sbStart <= alloc1->sbStart + alloc1->numSb-1)))
21246 * @brief Copy allocation Info from src to dst.
21250 * Function : rgSCHCmnUlCpyAllocInfo
21252 * - Copy allocation Info from src to dst.
21254 * @param[in] RgSchUlAlloc *srcAlloc
21255 * @param[in] RgSchUlAlloc *dstAlloc
/* Moves the logical allocation state (grant, HARQ proc, UE linkage, SPS
 * linkage) from srcAlloc to dstAlloc, re-pointing every back-reference
 * (hqProc->alloc, ueUl->alloc.alloc, SPS crntAlloc) at dstAlloc so no
 * stale pointer survives the subframe merge. */
21259 static Void rgSCHCmnUlCpyAllocInfo
21262 RgSchUlAlloc *srcAlloc,
21263 RgSchUlAlloc *dstAlloc
21266 static Void rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc)
21268 RgSchUlAlloc *srcAlloc;
21269 RgSchUlAlloc *dstAlloc;
21272 RgSchCmnUlUe *ueUl;
21274 dstAlloc->grnt = srcAlloc->grnt;
21275 dstAlloc->hqProc = srcAlloc->hqProc;
21276 /* Fix : syed During UE context release, hqProc->alloc
21277 * was pointing to srcAlloc instead of dstAlloc and
21278 * freeing from incorrect sf->allocDb was
21279 * corrupting the list. */
21280 /* In case of SPS Occasion Allocation is done in advance and
21281 at a later time Hq Proc is linked. Hence HqProc
21282 pointer in alloc shall be NULL */
21284 if (dstAlloc->hqProc)
21287 dstAlloc->hqProc->alloc = dstAlloc;
21289 dstAlloc->ue = srcAlloc->ue;
21290 dstAlloc->rnti = srcAlloc->rnti;
21291 dstAlloc->forMsg3 = srcAlloc->forMsg3;
21292 dstAlloc->raCb = srcAlloc->raCb;
21293 dstAlloc->pdcch = srcAlloc->pdcch;
21294 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* Guard condition for the UE-backpointer update is elided in this view
 * (presumably "if (dstAlloc->ue)") */
21297 ueUl = RG_SCH_CMN_GET_UL_UE(dstAlloc->ue,cell);
21298 ueUl->alloc.alloc = dstAlloc;
21300 if (dstAlloc->ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
21302 if((dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc != NULLP)
21303 && (dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc == srcAlloc))
21305 dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc = dstAlloc;
21314 * @brief Update TX and RETX subframe's allocation
21319 * Function : rgSCHCmnUlInsAllocFrmNewSf2OldSf
21321 * - Release all preassigned allocations of newSf and merge
21323 * - If alloc of newSf collide with one or more allocs of oldSf
21324 * - mark all such allocs of oldSf for Adaptive Retx.
21325 * - Swap the alloc and hole DB references of oldSf and newSf.
21327 * @param[in] RgSchCellCb *cell
21328 * @param[in] RgSchUlSf *newSf
21329 * @param[in] RgSchUlSf *oldSf
21330 * @param[in] RgSchUlAlloc *srcAlloc
/* Inserts one newSf allocation (srcAlloc) into oldSf: first evicts every
 * colliding oldSf alloc (marking each for adaptive retx), then — if
 * oldSf is at its alloc cap — frees retx allocs to make room (new msg3
 * wins over retx), and finally carves the exact same subband span out of
 * oldSf and copies srcAlloc's state into it. */
21334 static Void rgSCHCmnUlInsAllocFrmNewSf2OldSf
21339 RgSchUlAlloc *srcAlloc
21342 static Void rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, srcAlloc)
21346 RgSchUlAlloc *srcAlloc;
21349 RgSchUlAlloc *alloc, *dstAlloc, *nxtAlloc;
21351 /* MS_WORKAROUND ccpu00120827 */
21352 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
21355 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
21359 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
21360 /* If there is an overlap between alloc and srcAlloc
21361 * then alloc is marked for Adaptive retx and it is released
21363 if (rgSCHCmnUlAllocsOvrLap(alloc, srcAlloc) == TRUE)
21365 rgSCHCmnUlUpdAllocRetx(cell, alloc);
21366 rgSCHUtlUlAllocRls(oldSf, alloc);
21368 /* No further allocs spanning the srcAlloc subbands */
/* Allocs are ordered by subband, so once we pass srcAlloc's last
 * subband no later alloc can overlap */
21369 if (srcAlloc->sbStart + srcAlloc->numSb - 1 <= alloc->sbStart)
21373 } while ((alloc = nxtAlloc) != NULLP);
21376 /* After freeing all the colliding allocs, request for an allocation
21377 * specifying the start and numSb with in txSf. This function should
21378 * always return positively with a nonNULL dstAlloc */
21379 /* MS_WORKAROUND ccpu00120827 */
21380 remAllocs = schCmnCell->ul.maxAllocPerUlSf - *oldSf->allocCountRef;
/* Guard on remAllocs (elided in this view) decides whether the
 * make-room eviction below runs */
21383 /* Fix : If oldSf already has max Allocs then release the
21384 * old RETX alloc to make space for new alloc of newSf.
21385 * newSf allocs(i.e new Msg3s) are given higher priority
21386 * over retx allocs. */
21387 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
21391 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
21392 if (!alloc->mrgdNewTxAlloc)
21394 /* If alloc is for RETX */
21395 /* TODO: Incase of this ad also in case of choosing
21396 * and alloc for ADAP RETX, we need to send ACK for
21397 * the corresponding alloc in PHICH */
21398 #ifndef EMTC_ENABLE
21399 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc);
21401 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc,FALSE);
21405 }while((alloc = nxtAlloc) != NULLP);
21408 dstAlloc = rgSCHUtlUlGetSpfcAlloc(oldSf, srcAlloc->sbStart, srcAlloc->numSb);
21410 /* This should never happen */
21411 if (dstAlloc == NULLP)
21413 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"CRNTI:%d "
21414 "rgSCHUtlUlGetSpfcAlloc failed in rgSCHCmnUlInsAllocFrmNewSf2OldSf",
21419 /* Copy the srcAlloc's state information in to dstAlloc */
21420 rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc);
21421 /* Set new Tx merged Alloc Flag to TRUE, indicating that this
21422 * alloc shall not be processed for non-adaptive retransmission */
21423 dstAlloc->mrgdNewTxAlloc = TRUE;
21427 * @brief Merge all allocations of newSf to oldSf.
21431 * Function : rgSCHCmnUlMergeSfAllocs
21433 * - Merge all allocations of newSf to oldSf.
21434 * - If newSf's alloc collides with oldSf's alloc
21435 * then oldSf's alloc is marked for adaptive Retx
21436 * and is released from oldSf to create space for
21439 * @param[in] RgSchCellCb *cell
21440 * @param[in] RgSchUlSf *oldSf
21441 * @param[in] RgSchUlSf *newSf
/* Iterates newSf's allocation list, moving each alloc into oldSf via
 * rgSCHCmnUlInsAllocFrmNewSf2OldSf and then releasing it from newSf.
 * The next pointer is fetched before the move since the release unlinks
 * the current node. */
21445 static Void rgSCHCmnUlMergeSfAllocs
21452 static Void rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf)
21458 RgSchUlAlloc *alloc, *nxtAlloc;
21461 /* Merge each alloc of newSf in to oldSf
21462 * and release it from newSf */
21463 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
21467 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
21468 rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, alloc);
21469 rgSCHUtlUlAllocRls(newSf, alloc);
21470 } while((alloc = nxtAlloc) != NULLP);
21475 * @brief Swap Hole/Alloc DB context of newSf and oldSf.
21479 * Function : rgSCHCmnUlSwapSfAllocs
21481 * - Swap Hole/Alloc DB context of newSf and oldSf.
21483 * @param[in] RgSchCellCb *cell
21484 * @param[in] RgSchUlSf *oldSf
21485 * @param[in] RgSchUlSf *newSf
/* Classic three-way pointer swap of the allocation DB, hole DB and
 * available-subband count between the two subframes. After the swap the
 * allocCountRef caches must be re-derived, since each must point into
 * its own (newly swapped) allocDb. */
21489 static Void rgSCHCmnUlSwapSfAllocs
21496 static Void rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf)
21502 RgSchUlAllocDb *tempAllocDb = newSf->allocDb;
21503 RgSchUlHoleDb *tempHoleDb = newSf->holeDb;
21504 uint8_t tempAvailSbs = newSf->availSubbands;
21508 newSf->allocDb = oldSf->allocDb;
21509 newSf->holeDb = oldSf->holeDb;
21510 newSf->availSubbands = oldSf->availSubbands;
21512 oldSf->allocDb = tempAllocDb;
21513 oldSf->holeDb = tempHoleDb;
21514 oldSf->availSubbands = tempAvailSbs;
21516 /* Fix ccpu00120610*/
21517 newSf->allocCountRef = &newSf->allocDb->count;
21518 oldSf->allocCountRef = &oldSf->allocDb->count;
21522 * @brief Perform non-adaptive RETX for non-colliding allocs.
21526 * Function : rgSCHCmnUlPrcNonAdptRetx
21528 * - Perform non-adaptive RETX for non-colliding allocs.
21530 * @param[in] RgSchCellCb *cell
21531 * @param[in] RgSchUlSf *newSf
21532 * @param[in] uint8_t idx
/* Post-merge pass: every surviving alloc in newSf that is NOT a freshly
 * merged new-TX alloc is an old transmission that kept its PRBs, so it
 * is retransmitted non-adaptively; merged new-TX allocs just get their
 * one-shot mrgdNewTxAlloc flag cleared. */
21536 static Void rgSCHCmnUlPrcNonAdptRetx
21543 static Void rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx)
21549 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21550 RgSchUlAlloc *alloc, *nxtAlloc;
21552 /* perform non-adaptive retx allocation(adjustment) */
21553 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
21557 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
21558 /* A merged new TX alloc, reset the state and skip */
21559 if (alloc->mrgdNewTxAlloc)
21561 alloc->mrgdNewTxAlloc = FALSE;
21566 rgSCHCmnUlNonadapRetx(cellUl, alloc, idx);
21568 } while((alloc = nxtAlloc) != NULLP);
21574 * @brief Update TX and RETX subframe's allocation
21579 * Function : rgSCHCmnUlPrfmSfMerge
21581 * - Release all preassigned allocations of newSf and merge
21583 * - If alloc of newSf collide with one or more allocs of oldSf
21584 * - mark all such allocs of oldSf for Adaptive Retx.
21585 * - Swap the alloc and hole DB references of oldSf and newSf.
21586 * - The allocs which did not collide with pre-assigned msg3
21587 * allocs are marked for non-adaptive RETX.
21589 * @param[in] RgSchCellCb *cell
21590 * @param[in] RgSchUlSf *oldSf
21591 * @param[in] RgSchUlSf *newSf
21592 * @param[in] uint8_t idx
/* Orchestrates the three-phase subframe merge: (1) merge newSf's
 * pre-assigned (msg3) allocs into oldSf, (2) swap the alloc/hole DBs so
 * newSf holds the merged picture, (3) schedule non-adaptive retx for
 * the non-colliding survivors. */
21596 static Void rgSCHCmnUlPrfmSfMerge
21604 static Void rgSCHCmnUlPrfmSfMerge(cell, oldSf, newSf, idx)
21611 /* Preassigned resources for msg3 in newSf.
21612 * Hence do adaptive retx for all NACKED TXs */
21613 rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf);
21614 /* swap alloc and hole DBs of oldSf and newSf. */
21615 rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf);
21616 /* Here newSf has the resultant merged allocs context */
21617 /* Perform non-adaptive RETX for non-colliding allocs */
21618 rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx);
21624 * @brief Update TX and RETX subframe's allocation
21629 * Function : rgSCHCmnUlRmvCmpltdAllocs
21631 * - Free all Transmission which are ACKED
21632 * OR for which MAX retransmission have
21636 * @param[in] RgSchCellCb *cell,
21637 * @param[in] RgSchUlSf *sf
/* Sweeps the subframe's allocation list: completed HARQ transmissions
 * (CRC received, or retransmission budget exhausted) are freed; the rest
 * (excluding msg3) are marked for adaptive retransmission and released
 * from this subframe. */
21641 static Void rgSCHCmnUlRmvCmpltdAllocs
21647 static Void rgSCHCmnUlRmvCmpltdAllocs(cell, sf)
21652 RgSchUlAlloc *alloc, *nxtAlloc;
21654 if ((alloc = rgSCHUtlUlAllocFirst(sf)) == NULLP)
21660 nxtAlloc = rgSCHUtlUlAllocNxt(sf, alloc);
/* NOTE(review): this debug printf and the unconditional
 * rcvdCrcInd = TRUE below (which would make the following condition
 * always true) are almost certainly inside an elided compile-time
 * debug conditional — confirm against the full source. */
21662 printf("rgSCHCmnUlRmvCmpltdAllocs:time(%d %d) alloc->hqProc->remTx %d hqProcId(%d) \n",cell->crntTime.sfn,cell->crntTime.slot,alloc->hqProc->remTx, alloc->grnt.hqProcId);
21664 alloc->hqProc->rcvdCrcInd = TRUE;
21665 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
21668 /* SR_RACH_STATS : MSG 3 MAX RETX FAIL*/
21669 if ((alloc->forMsg3 == TRUE) && (alloc->hqProc->remTx == 0))
21671 rgNumMsg3FailMaxRetx++;
21673 cell->tenbStats->sch.msg3Fail++;
21677 #ifdef MAC_SCH_STATS
21678 if(alloc->ue != NULLP)
21680 /* access from ulHarqProc*/
21681 RgSchUeCb *ueCb = alloc->ue;
21682 RgSchCmnUe *cmnUe = (RgSchCmnUe*)ueCb->sch;
21683 RgSchCmnUlUe *ulUe = &(cmnUe->ul);
21684 uint8_t cqi = ulUe->crntUlCqi[0];
21685 uint16_t numUlRetx = ueCb->ul.hqEnt.maxHqRetx - alloc->hqProc->remTx;
21687 hqRetxStats.ulCqiStat[(cqi - 1)].mcs = alloc->grnt.iMcs;
/* Bucket the finished transmission by how many HARQ attempts it took
 * (switch arms elided in this view) */
21692 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1++;
21695 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2++;
21698 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3++;
21701 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4++;
21704 hqRetxStats.ulCqiStat[(cqi - 1)].totalTx = \
21705 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1 + \
21706 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2 * 2) + \
21707 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3 * 3) + \
21708 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4 * 4);
21711 #endif /*MAC_SCH_STATS*/
21712 rgSCHCmnUlFreeAllocation(cell, sf, alloc);
21714 /*ccpu00106104 MOD added check for AckNackRep */
21715 /*added check for acknack so that adaptive retx considers ue
21716 inactivity due to ack nack repetition*/
21717 else if((alloc->ue != NULLP) && (TRUE != alloc->forMsg3))
21719 rgSCHCmnUlUpdAllocRetx(cell, alloc);
21720 rgSCHUtlUlAllocRls(sf, alloc);
21722 } while ((alloc = nxtAlloc) != NULLP);
21728 * @brief Update an uplink subframe.
21732 * Function : rgSCHCmnRlsUlSf
21734 * For each allocation
21735 * - if no more tx needed
21736 * - Release allocation
21738 * - Perform retransmission
21740 * @param[in] RgSchUlSf *sf
21741 * @param[in] uint8_t idx
21745 Void rgSCHCmnRlsUlSf
21751 Void rgSCHCmnRlsUlSf(cell, idx)
/* Releases the UL subframe whose HARQ feedback is processed at index
 * 'idx': if the feedback index is valid and the subframe has any
 * allocations, completed ones are freed and pending ones are staged for
 * retransmission by rgSCHCmnUlRmvCmpltdAllocs(). */
21757 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21759 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
21761 RgSchUlSf *oldSf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
21763 /* Initialize the reTxLst of UL HqProcs for RETX subframe */
/* Empty subframe: nothing to release or retransmit. */
21764 if (rgSCHUtlUlAllocFirst(oldSf) == NULLP)
21768 /* Release all completed TX allocs from sf */
21769 rgSCHCmnUlRmvCmpltdAllocs(cell, oldSf);
/* Reset the aperiodic-CQI counter for the recycled subframe. */
21771 oldSf->numACqiCount = 0;
21777 * @brief Handle uplink allocation for retransmission.
21781 * Function : rgSCHCmnUlUpdAllocRetx
21783 * - Perform adaptive retransmission
21785 * @param[in] RgSchUlSf *sf
21786 * @param[in] RgSchUlAlloc *alloc
21790 static Void rgSCHCmnUlUpdAllocRetx
21793 RgSchUlAlloc *alloc
21796 static Void rgSCHCmnUlUpdAllocRetx(cell, alloc)
21798 RgSchUlAlloc *alloc;
/* Snapshots the grant parameters of a NACKed allocation into the HARQ
 * process's reTxAlloc so the alloc itself can be released; the process is
 * then appended to the cell-wide adaptive-retx list, from which
 * rgSCHCmnUlSfReTxAllocs() later attempts a fresh allocation. */
21801 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
21804 alloc->hqProc->reTxAlloc.rnti = alloc->rnti;
21805 alloc->hqProc->reTxAlloc.numSb = alloc->numSb;
21806 alloc->hqProc->reTxAlloc.iMcs = alloc->grnt.iMcs;
21808 alloc->hqProc->reTxAlloc.dciFrmt = alloc->grnt.dciFrmt;
21809 alloc->hqProc->reTxAlloc.numLyr = alloc->grnt.numLyr;
21810 alloc->hqProc->reTxAlloc.vrbgStart = alloc->grnt.vrbgStart;
21811 alloc->hqProc->reTxAlloc.numVrbg = alloc->grnt.numVrbg;
21812 alloc->hqProc->reTxAlloc.modOdr = alloc->grnt.modOdr;
21814 //iTbs = rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs);
21815 //iTbs = alloc->grnt.iMcs;
21816 //RGSCH_ARRAY_BOUND_CHECK( 0, rgTbSzTbl[0], iTbs);
/* TB size is carried over verbatim rather than re-derived from iMcs/RBs
 * (superseded table lookup kept above as commented-out history). */
21817 alloc->hqProc->reTxAlloc.tbSz = alloc->grnt.datSz;
21818 //rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1]/8;
21819 alloc->hqProc->reTxAlloc.ue = alloc->ue;
21820 alloc->hqProc->reTxAlloc.forMsg3 = alloc->forMsg3;
21821 alloc->hqProc->reTxAlloc.raCb = alloc->raCb;
21823 /* Set as retransmission is pending */
21824 alloc->hqProc->isRetx = TRUE;
21825 alloc->hqProc->alloc = NULLP;
21826 alloc->hqProc->ulSfIdx = RGSCH_INVALID_INFO;
/* NOTE(review): raw printf in the scheduler path — presumably debug
 * leftover; RLOG is the file's logging convention. */
21828 printf("Adding Harq Proc Id in the retx list hqProcId %d \n",alloc->grnt.hqProcId);
21830 cmLListAdd2Tail(&cmnUlCell->reTxLst, &alloc->hqProc->reTxLnk);
21831 alloc->hqProc->reTxLnk.node = (PTR)alloc->hqProc;
21836 * @brief Attempts allocation for msg3s for which ADAP retransmissions
21841 * Function : rgSCHCmnUlAdapRetxAlloc
21843 * Attempts allocation for msg3s for which ADAP retransmissions
21846 * @param[in] RgSchCellCb *cell
21847 * @param[in] RgSchUlSf *sf
21848 * @param[in] RgSchUlHqProcCb *proc;
21849 * @param[in] RgSchUlHole *hole;
21853 static Bool rgSCHCmnUlAdapRetxAlloc
21857 RgSchUlHqProcCb *proc,
21861 static Bool rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole)
21864 RgSchUlHqProcCb *proc;
/* Attempts an adaptive retransmission for one UL HARQ process: grabs a
 * PDCCH, carves 'numSb' subbands out of 'hole', rebuilds the grant from
 * the snapshot in proc->reTxAlloc, and fills the DCI format 0 fields.
 * Returns TRUE on success (or when failure should not stop the caller's
 * scan, e.g. no PDCCH for this UE); FALSE when the hole is too small. */
21868 uint8_t numSb = proc->reTxAlloc.numSb;
21869 uint8_t iMcs = proc->reTxAlloc.iMcs;
21870 CmLteTimingInfo frm = cell->crntTime;
21871 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21874 RgSchUlAlloc *alloc;
21876 /* Fetch PDCCH for msg3 */
21877 /* ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
21878 /* Introduced timing delta for UL control */
21879 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
21880 dlSf = rgSCHUtlSubFrmGet(cell, frm);
21881 pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
/* No PDCCH available in the control region: cannot signal the retx. */
21882 if (pdcch == NULLP)
21887 /* Fetch UL Alloc for msg3 */
21888 if (numSb <= hole->num)
21890 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
/* Alloc-from-hole failed: return the PDCCH so it is not leaked. */
21895 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
21896 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
21897 "UL Alloc fail for msg3 retx for rnti: %d\n",
21898 proc->reTxAlloc.rnti);
21902 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
21903 alloc->grnt.iMcs = iMcs;
21904 alloc->grnt.datSz = proc->reTxAlloc.tbSz;
21907 //RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
21909 /* Fill UL Alloc for msg3 */
21910 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
21911 alloc->grnt.nDmrs = 0;
21912 alloc->grnt.hop = 0;
21913 alloc->grnt.delayBit = 0;
21914 alloc->grnt.isRtx = TRUE;
21915 proc->ulSfIdx = cellUl->schdIdx;
21917 proc->schdTime = cellUl->schdTime;
21918 alloc->grnt.hqProcId = proc->procId;
/* Restore the grant parameters saved when the retx was staged. */
21919 alloc->grnt.dciFrmt = proc->reTxAlloc.dciFrmt;
21920 alloc->grnt.numLyr = proc->reTxAlloc.numLyr;
21921 alloc->grnt.vrbgStart = proc->reTxAlloc.vrbgStart;
21922 alloc->grnt.numVrbg = proc->reTxAlloc.numVrbg;
21923 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
21924 alloc->grnt.modOdr = proc->reTxAlloc.modOdr;
21926 /* TODO : Hardcoding these as of now */
21927 alloc->grnt.hop = 0;
21928 alloc->grnt.SCID = 0;
21929 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
21930 alloc->grnt.PMI = 0;
21931 alloc->grnt.uciOnxPUSCH = 0;
21933 alloc->rnti = proc->reTxAlloc.rnti;
21934 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
21935 alloc->ue = proc->reTxAlloc.ue;
21936 alloc->pdcch = pdcch;
21937 alloc->forMsg3 = proc->reTxAlloc.forMsg3;
21938 alloc->raCb = proc->reTxAlloc.raCb;
21939 alloc->hqProc = proc;
21940 alloc->isAdaptive = TRUE;
21942 sf->totPrb += alloc->grnt.numRb;
21944 /* FIX : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* msg3 path (raCb present): stash the grant and the time at which the UE
 * will actually transmit MSG3 into the RA control block. */
21947 alloc->raCb->msg3Grnt= alloc->grnt;
21949 /* To the crntTime, add the time at which UE will
21950 * actually send MSG3 */
21951 alloc->raCb->msg3AllocTime = cell->crntTime;
21952 RGSCH_INCR_SUB_FRAME(alloc->raCb->msg3AllocTime, RG_SCH_CMN_MIN_RETXMSG3_RECP_INTRVL);
21954 alloc->raCb->msg3AllocTime = cellUl->schdTime;
21956 rgSCHCmnUlAdapRetx(alloc, proc);
21957 /* Fill PDCCH with alloc info */
21958 pdcch->rnti = alloc->rnti;
21959 pdcch->dci.dciFormat = TFU_DCI_FORMAT_0;
21960 pdcch->dci.u.format0Info.hoppingEnbld = alloc->grnt.hop;
21961 pdcch->dci.u.format0Info.rbStart = alloc->grnt.rbStart;
21962 pdcch->dci.u.format0Info.numRb = alloc->grnt.numRb;
21963 pdcch->dci.u.format0Info.mcs = alloc->grnt.iMcsCrnt;
21964 pdcch->dci.u.format0Info.ndi = alloc->hqProc->ndi;
21965 pdcch->dci.u.format0Info.nDmrs = alloc->grnt.nDmrs;
21966 pdcch->dci.u.format0Info.tpcCmd = alloc->grnt.tpc;
21970 /* ulIdx setting for cfg 0 shall be appropriately fixed thru ccpu00109015 */
21971 pdcch->dci.u.format0Info.ulIdx = RG_SCH_ULIDX_MSB;
21972 pdcch->dci.u.format0Info.dai = RG_SCH_MAX_DAI_IDX;
21975 pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_0];
/* Connected-UE path: record the allocation against the UE context and
 * propagate it into the UE's UL allocation info. */
21979 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue,cell);
21981 alloc->ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
21984 ue->ul.nPrb = alloc->grnt.numRb;
21986 ueUl->alloc.alloc = alloc;
21987 /* FIx: Removed the call to rgSCHCmnUlAdapRetx */
21988 rgSCHCmnUlUeFillAllocInfo(cell, alloc->ue);
21989 /* Setting csireq as false for Adaptive Retx*/
21990 ueUl->alloc.alloc->pdcch->dci.u.format0Info.cqiReq = RG_SCH_APCQI_NO;
21991 pdcch->dciNumOfBits = alloc->ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
21993 /* Reset as retransmission is done */
21994 proc->isRetx = FALSE;
21996 else /* Intg fix */
/* Hole too small for the required subbands: release the PDCCH and report
 * failure so the caller stops scanning this subframe. */
21998 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
21999 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
22000 "Num SB not suffiecient for adap retx for rnti: %d",
22001 proc->reTxAlloc.rnti);
22009 * @brief Releases all Adaptive Retx HqProcs which failed for
22010 * allocations in this scheduling occassion.
22014 * Function : rgSCHCmnUlSfRlsRetxProcs
22017 * @param[in] RgSchCellCb *cell
22018 * @param[in] RgSchUlSf *sf
22023 static Void rgSCHCmnUlSfRlsRetxProcs
22029 static Void rgSCHCmnUlSfRlsRetxProcs(cell, sf)
22036 RgSchUlHqProcCb *proc;
22037 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
22040 cp = &(cellUl->reTxLst);
22044 proc = (RgSchUlHqProcCb *)node->node;
22046 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
22047 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
22048 proc->reTxLnk.node = (PTR)NULLP;
22055 * @brief Attempts allocation for UEs for which retransmissions
22060 * Function : rgSCHCmnUlSfReTxAllocs
22062 * Attempts allocation for UEs for which retransmissions
22065 * @param[in] RgSchCellCb *cell
22066 * @param[in] RgSchUlSf *sf
22070 static Void rgSCHCmnUlSfReTxAllocs
22076 static Void rgSCHCmnUlSfReTxAllocs(cell, sf)
/* Iterates the cell's pending adaptive-retx HARQ processes and tries to
 * place each one in 'sf' via rgSCHCmnUlAdapRetxAlloc(). UEs in measurement
 * gap or ACK/NACK repetition are skipped this occasion; successfully
 * placed (or skip-worthy) processes are removed from the retx list. */
22083 RgSchUlHqProcCb *proc;
22086 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
22087 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
22089 cp = &(cellUl->reTxLst);
22093 proc = (RgSchUlHqProcCb *)node->node;
22094 ue = proc->reTxAlloc.ue;
22096 /*ccpu00106104 MOD added check for AckNackRep */
22097 /*added check for acknack so that adaptive retx considers ue
22098 inactivity due to ack nack repetition*/
22099 if((ue != NULLP) &&
22100 ((ue->measGapCb.isMeasuring == TRUE)||
22101 (ue->ackNakRepCb.isAckNakRep == TRUE)))
22105 /* Fix for ccpu00123917: Check if maximum allocs per UL sf have been exhausted */
22106 if (((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
22107 || (sf->allocDb->count == schCmnCell->ul.maxAllocPerUlSf))
22109 /* No more UL BW then return */
22112 /* perform adaptive retx for UE's */
/* FALSE means the first hole could not fit this retx — stop scanning. */
22113 if (rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole) == FALSE)
22117 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
22118 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
22119 /* Fix: syed Adaptive Msg3 Retx crash. */
22120 proc->reTxLnk.node = (PTR)NULLP;
22126 * @brief Handles RB allocation for downlink.
22130 * Function : rgSCHCmnDlRbAlloc
22132 * Invoking Module Processing:
22133 * - This function is invoked for DL RB allocation
22135 * Processing Steps:
22136 * - If cell is frequency selecive,
22137 * - Call rgSCHDlfsAllocRb().
22139 * - Call rgSCHCmnNonDlfsRbAlloc().
22141 * @param[in] RgSchCellCb *cell
22142 * @param[in] RgSchDlRbAllocInfo *allocInfo
22147 static Void rgSCHCmnDlRbAlloc
22150 RgSchCmnDlRbAllocInfo *allocInfo
22153 static Void rgSCHCmnDlRbAlloc(cell, allocInfo)
22155 RgSchCmnDlRbAllocInfo *allocInfo;
/* Thin dispatcher: routes DL RB allocation to the frequency-selective
 * scheduler plug-in when enabled, else to the non-DLFS allocator. */
22158 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
22160 if (cellSch->dl.isDlFreqSel)
/* NOTE(review): "5GTF_ERROR"-tagged printf on the normal DLFS path —
 * presumably debug tracing, not an actual error. */
22162 printf("5GTF_ERROR DLFS SCH Enabled\n");
22163 cellSch->apisDlfs->rgSCHDlfsAllocRb(cell, allocInfo);
22167 rgSCHCmnNonDlfsRbAlloc(cell, allocInfo);
22175 * @brief Determines number of RBGs and RBG subset sizes for the given DL
22176 * bandwidth and rbgSize
22179 * Function : rgSCHCmnDlGetRbgInfo
22182 * Processing Steps:
22183 * - Fill-up rbgInfo data structure for given DL bandwidth and rbgSize
22185 * @param[in] uint8_t dlTotalBw
22186 * @param[in] uint8_t dlSubsetBw
22187 * @param[in] uint8_t maxRaType1SubsetBw
22188 * @param[in] uint8_t rbgSize
22189 * @param[out] RgSchBwRbgInfo *rbgInfo
22193 Void rgSCHCmnDlGetRbgInfo
22196 uint8_t dlSubsetBw,
22197 uint8_t maxRaType1SubsetBw,
22199 RgSchBwRbgInfo *rbgInfo
22202 Void rgSCHCmnDlGetRbgInfo(dlTotalBw, dlSubsetBw, maxRaType1SubsetBw,
22205 uint8_t dlSubsetBw;
22206 uint8_t maxRaType1SubsetBw;
22208 RgSchBwRbgInfo *rbgInfo;
/* Precomputes RBG bookkeeping for 'dlSubsetBw' RBs at group size
 * 'rbgSize': number of RBGs, size of the (possibly short) last RBG, the
 * covered RB count, and — when RA type 1 support is compiled in — the
 * per-subset RBG size table. */
22211 #ifdef RGSCH_SPS_UNUSED
22213 uint8_t lastRbgIdx = ((dlTotalBw + rbgSize - 1)/rbgSize) - 1;
22214 uint8_t currRbgSize = rbgSize;
22215 uint8_t subsetSizeIdx = 0;
22216 uint8_t subsetSize[RG_SCH_NUM_RATYPE1_SUBSETS] = {0};
/* Last RBG shrinks when the bandwidth is not a multiple of rbgSize. */
22217 uint8_t lastRbgSize = rbgSize - (dlTotalBw - ((dlTotalBw/rbgSize) * rbgSize));
22218 uint8_t numRaType1Rbgs = (maxRaType1SubsetBw + rbgSize - 1)/rbgSize;
22221 /* Compute maximum number of SPS RBGs for the cell */
22222 rbgInfo->numRbgs = ((dlSubsetBw + rbgSize - 1)/rbgSize);
22224 #ifdef RGSCH_SPS_UNUSED
22225 /* Distribute RBGs across subsets except last RBG */
22226 for (;idx < numRaType1Rbgs - 1; ++idx)
22228 subsetSize[subsetSizeIdx] += currRbgSize;
22229 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
22232 /* Computation for last RBG */
22233 if (idx == lastRbgIdx)
22235 currRbgSize = lastRbgSize;
22237 subsetSize[subsetSizeIdx] += currRbgSize;
22238 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
22241 /* Update the computed sizes */
22242 #ifdef RGSCH_SPS_UNUSED
22243 rbgInfo->lastRbgSize = currRbgSize;
/* NOTE(review): when dlSubsetBw is an exact multiple of rbgSize this
 * evaluates to rbgSize (full last RBG), which appears intended — confirm. */
22245 rbgInfo->lastRbgSize = rbgSize -
22246 (dlSubsetBw - ((dlSubsetBw/rbgSize) * rbgSize));
22247 #ifdef RGSCH_SPS_UNUSED
22248 memcpy(rbgInfo->rbgSubsetSize, subsetSize, 4 * sizeof(uint8_t));
/* Clamp the covered RB count so it never exceeds the total bandwidth. */
22250 rbgInfo->numRbs = (rbgInfo->numRbgs * rbgSize > dlTotalBw) ?
22251 dlTotalBw:(rbgInfo->numRbgs * rbgSize);
22252 rbgInfo->rbgSize = rbgSize;
22256 * @brief Handles RB allocation for Resource allocation type 0
22260 * Function : rgSCHCmnDlRaType0Alloc
22262 * Invoking Module Processing:
22263 * - This function is invoked for DL RB allocation for resource allocation
22266 * Processing Steps:
22267 * - Determine the available positions in the rbgMask.
22268 * - Allocate RBGs in the available positions.
22269 * - Update RA Type 0, RA Type 1 and RA type 2 masks.
22271 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22272 * @param[in] uint8_t rbsReq
22273 * @param[in] RgSchBwRbgInfo *rbgInfo
22274 * @param[out] uint8_t *numAllocRbs
22275 * @param[out] RgSchDlSfAllocInfo *resAllocInfo
22276 * @param[in] Bool isPartialAlloc
22282 uint8_t rgSCHCmnDlRaType0Alloc
22284 RgSchDlSfAllocInfo *allocedInfo,
22286 RgSchBwRbgInfo *rbgInfo,
22287 uint8_t *numAllocRbs,
22288 RgSchDlSfAllocInfo *resAllocInfo,
22289 Bool isPartialAlloc
22292 uint8_t rgSCHCmnDlRaType0Alloc(allocedInfo, rbsReq, rbgInfo,
22293 numAllocRbs, resAllocInfo, isPartialAlloc)
22294 RgSchDlSfAllocInfo *allocedInfo;
22296 RgSchBwRbgInfo *rbgInfo;
22297 uint8_t *numAllocRbs;
22298 RgSchDlSfAllocInfo *resAllocInfo;
22299 Bool isPartialAlloc;
/* RA type 0 allocator: scans the RBG bitmap for free groups, claims whole
 * RBGs until rbsReq is met (or, with isPartialAlloc, until availability
 * runs out), and mirrors every claimed RB into the RA type 2 (and, when
 * compiled, RA type 1) masks so the three views stay consistent.
 * Returns the number of RBGs allocated; *numAllocRbs gets the RB count. */
22302 /* Note: This function atttempts allocation only full allocation */
22303 uint32_t remNumRbs, rbgPosInRbgMask, ueRaType2Mask;
22304 uint8_t type2MaskIdx, cnt, rbIdx;
22305 uint8_t maskSize, rbg;
22306 uint8_t bestNumAvailRbs = 0;
22307 uint8_t usedRbs = 0;
22308 uint8_t numAllocRbgs = 0;
22309 uint8_t rbgSize = rbgInfo->rbgSize;
22310 uint32_t *rbgMask = &(resAllocInfo->raType0Mask);
22311 #ifdef RGSCH_SPS_UNUSED
22313 uint32_t ueRaType1Mask;
22314 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
22315 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22317 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
22319 uint32_t allocedMask = allocedInfo->raType0Mask;
22321 maskSize = rbgInfo->numRbgs;
22324 RG_SCH_CMN_DL_COUNT_ONES(allocedMask, maskSize, &usedRbs);
22325 if (maskSize == usedRbs)
22327 /* All RBGs are allocated, including the last one */
/* Available RBs: full-size RBGs excluding the (possibly short) last one;
 * the last RBG is added separately below if free. */
22332 remNumRbs = (maskSize - usedRbs - 1) * rbgSize; /* vamsee: removed minus 1 */
22334 /* If last RBG is available, add last RBG size */
22335 if (!(allocedMask & (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(maskSize - 1))))
22337 remNumRbs += rbgInfo->lastRbgSize;
22341 /* If complete allocation is needed, check if total requested RBs are available else
22342 * check the best available RBs */
22343 if (!isPartialAlloc)
22345 if (remNumRbs >= rbsReq)
22347 bestNumAvailRbs = rbsReq;
22352 bestNumAvailRbs = remNumRbs > rbsReq ? rbsReq : remNumRbs;
22355 /* Allocate for bestNumAvailRbs */
22356 if (bestNumAvailRbs)
/* Pass 1: claim free full-size RBGs (all but the last). */
22358 for (rbg = 0; rbg < maskSize - 1; ++rbg)
22360 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22361 if (!(allocedMask & rbgPosInRbgMask))
22363 /* Update RBG mask */
22364 *rbgMask |= rbgPosInRbgMask;
22366 /* Compute RB index of the first RB of the RBG allocated */
22367 rbIdx = rbg * rbgSize;
22369 for (cnt = 0; cnt < rbgSize; ++cnt)
22371 #ifdef RGSCH_SPS_UNUSED
22372 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22374 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22375 #ifdef RGSCH_SPS_UNUSED
22376 /* Update RBG mask for RA type 1 */
22377 raType1Mask[rbgSubset] |= ueRaType1Mask;
22378 raType1UsedRbs[rbgSubset]++;
22380 /* Update RA type 2 mask */
22381 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22384 *numAllocRbs += rbgSize;
22385 remNumRbs -= rbgSize;
22387 if (*numAllocRbs >= bestNumAvailRbs)
22393 /* If last RBG available and allocation is not completed, allocate
/* Pass 2: fall back to the short last RBG if the target is not yet met. */
22395 if (*numAllocRbs < bestNumAvailRbs)
22397 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22398 *rbgMask |= rbgPosInRbgMask;
22399 *numAllocRbs += rbgInfo->lastRbgSize;
22401 /* Compute RB index of the first RB of the last RBG */
22402 rbIdx = ((rbgInfo->numRbgs - 1 ) * rbgSize ); /* removed minus 1 vamsee */
22404 for (cnt = 0; cnt < rbgInfo->lastRbgSize; ++cnt)
22406 #ifdef RGSCH_SPS_UNUSED
22407 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22409 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22410 #ifdef RGSCH_SPS_UNUSED
22411 /* Update RBG mask for RA type 1 */
22412 raType1Mask[rbgSubset] |= ueRaType1Mask;
22413 raType1UsedRbs[rbgSubset]++;
22415 /* Update RA type 2 mask */
22416 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22419 remNumRbs -= rbgInfo->lastRbgSize;
22422 /* Note: this should complete allocation, not checking for the
22426 return (numAllocRbgs);
22429 #ifdef RGSCH_SPS_UNUSED
22431 * @brief Handles RB allocation for Resource allocation type 1
22435 * Function : rgSCHCmnDlRaType1Alloc
22437 * Invoking Module Processing:
22438 * - This function is invoked for DL RB allocation for resource allocation
22441 * Processing Steps:
22442 * - Determine the available positions in the subsets.
22443 * - Allocate RB in the available subset.
22444 * - Update RA Type1, RA type 0 and RA type 2 masks.
22446 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22447 * @param[in] uint8_t rbsReq
22448 * @param[in] RgSchBwRbgInfo *rbgInfo
22449 * @param[in] uint8_t startRbgSubset
22450 * @param[in] uint8_t *allocRbgSubset
22451 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
22452 * @param[in] Bool isPartialAlloc
22455 * Number of allocated RBs
22459 uint8_t rgSCHCmnDlRaType1Alloc
22461 RgSchDlSfAllocInfo *allocedInfo,
22463 RgSchBwRbgInfo *rbgInfo,
22464 uint8_t startRbgSubset,
22465 uint8_t *allocRbgSubset,
22466 RgSchDlSfAllocInfo *resAllocInfo,
22467 Bool isPartialAlloc
22470 uint8_t rgSCHCmnDlRaType1Alloc(allocedInfo, rbsReq,rbgInfo,startRbgSubset,
22471 allocRbgSubset, resAllocInfo, isPartialAlloc)
22472 RgSchDlSfAllocInfo *allocedInfo;
22474 RgSchBwRbgInfo *rbgInfo;
22475 uint8_t startRbgSubset;
22476 uint8_t *allocRbgSubset;
22477 RgSchDlSfAllocInfo *resAllocInfo;
22478 Bool isPartialAlloc;
/* RA type 1 allocator (compiled out unless RGSCH_SPS_UNUSED): phase 1
 * picks the RBG subset (round-robin from startRbgSubset) that can satisfy
 * rbsReq — or, with isPartialAlloc, the subset with the most free RBs;
 * phase 2 claims RBs within that subset and mirrors each into the RA
 * type 0 and type 2 masks. Returns the number of RBs allocated. */
22481 /* Note: This function atttempts only full allocation */
22482 uint8_t *rbgSubsetSzArr;
22483 uint8_t type2MaskIdx, subsetIdx, rbIdx, rbInSubset, rbgInSubset;
22484 uint8_t offset, rbg, maskSize, bestSubsetIdx;
22485 uint8_t startPos = 0;
22486 uint8_t bestNumAvailRbs = 0;
22487 uint8_t numAllocRbs = 0;
22488 uint32_t ueRaType2Mask, ueRaType0Mask, rbPosInSubset;
22489 uint32_t remNumRbs, allocedMask;
22490 uint8_t usedRbs = 0;
22491 uint8_t rbgSize = rbgInfo->rbgSize;
22492 uint8_t rbgSubset = startRbgSubset;
22493 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
22494 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
22495 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
22496 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22497 uint32_t *allocMask = allocedInfo->raType1Mask;
22499 /* Initialize the subset size Array */
22500 rbgSubsetSzArr = rbgInfo->rbgSubsetSize;
22502 /* Perform allocation for RA type 1 */
22503 for (subsetIdx = 0;subsetIdx < rbgSize; ++subsetIdx)
22505 allocedMask = allocMask[rbgSubset];
22506 maskSize = rbgSubsetSzArr[rbgSubset];
22508 /* Determine number of available RBs in the subset */
/* NOTE(review): used-RB lookup is indexed by subsetIdx (loop counter)
 * while mask/size use rbgSubset (rotated index) — these differ when
 * startRbgSubset != 0; looks inconsistent, confirm intended. */
22509 usedRbs = allocedInfo->raType1UsedRbs[subsetIdx];
22510 remNumRbs = maskSize - usedRbs;
22512 if (remNumRbs >= rbsReq)
22514 bestNumAvailRbs = rbsReq;
22515 bestSubsetIdx = rbgSubset;
22518 else if (isPartialAlloc && (remNumRbs > bestNumAvailRbs))
22520 bestNumAvailRbs = remNumRbs;
22521 bestSubsetIdx = rbgSubset;
22524 rbgSubset = (rbgSubset + 1) % rbgSize;
22525 } /* End of for (each rbgsubset) */
/* bestSubsetIdx is only read here, and only when bestNumAvailRbs was set
 * together with it above. */
22527 if (bestNumAvailRbs)
22529 /* Initialize alloced mask and subsetSize depending on the RBG
22530 * subset of allocation */
22531 uint8_t startIdx = 0;
22532 maskSize = rbgSubsetSzArr[bestSubsetIdx];
22533 allocedMask = allocMask[bestSubsetIdx];
22534 RG_SCH_CMN_DL_GET_START_POS(allocedMask, maskSize,
/* Walk the subset in rbgSize-strided passes, claiming free positions. */
22536 for (; startIdx < rbgSize; ++startIdx, ++startPos)
22538 for (rbInSubset = startPos; rbInSubset < maskSize;
22539 rbInSubset = rbInSubset + rbgSize)
22541 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
22542 if (!(allocedMask & rbPosInSubset))
22544 raType1Mask[bestSubsetIdx] |= rbPosInSubset;
22545 raType1UsedRbs[bestSubsetIdx]++;
22547 /* Compute RB index value for the RB being allocated */
22548 rbgInSubset = rbInSubset /rbgSize;
22549 offset = rbInSubset % rbgSize;
22550 rbg = (rbgInSubset * rbgSize) + bestSubsetIdx;
22551 rbIdx = (rbg * rbgSize) + offset;
22553 /* Update RBG mask for RA type 0 allocation */
22554 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
22555 *rbgMask |= ueRaType0Mask;
22557 /* Update RA type 2 mask */
22558 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22559 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22561 /* Update the counters */
22564 if (numAllocRbs == bestNumAvailRbs)
22569 } /* End of for (each position in the subset mask) */
22570 if (numAllocRbs == bestNumAvailRbs)
22574 } /* End of for startIdx = 0 to rbgSize */
22576 *allocRbgSubset = bestSubsetIdx;
22577 } /* End of if (bestNumAvailRbs) */
22579 return (numAllocRbs);
22583 * @brief Handles RB allocation for Resource allocation type 2
22587 * Function : rgSCHCmnDlRaType2Alloc
22589 * Invoking Module Processing:
22590 * - This function is invoked for DL RB allocation for resource allocation
22593 * Processing Steps:
22594 * - Determine the available positions in the mask
22595 * - Allocate best fit cosecutive RBs.
22596 * - Update RA Type2, RA type 1 and RA type 0 masks.
22598 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22599 * @param[in] uint8_t rbsReq
22600 * @param[in] RgSchBwRbgInfo *rbgInfo
22601 * @param[out] uint8_t *rbStart
22602 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
22603 * @param[in] Bool isPartialAlloc
22606 * Number of allocated RBs
22610 uint8_t rgSCHCmnDlRaType2Alloc
22612 RgSchDlSfAllocInfo *allocedInfo,
22614 RgSchBwRbgInfo *rbgInfo,
22616 RgSchDlSfAllocInfo *resAllocInfo,
22617 Bool isPartialAlloc
22620 uint8_t rgSCHCmnDlRaType2Alloc(allocedInfo, rbsReq, rbgInfo, rbStart,
22621 resAllocInfo, isPartialAlloc)
22622 RgSchDlSfAllocInfo *allocedInfo;
22624 RgSchBwRbgInfo *rbgInfo;
22626 RgSchDlSfAllocInfo *resAllocInfo;
22627 Bool isPartialAlloc;
/* RA type 2 (contiguous) allocator: delegates hole selection to
 * rgSCHCmnDlGetBestFitHole(), which fills the RA type 2 mask and returns
 * rbStart/numAllocRbs; then mirrors every allocated RB into the RA
 * type 0 (and, when compiled, RA type 1) masks. Returns the RB count. */
22630 uint8_t numAllocRbs = 0;
22632 uint8_t rbgSize = rbgInfo->rbgSize;
22633 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
22634 #ifdef RGSCH_SPS_UNUSED
22635 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
22637 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
22638 #ifdef RGSCH_SPS_UNUSED
22639 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22641 uint32_t *allocedMask = allocedInfo->raType2Mask;
22643 /* Note: This function atttempts only full allocation */
22644 rgSCHCmnDlGetBestFitHole(allocedMask, rbgInfo->numRbs,
22645 raType2Mask, rbsReq, rbStart, &numAllocRbs, isPartialAlloc);
22648 /* Update the allocation in RA type 0 and RA type 1 masks */
22649 uint8_t rbCnt = numAllocRbs;
22650 #ifdef RGSCH_SPS_UNUSED
22652 uint32_t ueRaType1Mask;
22654 uint32_t ueRaType0Mask;
22659 /* Update RBG mask for RA type 0 allocation */
22660 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
22661 *rbgMask |= ueRaType0Mask;
22663 #ifdef RGSCH_SPS_UNUSED
22664 /* Update RBG mask for RA type 1 */
22665 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22666 raType1Mask[rbgSubset] |= ueRaType1Mask;
22667 raType1UsedRbs[rbgSubset]++;
22669 /* Update the counters */
22675 return (numAllocRbs);
22679 * @brief Determines RA type 0 mask from given RB index.
22683 * Function : rgSCHCmnGetRaType0Mask
22686 * Processing Steps:
22687 * - Determine RA Type 0 mask for given rbIdex and rbg size.
22689 * @param[in] uint8_t rbIdx
22690 * @param[in] uint8_t rbgSize
22691 * @return uint32_t RA type 0 mask
22694 static uint32_t rgSCHCmnGetRaType0Mask
22700 static uint32_t rgSCHCmnGetRaType0Mask(rbIdx, rbgSize)
/* Maps an RB index to its single-bit RA type 0 (RBG bitmap) mask:
 * the owning RBG is rbIdx / rbgSize. */
22706 uint32_t rbgPosInRbgMask = 0;
22708 rbg = rbIdx/rbgSize;
22709 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22711 return (rbgPosInRbgMask);
22714 #ifdef RGSCH_SPS_UNUSED
22716 * @brief Determines RA type 1 mask from given RB index.
22720 * Function : rgSCHCmnGetRaType1Mask
22723 * Processing Steps:
22724 * - Determine RA Type 1 mask for given rbIdex and rbg size.
22726 * @param[in] uint8_t rbIdx
22727 * @param[in] uint8_t rbgSize
22728 * @param[out] uint8_t *type1Subset
22729 * @return uint32_t RA type 1 mask
22732 static uint32_t rgSCHCmnGetRaType1Mask
22736 uint8_t *type1Subset
22739 static uint32_t rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, type1Subset)
22742 uint8_t *type1Subset;
/* Maps an RB index to its RA type 1 subset (returned via *type1Subset)
 * and its single-bit position within that subset's mask. Subset = owning
 * RBG modulo rbgSize; position = RBG's rank within the subset times
 * rbgSize, plus the RB's offset inside the RBG. */
22745 uint8_t rbg, rbgSubset, rbgInSubset, offset, rbInSubset;
22746 uint32_t rbPosInSubset;
22748 rbg = rbIdx/rbgSize;
22749 rbgSubset = rbg % rbgSize;
22750 rbgInSubset = rbg/rbgSize;
22751 offset = rbIdx % rbgSize;
22752 rbInSubset = rbgInSubset * rbgSize + offset;
22753 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
22755 *type1Subset = rbgSubset;
22756 return (rbPosInSubset);
22758 #endif /* RGSCH_SPS_UNUSED */
22760 * @brief Determines RA type 2 mask from given RB index.
22764 * Function : rgSCHCmnGetRaType2Mask
22767 * Processing Steps:
22768 * - Determine RA Type 2 mask for given rbIdx and rbg size.
22770 * @param[in] uint8_t rbIdx
22771 * @param[out] uint8_t *maskIdx
22772 * @return uint32_t RA type 2 mask
22775 static uint32_t rgSCHCmnGetRaType2Mask
22781 static uint32_t rgSCHCmnGetRaType2Mask(rbIdx, maskIdx)
/* Maps an RB index into the 32-bit-word RA type 2 mask array: word index
 * is returned via *maskIdx (rbIdx / 32), the single-bit mask for position
 * rbIdx % 32 is the return value. */
22786 uint32_t rbPosInType2;
22788 *maskIdx = rbIdx / 32;
22789 rbPosInType2 = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbIdx % 32);
22791 return (rbPosInType2);
22795 * @brief Performs resource allocation for a non-SPS UE in SPS bandwidth
22799 * Function : rgSCHCmnAllocUeInSpsBw
22802 * Processing Steps:
22803 * - Determine allocation for the UE.
22804 * - Use resource allocation type 0, 1 and 2 for allocation
22805 * within maximum SPS bandwidth.
22807 * @param[in] RgSchDlSf *dlSf
22808 * @param[in] RgSchCellCb *cell
22809 * @param[in] RgSchUeCb *ue
22810 * @param[in] RgSchDlRbAlloc *rbAllocInfo
22811 * @param[in] Bool isPartialAlloc
22817 Bool rgSCHCmnAllocUeInSpsBw
22822 RgSchDlRbAlloc *rbAllocInfo,
22823 Bool isPartialAlloc
22826 Bool rgSCHCmnAllocUeInSpsBw(dlSf, cell, ue, rbAllocInfo, isPartialAlloc)
22830 RgSchDlRbAlloc *rbAllocInfo;
22831 Bool isPartialAlloc;
/* Tries to serve a non-SPS UE out of the reserved SPS bandwidth: checks
 * remaining SPS BW, dispatches to the RA type 0/1/2 allocator matching
 * rbAllocInfo->raType, acquires a PDCCH, computes the TB byte sizes from
 * the allocated RB count, and commits the allocation masks and SPS BW
 * usage into the subframe. Return value indicates whether the caller may
 * keep trying (e.g. TRUE on PDCCH shortage, since another UE may still
 * get one). */
22834 uint8_t rbgSize = cell->rbgSize;
22835 uint8_t numAllocRbs = 0;
22836 uint8_t numAllocRbgs = 0;
22837 uint8_t rbStart = 0;
22838 uint8_t idx, noLyr, iTbs;
22839 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22840 RgSchDlSfAllocInfo *dlSfAlloc = &rbAllocInfo->dlSf->dlSfAllocInfo;
22841 RgSchBwRbgInfo *spsRbgInfo = &cell->spsBwRbgInfo;
22843 /* SPS_FIX : Check if this Hq proc is scheduled */
22844 if ((0 == rbAllocInfo->tbInfo[0].schdlngForTb) &&
22845 (0 == rbAllocInfo->tbInfo[1].schdlngForTb))
22850 /* Check if the requirement can be accomodated in SPS BW */
22851 if (dlSf->spsAllocdBw == spsRbgInfo->numRbs)
22853 /* SPS Bandwidth has been exhausted: no further allocations possible */
22856 if (!isPartialAlloc)
/* Full allocation requested: bail out early if the request overflows the
 * remaining SPS bandwidth. */
22858 if((dlSf->spsAllocdBw + rbAllocInfo->rbsReq) > spsRbgInfo->numRbs)
22864 /* Perform allocation for RA type 0 if rbsReq is multiple of RBG size (also
22865 * if RBG size = 1) */
22866 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* NOTE(review): this rounds rbsReq UP to the next RBG boundary, and adds
 * a full extra RBG when rbsReq is already a multiple of rbgSize — confirm
 * the exact-multiple case is intended. */
22868 rbAllocInfo->rbsReq += (rbgSize - rbAllocInfo->rbsReq % rbgSize);
22869 numAllocRbgs = rgSCHCmnDlRaType0Alloc(dlSfAlloc,
22870 rbAllocInfo->rbsReq, spsRbgInfo, &numAllocRbs,
22871 &rbAllocInfo->resAllocInfo, isPartialAlloc);
22873 #ifdef RGSCH_SPS_UNUSED
22874 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
22876 /* If no RBS could be allocated, attempt RA TYPE 1 */
22878 numAllocRbs = rgSCHCmnDlRaType1Alloc(dlSfAlloc,
22879 rbAllocInfo->rbsReq, spsRbgInfo, (uint8_t)dlSfAlloc->nxtRbgSubset,
22880 &rbAllocInfo->allocInfo.raType1.rbgSubset,
22881 &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* Round-robin the starting subset so successive UEs spread across subsets. */
22885 dlSfAlloc->nxtRbgSubset =
22886 (rbAllocInfo->allocInfo.raType1.rbgSubset + 1 ) % rbgSize;
22890 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22892 numAllocRbs = rgSCHCmnDlRaType2Alloc(dlSfAlloc,
22893 rbAllocInfo->rbsReq, spsRbgInfo,
22894 &rbStart, &rbAllocInfo->resAllocInfo, isPartialAlloc);
22901 if (!(rbAllocInfo->pdcch =
22902 rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi,\
22903 rbAllocInfo->dciFormat, FALSE)))
22905 /* Note: Returning TRUE since PDCCH might be available for another UE */
22909 /* Update Tb info for each scheduled TB */
/* TB size lookup: rgTbSzTbl is indexed [layers-1][iTbs][numRbs-1], bits
 * converted to bytes by /8. */
22910 iTbs = rbAllocInfo->tbInfo[0].iTbs;
22911 noLyr = rbAllocInfo->tbInfo[0].noLyr;
22912 rbAllocInfo->tbInfo[0].bytesAlloc =
22913 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
22915 if (rbAllocInfo->tbInfo[1].schdlngForTb)
22917 iTbs = rbAllocInfo->tbInfo[1].iTbs;
22918 noLyr = rbAllocInfo->tbInfo[1].noLyr;
22919 rbAllocInfo->tbInfo[1].bytesAlloc =
22920 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
22923 /* Update rbAllocInfo with the allocation information */
22924 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
22926 rbAllocInfo->allocInfo.raType0.dlAllocBitMask =
22927 rbAllocInfo->resAllocInfo.raType0Mask;
22928 rbAllocInfo->allocInfo.raType0.numDlAlloc = numAllocRbgs;
22930 #ifdef RGSCH_SPS_UNUSED
22931 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
22933 rbAllocInfo->allocInfo.raType1.dlAllocBitMask =
22934 rbAllocInfo->resAllocInfo.raType1Mask[rbAllocInfo->allocInfo.raType1.rbgSubset];
22935 rbAllocInfo->allocInfo.raType1.numDlAlloc = numAllocRbs;
22936 rbAllocInfo->allocInfo.raType1.shift = 0;
22939 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22941 rbAllocInfo->allocInfo.raType2.isLocal = TRUE;
22942 rbAllocInfo->allocInfo.raType2.rbStart = rbStart;
22943 rbAllocInfo->allocInfo.raType2.numRb = numAllocRbs;
22946 rbAllocInfo->rbsAlloc = numAllocRbs;
22947 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
22949 /* Update allocation masks for RA types 0, 1 and 2 in DL SF */
22951 /* Update type 0 allocation mask */
22952 dlSfAlloc->raType0Mask |= rbAllocInfo->resAllocInfo.raType0Mask;
22953 #ifdef RGSCH_SPS_UNUSED
22954 /* Update type 1 allocation masks */
22955 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
22957 dlSfAlloc->raType1Mask[idx] |= rbAllocInfo->resAllocInfo.raType1Mask[idx];
22958 dlSfAlloc->raType1UsedRbs[idx] +=
22959 rbAllocInfo->resAllocInfo.raType1UsedRbs[idx];
22962 /* Update type 2 allocation masks */
22963 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
22965 dlSfAlloc->raType2Mask[idx] |= rbAllocInfo->resAllocInfo.raType2Mask[idx];
/* Commit the consumed SPS bandwidth for this subframe. */
22968 dlSf->spsAllocdBw += numAllocRbs;
22972 /***********************************************************
22974 * Func : rgSCHCmnDlGetBestFitHole
22977 * Desc : Converts the best fit hole into allocation and returns the
22978 * allocation information.
22988 **********************************************************/
22990 static Void rgSCHCmnDlGetBestFitHole
22992 uint32_t *allocMask,
22993 uint8_t numMaskRbs,
22994 uint32_t *crntAllocMask,
22996 uint8_t *allocStart,
22997 uint8_t *allocNumRbs,
22998 Bool isPartialAlloc
23001 static Void rgSCHCmnDlGetBestFitHole (allocMask, numMaskRbs,
23002 crntAllocMask, rbsReq, allocStart, allocNumRbs, isPartialAlloc)
23003 uint32_t *allocMask;
23004 uint8_t numMaskRbs;
23005 uint32_t *crntAllocMask;
23007 uint8_t *allocStart;
23008 uint8_t *allocNumRbs;
23009 Bool isPartialAlloc;
23012 uint8_t maskSz = (numMaskRbs + 31)/32;
23013 uint8_t maxMaskPos = (numMaskRbs % 32);
23014 uint8_t maskIdx, maskPos;
23015 uint8_t numAvailRbs = 0;
23016 uint8_t bestAvailNumRbs = 0;
23017 S8 bestStartPos = -1;
23019 uint32_t tmpMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
23020 uint32_t bestMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
23022 *allocNumRbs = numAvailRbs;
23025 for (maskIdx = 0; maskIdx < maskSz; ++maskIdx)
23028 if (maskIdx == (maskSz - 1))
23030 if (numMaskRbs % 32)
23032 maxMaskPos = numMaskRbs % 32;
23035 for (maskPos = 0; maskPos < maxMaskPos; ++maskPos)
23037 if (!(allocMask[maskIdx] & (1 << (31 - maskPos))))
23039 tmpMask[maskIdx] |= (1 << (31 - maskPos));
23040 if (startPos == -1)
23042 startPos = maskIdx * 32 + maskPos;
23045 if (numAvailRbs == rbsReq)
23047 *allocStart = (uint8_t)startPos;
23048 *allocNumRbs = rbsReq;
23054 if (numAvailRbs > bestAvailNumRbs)
23056 bestAvailNumRbs = numAvailRbs;
23057 bestStartPos = startPos;
23058 memcpy(bestMask, tmpMask, 4 * sizeof(uint32_t));
23062 memset(tmpMask, 0, 4 * sizeof(uint32_t));
23065 if (*allocNumRbs == rbsReq)
23071 if (*allocNumRbs == rbsReq)
23073 /* Convert the hole into allocation */
23074 memcpy(crntAllocMask, tmpMask, 4 * sizeof(uint32_t));
23079 if (bestAvailNumRbs && isPartialAlloc)
23081 /* Partial allocation could have been done */
23082 *allocStart = (uint8_t)bestStartPos;
23083 *allocNumRbs = bestAvailNumRbs;
23084 /* Convert the hole into allocation */
23085 memcpy(crntAllocMask, bestMask, 4 * sizeof(uint32_t));
23091 #endif /* LTEMAC_SPS */
23093 /***************************************************************************
23095 * NON-DLFS Allocation functions
23097 * *************************************************************************/
23101 * @brief Function to find out code rate
23105 * Function : rgSCHCmnFindCodeRate
23107 * Processing Steps:
23109 * @param[in] RgSchCellCb *cell
23110 * @param[in] RgSchDlSf *dlSf
23111 * @param[in,out] RgSchDlRbAlloc *allocInfo
/* Purpose: re-derive the MCS (Imcs) and the allocated byte count after the RB
 * allocation has been adjusted, by searching for the highest TB size that does
 * not exceed the originally allocated bytes (per the surviving comment below).
 * NOTE(review): the extraction of this chunk dropped nearly all of this
 * function's body — only fragments of the ANSI and K&R signatures and one
 * comment remain.  Restore the body from the upstream rg_sch_cmn.c before
 * attempting to build this file. */
23116 static Void rgSCHCmnFindCodeRate
23120 RgSchDlRbAlloc *allocInfo,
23124 static Void rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,idx)
23127 RgSchDlRbAlloc *allocInfo;
23136 /* Adjust the Imcs and bytes allocated also with respect to the adjusted
23137 RBs - Here we will find out the Imcs by identifying first Highest
23138 number of bits compared to the original bytes allocated. */
23140 * @brief Adjust IMCS according to tbSize and ITBS
23144 * Function : rgSCHCmnNonDlfsPbchTbImcsAdj
23146 * Processing Steps:
23147 * - Adjust Imcs according to tbSize and ITBS.
23149 * @param[in,out] RgSchDlRbAlloc *allocInfo
23150 * @param[in] uint8_t *idx
23154 static Void rgSCHCmnNonDlfsPbchTbImcsAdj
23157 RgSchDlRbAlloc *allocInfo,
23162 static Void rgSCHCmnNonDlfsPbchTbImcsAdj(cell,allocInfo, idx, rbsReq)
23164 RgSchDlRbAlloc *allocInfo;
23169 uint8_t noLyrs = 0;
23171 uint32_t origBytesReq;
23172 uint8_t noRbgs = 0;
23174 RgSchDlSf *dlSf = allocInfo->dlSf;
23176 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
23177 noLyrs = allocInfo->tbInfo[idx].noLyr;
23179 if((allocInfo->raType == RG_SCH_CMN_RA_TYPE0))
23181 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + dlSf->lstRbgDfct), cell->rbgSize);
23182 noRbs = (noRbgs * cell->rbgSize) - dlSf->lstRbgDfct;
23186 noRbs = allocInfo->rbsReq;
23189 /* This line will help in case if tbs is zero and reduction in MCS is not possible */
23190 if (allocInfo->rbsReq == 0 )
23194 origBytesReq = rgTbSzTbl[noLyrs - 1][tbs][rbsReq - 1]/8;
23196 /* Find out the ITbs & Imcs by identifying first Highest
23197 number of bits compared to the original bytes allocated.*/
23200 if(((rgTbSzTbl[noLyrs - 1][0][noRbs - 1])/8) < origBytesReq)
23202 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyrs - 1], tbs);
23203 while(((rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1])/8) > origBytesReq)
23212 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1]/8;
23213 allocInfo->tbInfo[idx].iTbs = tbs;
23214 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
23219 /* Added function to adjust TBSize */
23221 * @brief Function to adjust the tbsize in case of subframe 0 & 5 when
23222 * we were not able to do RB alloc adjustment by adding extra required Rbs
23226 * Function : rgSCHCmnNonDlfsPbchTbSizeAdj
23228 * Processing Steps:
23230 * @param[in,out] RgSchDlRbAlloc *allocInfo
23231 * @param[in] uint8_t numOvrlapgPbchRb
23232 * @param[in] uint8_t idx
23233 * @param[in] uint8_t pbchSsRsSym
23237 static Void rgSCHCmnNonDlfsPbchTbSizeAdj
23239 RgSchDlRbAlloc *allocInfo,
23240 uint8_t numOvrlapgPbchRb,
23241 uint8_t pbchSsRsSym,
23246 static Void rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,idx,bytesReq)
23247 RgSchDlRbAlloc *allocInfo;
23248 uint8_t numOvrlapgPbchRb;
23249 uint8_t pbchSsRsSym;
23254 uint32_t reducedTbs = 0;
23255 uint8_t noLyrs = 0;
23258 noLyrs = allocInfo->tbInfo[idx].noLyr;
23260 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
23262 reducedTbs = bytesReq - (((uint32_t)numOvrlapgPbchRb * (uint32_t)pbchSsRsSym * 6)/8);
23264 /* find out the ITbs & Imcs by identifying first Highest
23265 number of bits compared with reduced bits considering the bits that are
23266 reserved for PBCH/PSS/SSS */
23267 if(((rgTbSzTbl[noLyrs - 1][0][allocInfo->rbsReq - 1])/8) < reducedTbs)
23269 while(((rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1])/8) > reducedTbs)
23278 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1]/8;
23279 allocInfo->tbInfo[idx].iTbs = tbs;
23280 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
23285 /* Added this function to find the number of additional RBs available in the BW */
23287 * @brief Function to find out how many additional rbs are available
23288 * in the entire bw which can be allocated to a UE
23291 * Function : rgSCHCmnFindNumAddtlRbsAvl
23293 * Processing Steps:
23294 * - Calculates number of additional rbs available
23296 * @param[in] RgSchCellCb *cell
23297 * @param[in] RgSchDlSf *dlSf
23298 * @param[in,out] RgSchDlRbAlloc *allocInfo
23299 * @param[out] uint8_t addtlRbsAvl
23303 static uint8_t rgSCHCmnFindNumAddtlRbsAvl
23307 RgSchDlRbAlloc *allocInfo
23310 static uint8_t rgSCHCmnFindNumAddtlRbsAvl(cell,dlSf,allocInfo)
23313 RgSchDlRbAlloc *allocInfo;
23316 uint8_t addtlRbsAvl = 0;
23319 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
23321 addtlRbsAvl = (((dlSf->type0End - dlSf->type2End + 1)*\
23322 cell->rbgSize) - dlSf->lstRbgDfct) - allocInfo->rbsReq;
23324 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
23326 addtlRbsAvl = (dlSf->bw - dlSf->bwAlloced) - allocInfo->rbsReq;
23329 return (addtlRbsAvl);
23332 /* Added this function to find num of ovrlapping PBCH rb*/
23334 * @brief Function to find out how many of the requested RBs are
23335 * falling in the center 6 RBs of the downlink bandwidth.
23338 * Function : rgSCHCmnFindNumPbchOvrlapRbs
23340 * Processing Steps:
23341 * - Calculates number of overlapping rbs
23343 * @param[in] RgSchCellCb *cell
23344 * @param[in] RgSchDlSf *dlSf
23345 * @param[in,out] RgSchDlRbAlloc *allocInfo
23346 * @param[out] uint8_t* numOvrlapgPbchRb
23350 static Void rgSCHCmnFindNumPbchOvrlapRbs
23354 RgSchDlRbAlloc *allocInfo,
23355 uint8_t *numOvrlapgPbchRb
23358 static Void rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,numOvrlapgPbchRb)
23361 RgSchDlRbAlloc *allocInfo;
23362 uint8_t *numOvrlapgPbchRb;
23365 *numOvrlapgPbchRb = 0;
23366 /*Find if we have already crossed the start boundary for PBCH 6 RBs,
23367 * if yes then lets find the number of RBs which are getting overlapped
23368 * with this allocation.*/
23369 if(dlSf->bwAlloced <= (cell->pbchRbStart))
23371 /*We have not crossed the start boundary of PBCH RBs. Now we need
23372 * to know that if take this allocation then how much PBCH RBs
23373 * are overlapping with this allocation.*/
23374 /* Find out the overlapping RBs in the centre 6 RBs */
23375 if((dlSf->bwAlloced + allocInfo->rbsReq) > cell->pbchRbStart)
23377 *numOvrlapgPbchRb = (dlSf->bwAlloced + allocInfo->rbsReq) - (cell->pbchRbStart);
23378 if(*numOvrlapgPbchRb > 6)
23379 *numOvrlapgPbchRb = 6;
23382 else if ((dlSf->bwAlloced > (cell->pbchRbStart)) &&
23383 (dlSf->bwAlloced < (cell->pbchRbEnd)))
23385 /*We have already crossed the start boundary of PBCH RBs.We need to
23386 * find that if we take this allocation then how much of the RBs for
23387 * this allocation will overlap with PBCH RBs.*/
23388 /* Find out the overlapping RBs in the centre 6 RBs */
23389 if(dlSf->bwAlloced + allocInfo->rbsReq < (cell->pbchRbEnd))
23391 /*If we take this allocation then also we are not crossing the
23392 * end boundary of PBCH 6 RBs.*/
23393 *numOvrlapgPbchRb = allocInfo->rbsReq;
23397 /*If we take this allocation then we are crossing the
23398 * end boundary of PBCH 6 RBs.*/
23399 *numOvrlapgPbchRb = (cell->pbchRbEnd) - dlSf->bwAlloced;
23406 * @brief Performs RB allocation adjustment if the requested RBs are
23407 * falling in the center 6 RBs of the downlink bandwidth.
23410 * Function : rgSCHCmnNonDlfsPbchRbAllocAdj
23412 * Processing Steps:
23413 * - Allocate consecutively available RBs.
23415 * @param[in] RgSchCellCb *cell
23416 * @param[in,out] RgSchDlRbAlloc *allocInfo
23417 * @param[in] uint8_t pbchSsRsSym
/* Purpose: when an allocation overlaps the centre-6-RB PBCH/PSS/SSS region in
 * subframes 0/5, grow the RB request to compensate for the reserved symbols,
 * and where growth is not (fully) possible fall back to IMCS/TB-size
 * adjustment via rgSCHCmnNonDlfsPbchTbImcsAdj / rgSCHCmnNonDlfsPbchTbSizeAdj.
 * NOTE(review): this extract dropped several lines of this function
 * (signature fragments, braces, the condition line before "23751", and the
 * declarations of totSym/divResult/isBcchPcch/tbs/noLyr/bytesReq); the text
 * below is kept byte-identical — restore from upstream before building. */
23421 static Void rgSCHCmnNonDlfsPbchRbAllocAdj
23424 RgSchDlRbAlloc *allocInfo,
23425 uint8_t pbchSsRsSym,
23429 static Void rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo,pbchSsRsSym)
23431 RgSchDlRbAlloc *allocInfo;
23432 uint8_t pbchSsRsSym;
23436 RgSchDlSf *dlSf = allocInfo->dlSf;
23437 uint8_t numOvrlapgPbchRb = 0;
23438 uint8_t numOvrlapgAdtlPbchRb = 0;
23440 uint8_t addtlRbsReq = 0;
23441 uint8_t moreAddtlRbsReq = 0;
23442 uint8_t addtlRbsAdd = 0;
23443 uint8_t moreAddtlRbsAdd = 0;
23445 uint8_t origRbsReq = 0;
/* Remember the original request so IMCS adjustment can be based on it */
23453 origRbsReq = allocInfo->rbsReq;
23454 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23456 totSym = (cell->isCpDlExtend) ? RGSCH_TOT_NUM_SYM_EXTCP : RGSCH_TOT_NUM_SYM_NORCP;
23458 /* Additional RBs are allocated by considering the loss due to
23459 the reserved symbols for CFICH, PBCH, PSS, SSS and cell specific RS */
/* Ceiling of (overlap RBs * reserved symbols) / total symbols */
23461 divResult = (numOvrlapgPbchRb * pbchSsRsSym)/totSym;
23462 if((numOvrlapgPbchRb * pbchSsRsSym) % totSym)
23466 addtlRbsReq = divResult;
23468 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, addtlRbsReq, addtlRbsAdd)
23470 /*Now RBs requires is original requested RBs + these additional RBs to make
23471 * up for PSS/SSS/BCCH.*/
23472 allocInfo->rbsReq = allocInfo->rbsReq + addtlRbsAdd;
23474 /*Check if with these additional RBs we have taken up, these are also falling
23475 * under PBCH RBs range, if yes then we would need to account for
23476 * PSS/BSS/BCCH for these additional RBs too.*/
23477 if(addtlRbsAdd && ((dlSf->bwAlloced + allocInfo->rbsReq - addtlRbsAdd) < (cell->pbchRbEnd)))
23479 if((dlSf->bwAlloced + allocInfo->rbsReq) <= (cell->pbchRbEnd))
23481 /*With additional RBs taken into account, we are not crossing the
23482 * PBCH RB end boundary.Thus here we need to account just for
23483 * overlapping PBCH RBs for these additional RBs.*/
23484 divResult = (addtlRbsAdd * pbchSsRsSym)/totSym;
23485 if((addtlRbsAdd * pbchSsRsSym) % totSym)
23490 moreAddtlRbsReq = divResult;
23492 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
23494 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
23499 /*Here we have crossed the PBCH RB end boundary, thus we need to take
23500 * into account the overlapping RBs for additional RBs which will be
23501 * subset of addtlRbs.*/
23502 numOvrlapgAdtlPbchRb = (cell->pbchRbEnd) - ((dlSf->bwAlloced + allocInfo->rbsReq) - addtlRbsAdd);
23504 divResult = (numOvrlapgAdtlPbchRb * pbchSsRsSym)/totSym;
23505 if((numOvrlapgAdtlPbchRb * pbchSsRsSym) % totSym)
23510 moreAddtlRbsReq = divResult;
23512 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
23514 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
/* BCCH/PCCH: MCS is fixed by SIB scheduling, so no RB/Imcs adjustment */
23517 if (isBcchPcch == TRUE)
23522 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
23525 /* This case might be for Imcs value 6 and NPrb = 1 case - Not
23526 Adjusting either RBs or Imcs or Bytes Allocated */
23527 allocInfo->rbsReq = allocInfo->rbsReq - addtlRbsAdd - moreAddtlRbsAdd;
23529 else if(tbs && ((0 == addtlRbsAdd) && (moreAddtlRbsAdd == 0)))
23531 /*In case of a situation where we the entire bandwidth is already occupied
23532 * and we dont have room to add additional Rbs then in order to decrease the
23533 * code rate we reduce the tbsize such that we reduce the present calculated
23534 * tbsize by number of bytes that would be occupied by PBCH/PSS/SSS in overlapping
23535 * rbs and find the nearest tbsize which would be less than this deduced value*/
23537 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23539 noLyr = allocInfo->tbInfo[0].noLyr;
23540 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyr - 1], tbs);
23541 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
23543 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,bytesReq);
23545 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23547 noLyr = allocInfo->tbInfo[1].noLyr;
23548 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
23549 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,bytesReq);
23553 else if(tbs && ((addtlRbsAdd != addtlRbsReq) ||
23554 (addtlRbsAdd && (moreAddtlRbsReq != moreAddtlRbsAdd))))
23556 /*In case of a situation where we were not able to add required number of
23557 * additional RBs then we adjust the Imcs based on original RBs requested.
23558 * Doing this would compensate for the few extra Rbs we have added but inorder
23559 * to compensate for number of RBS we couldnt add we again do the TBSize adjustment*/
23561 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
23563 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23565 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
/* Shrink the overlap by the RBs we DID manage to add, then size down the TB */
23568 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23569 numOvrlapgPbchRb = numOvrlapgPbchRb - (addtlRbsAdd + moreAddtlRbsAdd);
23571 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,allocInfo->tbInfo[0].bytesReq);
23573 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23575 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,allocInfo->tbInfo[1].bytesReq);
23581 /*We hit this code when we were able to add the required additional RBS
23582 * hence we should adjust the IMcs based on orignals RBs requested*/
23584 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
23586 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23588 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
23593 } /* end of rgSCHCmnNonDlfsPbchRbAllocAdj */
23597 * @brief Performs RB allocation for frequency non-selective cell.
23601 * Function : rgSCHCmnNonDlfsCmnRbAlloc
23603 * Processing Steps:
23604 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
23606 * @param[in] RgSchCellCb *cell
23607 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* Purpose: RA type 2 (contiguous) RB allocation for common channels
 * (BCCH/PCCH/RAR) on a frequency-non-selective cell: caps the request to the
 * remaining bandwidth, allocates a common-search-space PDCCH, fills in the
 * DCI format 1A grant, and (for subframes 0/5) invokes the PBCH/PSS/SSS
 * overlap adjustment.
 * NOTE(review): this extract collapsed the LTEMAC_SPS and non-SPS variants of
 * this function together (the #ifdef/#else guard lines were dropped — e.g.
 * both the SPS-aware "23650" and plain "23653" full-bandwidth checks appear
 * in sequence), and many brace/return lines are missing.  The text below is
 * kept byte-identical — restore from upstream before building. */
23613 static S16 rgSCHCmnNonDlfsCmnRbAlloc
23616 RgSchDlRbAlloc *allocInfo
23619 static S16 rgSCHCmnNonDlfsCmnRbAlloc(cell, allocInfo)
23621 RgSchDlRbAlloc *allocInfo;
23627 uint8_t pbchSsRsSym = 0;
23628 uint8_t pbchFrame = 0;
23630 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
23632 RgSchDlSf *dlSf = allocInfo->dlSf;
23634 uint8_t rbStart = 0;
23635 uint8_t spsRbsAlloc = 0;
23636 RgSchDlSfAllocInfo *dlSfAlloc = &allocInfo->dlSf->dlSfAllocInfo;
/* Common channels are always transmitted on a single layer */
23639 allocInfo->tbInfo[0].noLyr = 1;
23642 /* Note: Initialize the masks to 0, this might not be needed since alloInfo
23643 * is initialized to 0 at the beginning of allocation */
23644 allocInfo->resAllocInfo.raType0Mask = 0;
23645 memset(allocInfo->resAllocInfo.raType1Mask, 0,
23646 RG_SCH_NUM_RATYPE1_32BIT_MASK * sizeof (uint32_t));
23647 memset(allocInfo->resAllocInfo.raType2Mask, 0,
23648 RG_SCH_NUM_RATYPE2_32BIT_MASK * sizeof (uint32_t));
/* Bail out when both SPS bandwidth and normal bandwidth are exhausted
 * (SPS build), or when the subframe bandwidth alone is exhausted */
23650 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
23651 (dlSf->bwAlloced == dlSf->bw))
23653 if(dlSf->bwAlloced == dlSf->bw)
23659 if (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced))
/* Request exceeds what is left: for Imcs < 29 shrink the request to the
 * remaining bandwidth and recompute the TB bytes accordingly */
23662 if ((allocInfo->tbInfo[0].imcs < 29) && (dlSf->bwAlloced < dlSf->bw))
23664 if(allocInfo->tbInfo[0].imcs < 29)
23667 /* set the remaining RBs for the requested UE */
23668 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
23669 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
23670 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[0][tbs][allocInfo->rbsReq - 1]/8;
23675 /* Attempt RA Type 2 allocation in SPS Bandwidth */
23676 if (dlSf->spsAllocdBw < cell->spsBwRbgInfo.numRbs)
23679 rgSCHCmnDlRaType2Alloc(dlSfAlloc,
23680 allocInfo->rbsReq, &cell->spsBwRbgInfo, &rbStart,
23681 &allocInfo->resAllocInfo, FALSE);
23682 /* rbsAlloc assignment moved from line 16671 to here to avoid
23683 * compilation error. Recheck */
23684 dlSf->spsAllocdBw += spsRbsAlloc;
23687 #endif /* LTEMAC_SPS */
23695 /* Update allocation information */
23696 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
23697 if (allocInfo->pdcch == NULLP)
/* Common channels always use DCI format 1A from the common search space */
23701 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23702 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
23703 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23704 allocInfo->allocInfo.raType2.isLocal = TRUE;
23708 allocInfo->allocInfo.raType2.rbStart = rbStart;
23709 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23710 allocInfo->rbsAlloc = allocInfo->rbsReq;
/* Subframes 0 and 5 carry PBCH/PSS/SSS: compute the reserved-RE count so
 * the allocation can be adjusted for the overlap */
23721 if(!(dlSf->sfNum == 5))
23723 /* case for subframes 1 to 9 except 5 */
23725 allocInfo->allocInfo.raType2.rbStart = rbStart;
23727 /*Fix for ccpu00123918*/
23728 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23733 pbchFrame = 1; /* case for subframe 5 */
23734 /* In subframe 5, symbols are reserved for PSS and SSS and CFICH
23735 and Cell Specific Reference Signals */
23736 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PSS_SSS_SYM) *
23737 RGSCH_NUM_SC_IN_RB + cell->numCellRSPerSf);
23743 /* In subframe 0, symbols are reserved for PSS, SSS, PBCH, CFICH and
23744 and Cell Specific Reference signals */
23745 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PBCH_SYM +
23746 RGSCH_NUM_PSS_SSS_SYM) * RGSCH_NUM_SC_IN_RB +
23747 cell->numCellRSPerSf);
23748 } /* end of outer else */
/* NOTE(review): the leading condition of this if (presumably pbchFrame &&)
 * was dropped by the extraction — line 23750 is missing */
23751 (((dlSf->bwAlloced + allocInfo->rbsReq) - cell->pbchRbStart) > 0)&&
23752 (dlSf->bwAlloced < cell->pbchRbEnd))
23754 if(allocInfo->tbInfo[0].imcs < 29)
23756 rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo, pbchSsRsSym, TRUE);
23768 /*Fix for ccpu00123918*/
23769 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23770 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23771 allocInfo->rbsAlloc = allocInfo->rbsReq;
23773 /* LTE_ADV_FLAG_REMOVED_START */
/* With SFR enabled, type 2 bookkeeping goes through the SFR-aware updater */
23775 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
23777 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
23778 allocInfo->allocInfo.raType2.rbStart, \
23779 allocInfo->allocInfo.raType2.numRb);
23784 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
23785 allocInfo->allocInfo.raType2.rbStart, \
23786 allocInfo->allocInfo.raType2.numRb);
23792 /* LTE_ADV_FLAG_REMOVED_END */
23793 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* SPS build: merge this allocation's masks into the subframe-level masks */
23800 /* Update type 0, 1 and 2 masks */
23801 dlSfAlloc->raType0Mask |= allocInfo->resAllocInfo.raType0Mask;
23802 #ifdef RGSCH_SPS_UNUSED
23803 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
23805 dlSfAlloc->raType1Mask[idx] |=
23806 allocInfo->resAllocInfo.raType1Mask[idx];
23807 dlSfAlloc->raType1UsedRbs[idx] +=
23808 allocInfo->resAllocInfo.raType1UsedRbs[idx];
23811 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
23813 dlSfAlloc->raType2Mask[idx] |=
23814 allocInfo->resAllocInfo.raType2Mask[idx];
23824 * @brief Performs RB allocation for frequency non-selective cell.
23828 * Function : rgSCHCmnNonDlfsCmnRbAllocRar
23830 * Processing Steps:
23831 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
23833 * @param[in] RgSchCellCb *cell
23834 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* Purpose: RB allocation for RAR on a frequency-non-selective cell.  The LTE
 * path allocates a contiguous RA type 2 grant with DCI format 1A from the
 * common search space; the 5GTF path allocates VRBGs from the beam info and
 * builds a DCI format B1 grant.
 * NOTE(review): this extract collapsed the LTE and 5GTF (#ifdef RG_5GTF)
 * variants of this function together — the preprocessor guard lines were
 * dropped.  Text kept byte-identical; restore from upstream before building.
 * NOTE(review): the K&R definition line below names rgSCHCmnNonDlfsCmnRbAlloc
 * while the ANSI declaration names rgSCHCmnNonDlfsCmnRbAllocRar — looks like
 * an upstream copy-paste slip in the pre-ANSI variant; confirm upstream. */
23840 static S16 rgSCHCmnNonDlfsCmnRbAllocRar
23843 RgSchDlRbAlloc *allocInfo
23846 static S16 rgSCHCmnNonDlfsCmnRbAlloc(cell, allocInfo)
23848 RgSchDlRbAlloc *allocInfo;
23851 RgSchDlSf *dlSf = allocInfo->dlSf;
/* No room left in this subframe: fail the allocation */
23854 if(dlSf->bwAlloced == dlSf->bw)
23859 allocInfo->tbInfo[0].noLyr = 1;
23861 /* Update allocation information */
23862 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
23863 if (allocInfo->pdcch == NULLP)
23867 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23868 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
23869 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23870 allocInfo->allocInfo.raType2.isLocal = TRUE;
23872 /*Fix for ccpu00123918*/
23873 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23874 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23875 allocInfo->rbsAlloc = allocInfo->rbsReq;
23877 /* LTE_ADV_FLAG_REMOVED_END */
23878 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* 5GTF variant: PDCCH with aggregation level 13 and DCI format B1 */
23881 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, NULLP, dlSf, 13, TFU_DCI_FORMAT_B1, FALSE);
23882 if (allocInfo->pdcch == NULLP)
23886 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
23887 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
23889 printf("5GTF_ERROR vrbg allocated > 25\n");
/* Grant the requested VRBGs starting at the beam's next free VRBG */
23893 allocInfo->tbInfo[0].cmnGrnt.vrbgStart = beamInfo->vrbgStart;
23894 allocInfo->tbInfo[0].cmnGrnt.numVrbg = allocInfo->vrbgReq;
23896 /* Update allocation information */
23897 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23899 allocInfo->tbInfo[0].cmnGrnt.xPDSCHRange = 1;
23900 allocInfo->tbInfo[0].cmnGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
23901 allocInfo->tbInfo[0].cmnGrnt.vrbgStart, allocInfo->tbInfo[0].cmnGrnt.numVrbg);
23903 allocInfo->tbInfo[0].cmnGrnt.rbStrt = (allocInfo->tbInfo[0].cmnGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
23904 allocInfo->tbInfo[0].cmnGrnt.numRb = (allocInfo->tbInfo[0].cmnGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG bookkeeping past this grant */
23906 beamInfo->vrbgStart += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
23907 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
23908 allocInfo->tbInfo[0].cmnGrnt.rv = 0;
23909 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23912 printf("\n[%s],allocInfo->tbInfo[0].bytesAlloc:%u,vrbgReq:%u\n",
23913 __func__,allocInfo->tbInfo[0].bytesAlloc,allocInfo->vrbgReq);
23919 /* LTE_ADV_FLAG_REMOVED_START */
23922 * @brief To check if DL BW available for non-DLFS allocation.
23926 * Function : rgSCHCmnNonDlfsSFRBwAvlbl
23928 * Processing Steps:
23929 * - Determine availability based on RA Type.
23931 * @param[in] RgSchCellCb *cell
23932 * @param[in] RgSchDlSf *dlSf
23933 * @param[in] RgSchDlRbAlloc *allocInfo
/* Purpose: SFR (soft frequency reuse) variant of the non-DLFS bandwidth
 * availability check.  Walks the cell-centre (CC) and cell-edge (CE) pool
 * lists of the subframe to find a pool that can satisfy allocInfo->rbsReq for
 * the given RA type; a CC UE may spill into the CE pool, but a CE UE may not
 * use CC pools.  On partial availability for a new transmission, rbsReq and
 * tbInfo[0].bytesReq are shrunk to the best pool found.  Returns TRUE with
 * *sfrpoolInfo set on success, FALSE otherwise.
 * NOTE(review): many brace/return lines of this function were dropped by the
 * extraction, as were the declarations of l/n/l1/n1/tbs/noLyrs; text kept
 * byte-identical — restore from upstream before building. */
23941 static Bool rgSCHCmnNonDlfsSFRBwAvlbl
23944 RgSchSFRPoolInfo **sfrpoolInfo,
23946 RgSchDlRbAlloc *allocInfo,
23950 static Bool rgSCHCmnNonDlfsSFRBwAvlbl(cell, sfrpoolInfo, dlSf, allocInfo, isUeCellEdge)
23952 RgSchSFRPoolInfo **sfrpoolInfo;
23954 RgSchDlRbAlloc *allocInfo;
23962 RgSchSFRPoolInfo *sfrPool;
23963 RgSchSFRPoolInfo *sfrCEPool;
23967 RgSchSFRPoolInfo *poolWithMaxAvlblBw = NULLP;
23968 uint32_t bwAvlbl = 0;
23969 uint32_t addtnlPRBs = 0;
/* Fast-fail checks: whole subframe, CC pool, or (for a CE UE) CE pool full */
23971 if (dlSf->bw <= dlSf->bwAlloced)
23973 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
23974 "BW is fully allocated for subframe (%d) CRNTI:%d", dlSf->sfNum,allocInfo->rnti);
23978 if (dlSf->sfrTotalPoolInfo.ccBwFull == TRUE)
23980 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
23981 "BW is fully allocated for CC Pool CRNTI:%d",allocInfo->rnti);
23985 if ((dlSf->sfrTotalPoolInfo.ceBwFull == TRUE) && (isUeCellEdge))
23987 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
23988 "BW is fully allocated for CE Pool CRNTI:%d",allocInfo->rnti);
23992 /* We first check if the ue scheduled is a cell edge or cell centre and accordingly check the available
23993 memory in their pool. If the cell centre UE doesnt have Bw available in its pool, then it will check
23994 Bw availability in cell edge pool but the other way around is NOT possible. */
23997 l = &dlSf->sfrTotalPoolInfo.cePool;
24001 l = &dlSf->sfrTotalPoolInfo.ccPool;
24004 n = cmLListFirst(l);
/* ---- RA type 0 (RBG-based) pool search ---- */
24008 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
24010 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24012 /* MS_FIX for ccpu00123919 : Number of RBs in case of RETX should be same as that of initial transmission. */
24013 if(allocInfo->tbInfo[0].tbCb->txCntr)
24015 /* If RB assignment is being done for RETX. Then if reqRbs are a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
24016 * not a multiple of rbgSize then check if lsgRbgDfct exists */
24017 if (allocInfo->rbsReq % cell->rbgSize == 0)
24019 if ((sfrPool->type2End == dlSf->type2End) && dlSf->lstRbgDfct)
24021 /* In this scenario we are wasting the last RBG for this dlSf */
24022 sfrPool->type0End--;
24023 sfrPool->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24025 dlSf->lstRbgDfct = 0;
24027 /*ABHINAV To check if these variables need to be taken care of*/
24029 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24034 if (dlSf->lstRbgDfct)
24036 /* Check if type0 allocation can cater to this RETX requirement */
24037 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
24043 if (sfrPool->type2End != dlSf->type2End) /*Search again for some pool which has the END RBG of the BandWidth*/
24051 /* cannot allocate same number of required RBs */
24057 /*rg002.301 ccpu00120391 MOD condition is modified approprialtely to find if rbsReq is less than available RBS*/
24058 if(allocInfo->rbsReq <= (((sfrPool->type0End - sfrPool->type2End + 1)*\
24059 cell->rbgSize) - dlSf->lstRbgDfct))
24061 *sfrpoolInfo = sfrPool;
/* Pool cannot even hold one more RBG: move to the next pool */
24066 if (sfrPool->bw <= sfrPool->bwAlloced + cell->rbgSize)
24068 n = cmLListNext(l);
24069 /* If the ue is cell centre then it will simply check the memory available in next pool.
24070 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
24072 if((!isUeCellEdge) && (!n->node))
24074 l = &dlSf->sfrTotalPoolInfo.cePool;
24075 n = cmLListFirst(l);
24081 /* MS_FIX: Number of RBs in case of RETX should be same as that of initial transmission */
24082 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24084 /*rg002.301 ccpu00120391 MOD setting the remaining RBs for the requested UE*/
24085 allocInfo->rbsReq = (((sfrPool->type0End - sfrPool->type2End + 1)*\
24086 cell->rbgSize) - dlSf->lstRbgDfct);
24087 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24088 noLyrs = allocInfo->tbInfo[0].noLyr;
24089 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24090 *sfrpoolInfo = sfrPool;
24095 n = cmLListNext(l);
24097 /* If the ue is cell centre then it will simply check the memory available in next pool.
24098 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
24099 if((!isUeCellEdge) && (!n->node))
24101 l = &dlSf->sfrTotalPoolInfo.cePool;
24102 n = cmLListFirst(l);
/* ---- RA type 2 (contiguous) pool search ---- */
24111 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
24113 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24114 /* This is a Case where a UE was CC and had more RBs allocated than present in CE pool.
24115 In case this UE whn become CE with retx going on, then BW is not sufficient for Retx */
24116 if ((isUeCellEdge) &&
24117 (allocInfo->tbInfo[0].tbCb->txCntr != 0))
24119 if(allocInfo->rbsReq > (sfrPool->bw - sfrPool->bwAlloced))
24121 /* Adjust CE BW such that Retx alloc is successful */
24122 /* Check if merging CE with adjacent CC pool will be sufficient to process Retx */
24124 /* If no Type 0 allocations are made from this pool */
24125 if (sfrPool->type0End == (((sfrPool->poolendRB + 1) / cell->rbgSize) - 1))
24127 if (sfrPool->adjCCPool &&
24128 (sfrPool->adjCCPool->type2Start == sfrPool->poolendRB + 1) &&
24129 (allocInfo->rbsReq <= ((sfrPool->bw - sfrPool->bwAlloced) +
24130 ((sfrPool->adjCCPool->bw - sfrPool->adjCCPool->bwAlloced)))))
24132 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
24134 /* Adjusting CE Pool Info */
24135 sfrPool->bw += addtnlPRBs;
24136 sfrPool->type0End = ((sfrPool->poolendRB + addtnlPRBs + 1) /
24137 cell->rbgSize) - 1;
24139 /* Adjusting CC Pool Info */
24140 sfrPool->adjCCPool->type2Start += addtnlPRBs;
24141 sfrPool->adjCCPool->type2End = RGSCH_CEIL(sfrPool->adjCCPool->type2Start,
24143 sfrPool->adjCCPool->bw -= addtnlPRBs;
24144 *sfrpoolInfo = sfrPool;
24151 /* Check if CC pool is one of the following:
24152 * 1. |CE| + |CC "CCPool2Exists" = TRUE|
24153 * 2. |CC "CCPool2Exists" = FALSE| + |CE| + |CC "CCPool2Exists" = TRUE|
24155 if(TRUE == sfrPool->CCPool2Exists)
24157 l1 = &dlSf->sfrTotalPoolInfo.cePool;
24158 n1 = cmLListFirst(l1);
24159 sfrCEPool = (RgSchSFRPoolInfo*)(n1->node);
24160 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced))
24162 *sfrpoolInfo = sfrCEPool;
24165 else if(allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
24167 *sfrpoolInfo = sfrPool;
24170 /* Check if CE and CC boundary has unallocated prbs */
24171 else if ((sfrPool->poolstartRB == sfrPool->type2Start) &&
24172 (sfrCEPool->type0End == ((sfrCEPool->poolendRB + 1) / cell->rbgSize) - 1))
24174 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced) +
24175 (sfrPool->bw - sfrPool->bwAlloced))
24177 /* Checking if BW can be allocated partly from CE pool and partly
24180 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
24181 /* Updating CE and CC type2 parametrs based on the RBs allocated
24182 * from these pools*/
24183 sfrPool->type2Start -= addtnlPRBs;
24184 sfrPool->type2End = RGSCH_CEIL(sfrPool->type2Start, cell->rbgSize);
24185 sfrPool->bw += addtnlPRBs;
24186 if (addtnlPRBs == (sfrCEPool->bw - sfrCEPool->bwAlloced))
24188 sfrCEPool->bwAlloced = sfrCEPool->bw;
24189 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24193 sfrCEPool->bw -= addtnlPRBs;
24194 sfrCEPool->type0End = ((sfrCEPool->poolendRB + 1 - addtnlPRBs) / cell->rbgSize) - 1;
24196 *sfrpoolInfo = sfrPool;
24199 else if ( bwAvlbl <
24200 ((sfrCEPool->bw - sfrCEPool->bwAlloced) +
24201 (sfrPool->bw - sfrPool->bwAlloced)))
24203 /* All the Prbs from CE BW shall be allocated */
24204 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24206 sfrPool->type2Start = sfrCEPool->type2Start;
24207 sfrPool->bw += sfrCEPool->bw - sfrCEPool->bwAlloced;
24208 sfrCEPool->type2Start = sfrCEPool->poolendRB + 1;
24209 sfrCEPool->bwAlloced = sfrCEPool->bw;
24210 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24212 /* set the remaining RBs for the requested UE */
24213 allocInfo->rbsReq = (sfrPool->bw - sfrPool->bwAlloced);
24214 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24215 noLyrs = allocInfo->tbInfo[0].noLyr;
24216 allocInfo->tbInfo[0].bytesReq =
24217 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24218 *sfrpoolInfo = sfrPool;
24229 /* Checking if no. of RBs required can be allocated from
24231 * 1. If available return the SFR pool.
24232 * 2. Else update the RBs required parameter based on the
24233 * BW available in the pool
24234 * 3. Return FALSE if no B/W is available.
24236 if (allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
24238 *sfrpoolInfo = sfrPool;
/* New transmission with insufficient room: track the pool with the most
 * free bandwidth so the request can be shrunk to fit it */
24243 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24245 if (bwAvlbl < sfrPool->bw - sfrPool->bwAlloced)
24249 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24251 bwAvlbl = sfrPool->bw - sfrPool->bwAlloced;
24252 poolWithMaxAvlblBw = sfrPool;
24254 n = cmLListNext(l);
24256 if ((isUeCellEdge == FALSE) && (n == NULLP))
24258 if(l != &dlSf->sfrTotalPoolInfo.cePool)
24260 l = &dlSf->sfrTotalPoolInfo.cePool;
24261 n = cmLListFirst(l);
/* All pools exhausted: mark the corresponding pool family as full */
24271 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24275 dlSf->sfrTotalPoolInfo.ccBwFull = TRUE;
24281 /* set the remaining RBs for the requested UE */
24282 allocInfo->rbsReq = poolWithMaxAvlblBw->bw -
24283 poolWithMaxAvlblBw->bwAlloced;
24284 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24285 noLyrs = allocInfo->tbInfo[0].noLyr;
24286 allocInfo->tbInfo[0].bytesReq =
24287 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24288 *sfrpoolInfo = poolWithMaxAvlblBw;
24295 n = cmLListNext(l);
24297 if ((isUeCellEdge == FALSE) && (n == NULLP))
24299 if(l != &dlSf->sfrTotalPoolInfo.cePool)
24301 l = &dlSf->sfrTotalPoolInfo.cePool;
24302 n = cmLListFirst(l);
24318 #endif /* end of ifndef LTE_TDD*/
24319 /* LTE_ADV_FLAG_REMOVED_END */
24319 /* LTE_ADV_FLAG_REMOVED_END */
24322 * @brief To check if DL BW available for non-DLFS allocation.
24326 * Function : rgSCHCmnNonDlfsBwAvlbl
24328 * Processing Steps:
24329 * - Determine availability based on RA Type.
24331 * @param[in] RgSchCellCb *cell
24332 * @param[in] RgSchDlSf *dlSf
24333 * @param[in] RgSchDlRbAlloc *allocInfo
24341 static Bool rgSCHCmnNonDlfsBwAvlbl
24345 RgSchDlRbAlloc *allocInfo
24348 static Bool rgSCHCmnNonDlfsBwAvlbl(cell, dlSf, allocInfo)
24351 RgSchDlRbAlloc *allocInfo;
24356 uint8_t ignoredDfctRbg = FALSE;
24358 if (dlSf->bw <= dlSf->bwAlloced)
24360 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId, "(%d:%d)FAILED CRNTI:%d",
24361 dlSf->bw, dlSf->bwAlloced,allocInfo->rnti);
24364 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
24366 /* Fix for ccpu00123919 : Number of RBs in case of RETX should be same as
24367 * that of initial transmission. */
24368 if(allocInfo->tbInfo[0].tbCb->txCntr)
24370 /* If RB assignment is being done for RETX. Then if reqRbs are
24371 * a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
24372 * not a multiple of rbgSize then check if lsgRbgDfct exists */
24373 if (allocInfo->rbsReq % cell->rbgSize == 0)
24375 if (dlSf->lstRbgDfct)
24377 /* In this scenario we are wasting the last RBG for this dlSf */
24380 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24381 /* Fix: MUE_PERTTI_DL */
24382 dlSf->lstRbgDfct = 0;
24383 ignoredDfctRbg = TRUE;
24389 if (dlSf->lstRbgDfct)
24391 /* Check if type0 allocation can cater to this RETX requirement */
24392 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
24399 /* cannot allocate same number of required RBs */
24405 /* Condition is modified approprialtely to find
24406 * if rbsReq is less than available RBS*/
24407 if(allocInfo->rbsReq <= (((dlSf->type0End - dlSf->type2End + 1)*\
24408 cell->rbgSize) - dlSf->lstRbgDfct))
24412 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
24413 * allocation in TDD when requested RBs are more than available RBs*/
24416 /* MS_WORKAROUND for ccpu00122022 */
24417 if (dlSf->bw < dlSf->bwAlloced + cell->rbgSize)
24419 /* ccpu00132358- Re-assigning the values which were updated above
24420 * if it is RETX and Last RBG available*/
24421 if(ignoredDfctRbg == TRUE)
24424 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
24425 dlSf->lstRbgDfct = 1;
24431 /* Fix: Number of RBs in case of RETX should be same as
24432 * that of initial transmission. */
24433 if(allocInfo->tbInfo[0].tbCb->txCntr == 0
24435 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
24439 /* Setting the remaining RBs for the requested UE*/
24440 allocInfo->rbsReq = (((dlSf->type0End - dlSf->type2End + 1)*\
24441 cell->rbgSize) - dlSf->lstRbgDfct);
24442 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24443 noLyrs = allocInfo->tbInfo[0].noLyr;
24444 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24445 /* DwPts Scheduling Changes Start */
24447 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24449 allocInfo->tbInfo[0].bytesReq =
24450 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
24453 /* DwPts Scheduling Changes End */
24457 /* ccpu00132358- Re-assigning the values which were updated above
24458 * if it is RETX and Last RBG available*/
24459 if(ignoredDfctRbg == TRUE)
24462 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
24463 dlSf->lstRbgDfct = 1;
24466 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "FAILED for CRNTI:%d",
24468 printf ("RB Alloc failed for LAA TB type 0\n");
24474 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
24476 if (allocInfo->rbsReq <= (dlSf->bw - dlSf->bwAlloced))
24480 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
24481 * allocation in TDD when requested RBs are more than available RBs*/
24484 /* Fix: Number of RBs in case of RETX should be same as
24485 * that of initial transmission. */
24486 if((allocInfo->tbInfo[0].tbCb->txCntr == 0)
24488 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
24492 /* set the remaining RBs for the requested UE */
24493 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
24494 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24495 noLyrs = allocInfo->tbInfo[0].noLyr;
24496 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24497 /* DwPts Scheduling Changes Start */
24499 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24501 allocInfo->tbInfo[0].bytesReq =
24502 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
24505 /* DwPts Scheduling Changes End */
24509 printf ("RB Alloc failed for LAA TB type 2\n");
24510 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
24513 /* Fix: Number of RBs in case of RETX should be same as
24514 * that of initial transmission. */
24518 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
24522 /* LTE_ADV_FLAG_REMOVED_START */
24525 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24529 * Function : rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
24531 * Processing Steps:
24533 * @param[in] RgSchCellCb *cell
24534 * @param[in] RgSchDlSf *dlSf
24535 * @param[in] uint8_t rbStrt
24536 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc()
 * After a TYPE2 (contiguous) allocation for common channels in an SFR
 * (Soft Frequency Reuse) cell, advances the subframe-wide type2 pivots
 * and then walks the cell-centre (ccPool) and cell-edge (cePool) SFR
 * pool lists, updating each pool that the new allocation overlaps.
 * NOTE(review): elided extract — the loop construct enclosing the pool
 * walk is not fully visible here.
 */
24541 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
24549 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, rbStrt, numRb)
24558 RgSchSFRPoolInfo *sfrPool;
/* Start with the cell-centre pool list; switch to cePool at the end. */
24560 l = &dlSf->sfrTotalPoolInfo.ccPool;
/* Move the subframe-level type2 pivots past the newly allocated RBs. */
24562 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24563 dlSf->bwAlloced += numRb;
24564 dlSf->type2Start += numRb;
24565 n = cmLListFirst(l);
24569 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24570 n = cmLListNext(l);
24572 /* If the pool contains some RBs allocated in this allocation, e.g: Pool is [30.50]. Pool->type2Start is 40 , dlSf->type2Start is 45. then update the variables in pool */
24573 if((sfrPool->poolendRB >= dlSf->type2Start) && (sfrPool->type2Start < dlSf->type2Start))
24575 sfrPool->type2End = dlSf->type2End;
24576 sfrPool->bwAlloced = dlSf->type2Start - sfrPool->poolstartRB;
24577 sfrPool->type2Start = dlSf->type2Start;
24581 /* If the pool contains all RBs allocated in this allocation*/
/* Allocation ran past this pool's end: mark the pool exhausted. */
24582 if(dlSf->type2Start > sfrPool->poolendRB)
24584 sfrPool->type2End = sfrPool->type0End + 1;
24585 sfrPool->bwAlloced = sfrPool->bw;
24586 sfrPool->type2Start = sfrPool->poolendRB + 1;
/* Finished ccPool list: continue the same walk over the cePool list. */
24591 if (l != &dlSf->sfrTotalPoolInfo.cePool)
24593 l = &dlSf->sfrTotalPoolInfo.cePool;
24594 n = cmLListFirst(l);
24604 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24608 * Function : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
24610 * Processing Steps:
24612 * @param[in] RgSchCellCb *cell
24613 * @param[in] RgSchDlSf *dlSf
24614 * @param[in] uint8_t rbStrt
24615 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsUpdDSFRTyp2Alloc()
 * DSFR (Dynamic SFR) variant of the TYPE2 post-allocation update: for a
 * cell-centre UE, decides from the per-pool RNTP high-power ranges
 * whether the UE transmits at high power, updates the RNTP bitmap via
 * rgSCHCmnBuildRntpInfo(), and finally advances the subframe type2
 * pivots and allocated-bandwidth count.
 * Returns ROK on success, RFAILED if the RNTP bitmap update fails
 * (per the visible error paths).
 * NOTE(review): elided extract — several braces/else arms are not
 * visible; the two-pool vs one-pool branching is inferred from the
 * visible conditions and should be confirmed against the full source.
 */
24621 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
24630 static S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc(cell, ue, dlSf, rbStrt, numRb)
24640 RgSchSFRPoolInfo *sfrCCPool1 = NULL;
24641 RgSchSFRPoolInfo *sfrCCPool2 = NULL;
24644 /* Move the type2End pivot forward */
24647 l = &dlSf->sfrTotalPoolInfo.ccPool;
24648 n = cmLListFirst(l);
24651 sfrCCPool1 = (RgSchSFRPoolInfo*)(n->node);
24653 if (sfrCCPool1 == NULLP)
24655 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24656 "sfrCCPool1 is NULL for CRNTI:%d",ue->ueId)
24659 n = cmLListNext(l);
/* A second CC pool may exist (split around the CE pool). */
24662 sfrCCPool2 = (RgSchSFRPoolInfo*)(n->node);
24663 n = cmLListNext(l);
24665 if((sfrCCPool1) && (sfrCCPool2))
24667 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
/* High power if the allocated span [type2Start, type2Start+numRb) falls
 * inside either pool's configured high-power CC range. */
24668 if(((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
24669 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb)) ||
24670 ((dlSf->type2Start >= sfrCCPool2->pwrHiCCRange.startRb) &&
24671 (dlSf->type2Start + numRb < sfrCCPool2->pwrHiCCRange.endRb)))
24673 ue->lteAdvUeCb.isCCUePHigh = TRUE;
24675 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
24676 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
24679 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24680 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Single CC pool case: same high-power check against pool 1 only. */
24687 if((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
24688 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb))
24690 ue->lteAdvUeCb.isCCUePHigh = TRUE;
24692 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
24693 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
24696 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24697 "rgSCHCmnBuildRntpInfo() function returned RFAILED CRNTI:%d",ue->ueId);
/* Advance subframe-level pivots past the allocated span. */
24703 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24705 dlSf->bwAlloced += numRb;
24706 /*MS_FIX for ccpu00123918*/
24707 dlSf->type2Start += numRb;
24713 #endif /* end of ifndef LTE_TDD*/
24714 /* LTE_ADV_FLAG_REMOVED_END */
24716 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24720 * Function : rgSCHCmnNonDlfsUpdTyp2Alloc
24722 * Processing Steps:
24724 * @param[in] RgSchCellCb *cell
24725 * @param[in] RgSchDlSf *dlSf
24726 * @param[in] uint8_t rbStrt
24727 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsUpdTyp2Alloc()
 * Plain (non-SFR) TYPE2 post-allocation bookkeeping: moves the type2End
 * pivot to the RBG boundary past the allocation, adds numRb to the
 * subframe's allocated bandwidth, and advances type2Start.
 */
24732 static Void rgSCHCmnNonDlfsUpdTyp2Alloc
24740 static Void rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, rbStrt, numRb)
24747 /* Move the type2End pivot forward */
24748 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24749 //#ifndef LTEMAC_SPS
24750 dlSf->bwAlloced += numRb;
24751 /*Fix for ccpu00123918*/
24752 dlSf->type2Start += numRb;
24758 * @brief To do DL allocation using TYPE0 RA.
24762 * Function : rgSCHCmnNonDlfsType0Alloc
24764 * Processing Steps:
24765 * - Perform TYPE0 allocation using the RBGs between
24766 * type0End and type2End.
24767 * - Build the allocation mask as per RBG positioning.
24768 * - Update the allocation parameters.
24770 * @param[in] RgSchCellCb *cell
24771 * @param[in] RgSchDlSf *dlSf
24772 * @param[in] RgSchDlRbAlloc *allocInfo
/*
 * rgSCHCmnNonDlfsType0Alloc()
 * Performs a TYPE0 (RBG bitmap) DL allocation between type0End and
 * type2End: computes the number of RBGs needed (accounting for the
 * last-RBG deficit), caps against subframe bandwidth and UE category
 * limits (max TB size / max RBs), builds the RBG allocation bitmask,
 * moves type0End backwards, and fills in bytesAlloc for each scheduled
 * TB from the TB-size table (RETX keeps its original bytesReq).
 * NOTE(review): elided extract — the enclosing braces of several
 * conditionals and the declarations of locals (iTbs, noLyr, noRbs) are
 * not visible here.
 */
24778 static Void rgSCHCmnNonDlfsType0Alloc
24782 RgSchDlRbAlloc *allocInfo,
24786 static Void rgSCHCmnNonDlfsType0Alloc(cell, dlSf, allocInfo, dlUe)
24789 RgSchDlRbAlloc *allocInfo;
24793 uint32_t dlAllocMsk = 0;
/* The last RBG of the system bandwidth may be short by lstRbgDfct RBs. */
24794 uint8_t rbgFiller = dlSf->lstRbgDfct;
24795 uint8_t noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
24796 //uint8_t noRbgs = (allocInfo->rbsReq + rbgFiller)/ cell->rbgSize;
24800 uint32_t tb1BytesAlloc = 0;
24801 uint32_t tb2BytesAlloc = 0;
24802 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
24804 //if(noRbgs == 0) noRbgs = 1; /* Not required as ceilling is used above*/
24806 /* Fix for ccpu00123919*/
24807 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* If ceiling overshot the subframe bandwidth, drop one RBG. */
24808 if (dlSf->bwAlloced + noRbs > dlSf->bw)
24814 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
24817 /* Fix for ccpu00138701: Ceilling is using to derive num of RBGs, Therefore,
24818 * after this operation,checking Max TB size and Max RBs are not crossed
24819 * if it is crossed then decrement num of RBGs. */
24820 //if((noRbs + rbgFiller) % cell->rbgSize)
24821 if((noRbs > allocInfo->rbsReq) &&
24822 (allocInfo->rbsReq + rbgFiller) % cell->rbgSize)
24823 {/* considering ue category limitation
24824 * due to ceiling */
/* LAA TBs are exempt from the UE-category cap check. */
24827 if (rgSCHLaaIsLaaTB(allocInfo)== FALSE)
/* Compute prospective TB sizes only for NEW transmissions. */
24830 if ((allocInfo->tbInfo[0].schdlngForTb) && (!allocInfo->tbInfo[0].tbCb->txCntr))
24832 iTbs = allocInfo->tbInfo[0].iTbs;
24833 noLyr = allocInfo->tbInfo[0].noLyr;
24834 tb1BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24837 if ((allocInfo->tbInfo[1].schdlngForTb) && (!allocInfo->tbInfo[1].tbCb->txCntr))
24839 iTbs = allocInfo->tbInfo[1].iTbs;
24840 noLyr = allocInfo->tbInfo[1].noLyr;
24841 tb2BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24845 /* Only Check for New Tx No need for Retx */
24846 if (tb1BytesAlloc || tb2BytesAlloc)
/* UE-category limits: aggregate bits, per-TB size, and max RBs. */
24848 if (( ue->dl.aggTbBits >= dlUe->maxTbBits) ||
24849 (tb1BytesAlloc >= dlUe->maxTbSz/8) ||
24850 (tb2BytesAlloc >= dlUe->maxTbSz/8) ||
24851 (noRbs >= dlUe->maxRb))
24857 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
24861 /* type0End would have been initially (during subfrm Init) at the bit position
24862 * (cell->noOfRbgs - 1), 0 being the most significant.
24863 * Getting DlAllocMsk for noRbgs and at the appropriate position */
24864 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - dlSf->type0End));
24865 /* Move backwards the type0End pivot */
24866 dlSf->type0End -= noRbgs;
24867 /*Fix for ccpu00123919*/
24868 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
24869 /* Update the bwAlloced field accordingly */
24870 //#ifndef LTEMAC_SPS /* ccpu00129474*/
24871 dlSf->bwAlloced += noRbs;
24873 /* Update Type0 Alloc Info */
24874 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
24875 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
24876 allocInfo->rbsAlloc = noRbs;
24878 /* Update Tb info for each scheduled TB */
24879 iTbs = allocInfo->tbInfo[0].iTbs;
24880 noLyr = allocInfo->tbInfo[0].noLyr;
24881 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
24882 * RETX TB Size is same as Init TX TB Size */
24883 if (allocInfo->tbInfo[0].tbCb->txCntr)
24885 allocInfo->tbInfo[0].bytesAlloc =
24886 allocInfo->tbInfo[0].bytesReq;
24890 allocInfo->tbInfo[0].bytesAlloc =
24891 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24892 /* DwPts Scheduling Changes Start */
/* Special subframe (DwPTS) carries fewer symbols: scale RBs by 3/4. */
24894 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24896 allocInfo->tbInfo[0].bytesAlloc =
24897 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
24900 /* DwPts Scheduling Changes End */
24903 if (allocInfo->tbInfo[1].schdlngForTb)
24905 iTbs = allocInfo->tbInfo[1].iTbs;
24906 noLyr = allocInfo->tbInfo[1].noLyr;
24907 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
24908 * RETX TB Size is same as Init TX TB Size */
24909 if (allocInfo->tbInfo[1].tbCb->txCntr)
24911 allocInfo->tbInfo[1].bytesAlloc =
24912 allocInfo->tbInfo[1].bytesReq;
24916 allocInfo->tbInfo[1].bytesAlloc =
24917 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24918 /* DwPts Scheduling Changes Start */
24920 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24922 allocInfo->tbInfo[1].bytesAlloc =
24923 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
24926 /* DwPts Scheduling Changes End */
24930 /* The last RBG which can be smaller than the RBG size is considered
24931 * only for the first time allocation of TYPE0 UE */
24932 dlSf->lstRbgDfct = 0;
24939 * @brief To prepare RNTP value from the PRB allocation (P-High -> 1 and P-Low -> 0)
24943 * Function : rgSCHCmnBuildRntpInfo
24945 * Processing Steps:
24947 * @param[in] uint8_t *rntpPtr
24948 * @param[in] uint8_t startRb
24949 * @param[in] uint8_t numRb
/*
 * rgSCHCmnBuildRntpInfo()
 * Sets the RNTP (Relative Narrowband Tx Power) bit for each PRB in
 * [startRb, startRb+nmbRb) inside the byte-packed bitmap rntpPtr
 * (P-High -> 1, P-Low -> 0). Bits are packed LSB-first within each
 * byte (bit position = PRB index % 8).
 * Returns RFAILED when rntpPtr is NULLP (per the visible error path).
 * NOTE(review): elided extract — the increment of rbPtrStartIdx inside
 * the while loop is not visible here; confirm against the full source.
 */
24955 static S16 rgSCHCmnBuildRntpInfo
24964 static S16 rgSCHCmnBuildRntpInfo(cell, rntpPtr, startRb, nmbRb, bw)
24972 uint16_t rbPtrStartIdx; /* Start Index of Octete Buffer to be filled */
24973 uint16_t rbPtrEndIdx; /* End Index of Octete Buffer to be filled */
24974 uint16_t rbBitLoc; /* Bit Location to be set as 1 in the current Byte */
24975 uint16_t nmbRbPerByte; /* PRB's to be set in the current Byte (in case of multiple Bytes) */
24978 rbPtrStartIdx = (startRb)/8;
24979 rbPtrEndIdx = (startRb + nmbRb)/8;
24981 if (rntpPtr == NULLP)
24983 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
24984 "rgSCHCmnBuildRntpInfo():"
24985 "rntpPtr can't be NULLP (Memory Allocation Failed)");
24989 while(rbPtrStartIdx <= rbPtrEndIdx)
24991 rbBitLoc = (startRb)%8;
24993 /* case 1: startRb and endRb lies in same Byte */
24994 if (rbPtrStartIdx == rbPtrEndIdx)
/* Set nmbRb consecutive bits starting at rbBitLoc in one byte. */
24996 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
24997 | (((1<<nmbRb)-1)<<rbBitLoc);
25000 /* case 2: startRb and endRb lies in different Byte */
25001 if (rbPtrStartIdx != rbPtrEndIdx)
/* Fill the remainder of the current byte, then carry the rest of the
 * range forward to the next byte iteration. */
25003 nmbRbPerByte = 8 - rbBitLoc;
25004 nmbRb = nmbRb - nmbRbPerByte;
25005 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
25006 | (((1<<nmbRbPerByte)-1)<<rbBitLoc);
25007 startRb = startRb + nmbRbPerByte;
25013 /* dsfr_pal_fixes ** 21-March-2013 ** SKS ** Adding Debug logs */
25015 /* dsfr_pal_fixes ** 25-March-2013 ** SKS ** Adding Debug logs to print RNTP */
25021 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
25025 * Function : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
25027 * Processing Steps:
25029 * @param[in] RgSchCellCb *cell
25030 * @param[in] RgSchDlSf *dlSf
25031 * @param[in] uint8_t rbStrt
25032 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc()
 * SFR-pool variant of the TYPE2 post-allocation update: advances the
 * subframe-level and pool-level type2 pivots and allocated bandwidth,
 * and — when DSFR is enabled — updates the RNTP bitmap for cell-centre
 * UEs whose allocation falls in the pool's high-power range (cell-edge
 * UEs always get their span marked in the RNTP bitmap).
 * NOTE(review): elided extract — the else arm pairing and the return
 * value on success are not fully visible here.
 */
25037 static S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
25042 RgSchSFRPoolInfo *sfrPool,
25047 static S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrPool, rbStrt, numRb)
25051 RgSchSFRPoolInfo *sfrPool;
/* Move the subframe and pool type2End pivots to the next RBG boundary. */
25060 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
25061 sfrPool->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
25064 dlSf->type2Start += numRb;
25065 dlSf->bwAlloced += numRb;
25067 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
25069 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
25070 if(FALSE == ue->lteAdvUeCb.rgrLteAdvUeCfg.isUeCellEdge)
/* Cell-centre UE: high power only when inside the pool's high-power
 * range; then record the span in the RNTP bitmap. */
25072 if((sfrPool->type2Start >= sfrPool->pwrHiCCRange.startRb) &&
25073 (sfrPool->type2Start + numRb < sfrPool->pwrHiCCRange.endRb))
25075 ue->lteAdvUeCb.isCCUePHigh = TRUE;
25077 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
25078 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
25081 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
25082 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
25089 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
25090 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
25093 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
25094 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Advance the pool-level pivot and allocated-bandwidth count. */
25099 sfrPool->type2Start += numRb;
25100 sfrPool->bwAlloced += numRb;
25107 * @brief To do DL allocation using TYPE0 RA.
25111 * Function : rgSCHCmnNonDlfsSFRPoolType0Alloc
25113 * Processing Steps:
25114 * - Perform TYPE0 allocation using the RBGs between type0End and type2End.
25115 * - Build the allocation mask as per RBG positioning.
25116 * - Update the allocation parameters.
25118 * @param[in] RgSchCellCb *cell
25119 * @param[in] RgSchDlSf *dlSf
25120 * @param[in] RgSchDlRbAlloc *allocInfo
/*
 * rgSCHCmnNonDlfsSFRPoolType0Alloc()
 * TYPE0 (RBG bitmap) allocation taken from a specific SFR pool: the
 * last-RBG deficit is applied only when the pool reaches the end of the
 * system bandwidth; builds the RBG bitmask from the pool's type0End,
 * moves that pivot backwards, and updates pool and subframe allocated
 * bandwidth plus per-TB bytesAlloc (RETX keeps its original bytesReq).
 * NOTE(review): elided extract — several braces and local declarations
 * (noRbs, iTbs, noLyr) are not visible here.
 */
25125 static Void rgSCHCmnNonDlfsSFRPoolType0Alloc
25129 RgSchSFRPoolInfo *poolInfo,
25130 RgSchDlRbAlloc *allocInfo
25133 static Void rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, poolInfo, allocInfo)
25136 RgSchSFRPoolInfo *poolInfo;
25137 RgSchDlRbAlloc *allocInfo;
25140 uint32_t dlAllocMsk = 0;
25141 uint8_t rbgFiller = 0;
25142 uint8_t noRbgs = 0;
/* The last-RBG deficit applies only if this pool ends at the system
 * bandwidth edge (the short final RBG belongs to this pool). */
25148 if (poolInfo->poolstartRB + poolInfo->bw == dlSf->bw)
25150 if (poolInfo->type0End == dlSf->bw/4)
25152 rbgFiller = dlSf->lstRbgDfct;
25153 /* The last RBG which can be smaller than the RBG size is considered
25154 * only for the first time allocation of TYPE0 UE */
25155 dlSf->lstRbgDfct = 0;
25159 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
25161 /* Abhinav to-do start */
25162 /* MS_FIX for ccpu00123919*/
25163 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* If ceiling overshot the subframe bandwidth, drop one RBG. */
25164 if (dlSf->bwAlloced + noRbs > dlSf->bw)
25170 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
25172 /* Abhinav to-do end */
25176 /* type0End would have been initially (during subfrm Init) at the bit position
25177 * (cell->noOfRbgs - 1), 0 being the most significant.
25178 * Getting DlAllocMsk for noRbgs and at the appropriate position */
25179 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - poolInfo->type0End));
25180 /* Move backwards the type0End pivot */
25181 poolInfo->type0End -= noRbgs;
25182 /*MS_FIX for ccpu00123919*/
25183 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
25184 /* Update the bwAlloced field accordingly */
25185 poolInfo->bwAlloced += noRbs + dlSf->lstRbgDfct;
25186 dlSf->bwAlloced += noRbs + dlSf->lstRbgDfct;
25188 /* Update Type0 Alloc Info */
25189 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
25190 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
25191 allocInfo->rbsAlloc = noRbs;
25193 /* Update Tb info for each scheduled TB */
25194 iTbs = allocInfo->tbInfo[0].iTbs;
25195 noLyr = allocInfo->tbInfo[0].noLyr;
25196 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
25197 * RETX TB Size is same as Init TX TB Size */
25198 if (allocInfo->tbInfo[0].tbCb->txCntr)
25200 allocInfo->tbInfo[0].bytesAlloc =
25201 allocInfo->tbInfo[0].bytesReq;
25205 allocInfo->tbInfo[0].bytesAlloc =
25206 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
25209 if (allocInfo->tbInfo[1].schdlngForTb)
25211 iTbs = allocInfo->tbInfo[1].iTbs;
25212 noLyr = allocInfo->tbInfo[1].noLyr;
25213 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
25214 * RETX TB Size is same as Init TX TB Size */
25215 if (allocInfo->tbInfo[1].tbCb->txCntr)
25217 allocInfo->tbInfo[1].bytesAlloc =
25218 allocInfo->tbInfo[1].bytesReq;
25222 allocInfo->tbInfo[1].bytesAlloc =
25223 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
25227 /* The last RBG which can be smaller than the RBG size is considered
25228 * only for the first time allocation of TYPE0 UE */
25229 dlSf->lstRbgDfct = 0;
25234 * @brief Computes RNTP Info for a subframe.
25238 * Function : rgSCHCmnNonDlfsDsfrRntpComp
25240 * Processing Steps:
25241 * - Computes RNTP info from individual pools.
25243 * @param[in] RgSchDlSf *dlSf
/*
 * rgSCHCmnNonDlfsDsfrRntpComp()
 * Aggregates the per-TTI RNTP bitmap of dlSf into the cell-level RNTP
 * accumulator (bytewise OR). After RG_SCH_MAX_RNTP_SAMPLES TTIs it
 * allocates an RgrLoadInfIndInfo, fills it with the aggregated RNTP
 * info, sends it up via rgSCHUtlRgrLoadInfInd() (load indication toward
 * neighbouring eNBs), and resets the accumulator.
 * NOTE(review): `samples` is function-static, so this aggregation is
 * shared across all cells that reach this function — confirm this is
 * single-cell-safe in the full source.
 */
25249 static void rgSCHCmnNonDlfsDsfrRntpComp
25255 static void rgSCHCmnNonDlfsDsfrRntpComp(cell, dlSf)
25260 static uint16_t samples = 0;
25262 uint16_t bwBytes = (dlSf->bw-1)/8;
25263 RgrLoadInfIndInfo *rgrLoadInf;
25265 uint16_t ret = ROK;
/* Number of bytes needed to hold one bit per PRB of the bandwidth. */
25268 len = (dlSf->bw % 8 == 0) ? dlSf->bw/8 : dlSf->bw/8 + 1;
25270 /* RNTP info is ORed every TTI and the sample is stored in cell control block */
25271 for(i = 0; i <= bwBytes; i++)
25273 cell->rntpAggrInfo.val[i] |= dlSf->rntpInfo.val[i];
25275 samples = samples + 1;
25276 /* After every 1000 ms, the RNTP info will be sent to application to be further sent to all neighbouring eNB
25277 informing them about the load indication for cell edge users */
25278 if(RG_SCH_MAX_RNTP_SAMPLES == samples)
25281 ret = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&rgrLoadInf,
25282 sizeof(RgrLoadInfIndInfo));
25285 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
25286 "allocate memory for sending LoadInfo");
25290 rgrLoadInf->u.rntpInfo.pres = cell->rntpAggrInfo.pres;
25291 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
25292 rgrLoadInf->u.rntpInfo.len = len;
25294 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
/* Note: the indication references the cell's accumulator buffer
 * directly rather than copying it. */
25295 rgrLoadInf->u.rntpInfo.val = cell->rntpAggrInfo.val;
25296 rgrLoadInf->cellId = cell->cellId;
25298 /* dsfr_pal_fixes ** 22-March-2013 ** SKS */
25299 rgrLoadInf->bw = dlSf->bw;
25300 rgrLoadInf->type = RGR_SFR;
25302 ret = rgSCHUtlRgrLoadInfInd(cell, rgrLoadInf);
25305 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsDsfrRntpComp():"
25306 "rgSCHUtlRgrLoadInfInd() returned RFAILED");
/* Reset the accumulator for the next sampling window. */
25309 memset(cell->rntpAggrInfo.val,0,len);
25313 /* LTE_ADV_FLAG_REMOVED_END */
25315 /* LTE_ADV_FLAG_REMOVED_START */
25317 * @brief Performs RB allocation per UE from a pool.
25321 * Function : rgSCHCmnSFRNonDlfsUeRbAlloc
25323 * Processing Steps:
25324 * - Allocate consecutively available RBs.
25326 * @param[in] RgSchCellCb *cell
25327 * @param[in] RgSchUeCb *ue
25328 * @param[in] RgSchDlSf *dlSf
25329 * @param[out] uint8_t *isDlBwAvail
/*
 * rgSCHCmnSFRNonDlfsUeRbAlloc()
 * Per-UE RB allocation in an SFR cell for a non-DLFS scheduler: picks
 * the SFR pool for this UE (cell-edge vs cell-centre) via
 * rgSCHCmnNonDlfsSFRBwAvlbl(), allocates a PDCCH (with DTX-aware CQI
 * handling), then performs the TYPE2 or TYPE0 allocation from the
 * chosen pool and records the allocated RBs/bytes in allocInfo.
 * Sets *isDlBwAvail to FALSE when no pool bandwidth is left for a
 * cell-edge UE (so the caller stops scheduling), TRUE otherwise.
 * NOTE(review): elided extract — the return statements and some branch
 * braces are not visible here.
 */
25337 static S16 rgSCHCmnSFRNonDlfsUeRbAlloc
25342 uint8_t *isDlBwAvail
25345 static S16 rgSCHCmnSFRNonDlfsUeRbAlloc(cell, ue, dlSf, isDlBwAvail)
25349 uint8_t *isDlBwAvail;
25352 RgSchDlRbAlloc *allocInfo;
25353 RgSchCmnDlUe *dlUe;
25355 RgSchSFRPoolInfo *sfrpoolInfo = NULLP;
25358 isUECellEdge = RG_SCH_CMN_IS_UE_CELL_EDGE(ue);
25360 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
25361 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25362 *isDlBwAvail = TRUE;
25364 /*Find which pool is available for this UE*/
25365 if (rgSCHCmnNonDlfsSFRBwAvlbl(cell, &sfrpoolInfo, dlSf, allocInfo, isUECellEdge) != TRUE)
25367 /* SFR_FIX - If this is CE UE there may be BW available in CC Pool
25368 So CC UEs will be scheduled */
25371 *isDlBwAvail = TRUE;
25375 *isDlBwAvail = FALSE;
/* PDCCH allocation; pass TRUE when either TB saw ACK/NACK DTX so the
 * aggregation level can be chosen conservatively. */
25380 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX || dlUe->proc->tbInfo[1].isAckNackDtx)
25382 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
25386 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
25389 if (!(allocInfo->pdcch))
25391 /* Returning ROK since PDCCH might be available for another UE and further allocations could be done */
25396 allocInfo->rnti = ue->ueId;
25399 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
25401 allocInfo->allocInfo.raType2.isLocal = TRUE;
25402 /* rg004.201 patch - ccpu00109921 fix end */
25403 /* MS_FIX for ccpu00123918*/
25404 allocInfo->allocInfo.raType2.rbStart = (uint8_t)sfrpoolInfo->type2Start;
25405 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25406 /* rg007.201 - Changes for MIMO feature addition */
25407 /* rg008.201 - Removed dependency on MIMO compile-time flag */
25408 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrpoolInfo, \
25409 allocInfo->allocInfo.raType2.rbStart, \
25410 allocInfo->allocInfo.raType2.numRb);
25411 allocInfo->rbsAlloc = allocInfo->rbsReq;
25412 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25414 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
25416 rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, sfrpoolInfo, allocInfo);
/* Derive the effective code rate for each scheduled TB. */
25420 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,0);
25421 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
25423 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,1);
25428 #if defined(LTEMAC_SPS)
25429 /* Update the sub-frame with new allocation */
25430 dlSf->bwAlloced += allocInfo->rbsReq;
25436 /* LTE_ADV_FLAG_REMOVED_END */
25437 #endif /* LTE_TDD */
25440 * @brief Performs RB allocation per UE for frequency non-selective cell.
25444 * Function : rgSCHCmnNonDlfsUeRbAlloc
25446 * Processing Steps:
25447 * - Allocate consecutively available RBs.
25449 * @param[in] RgSchCellCb *cell
25450 * @param[in] RgSchUeCb *ue
25451 * @param[in] RgSchDlSf *dlSf
25452 * @param[out] uint8_t *isDlBwAvail
/*
 * rgSCHCmnNonDlfsUeRbAlloc()
 * Per-UE RB allocation for a frequency-non-selective cell (5G-TF /
 * xPDSCH path in this build): validates the beam's VRBG budget,
 * allocates a PDCCH (DTX-aware), then assigns a contiguous VRBG span
 * starting at the beam's current vrbgStart, encodes it as an RIV via
 * rgSCHCmnCalcRiv(), and advances the beam's VRBG accounting.
 * Sets *isDlBwAvail to TRUE unconditionally up front (per visible code).
 * NOTE(review): elided extract — return statements and some braces are
 * not visible; MAX_5GTF_VRBG-related limits are taken from the visible
 * log text ("vrbg allocated > 25").
 */
25459 static S16 rgSCHCmnNonDlfsUeRbAlloc
25464 uint8_t *isDlBwAvail
25467 static S16 rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, isDlBwAvail)
25471 uint8_t *isDlBwAvail;
25474 RgSchDlRbAlloc *allocInfo;
25475 RgSchCmnDlUe *dlUe;
25477 uint32_t dbgRbsReq = 0;
25481 RgSch5gtfUeCb *ue5gtfCb = &(ue->ue5gtfCb);
/* Per-beam VRBG bookkeeping for the UE's configured beam. */
25482 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[ue5gtfCb->BeamId]);
25484 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
25485 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25486 *isDlBwAvail = TRUE;
25488 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
25490 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25491 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
25493 printf("5GTF_ERROR vrbg allocated > 25\n");
/* PDCCH allocation; TRUE marks DTX feedback on either TB. */
25497 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX
25498 || dlUe->proc->tbInfo[1].isAckNackDtx)
25500 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
25504 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
25506 if (!(allocInfo->pdcch))
25508 /* Returning ROK since PDCCH might be available for another UE and
25509 * further allocations could be done */
25510 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25511 "5GTF_ERROR : PDCCH allocation failed :ue (%u)",
25513 printf("5GTF_ERROR PDCCH allocation failed\n");
25517 //maxPrb = RGSCH_MIN((allocInfo->vrbgReq * MAX_5GTF_VRBG_SIZE), ue5gtfCb->maxPrb);
25518 //maxPrb = RGSCH_MIN(maxPrb,
25519 //((beamInfo->totVrbgAvail - beamInfo->vrbgStart)* MAX_5GTF_VRBG_SIZE)));
25520 //TODO_SID Need to check for vrbg available after scheduling for same beam.
25521 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
25522 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
25523 //TODO_SID: Setting for max TP
25524 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
/* Encode the (start, length) VRBG pair as a resource indication value. */
25525 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
25526 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
25527 allocInfo->tbInfo[0].tbCb->dlGrnt.SCID = 0;
25528 allocInfo->tbInfo[0].tbCb->dlGrnt.dciFormat = allocInfo->dciFormat;
25529 //Filling temporarily
25530 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
25531 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG cursor and total for subsequent UEs. */
25533 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25534 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25535 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25543 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
25547 * Function : rgSCHCmnNonDlfsCcchSduAlloc
25549 * Processing Steps:
25550 * - For each element in the list, Call rgSCHCmnNonDlfsCcchSduRbAlloc().
25551 * - If allocation is successful, add the ueCb to scheduled list of CCCH
25553 * - else, add UeCb to non-scheduled list.
25555 * @param[in] RgSchCellCb *cell
25556 * @param[in, out] RgSchCmnCcchSduRbAlloc *allocInfo
25557 * @param[in] uint8_t isRetx
/*
 * rgSCHCmnNonDlfsCcchSduAlloc()
 * Walks the CCCH SDU HARQ-process list (retransmission list when isRetx
 * is set, otherwise the new-transmission list) and attempts RB
 * allocation for each via rgSCHCmnNonDlfsCcchSduRbAlloc(). A successful
 * UE is appended to the scheduled list; on the first failure the
 * remaining entries are all moved to the non-scheduled list and the
 * walk ends (no bandwidth left this TTI).
 */
25562 static Void rgSCHCmnNonDlfsCcchSduAlloc
25565 RgSchCmnCcchSduRbAlloc *allocInfo,
25569 static Void rgSCHCmnNonDlfsCcchSduAlloc(cell, allocInfo, isRetx)
25571 RgSchCmnCcchSduRbAlloc *allocInfo;
25576 CmLListCp *ccchSduLst = NULLP;
25577 CmLListCp *schdCcchSduLst = NULLP;
25578 CmLListCp *nonSchdCcchSduLst = NULLP;
25579 CmLList *schdLnkNode = NULLP;
25580 CmLList *toBeSchdLnk = NULLP;
25581 RgSchDlSf *dlSf = allocInfo->ccchSduDlSf;
25582 RgSchUeCb *ueCb = NULLP;
25583 RgSchDlHqProcCb *hqP = NULLP;
25587 /* Initialize re-transmitting lists */
25588 ccchSduLst = &(allocInfo->ccchSduRetxLst);
25589 schdCcchSduLst = &(allocInfo->schdCcchSduRetxLst);
25590 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduRetxLst);
25594 /* Initialize transmitting lists */
25595 ccchSduLst = &(allocInfo->ccchSduTxLst);
25596 schdCcchSduLst = &(allocInfo->schdCcchSduTxLst);
25597 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduTxLst);
25600 /* Perform allocaations for the list */
25601 toBeSchdLnk = cmLListFirst(ccchSduLst);
25602 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
25604 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25605 ueCb = hqP->hqE->ue;
25606 schdLnkNode = &hqP->schdLstLnk;
25607 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25608 ret = rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf);
25611 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
25612 * list and return */
/* Drain the rest of the list into the non-scheduled list; no further
 * allocation attempts are made this TTI. */
25615 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25616 ueCb = hqP->hqE->ue;
25617 schdLnkNode = &hqP->schdLstLnk;
25618 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25619 cmLListAdd2Tail(nonSchdCcchSduLst, schdLnkNode);
25620 toBeSchdLnk = toBeSchdLnk->next;
25621 } while(toBeSchdLnk);
25625 /* Allocation successful: Add UE to the scheduled list */
25626 cmLListAdd2Tail(schdCcchSduLst, schdLnkNode);
25634 * @brief Performs RB allocation for CcchSdu for frequency non-selective cell.
25638 * Function : rgSCHCmnNonDlfsCcchSduRbAlloc
25640 * Processing Steps:
25642 * - Allocate consecutively available RBs
25644 * @param[in] RgSchCellCb *cell
25645 * @param[in] RgSchUeCb *ueCb
25646 * @param[in] RgSchDlSf *dlSf
25652 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc
25659 static S16 rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf)
25665 RgSchDlRbAlloc *allocInfo;
25666 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
25670 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb,cell);
25672 /* [ccpu00138802]-MOD-If Bw is less than required, return fail
25673 It will be allocated in next TTI */
25675 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
25676 (dlSf->bwAlloced == dlSf->bw))
25678 if((dlSf->bwAlloced == dlSf->bw) ||
25679 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
25684 /* Retrieve PDCCH */
25685 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
25686 if (ueDl->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
25688 /* allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, dlSf, y, ueDl->cqi,
25689 * TFU_DCI_FORMAT_1A, TRUE);*/
25690 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, TRUE);
25694 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE);
25696 if (!(allocInfo->pdcch))
25698 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
25702 /* Update allocation information */
25703 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25704 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25705 allocInfo->allocInfo.raType2.isLocal = TRUE;
25707 /*Fix for ccpu00123918*/
25708 /* Push this harq process back to the free queue */
25709 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
25710 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25711 allocInfo->rbsAlloc = allocInfo->rbsReq;
25712 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25713 /* Update the sub-frame with new allocation */
25715 /* LTE_ADV_FLAG_REMOVED_START */
25717 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
25719 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf,
25720 allocInfo->allocInfo.raType2.rbStart,
25721 allocInfo->allocInfo.raType2.numRb);
25724 #endif /* end of ifndef LTE_TDD*/
25726 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf,
25727 allocInfo->allocInfo.raType2.rbStart,
25728 allocInfo->allocInfo.raType2.numRb);
25731 /* LTE_ADV_FLAG_REMOVED_END */
25732 /* ccpu00131941 - bwAlloced is updated from SPS bandwidth */
25740 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
25744 * Function : rgSCHCmnNonDlfsMsg4RbAlloc
25746 * Processing Steps:
25748 * - Allocate consecutively available RBs
25750 * @param[in] RgSchCellCb *cell
25751 * @param[in] RgSchRaCb *raCb
25752 * @param[in] RgSchDlSf *dlSf
25758 static S16 rgSCHCmnNonDlfsMsg4RbAlloc
25765 static S16 rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf)
25771 RgSchDlRbAlloc *allocInfo;
25774 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_RACB(raCb);
25777 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
25778 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
25780 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25781 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
25783 printf("5GTF_ERROR vrbg allocated > 25\n");
25788 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
25789 (dlSf->bwAlloced == dlSf->bw))
25791 if((dlSf->bwAlloced == dlSf->bw) ||
25792 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
25799 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
25800 if (raCb->dlHqE->msg4Proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
25802 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, TRUE);
25806 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, FALSE);
25808 if (!(allocInfo->pdcch))
25810 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
25815 /* SR_RACH_STATS : MSG4 TX Failed */
25816 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
25818 /* Update allocation information */
25819 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25820 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25821 allocInfo->allocInfo.raType2.isLocal = TRUE;
25824 /*Fix for ccpu00123918*/
25825 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
25826 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25827 /* LTE_ADV_FLAG_REMOVED_START */
25829 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
25831 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
25832 allocInfo->allocInfo.raType2.rbStart, \
25833 allocInfo->allocInfo.raType2.numRb);
25836 #endif /* end of ifndef LTE_TDD */
25838 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
25839 allocInfo->allocInfo.raType2.rbStart, \
25840 allocInfo->allocInfo.raType2.numRb);
25842 /* LTE_ADV_FLAG_REMOVED_END */
25844 allocInfo->rbsAlloc = allocInfo->rbsReq;
25845 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25849 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
25851 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
25852 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
25854 /* Update allocation information */
25855 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
25857 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
25858 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
25859 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
25861 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
25862 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
25865 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25866 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25867 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25875 * @brief Performs RB allocation for Msg4 lists of frequency non-selective cell.
25879 * Function : rgSCHCmnNonDlfsMsg4Alloc
25881 * Processing Steps:
25882 * - For each element in the list, Call rgSCHCmnNonDlfsMsg4RbAlloc().
25883 * - If allocation is successful, add the raCb to scheduled list of MSG4.
25884 * - else, add RaCb to non-scheduled list.
25886 * @param[in] RgSchCellCb *cell
25887 * @param[in, out] RgSchCmnMsg4RbAlloc *allocInfo
25888 * @param[in] uint8_t isRetx
25893 static Void rgSCHCmnNonDlfsMsg4Alloc
25896 RgSchCmnMsg4RbAlloc *allocInfo,
25900 static Void rgSCHCmnNonDlfsMsg4Alloc(cell, allocInfo, isRetx)
25902 RgSchCmnMsg4RbAlloc *allocInfo;
25907 CmLListCp *msg4Lst = NULLP;
25908 CmLListCp *schdMsg4Lst = NULLP;
25909 CmLListCp *nonSchdMsg4Lst = NULLP;
25910 CmLList *schdLnkNode = NULLP;
25911 CmLList *toBeSchdLnk = NULLP;
25912 RgSchDlSf *dlSf = allocInfo->msg4DlSf;
25913 RgSchRaCb *raCb = NULLP;
25914 RgSchDlHqProcCb *hqP = NULLP;
25918 /* Initialize re-transmitting lists */
25919 msg4Lst = &(allocInfo->msg4RetxLst);
25920 schdMsg4Lst = &(allocInfo->schdMsg4RetxLst);
25921 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4RetxLst);
25925 /* Initialize transmitting lists */
25926 msg4Lst = &(allocInfo->msg4TxLst);
25927 schdMsg4Lst = &(allocInfo->schdMsg4TxLst);
25928 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4TxLst);
25931 /* Perform allocaations for the list */
25932 toBeSchdLnk = cmLListFirst(msg4Lst);
25933 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
25935 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25936 raCb = hqP->hqE->raCb;
25937 schdLnkNode = &hqP->schdLstLnk;
25938 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25939 ret = rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf);
25942 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
25943 * list and return */
25946 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25947 raCb = hqP->hqE->raCb;
25948 schdLnkNode = &hqP->schdLstLnk;
25949 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25950 cmLListAdd2Tail(nonSchdMsg4Lst, schdLnkNode);
25951 toBeSchdLnk = toBeSchdLnk->next;
25952 } while(toBeSchdLnk);
25956 /* Allocation successful: Add UE to the scheduled list */
25957 cmLListAdd2Tail(schdMsg4Lst, schdLnkNode);
25968 * @brief Performs RB allocation for the list of UEs of a frequency
25969 * non-selective cell.
25973 * Function : rgSCHCmnNonDlfsDedRbAlloc
25975 * Processing Steps:
25976 * - For each element in the list, Call rgSCHCmnNonDlfsUeRbAlloc().
25977 * - If allocation is successful, add the ueCb to scheduled list of UEs.
25978 * - else, add ueCb to non-scheduled list of UEs.
25980 * @param[in] RgSchCellCb *cell
25981 * @param[in, out] RgSchCmnUeRbAlloc *allocInfo
25982 * @param[in] CmLListCp *ueLst,
25983 * @param[in, out] CmLListCp *schdHqPLst,
25984 * @param[in, out] CmLListCp *nonSchdHqPLst
25989 Void rgSCHCmnNonDlfsDedRbAlloc
25992 RgSchCmnUeRbAlloc *allocInfo,
25994 CmLListCp *schdHqPLst,
25995 CmLListCp *nonSchdHqPLst
25998 Void rgSCHCmnNonDlfsDedRbAlloc(cell, allocInfo, ueLst,
25999 schdHqPLst, nonSchdHqPLst)
26001 RgSchCmnUeRbAlloc *allocInfo;
26003 CmLListCp *schdHqPLst;
26004 CmLListCp *nonSchdHqPLst;
26008 CmLList *schdLnkNode = NULLP;
26009 CmLList *toBeSchdLnk = NULLP;
26010 RgSchDlSf *dlSf = allocInfo->dedDlSf;
26011 RgSchUeCb *ue = NULLP;
26012 RgSchDlHqProcCb *hqP = NULLP;
26013 uint8_t isDlBwAvail;
26016 /* Perform allocaations for the list */
26017 toBeSchdLnk = cmLListFirst(ueLst);
26018 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
26020 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
26022 schdLnkNode = &hqP->schdLstLnk;
26023 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
26025 ret = rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, &isDlBwAvail);
26028 /* Allocation failed: Add remaining UEs to non-scheduled
26029 * list and return */
26032 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
26034 schdLnkNode = &hqP->schdLstLnk;
26035 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
26036 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
26037 toBeSchdLnk = toBeSchdLnk->next;
26038 } while(toBeSchdLnk);
26044 #if defined (TENB_STATS) && defined (RG_5GTF)
26045 cell->tenbStats->sch.dl5gtfRbAllocPass++;
26047 /* Allocation successful: Add UE to the scheduled list */
26048 cmLListAdd2Tail(schdHqPLst, schdLnkNode);
26052 #if defined (TENB_STATS) && defined (RG_5GTF)
26053 cell->tenbStats->sch.dl5gtfRbAllocFail++;
26055 /* Allocation failed : Add UE to the non-scheduled list */
26056 printf("5GTF_ERROR Dl rb alloc failed adding nonSchdHqPLst\n");
26057 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
26065 * @brief Handles RB allocation for frequency non-selective cell.
26069 * Function : rgSCHCmnNonDlfsRbAlloc
26071 * Invoking Module Processing:
26072 * - SCH shall invoke this if downlink frequency selective is disabled for
26073 * the cell for RB allocation.
26074 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
26075 * estimate and subframe for each allocation to be made to SCH.
26077 * Processing Steps:
26078 * - Allocate sequentially for common channels.
26079 * - For transmitting and re-transmitting UE list.
26081 * - Perform wide-band allocations for UE in increasing order of
26083 * - Determine Imcs for the allocation.
26084 * - Determine RA type.
26085 * - Determine DCI format.
26087 * @param[in] RgSchCellCb *cell
26088 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
26093 Void rgSCHCmnNonDlfsRbAlloc
26096 RgSchCmnDlRbAllocInfo *allocInfo
26099 Void rgSCHCmnNonDlfsRbAlloc(cell, allocInfo)
26101 RgSchCmnDlRbAllocInfo *allocInfo;
26104 uint8_t raRspCnt = 0;
26105 RgSchDlRbAlloc *reqAllocInfo;
26107 /* Allocate for MSG4 retransmissions */
26108 if (allocInfo->msg4Alloc.msg4RetxLst.count)
26110 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc RetxLst\n");
26111 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), TRUE);
26114 /* Allocate for MSG4 transmissions */
26115 /* Assuming all the nodes in the list need allocations: rbsReq is valid */
26116 if (allocInfo->msg4Alloc.msg4TxLst.count)
26118 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc txLst\n");
26119 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), FALSE);
26122 /* Allocate for CCCH SDU (received after guard timer expiry)
26123 * retransmissions */
26124 if (allocInfo->ccchSduAlloc.ccchSduRetxLst.count)
26126 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
26127 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), TRUE);
26130 /* Allocate for CCCD SDU transmissions */
26131 /* Allocate for CCCH SDU (received after guard timer expiry) transmissions */
26132 if (allocInfo->ccchSduAlloc.ccchSduTxLst.count)
26134 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
26135 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), FALSE);
26139 /* Allocate for Random access response */
26140 for (raRspCnt = 0; raRspCnt < RG_SCH_CMN_MAX_CMN_PDCCH; ++raRspCnt)
26142 /* Assuming that the requests will be filled in sequentially */
26143 reqAllocInfo = &(allocInfo->raRspAlloc[raRspCnt]);
26144 if (!reqAllocInfo->rbsReq)
26148 printf("5GTF_ERROR calling RAR rgSCHCmnNonDlfsCmnRbAlloc\n");
26149 // if ((rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo)) != ROK)
26150 if ((rgSCHCmnNonDlfsCmnRbAllocRar(cell, reqAllocInfo)) != ROK)
26156 /* Allocate for RETX+TX UEs */
26157 if(allocInfo->dedAlloc.txRetxHqPLst.count)
26159 printf("5GTF_ERROR TX RETX rgSCHCmnNonDlfsDedRbAlloc\n");
26160 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26161 &(allocInfo->dedAlloc.txRetxHqPLst),
26162 &(allocInfo->dedAlloc.schdTxRetxHqPLst),
26163 &(allocInfo->dedAlloc.nonSchdTxRetxHqPLst));
26166 if((allocInfo->dedAlloc.retxHqPLst.count))
26168 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26169 &(allocInfo->dedAlloc.retxHqPLst),
26170 &(allocInfo->dedAlloc.schdRetxHqPLst),
26171 &(allocInfo->dedAlloc.nonSchdRetxHqPLst));
26174 /* Allocate for transmitting UEs */
26175 if((allocInfo->dedAlloc.txHqPLst.count))
26177 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26178 &(allocInfo->dedAlloc.txHqPLst),
26179 &(allocInfo->dedAlloc.schdTxHqPLst),
26180 &(allocInfo->dedAlloc.nonSchdTxHqPLst));
26183 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cell);
26184 if ((allocInfo->dedAlloc.txRetxHqPLst.count +
26185 allocInfo->dedAlloc.retxHqPLst.count +
26186 allocInfo->dedAlloc.txHqPLst.count) >
26187 cmnCell->dl.maxUePerDlSf)
26189 #ifndef ALIGN_64BIT
26190 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
26191 " scheduler exceed maximumUePerDlSf(%u)tx-retx %ld retx %ld tx %ld\n",
26192 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
26193 allocInfo->dedAlloc.retxHqPLst.count,
26194 allocInfo->dedAlloc.txHqPLst.count));
26196 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
26197 " scheduler exceed maximumUePerDlSf(%u)tx-retx %d retx %d tx %d\n",
26198 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
26199 allocInfo->dedAlloc.retxHqPLst.count,
26200 allocInfo->dedAlloc.txHqPLst.count));
26205 /* LTE_ADV_FLAG_REMOVED_START */
26206 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
26208 printf("5GTF_ERROR RETX rgSCHCmnNonDlfsDsfrRntpComp\n");
26209 rgSCHCmnNonDlfsDsfrRntpComp(cell, allocInfo->dedAlloc.dedDlSf);
26211 /* LTE_ADV_FLAG_REMOVED_END */
26212 #endif /* LTE_TDD */
26216 /***********************************************************
26218 * Func : rgSCHCmnCalcRiv
26220 * Desc : This function calculates RIV.
26226 * File : rg_sch_utl.c
26228 **********************************************************/
26231 uint32_t rgSCHCmnCalcRiv
26238 uint32_t rgSCHCmnCalcRiv(bw, rbStart, numRb)
26245 uint32_t rgSCHCmnCalcRiv
26252 uint32_t rgSCHCmnCalcRiv(bw, rbStart, numRb)
26259 uint8_t numRbMinus1 = numRb - 1;
26263 if (numRbMinus1 <= bw/2)
26265 riv = bw * numRbMinus1 + rbStart;
26269 riv = bw * (bw - numRbMinus1) + (bw - rbStart - 1);
26272 } /* rgSCHCmnCalcRiv */
26276 * @brief This function allocates and copies the RACH response scheduling
26277 * related information into cell control block.
26281 * Function: rgSCHCmnDlCpyRachInfo
26282 * Purpose: This function allocates and copies the RACH response
26283 * scheduling related information into cell control block
26284 * for each DL subframe.
26287 * Invoked by: Scheduler
26289 * @param[in] RgSchCellCb* cell
26290 * @param[in] RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES]
26291 * @param[in] uint8_t raArrSz
26296 static S16 rgSCHCmnDlCpyRachInfo
26299 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
26303 static S16 rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz)
26305 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES];
26309 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
26312 uint16_t subfrmIdx;
26314 uint8_t numSubfrms;
26319 /* Allocate RACH response information for each DL
26320 * subframe in a radio frame */
26321 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cell->rachRspLst,
26322 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1] *
26323 sizeof(RgSchTddRachRspLst));
26329 for(sfnIdx=raArrSz-1; sfnIdx>=0; sfnIdx--)
26331 for(subfrmIdx=0; subfrmIdx < RGSCH_NUM_SUB_FRAMES; subfrmIdx++)
26333 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
26334 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
26339 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx],subfrmIdx);
26341 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
26343 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddNumDlSubfrmTbl[ulDlCfgIdx],subfrmIdx);
26344 sfNum = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][subfrmIdx]-1;
26345 numRfs = cell->rachRspLst[sfNum].numRadiofrms;
26346 /* For each DL subframe in which RACH response can
26347 * be sent is updated */
26350 cell->rachRspLst[sfNum].rachRsp[numRfs].sfnOffset =
26351 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset;
26352 for(sfcount=0; sfcount < numSubfrms; sfcount++)
26354 cell->rachRspLst[sfNum].rachRsp[numRfs].\
26355 subframe[sfcount] =
26356 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].\
26359 cell->rachRspLst[sfNum].rachRsp[numRfs].numSubfrms =
26360 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
26361 cell->rachRspLst[sfNum].numRadiofrms++;
26364 /* Copy the subframes to be deleted at ths subframe */
26366 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
26369 cell->rachRspLst[sfNum].delInfo.sfnOffset =
26370 rachRspLst[sfnIdx][subfrmIdx].delInfo.sfnOffset;
26371 for(sfcount=0; sfcount < numSubfrms; sfcount++)
26373 cell->rachRspLst[sfNum].delInfo.subframe[sfcount] =
26374 rachRspLst[sfnIdx][subfrmIdx].delInfo.subframe[sfcount];
26376 cell->rachRspLst[sfNum].delInfo.numSubfrms =
26377 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
26385 * @brief This function determines the iTbs based on the new CFI,
26386 * CQI and BLER based delta iTbs
26390 * Function: rgSchCmnFetchItbs
26391 * Purpose: Fetch the new iTbs when CFI changes.
26393 * @param[in] RgSchCellCb *cell
26394 * @param[in] RgSchCmnDlUe *ueDl
26395 * @param[in] uint8_t cqi
26402 static S32 rgSchCmnFetchItbs
26405 RgSchCmnDlUe *ueDl,
26413 static S32 rgSchCmnFetchItbs (cell, ueDl, subFrm, cqi, cfi, cwIdx, noLyr)
26415 RgSchCmnDlUe *ueDl;
26424 static S32 rgSchCmnFetchItbs
26427 RgSchCmnDlUe *ueDl,
26434 static S32 rgSchCmnFetchItbs (cell, ueDl, cqi, cfi, cwIdx, noLyr)
26436 RgSchCmnDlUe *ueDl;
26445 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26450 /* Special Handling for Spl Sf when CFI is 3 as
26451 * CFI in Spl Sf will be max 2 */
26452 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26454 if((cellDl->currCfi == 3) ||
26455 ((cell->bwCfg.dlTotalBw <= 10) && (cellDl->currCfi == 1)))
26457 /* Use CFI 2 in this case */
26458 iTbs = (ueDl->laCb[cwIdx].deltaiTbs +
26459 ((*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][2]))[cqi])* 100)/100;
26461 RG_SCH_CHK_ITBS_RANGE(iTbs, RGSCH_NUM_ITBS - 1);
26465 iTbs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1];
26467 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
26469 else /* CFI Changed. Update with new iTbs Reset the BLER*/
26472 S32 tmpiTbs = (*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][cfi]))[cqi];
26474 iTbs = (ueDl->laCb[cwIdx].deltaiTbs + tmpiTbs*100)/100;
26476 RG_SCH_CHK_ITBS_RANGE(iTbs, tmpiTbs);
26478 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
26480 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1] = iTbs;
26482 ueDl->lastCfi = cfi;
26483 ueDl->laCb[cwIdx].deltaiTbs = 0;
26490 * @brief This function determines the RBs and Bytes required for BO
26491 * transmission for UEs configured with TM 1/2/6/7.
26495 * Function: rgSCHCmnDlAllocTxRb1Tb1Cw
26496 * Purpose: Allocate TB1 on CW1.
26498 * Reference Parameter effBo is filled with alloced bytes.
26499 * Returns RFAILED if BO not satisfied at all.
26501 * Invoked by: rgSCHCmnDlAllocTxRbTM1/2/6/7
26503 * @param[in] RgSchCellCb *cell
26504 * @param[in] RgSchDlSf *subFrm
26505 * @param[in] RgSchUeCb *ue
26506 * @param[in] uint32_t bo
26507 * @param[out] uint32_t *effBo
26508 * @param[in] RgSchDlHqProcCb *proc
26509 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26514 static Void rgSCHCmnDlAllocTxRb1Tb1Cw
26521 RgSchDlHqProcCb *proc,
26522 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26525 static Void rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26531 RgSchDlHqProcCb *proc;
26532 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26535 RgSchDlRbAlloc *allocInfo;
26540 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26542 if (ue->ue5gtfCb.rank == 2)
26544 allocInfo->dciFormat = TFU_DCI_FORMAT_B2;
26548 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
26551 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
26552 allocInfo->raType);
26554 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
26555 bo, &numRb, effBo);
26556 if (ret == RFAILED)
26558 /* If allocation couldn't be made then return */
26561 /* Adding UE to RbAllocInfo TX Lst */
26562 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
26563 /* Fill UE alloc Info */
26564 allocInfo->rbsReq = numRb;
26565 allocInfo->dlSf = subFrm;
26567 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
26575 * @brief This function determines the RBs and Bytes required for BO
26576 * retransmission for UEs configured with TM 1/2/6/7.
26580 * Function: rgSCHCmnDlAllocRetxRb1Tb1Cw
26581 * Purpose: Allocate TB1 on CW1.
26583 * Reference Parameter effBo is filled with alloced bytes.
26584 * Returns RFAILED if BO not satisfied at all.
26586 * Invoked by: rgSCHCmnDlAllocRetxRbTM1/2/6/7
26588 * @param[in] RgSchCellCb *cell
26589 * @param[in] RgSchDlSf *subFrm
26590 * @param[in] RgSchUeCb *ue
26591 * @param[in] uint32_t bo
26592 * @param[out] uint32_t *effBo
26593 * @param[in] RgSchDlHqProcCb *proc
26594 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26599 static Void rgSCHCmnDlAllocRetxRb1Tb1Cw
26606 RgSchDlHqProcCb *proc,
26607 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26610 static Void rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26616 RgSchDlHqProcCb *proc;
26617 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26620 RgSchDlRbAlloc *allocInfo;
26625 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26628 /* 5GTF: RETX DCI format same as TX */
26629 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
26630 &allocInfo->raType);
26633 /* Get the Allocation in terms of RBs that are required for
26634 * this retx of TB1 */
26635 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
26637 if (ret == RFAILED)
26639 /* Allocation couldn't be made for Retx */
26640 /* Fix : syed If TxRetx allocation failed then add the UE along with the proc
26641 * to the nonSchdTxRetxUeLst and let spfc scheduler take care of it during
26643 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
26646 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
26647 /* Fill UE alloc Info */
26648 allocInfo->rbsReq = numRb;
26649 allocInfo->dlSf = subFrm;
26651 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
26659 * @brief This function determines the RBs and Bytes required for BO
26660 * transmission for UEs configured with TM 2.
26664 * Function: rgSCHCmnDlAllocTxRbTM1
26667 * Reference Parameter effBo is filled with alloced bytes.
26668 * Returns RFAILED if BO not satisfied at all.
26670 * Invoked by: rgSCHCmnDlAllocTxRb
26672 * @param[in] RgSchCellCb *cell
26673 * @param[in] RgSchDlSf *subFrm
26674 * @param[in] RgSchUeCb *ue
26675 * @param[in] uint32_t bo
26676 * @param[out] uint32_t *effBo
26677 * @param[in] RgSchDlHqProcCb *proc
26678 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26683 static Void rgSCHCmnDlAllocTxRbTM1
26690 RgSchDlHqProcCb *proc,
26691 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26694 static Void rgSCHCmnDlAllocTxRbTM1(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26700 RgSchDlHqProcCb *proc;
26701 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26704 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26710 * @brief This function determines the RBs and Bytes required for BO
26711 * retransmission for UEs configured with TM 2.
26715 * Function: rgSCHCmnDlAllocRetxRbTM1
26718 * Reference Parameter effBo is filled with alloced bytes.
26719 * Returns RFAILED if BO not satisfied at all.
26721 * Invoked by: rgSCHCmnDlAllocRetxRb
26723 * @param[in] RgSchCellCb *cell
26724 * @param[in] RgSchDlSf *subFrm
26725 * @param[in] RgSchUeCb *ue
26726 * @param[in] uint32_t bo
26727 * @param[out] uint32_t *effBo
26728 * @param[in] RgSchDlHqProcCb *proc
26729 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26734 static Void rgSCHCmnDlAllocRetxRbTM1
26741 RgSchDlHqProcCb *proc,
26742 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26745 static Void rgSCHCmnDlAllocRetxRbTM1(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26751 RgSchDlHqProcCb *proc;
26752 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26755 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26761 * @brief This function determines the RBs and Bytes required for BO
26762 * transmission for UEs configured with TM 2.
26766 * Function: rgSCHCmnDlAllocTxRbTM2
26769 * Reference Parameter effBo is filled with alloced bytes.
26770 * Returns RFAILED if BO not satisfied at all.
26772 * Invoked by: rgSCHCmnDlAllocTxRb
26774 * @param[in] RgSchCellCb *cell
26775 * @param[in] RgSchDlSf *subFrm
26776 * @param[in] RgSchUeCb *ue
26777 * @param[in] uint32_t bo
26778 * @param[out] uint32_t *effBo
26779 * @param[in] RgSchDlHqProcCb *proc
26780 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26785 static Void rgSCHCmnDlAllocTxRbTM2
26792 RgSchDlHqProcCb *proc,
26793 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26796 static Void rgSCHCmnDlAllocTxRbTM2(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26802 RgSchDlHqProcCb *proc;
26803 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26806 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26812 * @brief This function determines the RBs and Bytes required for BO
26813 * retransmission for UEs configured with TM 2.
26817 * Function: rgSCHCmnDlAllocRetxRbTM2
26820 * Reference Parameter effBo is filled with alloced bytes.
26821 * Returns RFAILED if BO not satisfied at all.
26823 * Invoked by: rgSCHCmnDlAllocRetxRb
26825 * @param[in] RgSchCellCb *cell
26826 * @param[in] RgSchDlSf *subFrm
26827 * @param[in] RgSchUeCb *ue
26828 * @param[in] uint32_t bo
26829 * @param[out] uint32_t *effBo
26830 * @param[in] RgSchDlHqProcCb *proc
26831 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26836 static Void rgSCHCmnDlAllocRetxRbTM2
26843 RgSchDlHqProcCb *proc,
26844 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26847 static Void rgSCHCmnDlAllocRetxRbTM2(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26853 RgSchDlHqProcCb *proc;
26854 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26857 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26863 * @brief This function determines the RBs and Bytes required for BO
26864 * transmission for UEs configured with TM 3.
26868 * Function: rgSCHCmnDlAllocTxRbTM3
26871 * Reference Parameter effBo is filled with alloced bytes.
26872 * Returns RFAILED if BO not satisfied at all.
26874 * Invoked by: rgSCHCmnDlAllocTxRb
26876 * @param[in] RgSchCellCb *cell
26877 * @param[in] RgSchDlSf *subFrm
26878 * @param[in] RgSchUeCb *ue
26879 * @param[in] uint32_t bo
26880 * @param[out] uint32_t *effBo
26881 * @param[in] RgSchDlHqProcCb *proc
26882 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26887 static Void rgSCHCmnDlAllocTxRbTM3
26894 RgSchDlHqProcCb *proc,
26895 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26898 static Void rgSCHCmnDlAllocTxRbTM3(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26904 RgSchDlHqProcCb *proc;
26905 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26910 /* Both TBs free for TX allocation */
26911 rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo,\
26912 proc, cellWdAllocInfo);
26919 * @brief This function determines the RBs and Bytes required for BO
26920 * retransmission for UEs configured with TM 3.
26924 * Function: rgSCHCmnDlAllocRetxRbTM3
26927 * Reference Parameter effBo is filled with alloced bytes.
26928 * Returns RFAILED if BO not satisfied at all.
26930 * Invoked by: rgSCHCmnDlAllocRetxRb
26932 * @param[in] RgSchCellCb *cell
26933 * @param[in] RgSchDlSf *subFrm
26934 * @param[in] RgSchUeCb *ue
26935 * @param[in] uint32_t bo
26936 * @param[out] uint32_t *effBo
26937 * @param[in] RgSchDlHqProcCb *proc
26938 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26943 static Void rgSCHCmnDlAllocRetxRbTM3
26950 RgSchDlHqProcCb *proc,
26951 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26954 static Void rgSCHCmnDlAllocRetxRbTM3(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26960 RgSchDlHqProcCb *proc;
26961 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26966 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
26967 (proc->tbInfo[1].state == HQ_TB_NACKED))
26970 printf ("RETX RB TM3 nack for both hqp %d cell %d \n", proc->procId, proc->hqE->cell->cellId);
26972 /* Both TBs require RETX allocation */
26973 rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo,\
26974 proc, cellWdAllocInfo);
26978 /* One of the TBs need RETX allocation. Other TB may/maynot
26979 * be available for new TX allocation. */
26980 rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo,\
26981 proc, cellWdAllocInfo);
26989 * @brief This function performs the DCI format selection in case of
26990 * Transmit Diversity scheme where there can be more
26991 * than 1 option for DCI format selection.
26995 * Function: rgSCHCmnSlctPdcchFrmt
26996 * Purpose: 1. If DLFS is enabled, then choose TM specific
26997 * DCI format for Transmit diversity. All the
26998 * TM Specific DCI Formats support Type0 and/or
26999 * Type1 resource allocation scheme. DLFS
27000 * supports only Type-0&1 Resource allocation.
27001 * 2. If DLFS is not enabled, select a DCI format
27002 * which is of smaller size. Since Non-DLFS
27003 * scheduler supports all Resource allocation
27004 * schemes, selection is based on efficiency.
27006 * Invoked by: DL UE Allocation by Common Scheduler.
27008 * @param[in] RgSchCellCb *cell
27009 * @param[in] RgSchUeCb *ue
27010 * @param[out] uint8_t *raType
27011 * @return TfuDciFormat
27015 TfuDciFormat rgSCHCmnSlctPdcchFrmt
27022 TfuDciFormat rgSCHCmnSlctPdcchFrmt(cell, ue, raType)
27028 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27031 /* ccpu00140894- Selective DCI Format and RA type should be selected only
27032 * after TX Mode transition is completed*/
/* DLFS enabled and TX-mode transition complete: use the TM-specific
 * ("spfc") DCI format / RA type for this transmission mode.
 * NOTE(review): txMode-1 indexing assumes txMode >= 1 (enum is 1-based) —
 * confirm against RGR_UE_TM_* definitions. */
27033 if ((cellSch->dl.isDlFreqSel) && (ue->txModeTransCmplt))
27035 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciRAType;
27036 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciFrmt);
/* Otherwise fall back to the preferred ("prfrd", smaller) DCI format */
27040 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciRAType;
27041 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciFrmt);
27047 * @brief This function handles Retx allocation in case of TM3 UEs
27048 * where both the TBs were NACKED previously.
27052 * Function: rgSCHCmnDlTM3RetxRetx
27053 * Purpose: If forceTD flag enabled
27054 * TD for TB1 on CW1.
27056 * DCI Frmt 2A and RA Type 0
27057 * RI layered SM of both TBs on 2 CWs
27058 * Add UE to cell Alloc Info.
27059 * Fill UE alloc Info.
27062 * Successful allocation is indicated by non-zero effBo value.
27064 * Invoked by: rgSCHCmnDlAllocRbTM3
27066 * @param[in] RgSchCellCb *cell
27067 * @param[in] RgSchDlSf *subFrm
27068 * @param[in] RgSchUeCb *ue
27069 * @param[in] uint32_t bo
27070 * @param[out] uint32_t *effBo
27071 * @param[in] RgSchDlHqProcCb *proc
27072 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27077 static Void rgSCHCmnDlTM3RetxRetx
27084 RgSchDlHqProcCb *proc,
27085 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27088 static Void rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
27094 RgSchDlHqProcCb *proc;
27095 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
27099 RgSchDlRbAlloc *allocInfo;
27104 uint8_t precInfoAntIdx;
27108 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
27110 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
/* Both codewords are retransmitted: DCI format 2A with RA type 0 */
27112 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
27113 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
27115 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
27117 if (ret == RFAILED)
27119 /* Allocation couldn't be made for Retx */
27120 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
27123 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
/* Total layers = layers previously used by each TB */
27124 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
27125 #ifdef FOUR_TX_ANTENNA
27126 /*Chandra: For 4X4 MIMO RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1 should
27127 * have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
27128 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
27131 proc->cwSwpEnabled = TRUE;
/* NOTE(review): numTxAntPorts/2 - 1 underflows for a 1-antenna cell;
 * presumably TM3 implies >= 2 ports — confirm at configuration. */
27134 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27135 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ processes are not re-queued into the cell-wide RETX list */
27139 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
27142 /* Adding UE to allocInfo RETX Lst */
27143 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
27145 /* Fill UE alloc Info scratch pad */
27146 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
27147 precInfo, noTxLyrs, subFrm);
27154 * @brief This function handles Retx allocation in case of TM4 UEs
27155 * where both the TBs were NACKED previously.
27159 * Function: rgSCHCmnDlTM4RetxRetx
27160 * Purpose: If forceTD flag enabled
27161 * TD for TB1 on CW1.
27163 * DCI Frmt 2 and RA Type 0
27165 * 1 layer SM of TB1 on CW1.
27167 * RI layered SM of both TBs on 2 CWs
27168 * Add UE to cell Alloc Info.
27169 * Fill UE alloc Info.
27172 * Successful allocation is indicated by non-zero effBo value.
27174 * Invoked by: rgSCHCmnDlAllocRbTM4
27176 * @param[in] RgSchCellCb *cell
27177 * @param[in] RgSchDlSf *subFrm
27178 * @param[in] RgSchUeCb *ue
27179 * @param[in] uint32_t bo
27180 * @param[out] uint32_t *effBo
27181 * @param[in] RgSchDlHqProcCb *proc
27182 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27187 static Void rgSCHCmnDlTM4RetxRetx
27194 RgSchDlHqProcCb *proc,
27195 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27198 static Void rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
27204 RgSchDlHqProcCb *proc;
27205 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
27209 RgSchDlRbAlloc *allocInfo;
27211 Bool swpFlg = FALSE;
27213 #ifdef FOUR_TX_ANTENNA
27214 uint8_t precInfoAntIdx;
27220 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
27222 /* Irrespective of RI Schedule both CWs */
/* TM4 retransmission of both codewords uses DCI format 2 (cf. 2A in TM3) */
27223 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
27224 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
27226 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
27228 if (ret == RFAILED)
27230 /* Allocation couldn't be made for Retx */
27231 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* Total layers = layers previously used by each TB */
27234 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
27236 #ifdef FOUR_TX_ANTENNA
27237 /*Chandra: For 4X4 MIMO RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1
27238 * should have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
27239 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
27242 proc->cwSwpEnabled = TRUE;
27244 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27245 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ processes are not re-queued into the cell-wide RETX list */
27249 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
27252 /* Adding UE to allocInfo RETX Lst */
27253 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
27255 /* Fill UE alloc Info scratch pad */
27256 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
27257 precInfo, noTxLyrs, subFrm);
27265 * @brief This function determines Transmission attributes
27266 * in case of Spatial multiplexing for TX and RETX TBs.
27270 * Function: rgSCHCmnDlSMGetAttrForTxRetx
27271 * Purpose: 1. Reached here for a TM3/4 UE's HqP whose one of the TBs is
27272 * NACKED and the other TB is either NACKED or WAITING.
27273 * 2. Select the NACKED TB for RETX allocation.
27274 * 3. Allocation preference for RETX TB by mapping it to a better
27275 * CW (better in terms of efficiency).
27276 * 4. Determine the state of the other TB.
27277 * Determine if swapFlag were to be set.
27278 * Swap flag would be set if Retx TB is cross
27280 * 5. If UE has new data available for TX and if the other TB's state
27281 * is ACKED then set furtherScope as TRUE.
27283 * Invoked by: rgSCHCmnDlTM3[4]TxRetx
27285 * @param[in] RgSchUeCb *ue
27286 * @param[in] RgSchDlHqProcCb *proc
27287 * @param[out] RgSchDlHqTbCb **retxTb
27288 * @param[out] RgSchDlHqTbCb **txTb
27289 * @param[out] Bool *frthrScp
27290 * @param[out] Bool *swpFlg
27295 static Void rgSCHCmnDlSMGetAttrForTxRetx
27298 RgSchDlHqProcCb *proc,
27299 RgSchDlHqTbCb **retxTb,
27300 RgSchDlHqTbCb **txTb,
27305 static Void rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, frthrScp,\
27308 RgSchDlHqProcCb *proc;
27309 RgSchDlHqTbCb **retxTb;
27310 RgSchDlHqTbCb **txTb;
27315 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,proc->hqE->cell);
27316 RgSchDlRbAlloc *allocInfo;
/* TB0 was NACKed: retransmit TB0; TB1 is the candidate for new TX */
27319 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27321 *retxTb = &proc->tbInfo[0];
27322 *txTb = &proc->tbInfo[1];
27323 /* TENB_BRDCM_TM4- Currently disabling swapflag for TM3/TM4, since
27324 * HqFeedback processing does not consider a swapped hq feedback */
27325 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 1))
27328 proc->cwSwpEnabled = TRUE;
/* Further scope for new TX only if the other TB is ACKed and fresh data exists */
27330 if (proc->tbInfo[1].state == HQ_TB_ACKED)
27332 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
27333 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
/* Mirror case: TB1 was NACKed; TB0 is the candidate for new TX */
27338 *retxTb = &proc->tbInfo[1];
27339 *txTb = &proc->tbInfo[0];
27340 /* TENB_BRDCM_TM4 - Currently disabling swapflag for TM3/TM4, since
27341 * HqFeedback processing does not consider a swapped hq feedback */
27342 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 0))
27345 proc->cwSwpEnabled = TRUE;
27347 if (proc->tbInfo[0].state == HQ_TB_ACKED)
27349 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
27350 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
27358 * @brief Determine Precoding information for TM3 2 TX Antenna.
27362 * Function: rgSCHCmnDlTM3PrecInf2
* Purpose: Derive the DCI "precoding information" field value for a
* TM3 UE on a 2-antenna-port cell (presumably per the 36.212
* DCI format 2A tables — confirm against the spec).
27365 * Invoked by: rgSCHCmnDlGetAttrForTM3
27367 * @param[in] RgSchUeCb *ue
27368 * @param[in] uint8_t numTxLyrs
27369 * @param[in] Bool bothCwEnbld
* @return uint8_t precoding information field value
27374 static uint8_t rgSCHCmnDlTM3PrecInf2
27382 static uint8_t rgSCHCmnDlTM3PrecInf2(ue, numTxLyrs, bothCwEnbld)
27395 * @brief Determine Precoding information for TM4 2 TX Antenna.
27399 * Function: rgSCHCmnDlTM4PrecInf2
27400 * Purpose: To determine a logic of deriving precoding index
27401 * information from 36.212 table 5.3.3.1.5-4
27403 * Invoked by: rgSCHCmnDlGetAttrForTM4
27405 * @param[in] RgSchUeCb *ue
27406 * @param[in] uint8_t numTxLyrs
27407 * @param[in] Bool bothCwEnbld
* @return uint8_t precoding index into the 36.212 table
27412 static uint8_t rgSCHCmnDlTM4PrecInf2
27420 static uint8_t rgSCHCmnDlTM4PrecInf2(ue, numTxLyrs, bothCwEnbld)
27427 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Case 1: reported rank matches the number of layers being transmitted */
27431 if (ueDl->mimoInfo.ri == numTxLyrs)
27433 if (ueDl->mimoInfo.ri == 2)
27435 /* PrecInfo corresponding to 2 CW
27437 if (ue->mimoInfo.puschFdbkVld)
/* NOTE(review): pmi-1 assumes pmi >= 1 here — confirm PMI range */
27443 precIdx = ueDl->mimoInfo.pmi - 1;
27448 /* PrecInfo corresponding to 1 CW
27450 if (ue->mimoInfo.puschFdbkVld)
27456 precIdx = ueDl->mimoInfo.pmi + 1;
/* Case 2: rank higher than layers used — pick the matrix column with MAX-CQI */
27460 else if (ueDl->mimoInfo.ri > numTxLyrs)
27462 /* In case of choosing among the columns of a
27463 * precoding matrix, choose the column corresponding
27464 * to the MAX-CQI */
27465 if (ue->mimoInfo.puschFdbkVld)
27471 precIdx = (ueDl->mimoInfo.pmi- 1)* 2 + 1;
27474 else /* if RI < numTxLyrs */
27476 precIdx = (ueDl->mimoInfo.pmi < 2)? 0:1;
27483 * @brief Determine Precoding information for TM3 4 TX Antenna.
27487 * Function: rgSCHCmnDlTM3PrecInf4
27488 * Purpose: To determine a logic of deriving precoding index
27489 * information from 36.212 table 5.3.3.1.5A-2
27491 * Invoked by: rgSCHCmnDlGetAttrForTM3
27493 * @param[in] RgSchUeCb *ue
27494 * @param[in] uint8_t numTxLyrs
27495 * @param[in] Bool bothCwEnbld
* @return uint8_t precoding index into the 36.212 table
27500 static uint8_t rgSCHCmnDlTM3PrecInf4
27508 static uint8_t rgSCHCmnDlTM3PrecInf4(ue, numTxLyrs, bothCwEnbld)
/* 2-CW transmission: index derived from layer count (table is 0-based at 2 layers) */
27520 precIdx = numTxLyrs - 2;
27522 else /* one 1 CW transmission */
27531 * @brief Determine Precoding information for TM4 4 TX Antenna.
27535 * Function: rgSCHCmnDlTM4PrecInf4
27536 * Purpose: To determine a logic of deriving precoding index
27537 * information from 36.212 table 5.3.3.1.5-5
27539 * Invoked by: rgSCHCmnDlGetAttrForTM4
27541 * @param[in] RgSchUeCb *ue
27542 * @param[in] uint8_t numTxLyrs
27543 * @param[in] Bool bothCwEnbld
* @return uint8_t precoding index into the 36.212 table
27548 static uint8_t rgSCHCmnDlTM4PrecInf4
27556 static uint8_t rgSCHCmnDlTM4PrecInf4(cell, ue, numTxLyrs, bothCwEnbld)
27563 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27564 uint8_t precInfoBaseIdx, precIdx;
/* Base index: 16 when PUSCH feedback is valid, otherwise the reported PMI */
27567 precInfoBaseIdx = (ue->mimoInfo.puschFdbkVld)? (16):
27568 (ueDl->mimoInfo.pmi);
/* 2-CW transmission: each extra layer beyond 2 shifts by the 17-entry table stride */
27571 precIdx = precInfoBaseIdx + (numTxLyrs-2)*17;
27573 else /* one 1 CW transmission */
27575 precInfoBaseIdx += 1;
27576 precIdx = precInfoBaseIdx + (numTxLyrs-1)*17;
27583 * @brief This function determines Transmission attributes
27584 * in case of TM3 scheduling.
27588 * Function: rgSCHCmnDlGetAttrForTM3
27589 * Purpose: Determine retx TB and tx TB based on TB states.
27590 * If forceTD enabled
27591 * perform only retx TB allocation.
27592 * If retxTB == TB2 then DCI Frmt = 2A, RA Type = 0.
27593 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
27595 * perform retxTB allocation on CW1.
27597 * Determine further Scope and Swap Flag attributes
27598 * assuming a 2 CW transmission of RetxTB and new Tx TB.
27599 * If no further scope for new TX allocation
27600 * Allocate only retx TB using 2 layers if
27601 * this TB was previously transmitted using 2 layers AND
27602 * number of Tx antenna ports == 4.
27603 * otherwise do single layer precoding.
27605 * Invoked by: rgSCHCmnDlTM3TxRetx
27607 * @param[in] RgSchUeCb *ue
27608 * @param[in] RgSchDlHqProcCb *proc
27609 * @param[out] uint8_t *numTxLyrs
27610 * @param[out] Bool *isTraDiv
27611 * @param[out] uint8_t *prcdngInf
27612 * @param[out] uint8_t *raType
27617 static Void rgSCHCmnDlGetAttrForTM3
27621 RgSchDlHqProcCb *proc,
27622 uint8_t *numTxLyrs,
27623 TfuDciFormat *dciFrmt,
27624 uint8_t *prcdngInf,
27625 RgSchDlHqTbCb **retxTb,
27626 RgSchDlHqTbCb **txTb,
27632 static Void rgSCHCmnDlGetAttrForTM3(cell, ue, proc, numTxLyrs, dciFrmt,\
27633 prcdngInf, retxTb, txTb, frthrScp, swpFlg, raType)
27636 RgSchDlHqProcCb *proc;
27637 uint8_t *numTxLyrs;
27638 TfuDciFormat *dciFrmt;
27639 uint8_t *prcdngInf;
27640 RgSchDlHqTbCb **retxTb;
27641 RgSchDlHqTbCb **txTb;
27647 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27648 uint8_t precInfoAntIdx;
27651 /* Avoiding Tx-Retx for LAA cell as firstSchedTime is associated with
27653 /* Integration_fix: SPS Proc shall always have only one Cw */
/* SPS process, forceTD, or LAA SCell: single-CW / transmit-diversity path */
27655 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
27656 (ueDl->mimoInfo.forceTD))
27658 ||(TRUE == rgSCHLaaSCellEnabled(cell))
27662 if ((ueDl->mimoInfo.forceTD)
27664 || (TRUE == rgSCHLaaSCellEnabled(cell))
27669 /* Transmit Diversity. Format based on dlfsEnabled
27670 * No further scope */
27671 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27673 *retxTb = &proc->tbInfo[0];
27674 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* retx TB is TB2: TD not possible via format 1x, force format 2A */
27678 *retxTb = &proc->tbInfo[1];
27679 *dciFrmt = TFU_DCI_FORMAT_2A;
27680 *raType = RG_SCH_CMN_RA_TYPE0;
27688 /* Determine the 2 TB transmission attributes */
27689 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
27693 /* Prefer allocation of RETX TB over 2 layers rather than combining
27694 * it with a new TX. */
27695 if ((ueDl->mimoInfo.ri == 2)
27696 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
27698 /* Allocate TB on CW1, using 2 Lyrs,
27699 * Format 2, precoding accordingly */
/* Layers = retx TB's previous layers + layers of the better CW for the new TX */
27705 *numTxLyrs= ((*retxTb)->numLyrs + ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)].noLyr);
/* 3-layer split must follow 36.211 Table 6.3.3.2-1: CW0=1 layer, CW1=2 layers */
27707 if((*retxTb)->tbIdx == 0 && ((*retxTb)->numLyrs == 2 ) && *numTxLyrs ==3)
27710 proc->cwSwpEnabled = TRUE;
27712 else if((*retxTb)->tbIdx == 1 && ((*retxTb)->numLyrs == 1) && *numTxLyrs ==3)
27715 proc->cwSwpEnabled = TRUE;
27719 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27720 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])\
27721 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
27722 *dciFrmt = TFU_DCI_FORMAT_2A;
27723 *raType = RG_SCH_CMN_RA_TYPE0;
27725 else /* frthrScp == FALSE */
27727 if (cell->numTxAntPorts == 2)
27729 /* Transmit Diversity */
27731 if ((*retxTb)->tbIdx == 0)
27733 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27737 /* If retxTB is TB2 then use format 2A */
27738 *dciFrmt = TFU_DCI_FORMAT_2A;
27739 *raType = RG_SCH_CMN_RA_TYPE0;
27744 else /* NumAntPorts == 4 */
27746 if ((*retxTb)->numLyrs == 2)
27748 /* Allocate TB on CW1, using 2 Lyrs,
27749 * Format 2A, precoding accordingly */
27751 *dciFrmt = TFU_DCI_FORMAT_2A;
27752 *raType = RG_SCH_CMN_RA_TYPE0;
27753 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27754 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, *numTxLyrs, *frthrScp);
27759 /* Transmit Diversity */
27761 if ((*retxTb)->tbIdx == 0)
27763 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27767 /* If retxTB is TB2 then use format 2A */
27768 *dciFrmt = TFU_DCI_FORMAT_2A;
27769 *raType = RG_SCH_CMN_RA_TYPE0;
27783 * @brief This function determines Transmission attributes
27784 * in case of TM4 scheduling.
27788 * Function: rgSCHCmnDlGetAttrForTM4
27789 * Purpose: Determine retx TB and tx TB based on TB states.
27790 * If forceTD enabled
27791 * perform only retx TB allocation.
27792 * If retxTB == TB2 then DCI Frmt = 2, RA Type = 0.
27793 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
27795 * perform retxTB allocation on CW1.
27797 * Determine further Scope and Swap Flag attributes
27798 * assuming a 2 CW transmission of RetxTB and new Tx TB.
27799 * If no further scope for new TX allocation
27800 * Allocate only retx TB using 2 layers if
27801 * this TB was previously transmitted using 2 layers AND
27802 * number of Tx antenna ports == 4.
27803 * otherwise do single layer precoding.
27805 * Invoked by: rgSCHCmnDlTM4TxRetx
27807 * @param[in] RgSchUeCb *ue
27808 * @param[in] RgSchDlHqProcCb *proc
27809 * @param[out] uint8_t *numTxLyrs
27810 * @param[out] Bool *isTraDiv
27811 * @param[out] uint8_t *prcdngInf
27812 * @param[out] uint8_t *raType
27817 static Void rgSCHCmnDlGetAttrForTM4
27821 RgSchDlHqProcCb *proc,
27822 uint8_t *numTxLyrs,
27823 TfuDciFormat *dciFrmt,
27824 uint8_t *prcdngInf,
27825 RgSchDlHqTbCb **retxTb,
27826 RgSchDlHqTbCb **txTb,
27832 static Void rgSCHCmnDlGetAttrForTM4(cell, ue, proc, numTxLyrs, dciFrmt,\
27833 prcdngInf, retxTb, txTb, frthrScp, swpFlg, raType)
27836 RgSchDlHqProcCb *proc;
27837 uint8_t *numTxLyrs;
27838 TfuDciFormat *dciFrmt;
27839 uint8_t *prcdngInf;
27840 RgSchDlHqTbCb **retxTb;
27841 RgSchDlHqTbCb **txTb;
27847 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27848 uint8_t precInfoAntIdx;
27852 /* Integration_fix: SPS Proc shall always have only one Cw */
/* SPS process, forceTD, or LAA SCell: single-CW / transmit-diversity path */
27854 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
27855 (ueDl->mimoInfo.forceTD))
27857 ||(TRUE == rgSCHLaaSCellEnabled(cell))
27861 if ((ueDl->mimoInfo.forceTD)
27863 || (TRUE == rgSCHLaaSCellEnabled(cell))
27868 /* Transmit Diversity. Format based on dlfsEnabled
27869 * No further scope */
27870 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27872 *retxTb = &proc->tbInfo[0];
27873 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* retx TB is TB2: force format 2 (TM4) */
27877 *retxTb = &proc->tbInfo[1];
27878 *dciFrmt = TFU_DCI_FORMAT_2;
27879 *raType = RG_SCH_CMN_RA_TYPE0;
/* Rank-1 UE: single-layer SM with format 2, no further TX scope */
27887 if (ueDl->mimoInfo.ri == 1)
27889 /* single layer precoding. Format 2.
27890 * No further scope */
27891 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27893 *retxTb = &proc->tbInfo[0];
27897 *retxTb = &proc->tbInfo[1];
27900 *dciFrmt = TFU_DCI_FORMAT_2;
27901 *raType = RG_SCH_CMN_RA_TYPE0;
27903 *prcdngInf = 0; /*When RI= 1*/
27907 /* Determine the 2 TB transmission attributes */
27908 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
27910 *dciFrmt = TFU_DCI_FORMAT_2;
27911 *raType = RG_SCH_CMN_RA_TYPE0;
27914 /* Prefer allocation of RETX TB over 2 layers rather than combining
27915 * it with a new TX. */
27916 if ((ueDl->mimoInfo.ri == 2)
27917 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
27919 /* Allocate TB on CW1, using 2 Lyrs,
27920 * Format 2, precoding accordingly */
27924 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27925 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])
27926 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
27928 else /* frthrScp == FALSE */
27930 if (cell->numTxAntPorts == 2)
27932 /* single layer precoding. Format 2. */
27934 *prcdngInf = (getPrecInfoFunc[1][cell->numTxAntPorts/2 - 1])\
27935 (cell, ue, *numTxLyrs, *frthrScp);
27938 else /* NumAntPorts == 4 */
27940 if ((*retxTb)->numLyrs == 2)
27942 /* Allocate TB on CW1, using 2 Lyrs,
27943 * Format 2, precoding accordingly */
27945 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27946 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
27947 (cell, ue, *numTxLyrs, *frthrScp);
27952 /* Allocate TB with 1 lyr precoding,
27953 * Format 2, precoding info accordingly */
27955 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27956 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
27957 (cell, ue, *numTxLyrs, *frthrScp);
27968 * @brief This function handles Retx allocation in case of TM3 UEs
27969 * where previously one of the TBs was NACKED and the other
27970 * TB is either ACKED/WAITING.
27974 * Function: rgSCHCmnDlTM3TxRetx
27975 * Purpose: Determine the TX attributes for TM3 TxRetx Allocation.
27976 * If further Scope for New Tx Allocation on other TB
27977 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
27978 * Add UE to cell wide RetxTx List.
27980 * Perform only RETX alloc'n on CW1.
27981 * Add UE to cell wide Retx List.
27983 * effBo is set to a non-zero value if allocation is
27986 * Invoked by: rgSCHCmnDlAllocRbTM3
27988 * @param[in] RgSchCellCb *cell
27989 * @param[in] RgSchDlSf *subFrm
27990 * @param[in] RgSchUeCb *ue
27991 * @param[in] uint32_t bo
27992 * @param[out] uint32_t *effBo
27993 * @param[in] RgSchDlHqProcCb *proc
27994 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27999 static Void rgSCHCmnDlTM3TxRetx
28006 RgSchDlHqProcCb *proc,
28007 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28010 static Void rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28016 RgSchDlHqProcCb *proc;
28017 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28021 RgSchDlRbAlloc *allocInfo;
28023 RgSchDlHqTbCb *retxTb, *txTb;
28032 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28035 /* Determine the transmission attributes */
28036 rgSCHCmnDlGetAttrForTM3(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
28037 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
28038 &allocInfo->raType);
/* NOTE(review): raw printf in the scheduling path; file uses RLOG elsewhere */
28043 printf ("TX RETX called from proc %d cell %d \n",proc->procId, cell->cellId);
/* Further scope: retransmit on one CW and transmit fresh data on the other */
28045 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
28047 if (ret == RFAILED)
28049 /* Allocation couldn't be made for Retx */
28050 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28053 /* Adding UE to RbAllocInfo RETX-TX Lst */
28054 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: allocate only the retx TB on a single CW */
28058 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
28059 numTxLyrs, &numRb, effBo);
28060 if (ret == RFAILED)
28062 /* Allocation couldn't be made for Retx */
28063 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28067 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28070 /* Adding UE to allocInfo RETX Lst */
28071 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
28074 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
28075 prcdngInf, numTxLyrs, subFrm);
28082 * @brief This function handles Retx allocation in case of TM4 UEs
28083 * where previously one of the TBs was NACKED and the other
28084 * TB is either ACKED/WAITING.
28088 * Function: rgSCHCmnDlTM4TxRetx
28089 * Purpose: Determine the TX attributes for TM4 TxRetx Allocation.
28090 * If further Scope for New Tx Allocation on other TB
28091 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
28092 * Add UE to cell wide RetxTx List.
28094 * Perform only RETX alloc'n on CW1.
28095 * Add UE to cell wide Retx List.
28097 * effBo is set to a non-zero value if allocation is
28100 * Invoked by: rgSCHCmnDlAllocRbTM4
28102 * @param[in] RgSchCellCb *cell
28103 * @param[in] RgSchDlSf *subFrm
28104 * @param[in] RgSchUeCb *ue
28105 * @param[in] uint32_t bo
28106 * @param[out] uint32_t *effBo
28107 * @param[in] RgSchDlHqProcCb *proc
28108 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28113 static Void rgSCHCmnDlTM4TxRetx
28120 RgSchDlHqProcCb *proc,
28121 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28124 static Void rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28130 RgSchDlHqProcCb *proc;
28131 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28135 RgSchDlRbAlloc *allocInfo;
28137 RgSchDlHqTbCb *retxTb, *txTb;
28145 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28148 /* Determine the transmission attributes */
28149 rgSCHCmnDlGetAttrForTM4(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
28150 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
28151 &allocInfo->raType);
/* Further scope: retransmit on one CW and transmit fresh data on the other */
28155 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
28157 if (ret == RFAILED)
28159 /* Fix : syed If TxRetx allocation failed then add the UE along
28160 * with the proc to the nonSchdTxRetxUeLst and let spfc scheduler
28161 * take care of it during finalization. */
28162 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28165 /* Adding UE to RbAllocInfo RETX-TX Lst */
28166 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: allocate only the retx TB on a single CW */
28170 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
28171 numTxLyrs, &numRb, effBo);
28172 if (ret == RFAILED)
28174 /* Allocation couldn't be made for Retx */
28175 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28179 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28182 /* Adding UE to allocInfo RETX Lst */
28183 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
/* NOTE(review): no trailing ';' here, unlike the TM3 twin above — harmless
 * only if the macro expansion is self-terminating; verify the macro. */
28186 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
28187 prcdngInf, numTxLyrs, subFrm)
28194 * @brief This function handles fresh TX allocation in case of TM3 UEs
28195 * where previously both the TBs were ACKED (both free for new TX)
28200 * Function: rgSCHCmnDlTM3TxTx
28201 * Purpose: Reached here for a TM3 UE's HqP's fresh allocation
28202 * where both the TBs are free for TX scheduling.
28203 * If forceTD flag is set
28204 * perform TD on CW1 with TB1.
28209 * RI layered precoding 2 TB on 2 CW.
28210 * Set precoding info.
28211 * Add UE to cellAllocInfo.
28212 * Fill ueAllocInfo.
28214 * effBo is set to a non-zero value if allocation is
28217 * Invoked by: rgSCHCmnDlAllocRbTM3
28219 * @param[in] RgSchCellCb *cell
28220 * @param[in] RgSchDlSf *subFrm
28221 * @param[in] RgSchUeCb *ue
28222 * @param[in] uint32_t bo
28223 * @param[out] uint32_t *effBo
28224 * @param[in] RgSchDlHqProcCb *proc
28225 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28230 static Void rgSCHCmnDlTM3TxTx
28237 RgSchDlHqProcCb *proc,
28238 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28241 static Void rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28247 RgSchDlHqProcCb *proc;
28248 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28251 RgSchCmnDlUe *ueDl;
28252 RgSchDlRbAlloc *allocInfo;
28257 uint8_t precInfoAntIdx;
28261 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28262 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28264 /* Integration_fix: SPS Proc shall always have only one Cw */
28266 #ifdef FOUR_TX_ANTENNA
28267 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28268 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
28270 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28271 (ueDl->mimoInfo.forceTD))
28274 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
/* TD path: single-CW allocation of TB0; DCI format chosen per DLFS setting */
28277 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
28278 &allocInfo->raType);
28279 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28280 bo, &numRb, effBo);
28281 if (ret == RFAILED)
28283 /* If allocation couldn't be made then return */
28287 precInfo = 0; /* TD */
28289 else /* Precoding */
28291 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
28292 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
28294 /* Spatial Multiplexing using 2 CWs */
28295 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
28296 if (ret == RFAILED)
28298 /* If allocation couldn't be made then return */
/* Layers follow the UE's reported rank indicator */
28301 noTxLyrs = ueDl->mimoInfo.ri;
28302 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
28303 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, getPrecInfoFunc[0], precInfoAntIdx);
28304 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
28308 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28311 /* Adding UE to RbAllocInfo TX Lst */
28312 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28314 /* Fill UE allocInfo scratch pad */
28315 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
28316 precInfo, noTxLyrs, subFrm);
28323 * @brief This function handles fresh TX allocation in case of TM4 UEs
28324 * where previously both the TBs were ACKED (both free for new TX)
28329 * Function: rgSCHCmnDlTM4TxTx
28330 * Purpose: Reached here for a TM4 UE's HqP's fresh allocation
28331 * where both the TBs are free for TX scheduling.
28332 * If forceTD flag is set
28333 * perform TD on CW1 with TB1.
28339 * Single layer precoding of TB1 on CW1.
28340 * Set precoding info.
28342 * RI layered precoding 2 TB on 2 CW.
28343 * Set precoding info.
28344 * Add UE to cellAllocInfo.
28345 * Fill ueAllocInfo.
28347 * effBo is set to a non-zero value if allocation is
28350 * Invoked by: rgSCHCmnDlAllocRbTM4
28352 * @param[in] RgSchCellCb *cell
28353 * @param[in] RgSchDlSf *subFrm
28354 * @param[in] RgSchUeCb *ue
28355 * @param[in] uint32_t bo
28356 * @param[out] uint32_t *effBo
28357 * @param[in] RgSchDlHqProcCb *proc
28358 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28363 static Void rgSCHCmnDlTM4TxTx
28370 RgSchDlHqProcCb *proc,
28371 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28374 static Void rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28380 RgSchDlHqProcCb *proc;
28381 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28384 RgSchCmnDlUe *ueDl;
28385 RgSchDlRbAlloc *allocInfo;
28389 uint8_t precInfoAntIdx;
28394 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28395 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28397 /* Integration_fix: SPS Proc shall always have only one Cw */
28399 #ifdef FOUR_TX_ANTENNA
28400 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28401 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
28403 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28404 (ueDl->mimoInfo.forceTD))
28407 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
/* TD path: single-CW allocation of TB0; DCI format chosen per DLFS setting */
28410 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
28411 &allocInfo->raType);
28413 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28414 bo, &numRb, effBo);
28415 if (ret == RFAILED)
28417 /* If allocation couldn't be made then return */
28421 precInfo = 0; /* TD */
28423 else /* Precoding */
28425 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
28426 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
/* Rank 1: single-layer spatial multiplexing of TB0 only */
28428 if (ueDl->mimoInfo.ri == 1)
28430 /* Single Layer SM using FORMAT 2 */
28431 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28432 bo, &numRb, effBo);
28433 if (ret == RFAILED)
28435 /* If allocation couldn't be made then return */
28439 precInfo = 0; /* PrecInfo as 0 for RI=1*/
/* Rank >= 2: two-CW spatial multiplexing, layers follow reported RI */
28443 /* Spatial Multiplexing using 2 CWs */
28444 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
28445 if (ret == RFAILED)
28447 /* If allocation couldn't be made then return */
28450 noTxLyrs = ueDl->mimoInfo.ri;
28451 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
28452 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
28458 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28461 /* Adding UE to RbAllocInfo TX Lst */
28462 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28465 /* Fill UE allocInfo scratch pad */
28466 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
28467 precInfo, noTxLyrs, subFrm);
28474 * @brief This function determines the RBs and Bytes required for BO
28475 * transmission for UEs configured with TM 4.
28479 * Function: rgSCHCmnDlAllocTxRbTM4
28480 * Purpose: Invokes the functionality particular to the
28481 * current state of the TBs of the "proc".
28483 * Reference Parameter effBo is filled with alloced bytes.
28484 * Returns RFAILED if BO not satisfied at all.
28486 * Invoked by: rgSCHCmnDlAllocTxRb
28488 * @param[in] RgSchCellCb *cell
28489 * @param[in] RgSchDlSf *subFrm
28490 * @param[in] RgSchUeCb *ue
28491 * @param[in] uint32_t bo
28492 * @param[out] uint32_t *effBo
28493 * @param[in] RgSchDlHqProcCb *proc
28494 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Thin wrapper: fresh TX for TM4 always means both TBs are free,
 * so delegate directly to the TxTx handler. */
28499 static Void rgSCHCmnDlAllocTxRbTM4
28506 RgSchDlHqProcCb *proc,
28507 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28510 static Void rgSCHCmnDlAllocTxRbTM4(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28516 RgSchDlHqProcCb *proc;
28517 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28521 /* Both TBs free for TX allocation */
28522 rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo,\
28523 proc, cellWdAllocInfo);
28530 * @brief This function determines the RBs and Bytes required for BO
28531 * retransmission for UEs configured with TM 4.
28535 * Function: rgSCHCmnDlAllocRetxRbTM4
28536 * Purpose: Invokes the functionality particular to the
28537 * current state of the TBs of the "proc".
28539 * Reference Parameter effBo is filled with alloced bytes.
28540 * Returns RFAILED if BO not satisfied at all.
28542 * Invoked by: rgSCHCmnDlAllocRetxRb
28544 * @param[in] RgSchCellCb *cell
28545 * @param[in] RgSchDlSf *subFrm
28546 * @param[in] RgSchUeCb *ue
28547 * @param[in] uint32_t bo
28548 * @param[out] uint32_t *effBo
28549 * @param[in] RgSchDlHqProcCb *proc
28550 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Dispatcher for TM4 HARQ retransmission allocation: routes to the
 * both-TBs-NACKed path (RetxRetx) or the one-TB-NACKed path (TxRetx). */
28555 static Void rgSCHCmnDlAllocRetxRbTM4
28562 RgSchDlHqProcCb *proc,
28563 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28566 static Void rgSCHCmnDlAllocRetxRbTM4(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28572 RgSchDlHqProcCb *proc;
28573 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* Both transport blocks of this HARQ process were NACKed */
28577 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
28578 (proc->tbInfo[1].state == HQ_TB_NACKED))
28580 /* Both TBs require RETX allocation */
28581 rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo,\
28582 proc, cellWdAllocInfo);
28586 /* One of the TBs need RETX allocation. Other TB may/may not
28587 * be available for new TX allocation. */
28588 rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo,\
28589 proc, cellWdAllocInfo);
28598 * @brief This function determines the RBs and Bytes required for BO
28599 * transmission for UEs configured with TM 5.
28603 * Function: rgSCHCmnDlAllocTxRbTM5
28606 * Reference Parameter effBo is filled with alloced bytes.
28607 * Returns RFAILED if BO not satisfied at all.
28609 * Invoked by: rgSCHCmnDlAllocTxRb
28611 * @param[in] RgSchCellCb *cell
28612 * @param[in] RgSchDlSf *subFrm
28613 * @param[in] RgSchUeCb *ue
28614 * @param[in] uint32_t bo
28615 * @param[out] uint32_t *effBo
28616 * @param[in] RgSchDlHqProcCb *proc
28617 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28622 static Void rgSCHCmnDlAllocTxRbTM5
28629 RgSchDlHqProcCb *proc,
28630 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28633 static Void rgSCHCmnDlAllocTxRbTM5(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28639 RgSchDlHqProcCb *proc;
28640 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM5 is treated as invalid by this scheduler: no allocation is made and an
 * error is logged in debug builds only (ERRCLS_DEBUG). */
28643 #if (ERRCLASS & ERRCLS_DEBUG)
28644 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
28651 * @brief This function determines the RBs and Bytes required for BO
28652 * retransmission for UEs configured with TM 5.
28656 * Function: rgSCHCmnDlAllocRetxRbTM5
28659 * Reference Parameter effBo is filled with alloced bytes.
28660 * Returns RFAILED if BO not satisfied at all.
28662 * Invoked by: rgSCHCmnDlAllocRetxRb
28664 * @param[in] RgSchCellCb *cell
28665 * @param[in] RgSchDlSf *subFrm
28666 * @param[in] RgSchUeCb *ue
28667 * @param[in] uint32_t bo
28668 * @param[out] uint32_t *effBo
28669 * @param[in] RgSchDlHqProcCb *proc
28670 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28675 static Void rgSCHCmnDlAllocRetxRbTM5
28682 RgSchDlHqProcCb *proc,
28683 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28686 static Void rgSCHCmnDlAllocRetxRbTM5(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28692 RgSchDlHqProcCb *proc;
28693 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* As for the TX path, TM5 is unsupported: log (debug builds) and allocate
 * nothing. */
28696 #if (ERRCLASS & ERRCLS_DEBUG)
28697 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
28705 * @brief This function determines the RBs and Bytes required for BO
28706 * transmission for UEs configured with TM 6.
28710 * Function: rgSCHCmnDlAllocTxRbTM6
28713 * Reference Parameter effBo is filled with alloced bytes.
28714 * Returns RFAILED if BO not satisfied at all.
28716 * Invoked by: rgSCHCmnDlAllocTxRb
28718 * @param[in] RgSchCellCb *cell
28719 * @param[in] RgSchDlSf *subFrm
28720 * @param[in] RgSchUeCb *ue
28721 * @param[in] uint32_t bo
28722 * @param[out] uint32_t *effBo
28723 * @param[in] RgSchDlHqProcCb *proc
28724 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28729 static Void rgSCHCmnDlAllocTxRbTM6
28736 RgSchDlHqProcCb *proc,
28737 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28740 static Void rgSCHCmnDlAllocTxRbTM6(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28746 RgSchDlHqProcCb *proc;
28747 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28750 RgSchDlRbAlloc *allocInfo;
28751 RgSchCmnDlUe *ueDl;
28757 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28758 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* forceTD set => fall back to DCI format 1A (presumably a transmit-diversity
 * fallback — TODO confirm); otherwise DCI format 1B with precoding info. */
28760 if (ueDl->mimoInfo.forceTD)
28762 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
28763 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28767 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
28768 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28769 /* Fill precoding information for FORMAT 1B */
28770 /* First 4 least significant bits to indicate PMI.
28771 * 4th most significant corresponds to pmi Confirmation.
28773 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
28774 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
/* TM6 is single-codeword: allocate only TB0. */
28776 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28777 bo, &numRb, effBo);
28778 if (ret == RFAILED)
28780 /* If allocation couldn't be made then return */
/* SPS HARQ procs are tracked separately, so only non-SPS procs are queued on
 * the cell-wide TX list here. */
28785 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28788 /* Adding UE to RbAllocInfo TX Lst */
28789 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28791 /* Fill UE alloc Info */
28792 allocInfo->rbsReq = numRb;
28793 allocInfo->dlSf = subFrm;
28799 * @brief This function determines the RBs and Bytes required for BO
28800 * retransmission for UEs configured with TM 6.
28804 * Function: rgSCHCmnDlAllocRetxRbTM6
28807 * Reference Parameter effBo is filled with alloced bytes.
28808 * Returns RFAILED if BO not satisfied at all.
28810 * Invoked by: rgSCHCmnDlAllocRetxRb
28812 * @param[in] RgSchCellCb *cell
28813 * @param[in] RgSchDlSf *subFrm
28814 * @param[in] RgSchUeCb *ue
28815 * @param[in] uint32_t bo
28816 * @param[out] uint32_t *effBo
28817 * @param[in] RgSchDlHqProcCb *proc
28818 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28823 static Void rgSCHCmnDlAllocRetxRbTM6
28830 RgSchDlHqProcCb *proc,
28831 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28834 static Void rgSCHCmnDlAllocRetxRbTM6(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28840 RgSchDlHqProcCb *proc;
28841 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28844 RgSchDlRbAlloc *allocInfo;
28845 RgSchCmnDlUe *ueDl;
28851 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28852 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* DCI format selection mirrors the TX path: 1A when forceTD is set,
 * otherwise 1B with packed PMI / PMI-confirmation bits. */
28854 if (ueDl->mimoInfo.forceTD)
28856 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
28857 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28861 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
28862 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28863 /* Fill precoding information for FORMAT 1B */
28864 /* First 4 least significant bits to indicate PMI.
28865 * 4th most significant corresponds to pmi Confirmation.
28867 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
28868 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
28871 /* Get the Allocation in terms of RBs that are required for
28872 * this retx of TB1 */
28873 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
28875 if (ret == RFAILED)
/* On failure the proc is parked on the non-scheduled RETX list so it can be
 * retried in a later subframe. */
28877 /* Allocation couldn't be made for Retx */
28878 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28881 /* Adding UE to allocInfo RETX Lst */
28882 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
28883 /* Fill UE alloc Info */
28884 allocInfo->rbsReq = numRb;
28885 allocInfo->dlSf = subFrm;
28891 * @brief This function determines the RBs and Bytes required for BO
28892 * transmission for UEs configured with TM 7.
28896 * Function: rgSCHCmnDlAllocTxRbTM7
28899 * Reference Parameter effBo is filled with alloced bytes.
28900 * Returns RFAILED if BO not satisfied at all.
28902 * Invoked by: rgSCHCmnDlAllocTxRb
28904 * @param[in] RgSchCellCb *cell
28905 * @param[in] RgSchDlSf *subFrm
28906 * @param[in] RgSchUeCb *ue
28907 * @param[in] uint32_t bo
28908 * @param[out] uint32_t *effBo
28909 * @param[in] RgSchDlHqProcCb *proc
28910 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28915 static Void rgSCHCmnDlAllocTxRbTM7
28922 RgSchDlHqProcCb *proc,
28923 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28926 static Void rgSCHCmnDlAllocTxRbTM7(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28932 RgSchDlHqProcCb *proc;
28933 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM7 TX is a thin wrapper over the generic 1-TB/1-CW allocation routine. */
28936 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
28942 * @brief This function determines the RBs and Bytes required for BO
28943 * retransmission for UEs configured with TM 7.
28947 * Function: rgSCHCmnDlAllocRetxRbTM7
28950 * Reference Parameter effBo is filled with alloced bytes.
28951 * Returns RFAILED if BO not satisfied at all.
28953 * Invoked by: rgSCHCmnDlAllocRetxRb
28955 * @param[in] RgSchCellCb *cell
28956 * @param[in] RgSchDlSf *subFrm
28957 * @param[in] RgSchUeCb *ue
28958 * @param[in] uint32_t bo
28959 * @param[out] uint32_t *effBo
28960 * @param[in] RgSchDlHqProcCb *proc
28961 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28966 static Void rgSCHCmnDlAllocRetxRbTM7
28973 RgSchDlHqProcCb *proc,
28974 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28977 static Void rgSCHCmnDlAllocRetxRbTM7(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28983 RgSchDlHqProcCb *proc;
28984 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM7 RETX is a thin wrapper over the generic 1-TB/1-CW RETX routine. */
28987 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
28993 * @brief This function invokes the TM specific DL TX RB Allocation routine.
28997 * Function: rgSCHCmnDlAllocTxRb
28998 * Purpose: This function invokes the TM specific
28999 * DL TX RB Allocation routine.
29001 * Invoked by: Specific Schedulers
29003 * @param[in] RgSchCellCb *cell
29004 * @param[in] RgSchDlSf *subFrm
29005 * @param[in] RgSchUeCb *ue
29006 * @param[in] uint32_t bo
29007 * @param[out] uint32_t *effBo
29008 * @param[in] RgSchDlHqProcCb *proc
29009 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29014 S16 rgSCHCmnDlAllocTxRb
29021 RgSchDlHqProcCb *proc,
29022 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29025 S16 rgSCHCmnDlAllocTxRb(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
29031 RgSchDlHqProcCb *proc;
29032 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
29035 uint32_t newSchBits = 0;
29036 uint32_t prevSchBits = 0;
29037 RgSchDlRbAlloc *allocInfo;
/* New TTI for this UE (last scheduling time differs from the cell's current
 * time): reset the per-TTI aggregate of scheduled TB bits. */
29040 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
29042 ue->dl.aggTbBits = 0;
29046 /* Calculate totals bits previously allocated */
29047 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29048 if (allocInfo->tbInfo[0].schdlngForTb)
29050 prevSchBits += allocInfo->tbInfo[0].bytesReq;
29052 if (allocInfo->tbInfo[1].schdlngForTb)
29054 prevSchBits += allocInfo->tbInfo[1].bytesReq;
/* Dispatch through the per-transmission-mode function table; txMode is
 * 1-based, hence the -1 index. */
29057 /* Call TM specific RB allocation routine */
29058 (dlAllocTxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
29059 proc, cellWdAllocInfo);
29063 /* Calculate totals bits newly allocated */
29064 if (allocInfo->tbInfo[0].schdlngForTb)
29066 newSchBits += allocInfo->tbInfo[0].bytesReq;
29068 if (allocInfo->tbInfo[1].schdlngForTb)
29070 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* Only the delta over the previous allocation is accumulated; *8 converts
 * the byte counts (bytesReq) into bits for aggTbBits. */
29072 if (newSchBits > prevSchBits)
29074 ue->dl.aggTbBits += ((newSchBits - prevSchBits) * 8);
29075 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
29082 /* DwPTS Scheduling Changes Start */
29085 * @brief Retransmit decision for TDD. Retx is avoided in below cases
29086 * 1) DL Sf -> Spl Sf
29087 * 2) DL SF -> DL SF 0
29091 * Function: rgSCHCmnRetxAvoidTdd
29092 * Purpose: Avoid allocating RETX for cases 1, 2
29094 * Invoked by: rgSCHCmnRetxAvoidTdd
29096 * @param[in] RgSchDlSf *curSf
29097 * @param[in] RgSchCellCb *cell
29098 * @param[in] RgSchDlHqProcCb *proc
29103 Bool rgSCHCmnRetxAvoidTdd
29107 RgSchDlHqProcCb *proc
29110 Bool rgSCHCmnRetxAvoidTdd(curSf, cell, proc)
29113 RgSchDlHqProcCb *proc;
29116 RgSchTddSfType txSfType = 0;
29119 /* Get the RBs of TB that will be retransmitted */
29120 if (proc->tbInfo[0].state == HQ_TB_NACKED)
29122 txSfType = proc->tbInfo[0].sfType;
29124 #ifdef XEON_SPECIFIC_CHANGES
29125 #ifndef XEON_TDD_SPCL
29126 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
29127 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
29134 if (proc->tbInfo[1].state == HQ_TB_NACKED)
29136 /* Select the TxSf with the highest num of possible REs
29137 * In ascending order -> 1) SPL SF 2) DL_SF_0 3) DL_SF */
29138 txSfType = RGSCH_MAX(txSfType, proc->tbInfo[1].sfType);
29140 #ifdef XEON_SPECIFIC_CHANGES
29141 #ifndef XEON_TDD_SPCL
29142 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
29143 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
/* RETX is avoided when the original TX subframe type offers more REs than the
 * current subframe (txSfType > curSf->sfType), i.e. the RETX would not fit. */
29151 if (txSfType > curSf->sfType)
29165 * @brief Avoid allocating RETX incase of collision
29166 * with reserved resources for BCH/PSS/SSS occassions.
29170 * Function: rgSCHCmnRetxAllocAvoid
29171 * Purpose: Avoid allocating RETX incase of collision
29172 * with reserved resources for BCH/PSS/SSS occassions
29174 * Invoked by: rgSCHCmnDlAllocRetxRb
29176 * @param[in] RgSchDlSf *subFrm
29177 * @param[in] RgSchUeCb *ue
29178 * @param[in] RgSchDlHqProcCb *proc
29183 Bool rgSCHCmnRetxAllocAvoid
29187 RgSchDlHqProcCb *proc
29190 Bool rgSCHCmnRetxAllocAvoid(subFrm, cell, proc)
29193 RgSchDlHqProcCb *proc;
29199 if (proc->tbInfo[0].state == HQ_TB_NACKED)
29201 reqRbs = proc->tbInfo[0].dlGrnt.numRb;
29205 reqRbs = proc->tbInfo[1].dlGrnt.numRb;
29207 /* Consider the dlGrnt.numRb of the Retransmitting proc->tbInfo
29208 * and current available RBs to determine if this RETX TB
29209 * will collide with the BCH/PSS/SSS occassion */
29210 if (subFrm->sfNum % 5 == 0)
29212 if ((subFrm->bwAssigned < cell->pbchRbEnd) &&
29213 (((subFrm->bwAssigned + reqRbs) - cell->pbchRbStart) > 0))
29225 * @brief This function invokes the TM specific DL RETX RB Allocation routine.
29229 * Function: rgSCHCmnDlAllocRetxRb
29230 * Purpose: This function invokes the TM specific
29231 * DL RETX RB Allocation routine.
29233 * Invoked by: Specific Schedulers
29235 * @param[in] RgSchCellCb *cell
29236 * @param[in] RgSchDlSf *subFrm
29237 * @param[in] RgSchUeCb *ue
29238 * @param[in] uint32_t bo
29239 * @param[out] uint32_t *effBo
29240 * @param[in] RgSchDlHqProcCb *proc
29241 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29246 S16 rgSCHCmnDlAllocRetxRb
29253 RgSchDlHqProcCb *proc,
29254 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29257 S16 rgSCHCmnDlAllocRetxRb(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
29263 RgSchDlHqProcCb *proc;
29264 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
29267 uint32_t newSchBits = 0;
29268 RgSchDlRbAlloc *allocInfo;
/* New TTI for this UE: reset the per-TTI aggregate of scheduled TB bits. */
29271 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
29273 ue->dl.aggTbBits = 0;
29277 /* Check for DL BW exhaustion */
29278 if (subFrm->bw <= subFrm->bwAssigned)
/* Dispatch through the per-transmission-mode RETX function table (1-based
 * txMode, hence the -1 index). */
29282 /* Call TM specific RB allocation routine */
29283 (dlAllocRetxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
29284 proc, cellWdAllocInfo);
29288 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29289 /* Calculate totals bits newly allocated */
29290 if (allocInfo->tbInfo[0].schdlngForTb)
29292 newSchBits += allocInfo->tbInfo[0].bytesReq;
29294 if (allocInfo->tbInfo[1].schdlngForTb)
29296 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* Unlike the TX path, the full new allocation (bytes * 8 -> bits) is added
 * here without comparing against a previous allocation. */
29298 ue->dl.aggTbBits += (newSchBits * 8);
29299 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
29307 * @brief This function determines the RBs and Bytes required for
29308 * Transmission on 1 CW.
29312 * Function: rgSCHCmnDlAlloc1CwTxRb
29313 * Purpose: This function determines the RBs and Bytes required
29314 * for Transmission of DL SVC BO on 1 CW.
29315 * Also, takes care of SVC by SVC allocation by tracking
29316 * previous SVCs allocations.
29317 * Returns RFAILED if BO not satisfied at all.
29319 * Invoked by: DL UE Allocation
29321 * @param[in] RgSchCellCb *cell
29322 * @param[in] RgSchDlSf *subFrm
29323 * @param[in] RgSchUeCb *ue
29324 * @param[in] RgSchDlHqTbCb *tbInfo
29325 * @param[in] uint32_t bo
29326 * @param[out] uint8_t *numRb
29327 * @param[out] uint32_t *effBo
29332 static S16 rgSCHCmnDlAlloc1CwTxRb
29337 RgSchDlHqTbCb *tbInfo,
29343 static S16 rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, tbInfo, bo, numRb, effBo)
29347 RgSchDlHqTbCb *tbInfo;
29356 RgSchCmnDlUe *ueDl;
29357 RgSchDlRbAlloc *allocInfo;
29360 /* Correcting wrap around issue.
29361 * This change has been done at mutliple places in this function.*/
29362 uint32_t tempNumRb;
29365 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29366 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29367 oldReq = ueDl->outStndAlloc;
/* 5GTF-specific sizing: MCS/rank come from the UE's 5GTF control block and
 * the max TB size / PRB count are fixed 5GTF constants (TODO_SID below). */
29370 //TODO_SID: Currently setting max Tb size wrt to 5GTF TM3
29371 iTbs = ue->ue5gtfCb.mcs;
29372 ueDl->maxTbSz = MAX_5GTF_TB_SIZE * ue->ue5gtfCb.rank;
29373 ueDl->maxRb = MAX_5GTF_PRBS;
29375 ueDl->outStndAlloc += bo;
29376 /* consider Cumulative amount of this BO and bytes so far allocated */
29377 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbSz/8);
29378 /* Get the number of REs needed for this bo. */
29379 //noRes = ((bo * 8 * 1024) / eff);
29381 /* Get the number of RBs needed for this transmission */
29382 /* Number of RBs = No of REs / No of REs per RB */
29383 //tempNumRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
/* The RE-based RB computation above is commented out; the whole 5GTF PRB
 * budget is granted instead. */
29384 tempNumRb = MAX_5GTF_PRBS;
29385 tbSz = RGSCH_MIN(bo, (rgSch5gtfTbSzTbl[iTbs]/8) * ue->ue5gtfCb.rank);
29387 /* DwPts Scheduling Changes End */
/* effBo reports only the newly satisfied bytes: total TB size minus what was
 * already outstanding before this call, capped by the request. */
29388 *effBo = RGSCH_MIN(tbSz - oldReq, reqBytes);
29391 //RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs);
29396 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbSz, \
29397 iTbs, imcs, tbInfo, ue->ue5gtfCb.rank);
29398 *numRb = (uint8_t) tempNumRb;
29400 /* Update the subframe Allocated BW field */
29401 subFrm->bwAssigned = subFrm->bwAssigned + tempNumRb - allocInfo->rbsReq;
29408 * @brief This function is invoked in the event of any TB's allocation
29409 * being underutilized by the specific scheduler. Here we reduce iMcs
29410 * to increase redundancy and hence increase reception quality at UE.
29414 * Function: rgSCHCmnRdcImcsTxTb
29415 * Purpose: This function shall reduce the iMcs in accordance with
29416 * the total consumed bytes by the UE at allocation
29419 * Invoked by: UE DL Allocation finalization routine
29420 * of specific scheduler.
29422 * @param[in] RgSchDlRbAlloc *allocInfo
29423 * @param[in] uint8_t tbInfoIdx
29424 * @param[in] uint32_t cnsmdBytes
29429 Void rgSCHCmnRdcImcsTxTb
29431 RgSchDlRbAlloc *allocInfo,
29433 uint32_t cnsmdBytes
29436 Void rgSCHCmnRdcImcsTxTb(allocInfo, tbInfoIdx, cnsmdBytes)
29437 RgSchDlRbAlloc *allocInfo;
29439 uint32_t cnsmdBytes;
/* NOTE(review): the comment below suggests this logic is considered obsolete;
 * the iTbs-reduction code after it is still present — confirm which path the
 * build actually takes. */
29443 /*The below functionality is not needed.*/
29449 iTbs = allocInfo->tbInfo[tbInfoIdx].iTbs;
29450 noLyr = allocInfo->tbInfo[tbInfoIdx].noLyr;
29451 numRb = allocInfo->rbsAlloc;
/* If the TB size for the current iTbs exactly matches the consumed bytes,
 * no MCS reduction is required. */
29454 if ((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) == cnsmdBytes)
29459 /* Get iTbs as suitable for the consumed bytes */
29460 while((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) > cnsmdBytes)
29464 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].\
29465 tbCb->dlGrnt.iMcs);
29471 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].tbCb->dlGrnt.iMcs);
29478 * @brief This function determines the RBs and Bytes required for
29479 * Transmission on 2 CWs.
29483 * Function: rgSCHCmnDlAlloc2CwTxRb
29484 * Purpose: This function determines the RBs and Bytes required
29485 * for Transmission of DL SVC BO on 2 CWs.
29486 * Also, takes care of SVC by SVC allocation by tracking
29487 * previous SVCs allocations.
29488 * Returns RFAILED if BO not satisfied at all.
29490 * Invoked by: TM3 and TM4 DL UE Allocation
29492 * @param[in] RgSchCellCb *cell
29493 * @param[in] RgSchDlSf *subFrm
29494 * @param[in] RgSchUeCb *ue
29495 * @param[in] RgSchDlHqProcCb *proc
29496 * @param[in] RgSchDlHqProcCb bo
29497 * @param[out] uint8_t *numRb
29498 * @param[out] uint32_t *effBo
29503 static S16 rgSCHCmnDlAlloc2CwTxRb
29508 RgSchDlHqProcCb *proc,
29514 static S16 rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, numRbRef, effBo)
29518 RgSchDlHqProcCb *proc;
29525 uint32_t eff1, eff2;
29526 uint32_t tb1Sz, tb2Sz;
29527 uint8_t imcs1, imcs2;
29528 uint8_t noLyr1, noLyr2;
29529 uint8_t iTbs1, iTbs2;
29530 RgSchCmnDlCell *cellDl;
29531 RgSchCmnDlUe *ueDl;
29532 RgSchDlRbAlloc *allocInfo;
29535 /* Fix: MUE_PERTTI_DL */
29537 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
29538 uint8_t cfi = cellSch->dl.currCfi;
29540 uint32_t availBits = 0;
29542 uint32_t boTmp = bo;
29547 cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
29548 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29549 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29550 oldReq = ueDl->outStndAlloc;
29553 if (ueDl->maxTbBits > ue->dl.aggTbBits)
29555 availBits = ueDl->maxTbBits - ue->dl.aggTbBits;
/* Guard against exceeding the UE's per-TTI caps: aggregate bits, per-TB
 * size, and max RBs; no further allocation is made once any cap is hit. */
29557 /* check if we can further allocate to this UE */
29558 if ((ue->dl.aggTbBits >= ueDl->maxTbBits) ||
29559 (allocInfo->tbInfo[0].bytesReq >= ueDl->maxTbSz/8) ||
29560 (allocInfo->tbInfo[1].bytesReq >= ueDl->maxTbSz/8) ||
29561 (allocInfo->rbsReq >= ueDl->maxRb))
29563 RLOG_ARG0(L_DEBUG,DBG_CELLID,cell->cellId,
29564 "rgSCHCmnDlAllocRb(): UEs max allocation exceed");
29568 noLyr1 = ueDl->mimoInfo.cwInfo[0].noLyr;
29569 noLyr2 = ueDl->mimoInfo.cwInfo[1].noLyr;
29571 /* If there is no CFI change, continue to use the BLER based
29573 if (ueDl->lastCfi == cfi)
29575 iTbs1 = ueDl->mimoInfo.cwInfo[0].iTbs[noLyr1 - 1];
29576 iTbs2 = ueDl->mimoInfo.cwInfo[1].iTbs[noLyr2 - 1];
/* CFI changed: re-derive iTbs per codeword from the reported CQI (the two
 * rgSchCmnFetchItbs call forms presumably belong to different build
 * configurations — TODO confirm). */
29580 uint8_t cqi = ueDl->mimoInfo.cwInfo[0].cqi;
29582 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 0, noLyr1);
29584 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 0, noLyr1);
29587 cqi = ueDl->mimoInfo.cwInfo[1].cqi;
29589 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 1, noLyr2);
29591 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 1, noLyr2);
29595 /*ccpu00131191 and ccpu00131317 - Fix for RRC Reconfig failure
29596 * issue for VoLTE call */
29597 //if ((proc->hasDcch) || (TRUE == rgSCHLaaSCellEnabled(cell)))
29617 else if(!cellSch->dl.isDlFreqSel)
29620 /* for Tdd reduce iTbs only for SF0. SF5 contains only
29621 * SSS and can be ignored */
29622 if (subFrm->sfNum == 0)
29624 (iTbs1 > 1)? (iTbs1 -= 1) : (iTbs1 = 0);
29625 (iTbs2 > 1)? (iTbs2 -= 1) : (iTbs2 = 0);
29627 /* For SF 3 and 8 CRC is getting failed in DL.
29628 Need to do proper fix after the replay from
29630 #ifdef CA_PHY_BRDCM_61765
29631 if ((subFrm->sfNum == 3) || (subFrm->sfNum == 8))
29633 (iTbs1 > 2)? (iTbs1 -= 2) : (iTbs1 = 0);
29634 (iTbs2 > 2)? (iTbs2 -= 2) : (iTbs2 = 0);
29642 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
29644 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
29648 eff1 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr1 - 1][cfi]))[iTbs1];
29649 eff2 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr2 - 1][cfi]))[iTbs2];
29652 bo = RGSCH_MIN(bo,availBits/8);
29653 ueDl->outStndAlloc += bo;
29654 /* consider Cumulative amount of this BO and bytes so far allocated */
29655 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbBits/8);
/* Split the BO across the two codewords weighted by their spectral
 * efficiencies (eff1/eff2), each capped at the max TB size. */
29656 bo = RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff1)/(eff1+eff2)),
29658 RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff2)/(eff1+eff2)),
29659 (ueDl->maxTbSz)/8) +
29660 1; /* Add 1 to adjust the truncation at weighted averaging */
29661 /* Get the number of REs needed for this bo. */
29662 noRes = ((bo * 8 * 1024) / (eff1 + eff2));
29664 /* Get the number of RBs needed for this transmission */
29665 /* Number of RBs = No of REs / No of REs per RB */
29666 numRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
29667 /* Cannot exceed the maximum number of RBs per UE */
29668 if (numRb > ueDl->maxRb)
29670 numRb = ueDl->maxRb;
29675 if(RFAILED == rgSCHLaaCmn2CwAdjustPrb(allocInfo, boTmp, &numRb, ueDl, noLyr1, noLyr2, iTbs1, iTbs2))
/* Grow numRb while both codewords' TB sizes stay within maxTbSz and their
 * combined size still fits within the (byte-denominated) bo target. */
29678 while ((numRb <= ueDl->maxRb) &&
29679 (rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1] <= ueDl->maxTbSz) &&
29680 (rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1] <= ueDl->maxTbSz) &&
29681 ((rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8 +
29682 rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8) <= bo))
29688 availBw = subFrm->bw - subFrm->bwAssigned;
29689 /* Cannot exceed the total number of RBs in the cell */
29690 if ((S16)(numRb - allocInfo->rbsReq) > availBw)
29692 numRb = availBw + allocInfo->rbsReq;
29694 tb1Sz = rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8;
29695 tb2Sz = rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8;
29696 /* DwPts Scheduling Changes Start */
29698 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
29700 /* Max Rb for Special Sf is approximated as 4/3 of maxRb */
29701 rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, (uint8_t*)&numRb, ueDl->maxRb*4/3,
29702 &iTbs1, &iTbs2, noLyr1,
29703 noLyr2, &tb1Sz, &tb2Sz, cfi);
29704 /* Check for available Bw */
29705 if ((S16)numRb - allocInfo->rbsReq > availBw)
29707 numRb = availBw + allocInfo->rbsReq;
29708 tb1Sz = rgTbSzTbl[noLyr1-1][iTbs1][RGSCH_MAX(numRb*3/4,1)-1]/8;
29709 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs2][RGSCH_MAX(numRb*3/4,1)-1]/8;
29713 /* DwPts Scheduling Changes End */
29714 /* Update the subframe Allocated BW field */
29715 subFrm->bwAssigned = subFrm->bwAssigned + numRb - \
/* effBo is the newly satisfied portion: combined TB bytes minus the
 * previously outstanding request, capped by the request itself. */
29718 *effBo = RGSCH_MIN((tb1Sz + tb2Sz) - oldReq, reqBytes);
29721 if (ROK != rgSCHLaaCmn2TBPrbCheck(allocInfo, tb1Sz, tb2Sz, boTmp, effBo, iTbs1, iTbs2, numRb, proc))
29727 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs1, imcs1);
29728 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs2, imcs2);
29729 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tb1Sz, \
29730 iTbs1, imcs1, &proc->tbInfo[0], noLyr1);
29731 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
29732 iTbs2, imcs2, &proc->tbInfo[1], noLyr2);
29733 *numRbRef = (uint8_t)numRb;
29741 * @brief This function determines the RBs and Bytes required for
29742 * Transmission & Retransmission on 2 CWs.
29746 * Function: rgSCHCmnDlAlloc2CwTxRetxRb
29747 * Purpose: This function determines the RBs and Bytes required
29748 * for Transmission & Retransmission on 2 CWs. Allocate
29749 * RETX TB on a better CW and restrict new TX TB by
29751 * Returns RFAILED if BO not satisfied at all.
29753 * Invoked by: TM3 and TM4 DL UE Allocation
29755 * @param[in] RgSchCellCb *cell
29756 * @param[in] RgSchDlSf *subFrm
29757 * @param[in] RgSchUeCb *ue
29758 * @param[in] RgSchDlHqTbCb *reTxTb
29759 * @param[in] RgSchDlHqTbCb *txTb
29760 * @param[out] uint8_t *numRb
29761 * @param[out] uint32_t *effBo
29766 static S16 rgSCHCmnDlAlloc2CwTxRetxRb
29771 RgSchDlHqTbCb *reTxTb,
29772 RgSchDlHqTbCb *txTb,
29777 static S16 rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, reTxTb, txTb, numRb,\
29782 RgSchDlHqTbCb *reTxTb;
29783 RgSchDlHqTbCb *txTb;
29788 RgSchCmnDlUe *ueDl;
29789 RgSchDlRbAlloc *allocInfo;
29790 uint8_t imcs1, imcs2;
29793 RgSchCmnDlUeCwInfo *otherCw;
29795 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
29796 uint8_t cfi = cellDl->currCfi;
29800 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29801 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* The RETX TB goes on the better codeword; otherCw is the remaining
 * codeword, used for the new TX TB. */
29802 otherCw = &ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)];
29805 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
29806 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
29808 availBw = subFrm->bw - subFrm->bwAssigned;
29809 *numRb = reTxTb->dlGrnt.numRb;
29811 #ifdef XEON_TDD_SPCL
/* Special-subframe to normal-subframe RETX scales the RB count by 3/4 to
 * compensate for the differing RE capacity (DwPTS handling). */
29812 *numRb = (reTxTb->initTxNumRbs);
29813 if(reTxTb->sfType == RG_SCH_SPL_SF_DATA && subFrm->sfType != RG_SCH_SPL_SF_DATA)
29815 *numRb = (reTxTb->initTxNumRbs*3/4);
29819 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
29825 if ((S16)*numRb > availBw)
29829 /* Update the subframe Allocated BW field */
29830 subFrm->bwAssigned += *numRb;
29831 noLyr2 = otherCw->noLyr;
29832 RG_SCH_CMN_GET_MCS_FOR_RETX(reTxTb, imcs1);
29834 /* If there is no CFI change, continue to use the BLER based
29836 if (ueDl->lastCfi == cfi)
29838 iTbs = otherCw->iTbs[noLyr2-1];
29843 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, otherCw->cqi, cfi,
29844 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
29846 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, otherCw->cqi, cfi,
29847 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
/* The new TX TB is sized to the RB count already fixed by the RETX TB. */
29850 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs][*numRb-1]/8;
29851 /* DwPts Scheduling Changes Start */
29854 /* DwPts Scheduling Changes End */
29855 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs2);
/* RETX TB keeps its original size/MCS; iTbs 0 is a placeholder since iTbs is
 * irrelevant for a RETX grant. */
29857 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], reTxTb->tbSz, \
29858 0, imcs1, reTxTb, reTxTb->numLyrs);
29860 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
29861 iTbs, imcs2, txTb, noLyr2);
29863 *effBo = reTxTb->tbSz + tb2Sz;
29870 * @brief This function determines the RBs and Bytes required for BO
29871 * Retransmission on 2 CWs.
29875 * Function: rgSCHCmnDlAlloc2CwRetxRb
29876 * Purpose: This function determines the RBs and Bytes required
29877 * for BO Retransmission on 2 CWs. Allocate larger TB
29878 * on a better CW and check if the smaller TB can be
29879 * accomodated on the other CW.
29880 * Returns RFAILED if BO not satisfied at all.
29882 * Invoked by: Common Scheduler
29884 * @param[in] RgSchCellCb *cell
29885 * @param[in] RgSchDlSf *subFrm
29886 * @param[in] RgSchUeCb *ue
29887 * @param[in] RgSchDlHqProcCb *proc
29888 * @param[out] uint8_t *numRb
29889 * @param[out] Bool *swpFlg
29890 * @param[out] uint32_t *effBo
29895 static S16 rgSCHCmnDlAlloc2CwRetxRb
29900 RgSchDlHqProcCb *proc,
29906 static S16 rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc,\
29907 numRb, swpFlg, effBo)
29911 RgSchDlHqProcCb *proc;
29917 RgSchDlRbAlloc *allocInfo;
29920 RgSchDlHqTbCb *lrgTbInfo, *othrTbInfo;
29923 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29926 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
29927 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
29929 lrgTbInfo = &proc->tbInfo[0];
29930 othrTbInfo = &proc->tbInfo[1];
29931 *numRb = lrgTbInfo->dlGrnt.numRb;
29932 #ifdef XEON_TDD_SPCL
/* When either TB was originally sent on a special subframe, use that TB's
 * initial-TX RB count and scale by 3/4 if now retransmitting on a normal
 * subframe (DwPTS RE-capacity compensation). */
29933 if((lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA || othrTbInfo->sfType == RG_SCH_SPL_SF_DATA))
29935 if(lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA)
29937 *numRb = (lrgTbInfo->initTxNumRbs);
29941 *numRb = (othrTbInfo->initTxNumRbs);
29944 if(subFrm->sfType != RG_SCH_SPL_SF_DATA)
29946 *numRb = (*numRb)*3/4;
29951 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
29956 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
29960 /* Update the subframe Allocated BW field */
29961 subFrm->bwAssigned += *numRb;
/* Both TBs retain their original sizes/MCS; iTbs 0 is a placeholder since
 * iTbs is irrelevant for RETX grants. */
29962 RG_SCH_CMN_GET_MCS_FOR_RETX(lrgTbInfo, imcs1);
29963 RG_SCH_CMN_GET_MCS_FOR_RETX(othrTbInfo, imcs2);
29964 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], lrgTbInfo->tbSz, \
29965 0, imcs1, lrgTbInfo, lrgTbInfo->numLyrs);
29966 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], othrTbInfo->tbSz, \
29967 0, imcs2, othrTbInfo, othrTbInfo->numLyrs);
29968 *effBo = lrgTbInfo->tbSz + othrTbInfo->tbSz;
29977 * @brief This function determines the RBs and Bytes required for BO
29978 * Retransmission on 1 CW.
29982 * Function: rgSCHCmnDlAlloc1CwRetxRb
29983 * Purpose: This function determines the RBs and Bytes required
29984 * for BO Retransmission on 1 CW, the first CW.
29985 * Returns RFAILED if BO not satisfied at all.
29987 * Invoked by: Common Scheduler
29989 * @param[in] RgSchCellCb *cell
29990 * @param[in] RgSchDlSf *subFrm
29991 * @param[in] RgSchUeCb *ue
29992 * @param[in] RgSchDlHqTbCb *tbInfo
29993 * @param[in] uint8_t noLyr
29994 * @param[out] uint8_t *numRb
29995 * @param[out] uint32_t *effBo
30000 static S16 rgSCHCmnDlAlloc1CwRetxRb
30005 RgSchDlHqTbCb *tbInfo,
30011 static S16 rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, tbInfo, noLyr,\
30016 RgSchDlHqTbCb *tbInfo;
30022 RgSchDlRbAlloc *allocInfo;
30026 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
30029 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
30030 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
/* RETX reuses the original grant's RB count; fail if it no longer fits in
 * the remaining subframe bandwidth. */
30032 *numRb = tbInfo->dlGrnt.numRb;
30033 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
30037 /* Update the subframe Allocated BW field */
30038 subFrm->bwAssigned += *numRb;
30039 imcs = tbInfo->dlGrnt.iMcs;
30040 allocInfo->dciFormat = tbInfo->dlGrnt.dciFormat;
30041 /* Fix: For a RETX TB the iTbs is irrelevant, hence setting 0 */
30042 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbInfo->tbSz, \
30043 0, imcs, tbInfo, tbInfo->numLyrs);
30044 *effBo = tbInfo->tbSz;
30052 * @brief This function is called to handle Release PDCCH feedback for SPS UE
30056 * Function: rgSCHCmnDlRelPdcchFbk
30057 * Purpose: Invokes SPS module to handle release PDCCH feedback
30061 * @param[in] RgSchCellCb *cell
30062 * @param[in] RgSchUeCb *ue
30063 * @param[in] Bool isAck
30068 Void rgSCHCmnDlRelPdcchFbk
30075 Void rgSCHCmnDlRelPdcchFbk(cell, ue, isAck)
/* Pure delegation to the DL SPS module; no additional processing here. */
30082 rgSCHCmnSpsDlRelPdcchFbk(cell, ue, isAck);
30089 * @brief This function is invoked to handle Ack processing for a HARQ proc.
30093 * Function: rgSCHCmnDlProcAck
30094 * Purpose: DTX processing for HARQ proc
30098 * @param[in] RgSchCellCb *cell
30099 * @param[in] RgSchDlHqProcCb *hqP
30104 Void rgSCHCmnDlProcAck
30107 RgSchDlHqProcCb *hqP
30110 Void rgSCHCmnDlProcAck(cell, hqP)
30112 RgSchDlHqProcCb *hqP;
/* ACK handling is delegated to the SPS module only when the HARQ proc was
 * used for an SPS service. */
30117 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
30119 /* Invoke SPS module if SPS service was scheduled for this HARQ proc */
30120 rgSCHCmnSpsDlProcAck(cell, hqP);
30124 #ifdef RGSCH_SPS_STATS
/* Debug/statistics counter: number of C-RNTI MAC CEs received (SPS stats build only) */
30125 uint32_t rgSchStatCrntiCeRcvCnt;
30128 * @brief This function is invoked to handle CRNTI CE reception for an UE
30132 * Function: rgSCHCmnHdlCrntiCE
30133 * Purpose: Handle CRNTI CE reception
30137 * @param[in] RgSchCellCb *cell
30138 * @param[in] RgSchDlHqProcCb *hqP
30143 Void rgSCHCmnHdlCrntiCE
30149 Void rgSCHCmnHdlCrntiCE(cell, ue)
30155 #ifdef RGSCH_SPS_STATS
30156 rgSchStatCrntiCeRcvCnt++;
30159 /* When UL sync lost happened due to TA timer expiry UE is being moved to
30160 PDCCH order inactivity list.But when CRNTI CE received in msg3 from UE
30161 we are not moving UE into active state due to that RRC Reconfiguration is
30163 So here we are moving UE to active list whenever we receive the CRNTI CE and
30165 /* CR ccpu00144525 */
30166 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
30168 /* Activate this UE if it was inactive */
30169 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
30170 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
30173 /* Handling is same as reception of UE RESET for both DL and UL */
30174 if (ue->dl.dlSpsCfg.isDlSpsEnabled)
30176 rgSCHCmnSpsDlUeReset(cell, ue);
30178 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30180 rgSCHCmnSpsUlUeReset(cell, ue);
30188 * @brief This function is called to handle relInd from MAC for a UE
30192 * Function: rgSCHCmnUlSpsRelInd
30193 * Purpose: Invokes SPS module to handle UL SPS release for a UE
30195 * Invoked by: SCH_UTL
30197 * @param[in] RgSchCellCb *cell
30198 * @param[in] RgSchUeCb *ue
30199 * @param[in] Bool isExplRel
30204 Void rgSCHCmnUlSpsRelInd
30211 Void rgSCHCmnUlSpsRelInd(cell, ue, isExplRel)
/* Thin wrapper: delegate UL SPS release indication to the UL SPS module */
30218 rgSCHCmnSpsUlProcRelInd(cell, ue, isExplRel);
30221 } /* end of rgSCHCmnUlSpsRelInd */
30224 * @brief This function is called to handle SPS Activate Ind from MAC for a UE
30228 * Function: rgSCHCmnUlSpsActInd
30229 * Purpose: Invokes SPS module to handle UL SPS activate for a UE
30231 * Invoked by: SCH_UTL
30233 * @param[in] RgSchCellCb *cell
30234 * @param[in] RgSchUeCb *ue
30239 Void rgSCHCmnUlSpsActInd
30243 uint16_t spsSduSize
30246 Void rgSCHCmnUlSpsActInd(cell, ue,spsSduSize)
30249 uint16_t spsSduSize;
/* Forward the activation only when UL SPS is configured for this UE */
30254 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30256 rgSCHCmnSpsUlProcActInd(cell, ue,spsSduSize);
30260 } /* end of rgSCHCmnUlSpsActInd */
30263 * @brief This function is called to handle CRC in UL for UEs
30264 * undergoing SPS release
30268 * Function: rgSCHCmnUlCrcInd
30269 * Purpose: Invokes SPS module to handle CRC in UL for SPS UE
30271 * Invoked by: SCH_UTL
30273 * @param[in] RgSchCellCb *cell
30274 * @param[in] RgSchUeCb *ue
30275 * @param[in] CmLteTimingInfo crcTime
30280 Void rgSCHCmnUlCrcInd
30284 CmLteTimingInfo crcTime
30287 Void rgSCHCmnUlCrcInd(cell, ue, crcTime)
30290 CmLteTimingInfo crcTime;
/* Forward the CRC-pass indication only when UL SPS is configured for this UE */
30294 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30296 rgSCHCmnSpsUlProcCrcInd(cell, ue, crcTime);
/* NOTE(review): trailing comment names rgSCHCmnUlCrcFailInd but this is
 * rgSCHCmnUlCrcInd -- copy/paste artifact in the original source. */
30300 } /* end of rgSCHCmnUlCrcFailInd */
30303 * @brief This function is called to handle CRC failure in UL
30307 * Function: rgSCHCmnUlCrcFailInd
30308 * Purpose: Invokes SPS module to handle CRC failure in UL for SPS UE
30310 * Invoked by: SCH_UTL
30312 * @param[in] RgSchCellCb *cell
30313 * @param[in] RgSchUeCb *ue
30314 * @param[in] CmLteTimingInfo crcTime
30319 Void rgSCHCmnUlCrcFailInd
30323 CmLteTimingInfo crcTime
30326 Void rgSCHCmnUlCrcFailInd(cell, ue, crcTime)
30329 CmLteTimingInfo crcTime;
/* CRC failure is treated as DTX by the UL SPS module; only for SPS-enabled UEs */
30333 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30335 rgSCHCmnSpsUlProcDtxInd(cell, ue, crcTime);
30339 } /* end of rgSCHCmnUlCrcFailInd */
30341 #endif /* LTEMAC_SPS */
30344 * @brief BCH,BCCH,PCCH Downlink Scheduling Handler.
30348 * Function: rgSCHCmnDlBcchPcchAlloc
30349 * Purpose: This function calls common scheduler APIs to
30350 * schedule for BCCH/PCCH.
30351 * It then invokes Allocator for actual RB
30352 * allocations. It processes on the actual resources allocated
30353 * against requested to the allocator module.
30355 * Invoked by: Common Scheduler
30357 * @param[in] RgSchCellCb *cell
30361 static Void rgSCHCmnDlBcchPcchAlloc
30366 static Void rgSCHCmnDlBcchPcchAlloc(cell)
/* NOTE(review): three alternative nextSfIdx computations are visible; the
 * surrounding #ifdef arms (EMTC / LTEMAC_HDFDD / default) are elided in this
 * excerpt -- TODO confirm against the full source. */
30371 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
30373 #ifdef LTEMAC_HDFDD
30374 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
30376 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
30379 RgInfSfAlloc *nextsfAlloc = &(cell->sfAllocArr[nextSfIdx]);
30380 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
30381 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
30385 /*Reset the bitmask for BCCH/PCCH*/
30386 rgSCHUtlResetSfAlloc(nextsfAlloc,TRUE,FALSE);
30387 #ifndef DISABLE_MIB_SIB /* Not sending MIB and SIB to CL */
/* Pick up any pending SI configuration change, then select the SI to send */
30389 rgSCHChkNUpdSiCfg(cell);
30390 rgSCHSelectSi(cell);
30393 /*Perform the scheduling for BCCH,PCCH*/
30394 rgSCHCmnDlBcchPcch(cell, allocInfo, nextsfAlloc);
30396 /* Call common allocator for RB Allocation */
30397 rgSCHBcchPcchDlRbAlloc(cell, allocInfo);
30399 /* Finalize the Allocations for requested Against allocated */
30400 rgSCHCmnDlBcchPcchFnlz(cell, allocInfo);
30401 #endif /* DISABLE_MIB_SIB */
30406 * @brief Handles RB allocation for BCCH/PCCH for downlink.
30410 * Function : rgSCHBcchPcchDlRbAlloc
30412 * Invoking Module Processing:
30413 * - This function is invoked for DL RB allocation of BCCH/PCCH
30415 * Processing Steps:
30416 * - If cell is frequency selecive,
30417 * - Call rgSCHDlfsBcchPcchAllocRb().
30419 * - Do the processing
30421 * @param[in] RgSchCellCb *cell
30422 * @param[in] RgSchDlRbAllocInfo *allocInfo
30427 static Void rgSCHBcchPcchDlRbAlloc
30430 RgSchCmnDlRbAllocInfo *allocInfo
30433 static Void rgSCHBcchPcchDlRbAlloc(cell, allocInfo)
30435 RgSchCmnDlRbAllocInfo *allocInfo;
30438 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Dispatch to the frequency-selective allocator when DLFS is enabled,
 * otherwise use the common non-DLFS allocation path */
30442 if (cellSch->dl.isDlFreqSel)
30444 cellSch->apisDlfs->rgSCHDlfsBcchPcchAllocRb(cell, allocInfo);
30448 rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo);
30455 * @brief Handles RB allocation for BCCH,PCCH for frequency
30456 * non-selective cell.
30460 * Function : rgSCHCmnNonDlfsBcchPcchRbAlloc
30462 * Invoking Module Processing:
30463 * - SCH shall invoke this if downlink frequency selective is disabled for
30464 * the cell for RB allocation.
30465 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
30466 * estimate and subframe for each allocation to be made to SCH.
30468 * Processing Steps:
30469 * - Allocate sequentially for BCCH,PCCH common channels.
30471 * @param[in] RgSchCellCb *cell
30472 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
30477 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc
30480 RgSchCmnDlRbAllocInfo *allocInfo
30483 static Void rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo)
30485 RgSchCmnDlRbAllocInfo *allocInfo;
30488 RgSchDlRbAlloc *reqAllocInfo;
30492 /* Allocate for PCCH */
30493 reqAllocInfo = &(allocInfo->pcchAlloc);
/* rbsReq is non-zero only when the channel actually has data pending */
30494 if (reqAllocInfo->rbsReq)
30496 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
30498 /* Allocate for BCCH on DLSCH */
30499 reqAllocInfo = &(allocInfo->bcchAlloc);
30500 if (reqAllocInfo->rbsReq)
30502 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
30510 * @brief This function implements the handling to check and
30511 * update the SI cfg at the start of the modification period.
30515 * Function: rgSCHChkNUpdSiCfg
30516 * Purpose: This function implements handling for update of SI Cfg
30517 * at the start of modification period.
30519 * Invoked by: Scheduler
30521 * @param[in] RgSchCellCb* cell
30527 static Void rgSCHChkNUpdSiCfg
30532 static Void rgSCHChkNUpdSiCfg(cell)
30536 CmLteTimingInfo pdSchTmInfo;
/* Work on the look-ahead scheduling time, not the current TTI */
30540 pdSchTmInfo = cell->crntTime;
30541 #ifdef LTEMAC_HDFDD
30542 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30543 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30544 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30546 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA);
30550 /* Updating the SIB1 for Warning SI message immediately after it is received
30551 * from application. No need to wait for next modification period.
30553 if((pdSchTmInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
30554 && (RGSCH_SIB1_TX_SF_NUM == (pdSchTmInfo.slot % RGSCH_NUM_SUB_FRAMES)))
30556 /*Check whether SIB1 with PWS has been updated*/
30557 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_PWS_UPD)
/* Swap in the new PWS SIB1 PDU and copy its transport parameters */
30559 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
30560 cell->siCb.newSiInfo.sib1Info.sib1);
30561 cell->siCb.crntSiInfo.sib1Info.mcs =
30562 cell->siCb.newSiInfo.sib1Info.mcs;
30563 cell->siCb.crntSiInfo.sib1Info.nPrb =
30564 cell->siCb.newSiInfo.sib1Info.nPrb;
30565 cell->siCb.crntSiInfo.sib1Info.msgLen =
30566 cell->siCb.newSiInfo.sib1Info.msgLen;
30567 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_PWS_UPD;
30571 /*Check if this SFN and SF No marks the start of next modification
30572 period. If current SFN,SF No doesn't mark the start of next
30573 modification period, then return. */
30574 if(!((pdSchTmInfo.sfn % cell->siCfg.modPrd == 0)
30575 && (0 == pdSchTmInfo.slot)))
30576 /*if(!((((pdSchTmInfo.hSfn * 1024) + pdSchTmInfo.sfn) % cell->siCfg.modPrd == 0)
30577 && (0 == pdSchTmInfo.slot)))*/
30582 /*Check whether MIB has been updated*/
30583 if(cell->siCb.siBitMask & RGSCH_SI_MIB_UPD)
30585 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.mib,
30586 cell->siCb.newSiInfo.mib);
30587 cell->siCb.siBitMask &= ~RGSCH_SI_MIB_UPD;
30590 /*Check whether SIB1 has been updated*/
30591 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_UPD)
30593 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
30594 cell->siCb.newSiInfo.sib1Info.sib1);
30595 cell->siCb.crntSiInfo.sib1Info.mcs = cell->siCb.newSiInfo.sib1Info.mcs;
30596 cell->siCb.crntSiInfo.sib1Info.nPrb = cell->siCb.newSiInfo.sib1Info.nPrb;
30597 cell->siCb.crntSiInfo.sib1Info.msgLen =
30598 cell->siCb.newSiInfo.sib1Info.msgLen;
30599 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_UPD;
30602 /*Check whether SIs have been updated*/
30603 if(cell->siCb.siBitMask & RGSCH_SI_SI_UPD)
30607 /*Check if SI cfg have been modified And Check if numSi have
30608 been changed, if yes then we would need to update the
30609 pointers for all the SIs */
30610 if((cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD) &&
30611 (cell->siCfg.numSi != cell->siCb.newSiCfg.numSi))
30613 for(idx = 0;idx < cell->siCb.newSiCfg.numSi;idx++)
30615 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
30616 cell->siCb.newSiInfo.siInfo[idx].si);
30617 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
30618 cell->siCb.siArray[idx].isWarningSi = FALSE;
30620 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
30621 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
30622 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
30625 /*If numSi have been reduced then we need to free the
30626 pointers at the indexes in crntSiInfo which haven't
30627 been exercised. If numSi has increased then nothing
30628 additional is required as above handling has taken
30630 if(cell->siCfg.numSi > cell->siCb.newSiCfg.numSi)
30632 for(idx = cell->siCb.newSiCfg.numSi;
30633 idx < cell->siCfg.numSi;idx++)
30635 RGSCH_FREE_MSG(cell->siCb.crntSiInfo.siInfo[idx].si);
30636 cell->siCb.siArray[idx].si = NULLP;
30642 /*numSi has not been updated, we just need to update the
30643 pointers for the SIs which are set to NON NULLP */
30644 /*ccpu00118260 - Correct Update of SIB2 */
30645 for(idx = 0;idx < cell->siCfg.numSi;idx++)
30647 if(NULLP != cell->siCb.newSiInfo.siInfo[idx].si)
30649 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
30650 cell->siCb.newSiInfo.siInfo[idx].si);
30652 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
30653 cell->siCb.siArray[idx].isWarningSi = FALSE;
30654 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
30655 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
30656 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
30660 cell->siCb.siBitMask &= ~RGSCH_SI_SI_UPD;
30663 /*Check whether SI cfg have been updated*/
30664 if(cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD)
/* Finally commit the whole new SI configuration */
30666 cell->siCfg = cell->siCb.newSiCfg;
30667 cell->siCb.siBitMask &= ~RGSCH_SI_SICFG_UPD;
30675 * @brief This function implements the selection of the SI
30676 * that is to be scheduled.
30680 * Function: rgSCHSelectSi
30681 * Purpose: This function implements the selection of SI
30682 * that is to be scheduled.
30684 * Invoked by: Scheduler
30686 * @param[in] RgSchCellCb* cell
30692 static Void rgSCHSelectSi
30697 static Void rgSCHSelectSi(cell)
30701 CmLteTimingInfo crntTmInfo;
/* Work on the look-ahead scheduling time, not the current TTI */
30708 crntTmInfo = cell->crntTime;
30709 #ifdef LTEMAC_HDFDD
30710 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30711 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30712 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30714 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA);
30717 siWinSize = cell->siCfg.siWinSize;
30719 /* Select SI only once at the starting of the new window */
30720 if(cell->siCb.inWindow)
30722 if ((crntTmInfo.sfn % cell->siCfg.minPeriodicity) == 0 &&
30723 crntTmInfo.slot == 0)
30725 /* Reinit inWindow at the beginning of every SI window */
30726 cell->siCb.inWindow = siWinSize - 1;
/* Still inside the current window: just count down and keep the selection */
30730 cell->siCb.inWindow--;
30734 else /* New window. Re-init the winSize counter with the window length */
/* Drop an unsent warning-SI PDU left over from the previous window */
30736 if((cell->siCb.siArray[cell->siCb.siCtx.siId - 1].isWarningSi == TRUE)&&
30737 (cell->siCb.siCtx.retxCntRem != 0))
30739 rgSCHUtlFreeWarningSiPdu(cell);
30740 cell->siCb.siCtx.warningSiFlag = FALSE;
30743 cell->siCb.inWindow = siWinSize - 1;
/* x = index of the SI set that the look-ahead time falls into */
30746 x = rgSCHCmnGetSiSetId(crntTmInfo.sfn, crntTmInfo.slot,
30747 cell->siCfg.minPeriodicity);
30749 /* Window Id within a SI set. This window Id directly maps to a
30751 windowId = (((crntTmInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) +
30752 crntTmInfo.slot) - (x * (cell->siCfg.minPeriodicity * 10)))
30755 if(windowId >= RGR_MAX_NUM_SI)
30758 /* Update the siCtx if there is a valid SI and its periodicity
30760 if (NULLP != cell->siCb.siArray[windowId].si)
30762 /* Warning SI Periodicity is same as SIB2 Periodicity */
30763 if(((cell->siCb.siArray[windowId].isWarningSi == FALSE) &&
30764 (x % (cell->siCfg.siPeriodicity[windowId]
30765 /cell->siCfg.minPeriodicity) == 0)) ||
30766 ((cell->siCb.siArray[windowId].isWarningSi == TRUE) &&
30767 (x % (cell->siCfg.siPeriodicity[0]
30768 /cell->siCfg.minPeriodicity) == 0)))
/* Commit the new SI context: id, retx budget and window boundaries */
30770 cell->siCb.siCtx.siId = windowId+1;
30771 cell->siCb.siCtx.retxCntRem = cell->siCfg.retxCnt;
30772 cell->siCb.siCtx.warningSiFlag = cell->siCb.siArray[windowId].
30774 cell->siCb.siCtx.timeToTx.sfn = crntTmInfo.sfn;
30775 cell->siCb.siCtx.timeToTx.slot = crntTmInfo.slot;
30777 RG_SCH_ADD_TO_CRNT_TIME(cell->siCb.siCtx.timeToTx,
30778 cell->siCb.siCtx.maxTimeToTx, (siWinSize - 1))
30782 {/* Update the siCtx with invalid si Id */
30783 cell->siCb.siCtx.siId = 0;
30791 * @brief This function implements scheduler DL allocation for
30796 * Function: rgSCHDlSiSched
30797 * Purpose: This function implements scheduler for DL allocation
30800 * Invoked by: Scheduler
30802 * @param[in] RgSchCellCb* cell
30808 static Void rgSCHDlSiSched
30811 RgSchCmnDlRbAllocInfo *allocInfo,
30812 RgInfSfAlloc *subfrmAlloc
30815 static Void rgSCHDlSiSched(cell, allocInfo, subfrmAlloc)
30817 RgSchCmnDlRbAllocInfo *allocInfo;
30818 RgInfSfAlloc *subfrmAlloc;
30821 CmLteTimingInfo crntTimInfo;
30827 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
30828 /* DwPTS Scheduling Changes Start */
30831 uint8_t cfi = cellDl->currCfi;
30833 /* DwPTS Scheduling Changes End */
/* Work on the look-ahead scheduling time, not the current TTI */
30837 crntTimInfo = cell->crntTime;
30838 #ifdef LTEMAC_HDFDD
30839 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30840 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30841 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30843 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA);
30846 /* Compute the subframe for which allocation is being made.
30847 Essentially, we need pointer to the dl frame for this subframe */
30848 sf = rgSCHUtlSubFrmGet(cell, crntTimInfo);
30850 /*Check if scheduling of MIB is required */
30852 /* since we are adding the MIB repetition logic for EMTC UEs, checking if
30853 * emtcEnabled or not, If enabled MIB would be repeted at as part of EMTC
30854 * feature, otherwise scheduling at (n,0) */
30855 if(0 == cell->emtcEnable)
30858 if((crntTimInfo.sfn % RGSCH_MIB_PERIODICITY == 0)
30859 && (RGSCH_MIB_TX_SF_NUM == crntTimInfo.slot))
30862 uint8_t sfnOctet, mibOct2 = 0;
30863 uint8_t mibOct1 = 0;
30864 /*If MIB has not been yet setup by Application, return*/
30865 if(NULLP == cell->siCb.crntSiInfo.mib)
30868 SFndLenMsg(cell->siCb.crntSiInfo.mib, &mibLen);
30869 sf->bch.tbSize = mibLen;
30870 /*Fill the interface information */
30871 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, NULLD, NULLD);
30873 /*Set the bits of MIB to reflect SFN */
30874 /*First get the Most significant 8 bits of SFN */
30875 sfnOctet = (uint8_t)(crntTimInfo.sfn >> 2);
30876 /*Get the first two octets of MIB, and then update them
30877 using the SFN octet value obtained above.*/
30878 if(ROK != SExamMsg((Data *)(&mibOct1),
30879 cell->siCb.crntSiInfo.mib, 0))
30882 if(ROK != SExamMsg((Data *)(&mibOct2),
30883 cell->siCb.crntSiInfo.mib, 1))
30886 /* ccpu00114572- Fix for improper way of MIB Octet setting for SFN */
30887 mibOct1 = (mibOct1 & 0xFC) | (sfnOctet >> 6);
30888 mibOct2 = (mibOct2 & 0x03) | (sfnOctet << 2);
30889 /* ccpu00114572- Fix ends*/
30891 /*Now, replace the two octets in MIB */
30892 if(ROK != SRepMsg((Data)(mibOct1),
30893 cell->siCb.crntSiInfo.mib, 0))
30896 if(ROK != SRepMsg((Data)(mibOct2),
30897 cell->siCb.crntSiInfo.mib, 1))
30900 /*Copy the MIB msg buff into interface buffer */
30901 SCpyMsgMsg(cell->siCb.crntSiInfo.mib,
30902 rgSchCb[cell->instIdx].rgSchInit.region,
30903 rgSchCb[cell->instIdx].rgSchInit.pool,
30904 &subfrmAlloc->cmnLcInfo.bchInfo.pdu);
30905 /* Added Dl TB count for MIB message transmission
30906 * This counter is incremented 4 times to consider
30907 * the retransmission at the PHY level on PBCH channel*/
30909 cell->dlUlTbCnt.tbTransDlTotalCnt += RG_SCH_MIB_CNT;
30916 allocInfo->bcchAlloc.schdFirst = FALSE;
30917 /*Check if scheduling of SIB1 is required.
30918 Check of (crntTimInfo.sfn % RGSCH_SIB1_PERIODICITY == 0)
30919 is not required here since the below check takes care
30920 of SFNs applicable for this one too.*/
30921 if((crntTimInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
30922 && (RGSCH_SIB1_TX_SF_NUM == crntTimInfo.slot))
30924 /*If SIB1 has not been yet setup by Application, return*/
30925 if(NULLP == (cell->siCb.crntSiInfo.sib1Info.sib1))
/* SIB1 occasion: it is scheduled ahead of any other SI this subframe */
30930 allocInfo->bcchAlloc.schdFirst = TRUE;
30931 mcs = cell->siCb.crntSiInfo.sib1Info.mcs;
30932 nPrb = cell->siCb.crntSiInfo.sib1Info.nPrb;
30933 msgLen = cell->siCb.crntSiInfo.sib1Info.msgLen;
30937 /*Check if scheduling of SI can be performed.*/
30938 Bool invalid = FALSE;
30940 if(cell->siCb.siCtx.siId == 0)
30943 /*Check if the Si-Window for the current Si-Context is completed*/
30944 invalid = rgSCHCmnChkPastWin(crntTimInfo, cell->siCb.siCtx.maxTimeToTx);
30947 /* LTE_ADV_FLAG_REMOVED_START */
30948 if(cell->siCb.siCtx.retxCntRem)
30950 RGSCHLOGERROR(cell->instIdx,ERRCLS_INT_PAR,ERG011,(ErrVal)cell->siCb.siCtx.siId,
30951 "rgSCHDlSiSched(): SI not scheduled and window expired");
30953 /* LTE_ADV_FLAG_REMOVED_END */
30954 if(cell->siCb.siCtx.warningSiFlag == TRUE)
30956 rgSCHUtlFreeWarningSiPdu(cell);
30957 cell->siCb.siCtx.warningSiFlag = FALSE;
30962 /*Check the timinginfo of the current SI-Context to see if its
30963 transmission can be scheduled. */
30964 if(FALSE == (rgSCHCmnChkInWin(crntTimInfo,
30965 cell->siCb.siCtx.timeToTx,
30966 cell->siCb.siCtx.maxTimeToTx)))
30971 /*Check if retransmission count has become 0*/
30972 if(0 == cell->siCb.siCtx.retxCntRem)
30977 /* LTE_ADV_FLAG_REMOVED_START */
30978 /* Check if ABS is enabled/configured */
30979 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
30981 /* The pattern type is RGR_ABS_MUTE, then eNB need to blank the subframe */
30982 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
30984 /* Determine next scheduling subframe is ABS or not */
30985 if(RG_SCH_ABS_ENABLED_ABS_SF == (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern
30986 [((crntTimInfo.sfn*RGSCH_NUM_SUB_FRAMES) + crntTimInfo.slot) % RGR_ABS_PATTERN_LEN]))
30988 /* Skip the SI scheduling to next tti */
30993 /* LTE_ADV_FLAG_REMOVED_END */
30995 /*Schedule the transmission of the current SI-Context */
30996 /*Find out the messg length for the SI message */
30997 /* warningSiFlag is to differentiate between Warning SI
30999 if((rgSCHUtlGetMcsAndNPrb(cell, &nPrb, &mcs, &msgLen)) != ROK)
/* 'i' = offset of this transmission within the SI window (for RV selection) */
31004 cell->siCb.siCtx.i = RGSCH_CALC_SF_DIFF(crntTimInfo,
31005 cell->siCb.siCtx.timeToTx);
31009 /*Get the number of rb required */
31010 /*rgSCHCmnClcRbAllocForFxdTb(cell, msgLen, cellDl->ccchCqi, &rb);*/
31011 if(cellDl->bitsPerRb==0)
/* bitsPerRb not yet known: walk the TB-size table for iTbs 0, 1 layer */
31013 while ((rgTbSzTbl[0][0][rb]) < (uint32_t) (msgLen*8))
31021 rb = RGSCH_CEIL((msgLen*8), cellDl->bitsPerRb);
31023 /* DwPTS Scheduling Changes Start */
31025 if (sf->sfType == RG_SCH_SPL_SF_DATA)
31027 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
31029 /* Calculate the less RE's because of DwPTS */
31030 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
31032 /* Increase number of RBs in Spl SF to compensate for lost REs */
31033 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
31036 /* DwPTS Scheduling Changes End */
31037 /*ccpu00115595- end*/
31038 /* Additional check to see if required RBs
31039 * exceeds the available */
31040 if (rb > sf->bw - sf->bwAssigned)
31042 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHDlSiSched(): "
31043 "BW allocation failed CRNTI:%d",RGSCH_SI_RNTI)
31047 /* Update the subframe Allocated BW field */
31048 sf->bwAssigned = sf->bwAssigned + rb;
31050 /*Fill the parameters in allocInfo */
31051 allocInfo->bcchAlloc.rnti = RGSCH_SI_RNTI;
31052 allocInfo->bcchAlloc.dlSf = sf;
31053 allocInfo->bcchAlloc.rbsReq = rb;
31054 /*ccpu00116710- MCS is not getting assigned */
31055 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
31057 /* ccpu00117510 - ADD - Assignment of nPrb and other information */
31058 allocInfo->bcchAlloc.nPrb = nPrb;
31059 allocInfo->bcchAlloc.tbInfo[0].bytesReq = msgLen;
31060 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
31063 #endif /*RGR_SI_SCH*/
31066 /* ccpu00117452 - MOD - Changed macro name from
31067 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
31068 #ifdef RGR_CQI_REPT
31070 * @brief This function Updates the DL CQI for the UE.
31074 * Function: rgSCHCmnUeDlPwrCtColltCqiRept
31075 * Purpose: Manages PUSH N CQI reporting
31076 * Step 1: Store the CQI in collation array
31077 * Step 2: Increment the tracking count
31078 * Step 3: Check is it time to send the report
31079 * Step 4: if yes, Send StaInd to RRM
31080 * Step 4.1: Fill StaInd for sending collated N CQI reports
31081 * Step 4.2: Call utility function (rgSCHUtlRgrStaInd) to send rpts to RRM
31082 * Step 4.2.1: If sending was not successful, return RFAILED
31083 * Step 4.2.2: If sending was successful, return ROK
31084 * Step 5: If no, return
31085 * Invoked by: rgSCHCmnDlCqiInd
31087 * @param[in] RgSchCellCb *cell
31088 * @param[in] RgSchUeCb *ue
31089 * @param[in] RgrUeCqiRept *ueCqiRpt
31094 static S16 rgSCHCmnUeDlPwrCtColltCqiRept
31098 RgrUeCqiRept *ueCqiRpt
31101 static S16 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, ueCqiRpt)
31104 RgrUeCqiRept *ueCqiRpt;
31107 uint8_t *cqiCount = NULLP;
31109 RgrStaIndInfo *staInfo = NULLP;
31112 /* Step 1: Store the CQI in collation array */
31113 /* Step 2: Increment the tracking count */
31114 cqiCount = &(ue->schCqiInfo.cqiCount);
31115 ue->schCqiInfo.cqiRept[(*cqiCount)++] =
31119 /* Step 3: Check is it time to send the report */
31120 if(RG_SCH_CQIR_IS_TIMTOSEND_CQIREPT(ue))
31122 /* Step 4: if yes, Send StaInd to RRM */
31123 retVal = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&staInfo,
31124 sizeof(RgrStaIndInfo));
31127 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
31128 "allocate memory for sending StaInd CRNTI:%d",ue->ueId);
31132 /* Step 4.1: Fill StaInd for sending collated N CQI reports */
/* NOTE(review): gCqiReptToAppCount is declared locally and incremented but
 * never read here -- debug counter; presumably elided #ifdef arms surround
 * it in the full source. */
31135 uint32_t gCqiReptToAppCount;
31136 gCqiReptToAppCount++;
31141 retVal = rgSCHUtlFillSndStaInd(cell, ue, staInfo,
31142 ue->cqiReptCfgInfo.numColltdCqiRept);
31148 } /* End of rgSCHCmnUeDlPwrCtColltCqiRept */
31153 * @brief This function checks for the retransmission
31154 * for a DTX scenario.
31161 * @param[in] RgSchCellCb *cell
31162 * @param[in] RgSchUeCb *ue
31168 Void rgSCHCmnChkRetxAllowDtx
31172 RgSchDlHqProcCb *proc,
31176 Void rgSCHCmnChkRetxAllowDtx(cell, ueCb, proc, reTxAllwd)
31179 RgSchDlHqProcCb *proc;
/* Disallow retransmission when the last feedback on TB0 was DTX
 * (UE likely missed the PDCCH, so an adaptive reTx is needed instead) */
31187 if ((proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX))
31189 *reTxAllwd = FALSE;
31196 * @brief API for calculating the SI Set Id
31200 * Function: rgSCHCmnGetSiSetId
31202 * This API is used for calculating the SI Set Id, as shown below
31204 * siSetId = 0 siSetId = 1
31205 * |******************|******************|---------------->
31206 * (0,0) (8,0) (16,0) (SFN, SF)
31209 * @param[in] uint16_t sfn
31210 * @param[in] uint8_t sf
31211 * @return uint16_t siSetId
31214 uint16_t rgSCHCmnGetSiSetId
31218 uint16_t minPeriodicity
31221 uint16_t rgSCHCmnGetSiSetId(sfn, sf, minPeriodicity)
31224 uint16_t minPeriodicity;
31227 /* 80 is the minimum SI periodicity in sf. Also
31228 * all other SI periodicities are multiples of 80 */
/* Absolute subframe index divided by the minimum SI period (in subframes,
 * minPeriodicity is in units of 10 ms frames hence the *10) */
31229 return (((sfn * RGSCH_NUM_SUB_FRAMES_5G) + sf) / (minPeriodicity * 10));
31233 * @brief API for calculating the DwPts Rb, Itbs and tbSz
31237 * Function: rgSCHCmnCalcDwPtsTbSz
31239 * @param[in] RgSchCellCb *cell
31240 * @param[in] uint32_t bo
31241 * @param[in/out] uint8_t *rb
31242 * @param[in/out] uint8_t *iTbs
31243 * @param[in] uint8_t lyr
31244 * @param[in] uint8_t cfi
31245 * @return uint32_t tbSz
31248 static uint32_t rgSCHCmnCalcDwPtsTbSz
31258 static uint32_t rgSCHCmnCalcDwPtsTbSz(cell, bo, rb, iTbs, lyr, cfi)
31268 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Convert the normal-subframe RB count into an equivalent DwPTS RB count
 * based on the reduced resource elements per RB in the DwPTS region */
31269 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
31270 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
31273 /* DwPts Rb cannot exceed the cell Bw */
31274 numDwPtsRb = RGSCH_MIN(numDwPtsRb, cellDl->maxDlBwPerUe);
31276 /* Adjust the iTbs for optimum usage of the DwPts region.
31277 * Using the same iTbs adjustment will not work for all
31278 * special subframe configurations and iTbs levels. Hence use the
31279 * static iTbs Delta table for adjusting the iTbs */
31280 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs);
/* 3GPP 36.213: in DwPTS the TB size is looked up with floor(3/4 * nPRB);
 * grow the RB count until the BO (in bits) fits or the UE BW cap is hit */
31284 while(rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1] < bo*8 &&
31285 numDwPtsRb < cellDl->maxDlBwPerUe)
31290 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
31294 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
31302 * @brief API for calculating the DwPts Rb, Itbs and tbSz
31306 * Function: rgSCHCmnCalcDwPtsTbSz2Cw
31308 * @param[in] RgSchCellCb *cell
31309 * @param[in] uint32_t bo
31310 * @param[in/out] uint8_t *rb
31311 * @param[in] uint8_t maxRb
31312 * @param[in/out] uint8_t *iTbs1
31313 * @param[in/out] uint8_t *iTbs2
31314 * @param[in] uint8_t lyr1
31315 * @param[in] uint8_t lyr2
31316 * @return[in/out] uint32_t *tb1Sz
31317 * @return[in/out] uint32_t *tb2Sz
31318 * @param[in] uint8_t cfi
31321 static Void rgSCHCmnCalcDwPtsTbSz2Cw
31336 static Void rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, rb, maxRb, iTbs1, iTbs2,
31337 lyr1, lyr2, tb1Sz, tb2Sz, cfi)
31351 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Two-codeword variant of rgSCHCmnCalcDwPtsTbSz: size RBs so that the sum
 * of both codewords' TB sizes covers the BO */
31352 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
31353 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
31356 /* DwPts Rb cannot exceed the cell Bw */
31357 numDwPtsRb = RGSCH_MIN(numDwPtsRb, maxRb);
31359 /* Adjust the iTbs for optimum usage of the DwPts region.
31360 * Using the same iTbs adjustment will not work for all
31361 * special subframe configurations and iTbs levels. Hence use the
31362 * static iTbs Delta table for adjusting the iTbs */
31363 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs1);
31364 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs2);
/* Grow RBs until the combined TB size (bits) of both codewords covers bo*8 */
31366 while((rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1] +
31367 rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1])< bo*8 &&
31368 numDwPtsRb < maxRb)
/* Output TB sizes are in bytes (table entries are in bits, hence /8) */
31373 *tb1Sz = rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
31374 *tb2Sz = rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
31384 * @brief Updates the GBR LCGs when datInd is received from MAC
31388 * Function: rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
31389 * Purpose: This function updates the GBR LCGs
31390 * when datInd is received from MAC.
31394 * @param[in] RgSchCellCb *cell
31395 * @param[in] RgSchUeCb *ue
31396 * @param[in] RgInfUeDatInd *datInd
31400 Void rgSCHCmnUpdUeDataIndLcg
31404 RgInfUeDatInd *datInd
31407 Void rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
31410 RgInfUeDatInd *datInd;
31414 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31416 Inst inst = cell->instIdx;
/* NOTE(review): loop bound is RGINF_MAX_LCG_PER_UE - 1, i.e. the last LCG
 * slot is deliberately (or possibly erroneously) skipped -- TODO confirm
 * against the full source before changing. */
31420 for (idx = 0; (idx < RGINF_MAX_LCG_PER_UE - 1); idx++)
31422 if (datInd->lcgInfo[idx].bytesRcvd != 0)
31424 uint8_t lcgId = datInd->lcgInfo[idx].lcgId;
31425 uint32_t bytesRcvd = datInd->lcgInfo[idx].bytesRcvd;
31427 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
31429 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
31430 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* Drain the received bytes first from effGbr, then from effDeltaMbr */
31432 if(bytesRcvd > cmnLcg->effGbr)
31434 bytesRcvd -= cmnLcg->effGbr;
31435 cmnLcg->effDeltaMbr = (cmnLcg->effDeltaMbr > bytesRcvd) ? \
31436 (cmnLcg->effDeltaMbr - bytesRcvd) : (0);
31437 cmnLcg->effGbr = 0;
31441 cmnLcg->effGbr -= bytesRcvd;
31443 /* To keep BS updated with the amount of data received for the GBR */
31444 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31445 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31446 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr+cmnLcg->effDeltaMbr);
31448 else if(lcgId != 0)
/* Non-GBR, non-LCG0: drain AMBR-tracked counters (all clamped at 0) */
31450 ue->ul.effAmbr = (ue->ul.effAmbr > datInd->lcgInfo[idx].bytesRcvd) ? \
31451 (ue->ul.effAmbr - datInd->lcgInfo[idx].bytesRcvd) : (0);
31452 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31453 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31454 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
31455 ue->ul.nonGbrLcgBs = (ue->ul.nonGbrLcgBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31456 (ue->ul.nonGbrLcgBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31458 ue->ul.nonLcg0Bs = (ue->ul.nonLcg0Bs > datInd->lcgInfo[idx].bytesRcvd) ? \
31459 (ue->ul.nonLcg0Bs - datInd->lcgInfo[idx].bytesRcvd) : (0);
/* Dispatch the LCG update to the EMTC or the regular UL scheduler API */
31468 if(TRUE == ue->isEmtcUe)
31470 if (cellSch->apisEmtcUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
31472 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
31479 if (cellSch->apisUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
31481 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
31487 /** @brief This function initializes DL allocation lists and prepares
31492 * Function: rgSCHCmnInitRbAlloc
31494 * @param [in] RgSchCellCb *cell
31500 static Void rgSCHCmnInitRbAlloc
31505 static Void rgSCHCmnInitRbAlloc (cell)
31509 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31510 CmLteTimingInfo frm;
31515 /* Initializing RgSchCmnUlRbAllocInfo structure.*/
31516 rgSCHCmnInitDlRbAllocInfo(&cellSch->allocInfo);
/* Resolve the DL subframe being scheduled from the scheduler's DL time */
31518 frm = cellSch->dl.time;
31520 dlSf = rgSCHUtlSubFrmGet(cell, frm);
/* 5GTF: reset per-beam VRBG accounting and per-TTI UE/group limits */
31522 dlSf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
31523 dlSf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
31524 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
31526 dlSf->sfBeamInfo[idx].totVrbgAllocated = 0;
31527 dlSf->sfBeamInfo[idx].totVrbgRequired = 0;
31528 dlSf->sfBeamInfo[idx].vrbgStart = 0;
31531 dlSf->remUeCnt = cellSch->dl.maxUePerDlSf;
31532 /* Updating the Subframe information in RBAllocInfo */
31533 cellSch->allocInfo.dedAlloc.dedDlSf = dlSf;
31534 cellSch->allocInfo.msg4Alloc.msg4DlSf = dlSf;
31536 /* LTE_ADV_FLAG_REMOVED_START */
31537 /* Determine next scheduling subframe is ABS or not */
31538 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
31540 cell->lteAdvCb.absPatternDlIdx =
31541 ((frm.sfn*RGSCH_NUM_SUB_FRAMES_5G) + frm.slot) % RGR_ABS_PATTERN_LEN;
31542 cell->lteAdvCb.absDlSfInfo = (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern[
31543 cell->lteAdvCb.absPatternDlIdx]);
31548 cell->lteAdvCb.absDlSfInfo = RG_SCH_ABS_DISABLED;
31550 /* LTE_ADV_FLAG_REMOVED_END */
31553 cellSch->allocInfo.ccchSduAlloc.ccchSduDlSf = dlSf;
31556 /* Update subframe-wide allocation information with SPS allocation */
31557 rgSCHCmnSpsDlUpdDlSfAllocWithSps(cell, frm, dlSf);
31566 * @brief Sends a transmission-mode (TM) change indication to RRM.
31571 * Function: rgSCHCmnSendTxModeInd(cell, ue, newTxMode)
31572 * Purpose: This function sends the TX mode Change
31573 * indication to RRM
31578 * @param[in] RgSchCellCb *cell
31579 * @param[in] RgSchUeCb *ue
31580 * @param[in] uint8_t newTxMode
 *
 * Allocates an RgmTransModeInd message from the RGM SAP's memory pool,
 * fills it and delivers it via RgUiRgmChangeTransModeInd(); on success the
 * per-UE mode-change hysteresis counters and deltaiTbs are reset.
31584 static Void rgSCHCmnSendTxModeInd
31591 static Void rgSCHCmnSendTxModeInd(cell, ue, newTxMode)
31597 RgmTransModeInd *txModeChgInd;
31598 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
 /* Skip the indication while a TX-mode reconfiguration is already being
  * forced (RG_SCH_CMN_TD_TXMODE_RECFG bit set in forceTD). */
31601 if(!(ueDl->mimoInfo.forceTD & RG_SCH_CMN_TD_TXMODE_RECFG))
 /* Allocate the indication message; on failure the function bails out
  * (early-return body elided in this listing). */
31604 if(SGetSBuf(cell->rgmSap->sapCfg.sapPst.region,
31605 cell->rgmSap->sapCfg.sapPst.pool, (Data**)&txModeChgInd,
31606 sizeof(RgmTransModeInd)) != ROK)
31610 RG_SCH_FILL_RGM_TRANSMODE_IND(ue->ueId, cell->cellId, newTxMode, txModeChgInd);
31611 RgUiRgmChangeTransModeInd(&(cell->rgmSap->sapCfg.sapPst),
31612 cell->rgmSap->sapCfg.suId, txModeChgInd);
 /* Restart hysteresis accounting after a successful indication. */
31615 ue->mimoInfo.txModUpChgFactor = 0;
31616 ue->mimoInfo.txModDownChgFactor = 0;
31617 ueDl->laCb[0].deltaiTbs = 0;
31623 * @brief Checks & updates the TM mode change threshold based on CQI iTbs and
 * triggers a TM3 <-> TM4 transmission-mode change when the hysteresis
 * threshold is crossed.
31628 * Function: rgSchCheckAndTriggerModeChange(cell, ue, reportediTbs, previTbs, maxiTbs)
31629 * Purpose: This function updates and checks the threshold for TM mode
 * change and invokes rgSCHCmnSendTxModeInd() when a step up (TM3->TM4)
 * or step down (TM4->TM3) is warranted.
31634 * @param[in] RgSchCellCb *cell
31635 * @param[in] RgSchUeCb *ue
31636 * @param[in] uint8_t reportediTbs  reported iTbs; previTbs and maxiTbs
 *            (previous / maximum iTbs) are further parameters — full
 *            parameter list elided in this listing.
31640 Void rgSchCheckAndTriggerModeChange
31644 uint8_t reportediTbs,
31649 Void rgSchCheckAndTriggerModeChange(cell, ue, reportediTbs, previTbs, maxiTbs)
31652 uint8_t reportediTbs;
31657 RgrTxMode txMode; /*!< UE's current Transmission Mode */
31658 RgrTxMode modTxMode; /*!< Target Transmission Mode on change */
31661 txMode = ue->mimoInfo.txMode;
31663 /* Check for Step down */
31664 /* Step down only when TM4 is configured. */
31665 if(RGR_UE_TM_4 == txMode)
 /* Reported iTbs sufficiently above the previous iTbs counts toward a
  * step down; otherwise the counter decays. */
31667 if((previTbs <= reportediTbs) && ((reportediTbs - previTbs) >= RG_SCH_MODE_CHNG_STEPDOWN_CHECK_FACTOR))
31669 ue->mimoInfo.txModDownChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
31673 ue->mimoInfo.txModDownChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
 /* Clamp the decay so the counter never drops below the negative
  * threshold (prevents unbounded negative drift). */
31676 ue->mimoInfo.txModDownChgFactor =
31677 RGSCH_MAX(ue->mimoInfo.txModDownChgFactor, -(RG_SCH_MODE_CHNG_STEPDOWN_THRSHD));
31679 if(ue->mimoInfo.txModDownChgFactor >= RG_SCH_MODE_CHNG_STEPDOWN_THRSHD)
31681 /* Trigger Mode step down */
31682 modTxMode = RGR_UE_TM_3;
31683 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
31687 /* Check for Step up */
31688 /* Step Up only when TM3 is configured, Max possible Mode is TM4*/
31689 if(RGR_UE_TM_3 == txMode)
 /* A worse-than-previous report, or previTbs already at the maximum,
  * counts toward a step up; otherwise the counter decays. */
31691 if((previTbs > reportediTbs) || (maxiTbs == previTbs))
31693 ue->mimoInfo.txModUpChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
31697 ue->mimoInfo.txModUpChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
 /* Clamp the decay symmetrically to the step-up threshold. */
31700 ue->mimoInfo.txModUpChgFactor =
31701 RGSCH_MAX(ue->mimoInfo.txModUpChgFactor, -(RG_SCH_MODE_CHNG_STEPUP_THRSHD));
31703 /* Check if TM step up need to be triggered */
31704 if(ue->mimoInfo.txModUpChgFactor >= RG_SCH_MODE_CHNG_STEPUP_THRSHD)
31706 /* Trigger mode change */
31707 modTxMode = RGR_UE_TM_4;
31708 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
31717 * @brief Returns whether CSG (closed subscriber group) UEs currently have
 * priority in the downlink.
31721 * Function: rgSCHCmnIsDlCsgPrio (cell)
31722 * Purpose: This function returns if csg UEs are
31723 * having priority at current time
31725 * Invoked by: Scheduler
31727 * @param[in] RgSchCellCb *cell
 * @return Bool  (return statements elided in this listing)
31733 Bool rgSCHCmnIsDlCsgPrio
31738 Bool rgSCHCmnIsDlCsgPrio(cell)
31743 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
31745 /* Calculating the percentage resource allocated */
 /* CSG prioritisation only applies to hybrid-access cells. */
31746 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
 /* While non-CSG UEs hold less than their guaranteed share of DL PRBs,
  * CSG UEs do not get priority (exact returns elided). */
31752 if(((cmnDlCell->ncsgPrbCnt * 100) / cmnDlCell->totPrbCnt) < cell->minDlResNonCsg)
31764 * @brief Returns whether CSG (closed subscriber group) UEs currently have
 * priority in the uplink.
31768 * Function: rgSCHCmnIsUlCsgPrio (cell)
31769 * Purpose: This function returns if csg UEs are
31770 * having priority at current time
31772 * Invoked by: Scheduler
31774 * @param[in] RgSchCellCb *cell
 * @return Bool  (return statements elided in this listing)
31780 Bool rgSCHCmnIsUlCsgPrio
31785 Bool rgSCHCmnIsUlCsgPrio(cell)
31789 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
31792 /* Calculating the percentage resource allocated */
 /* CSG prioritisation only applies to hybrid-access cells. */
31793 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
 /* While non-CSG UEs hold less than their guaranteed share of UL PRBs,
  * CSG UEs do not get priority (exact returns elided). */
31799 if (((cmnUlCell->ncsgPrbCnt * 100) /cmnUlCell->totPrbCnt) < cell->minUlResNonCsg)
31810 /** @brief Pre-scheduling step for DL across a set of cells: runs the
 * cell-specific DL pre-scheduler for the primary cell and orders all
 * cells by the remaining-UE budget of their DL subframes.
31814 * Function: rgSchCmnPreDlSch
31816 * @param [in] RgSchCellCb **cell   input cell array
 * @param [in] nCell                 number of cells (elided from view)
 * @param [out] RgSchCellCb **cellLst cells reordered by descending
 *              processing preference (ascending remUeCnt insertion sort)
31821 Void rgSchCmnPreDlSch
31823 RgSchCellCb **cell,
31825 RgSchCellCb **cellLst
31828 Void rgSchCmnPreDlSch(cell, nCell, cellLst)
31829 RgSchCellCb **cell;
31831 RgSchCellCb **cellLst;
31834 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell[0]);
 /* Guard against an out-of-range cell count (early exit body elided). */
31839 if(nCell > CM_LTE_MAX_CELLS)
31844 if (cell[0]->isDlDataAllwd && (cell[0]->stopDlSch == FALSE))
31846 /* Specific DL scheduler to perform UE scheduling */
31847 cellSch->apisDl->rgSCHDlPreSched(cell[0]);
31849 /* Rearranging the cell entries based on their remueCnt in SF.
31850 * cells will be processed in the order of number of ue scheduled
 * (insertion sort into cellLst keyed on dedicated DL subframe remUeCnt) */
31852 for (idx = 0; idx < nCell; idx++)
31855 cellSch = RG_SCH_CMN_GET_CELL(cell[idx]);
31856 sf = cellSch->allocInfo.dedAlloc.dedDlSf;
 /* Tentatively append, then shift right past entries with larger
  * remUeCnt to find the insertion point. */
31860 cellLst[idx] = cell[idx];
31864 for(j = 0; j < idx; j++)
31866 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cellLst[j]);
31867 RgSchDlSf *subfrm = cmnCell->allocInfo.dedAlloc.dedDlSf;
31869 if(sf->remUeCnt < subfrm->remUeCnt)
31872 for(k = idx; k > j; k--)
31874 cellLst[k] = cellLst[k-1];
31879 cellLst[j] = cell[idx];
 /* Else branch (elided): DL data not allowed / scheduling stopped —
  * copy the input order unchanged. */
31884 for (idx = 0; idx < nCell; idx++)
31886 cellLst[idx] = cell[idx];
31892 /** @brief Post-scheduling hook for DL: invokes the cell-specific DL
 * post-scheduler when DL data scheduling is allowed and not stopped.
31895 * Function: rgSchCmnPstDlSch
31897 * @param [in] RgSchCellCb *cell
31902 Void rgSchCmnPstDlSch
31907 Void rgSchCmnPstDlSch(cell)
31911 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31914 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
31916 cellSch->apisDl->rgSCHDlPstSched(cell->instIdx);
/** @brief Computes the periodic CQI (PCQI) report payload size in bits.
 *
 * The size depends on the configured periodic reporting mode
 * (RGR_PRD_CQI_MOD10/11/20/21), the number of TX antennas and, for
 * TM3/TM4, the last periodic RI value. Several case bodies and the
 * final return are elided in this listing; sizes for the submode
 * branches use cqiCb->label as an additive component.
 *
 * @param[in] ueCb      UE control block (source of txMode and nPCqiCb)
 * @param[in] numTxAnt  number of TX antenna ports (2 or 4 handled;
 *                      1-antenna cases are marked invalid for modes 1-1/2-1)
 * @return uint8_t PCQI size in bits (return elided from view)
 */
31921 uint8_t rgSCHCmnCalcPcqiBitSz
31927 uint8_t rgSCHCmnCalcPcqiBitSz(ueCb, numTxAnt)
31932 uint8_t confRepMode;
31935 RgSchUePCqiCb *cqiCb = ueCb->nPCqiCb;
31938 confRepMode = cqiCb->cqiCfg.cqiSetup.prdModeEnum;
 /* RI is only meaningful for TM3/TM4; other TMs take the elided branch. */
31939 if((ueCb->mimoInfo.txMode != RGR_UE_TM_3) &&
31940 (ueCb->mimoInfo.txMode != RGR_UE_TM_4))
31946 ri = cqiCb->perRiVal;
31948 switch(confRepMode)
31950 case RGR_PRD_CQI_MOD10:
31956 case RGR_PRD_CQI_MOD11:
31969 else if(numTxAnt == 4)
31982 /* This is number of antenna case 1.
31983 * This is not applicable for Mode 1-1.
31984 * So setting it to invalid value */
31990 case RGR_PRD_CQI_MOD20:
31998 pcqiSz = 4 + cqiCb->label;
32003 case RGR_PRD_CQI_MOD21:
32018 else if(numTxAnt == 4)
32031 /* This might be number of antenna case 1.
32032 * For mode 2-1 wideband case only antenna port 2 or 4 is supported.
32033 * So setting invalid value.*/
32041 pcqiSz = 4 + cqiCb->label;
32045 pcqiSz = 7 + cqiCb->label;
32058 /** @brief DL scheduler for SPS, and all other downlink data
 *
 * Per-TTI DL scheduling entry point: when dynamic TDD marks the control
 * subframe as UL it logs and skips scheduling (for cellId 1); otherwise
 * it runs the cell-specific new-transmission scheduler, common RB
 * allocation, allocation finalization, PDCCH-order allocation and PUCCH
 * group power control.
32062 * Function: rgSCHCmnDlSch
32064 * @param [in] RgSchCellCb *cell
32075 Void rgSCHCmnDlSch (cell)
32080 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
32082 RgSchDynTddCb *rgSchDynTddInfo = &(rgSchCb[cell->instIdx].rgSchDynTdd);
32083 uint16_t dlCntrlSfIdx;
32087 dlSf = rgSCHUtlSubFrmGet(cell, cellSch->dl.time);
32089 if (rgSchDynTddInfo->isDynTddEnbld)
 /* Map current dyn-TDD subframe index forward by the DL delta. */
32091 RG_SCH_DYN_TDD_GET_SFIDX(dlCntrlSfIdx, rgSchDynTddInfo->crntDTddSfIdx,
32092 RG_SCH_CMN_DL_DELTA);
32093 if(RG_SCH_DYNTDD_DLC_ULD == rgSchDynTddInfo->sfInfo[dlCntrlSfIdx].sfType)
32095 if(1 == cell->cellId)
 /* Subframe already marked UL: count and trace the skipped DL TTI.
  * NOTE(review): printf-based tracing in the scheduling fast path —
  * consider the project's RLOG macros instead. */
32097 ul5gtfsidDlAlreadyMarkUl++;
32099 printf("ul5gtfsidDlAlreadyMarkUl: %d, [sfn:sf] [%04d:%02d]\n",
32100 ul5gtfsidDlAlreadyMarkUl, cellSch->dl.time.sfn,
32101 cellSch->dl.time.slot);
32109 /* Specific DL scheduler to perform UE scheduling */
32110 cellSch->apisDl->rgSCHDlNewSched(cell, &cellSch->allocInfo);
32111 /* LTE_ADV_FLAG_REMOVED_END */
32113 /* call common allocator for RB Allocation */
32114 rgSCHCmnDlRbAlloc(cell, &cellSch->allocInfo);
32116 /* Finalize the Allocations for reqested Against alloced */
32117 rgSCHCmnDlAllocFnlz(cell);
32119 /* Perform Pdcch allocations for PDCCH Order Q.
32120 * As of now, giving this the least preference.
32121 * This func call could be moved above other allocations
32123 rgSCHCmnGenPdcchOrder(cell, dlSf);
32125 /* Do group power control for PUCCH */
32126 rgSCHCmnGrpPwrCntrlPucch(cell, dlSf);
32131 /**********************************************************************
32134 **********************************************************************/