1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /************************************************************************
Desc: C source code for Entry point functions
29 **********************************************************************/
31 /** @file rg_sch_cmn.c
32 @brief This file implements the schedulers main access to MAC layer code.
/* Identity used by the RLOG (radio logging) framework to tag log lines
 * emitted from this translation unit. */
static const char* RLOG_MODULE_NAME="MAC";
/* File id unique within the module; appears in every RLOG line from this file */
static int RLOG_FILE_ID=187;
/* Numeric module id for "MAC" within RLOG */
static int RLOG_MODULE_ID=4096;
39 /* header include files -- defines (.h) */
40 #include "common_def.h"
46 #include "rg_sch_err.h"
47 #include "rg_sch_inf.h"
49 #include "rg_sch_cmn.h"
50 #include "rl_interface.h"
51 #include "rl_common.h"
53 /* header/extern include files (.x) */
54 #include "tfu.x" /* TFU types */
55 #include "lrg.x" /* layer management typedefs for MAC */
56 #include "rgr.x" /* layer management typedefs for MAC */
57 #include "rgm.x" /* layer management typedefs for MAC */
58 #include "rg_sch_inf.x" /* typedefs for Scheduler */
59 #include "rg_sch.x" /* typedefs for Scheduler */
60 #include "rg_sch_cmn.x" /* typedefs for Scheduler */
62 #include "lrg.x" /* Stats Structures */
63 #endif /* MAC_SCH_STATS */
66 #endif /* __cplusplus */
69 EXTERN uint32_t emtcStatsUlTomSrInd;
70 EXTERN uint32_t emtcStatsUlBsrTmrTxp;
/* Absolute difference between two ITBS values.
 * NOTE: function-like macro — each operand is evaluated twice, so do not
 * pass expressions with side effects. */
#define RG_ITBS_DIFF(_x, _y) ((_x) > (_y) ? (_x) - (_y) : (_y) - (_x))
74 EXTERN Void rgSCHSc1UlInit ARGS((RgUlSchdApis *apis));
75 #ifdef RG_PHASE2_SCHED
76 EXTERN Void rgSCHRrUlInit ARGS((RgUlSchdApis *apis));
78 EXTERN Void rgSCHEmtcHqInfoFree ARGS((RgSchCellCb *cell, RgSchDlHqProcCb *hqP));
79 EXTERN Void rgSCHEmtcRrUlInit ARGS((RgUlSchdApis *apis));
80 EXTERN Void rgSCHEmtcCmnDlInit ARGS((Void));
81 EXTERN Void rgSCHEmtcCmnUlInit ARGS((Void));
82 EXTERN Void rgSCHEmtcCmnUeNbReset ARGS((RgSchUeCb *ueCb));
83 EXTERN RgSchCmnCqiToTbs *rgSchEmtcCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
85 EXTERN Void rgSCHMaxciUlInit ARGS((RgUlSchdApis *apis));
86 EXTERN Void rgSCHPfsUlInit ARGS((RgUlSchdApis *apis));
88 EXTERN Void rgSCHSc1DlInit ARGS((RgDlSchdApis *apis));
89 #ifdef RG_PHASE2_SCHED
90 EXTERN Void rgSCHRrDlInit ARGS((RgDlSchdApis *apis));
92 EXTERN Void rgSCHEmtcRrDlInit ARGS((RgDlEmtcSchdApis *apis));
94 EXTERN Void rgSCHMaxciDlInit ARGS((RgDlSchdApis *apis));
95 EXTERN Void rgSCHPfsDlInit ARGS((RgDlSchdApis *apis));
97 EXTERN Void rgSCHDlfsInit ARGS((RgDlfsSchdApis *apis));
101 EXTERN Void rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl ARGS((RgSchCellCb *cell));
102 EXTERN Void rgSCHCmnGetEmtcDciFrmtSizes ARGS((RgSchCellCb *cell));
103 EXTERN Void rgSCHEmtcRrUlProcRmvFrmRetx ARGS((RgSchCellCb *cell, RgSchUlHqProcCb *proc));
104 EXTERN S16 rgSCHCmnPrecompEmtcMsg3Vars
106 RgSchCmnUlCell *cellUl,
112 Void rgSCHEmtcCmnUeCcchSduDel
117 EXTERN Void rgSCHEmtcRmvFrmTaLst
119 RgSchCmnDlCell *cellDl,
122 EXTERN Void rgSCHEmtcInitTaLst
124 RgSchCmnDlCell *cellDl
126 EXTERN Void rgSCHEmtcAddToTaLst
128 RgSchCmnDlCell *cellDl,
135 PRIVATE Void rgSCHDlSiSched ARGS((RgSchCellCb *cell,
136 RgSchCmnDlRbAllocInfo *allocInfo,
137 RgInfSfAlloc *subfrmAlloc));
138 PRIVATE Void rgSCHChkNUpdSiCfg ARGS((RgSchCellCb *cell));
139 PRIVATE Void rgSCHSelectSi ARGS((RgSchCellCb *cell));
140 #endif /*RGR_SI_SCH*/
141 /* LTE_ADV_FLAG_REMOVED_START */
144 PRIVATE S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
152 PRIVATE S16 rgSCHCmnBuildRntpInfo (
160 PRIVATE Void rgSCHCmnNonDlfsType0Alloc
164 RgSchDlRbAlloc *allocInfo,
/* UL redundancy-version index -> I-MCS signalled for a retransmission.
 * MCS values 29..31 are the reserved entries that convey RV 1..3 per
 * 36.213 sec 8.6.1; NOTE(review): entry 32 for index 0 looks like an
 * out-of-range/invalid marker (RV0 retx uses the original MCS) — confirm
 * at the use site. */
PRIVATE uint8_t rgSchCmnUlRvIdxToIMcsTbl[4] = {32, 30, 31, 29};
168 PRIVATE Void rgSCHCmnUlNonadapRetx ARGS((
169 RgSchCmnUlCell *cellUl,
173 PRIVATE Void rgSCHCmnUlSfRlsRetxProcs ARGS((
179 PRIVATE S16 rgSCHCmnUlMdfyGrntForCqi ARGS((
186 uint32_t stepDownItbs,
190 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1 ARGS((
192 RgSchDlRbAlloc *rbAllocInfo,
193 RgSchDlHqProcCb *hqP,
197 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1A ARGS((
199 RgSchDlRbAlloc *rbAllocInfo,
200 RgSchDlHqProcCb *hqP,
204 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1B ARGS((
206 RgSchDlRbAlloc *rbAllocInfo,
207 RgSchDlHqProcCb *hqP,
211 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2 ARGS((
213 RgSchDlRbAlloc *rbAllocInfo,
214 RgSchDlHqProcCb *hqP,
218 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2A ARGS((
220 RgSchDlRbAlloc *rbAllocInfo,
221 RgSchDlHqProcCb *hqP,
228 Void rgSCHCmnDlSpsSch
232 /* LTE_ADV_FLAG_REMOVED_END */
234 PRIVATE Void rgSCHCmnNonDlfsBcchPcchRbAlloc ARGS((
236 RgSchCmnDlRbAllocInfo *allocInfo
238 PRIVATE Void rgSCHBcchPcchDlRbAlloc ARGS((
240 RgSchCmnDlRbAllocInfo *allocInfo
242 PRIVATE Void rgSCHCmnDlBcchPcchAlloc ARGS((
246 PRIVATE Void rgSCHCmnDlCqiOnPucchInd ARGS ((
249 TfuDlCqiPucch *pucchCqi,
250 RgrUeCqiRept *ueCqiRept,
252 Bool *is2ndCwCqiAvail
254 PRIVATE Void rgSCHCmnDlCqiOnPuschInd ARGS ((
257 TfuDlCqiPusch *puschCqi,
258 RgrUeCqiRept *ueCqiRept,
260 Bool *is2ndCwCqiAvail
263 PRIVATE Void rgSCHCmnDlCqiOnPucchInd ARGS ((
266 TfuDlCqiPucch *pucchCqi
268 PRIVATE Void rgSCHCmnDlCqiOnPuschInd ARGS ((
271 TfuDlCqiPusch *puschCqi
274 /* ccpu00117452 - MOD - Changed macro name from
275 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
277 PRIVATE S16 rgSCHCmnUeDlPwrCtColltCqiRept ARGS((
280 RgrUeCqiRept *ueCqiRept));
281 #endif /* End of RGR_CQI_REPT */
282 /* Fix: syed align multiple UEs to refresh at same time */
283 PRIVATE Void rgSCHCmnGetRefreshPer ARGS((
287 PRIVATE S16 rgSCHCmnApplyUeRefresh ARGS((
291 Void rgSCHCmnDlSetUeAllocLmtLa ARGS
296 PRIVATE Void rgSCHCheckAndSetTxScheme ARGS
304 PRIVATE uint32_t rgSCHCmnCalcDwPtsTbSz ARGS
314 PRIVATE Void rgSCHCmnCalcDwPtsTbSz2Cw ARGS
330 PRIVATE Void rgSCHCmnInitRbAlloc ARGS
336 #endif /* __cplusplus */
340 RgSchdApis rgSchCmnApis;
341 PRIVATE RgUlSchdApis rgSchUlSchdTbl[RGSCH_NUM_SCHEDULERS];
342 PRIVATE RgDlSchdApis rgSchDlSchdTbl[RGSCH_NUM_SCHEDULERS];
344 PRIVATE RgUlSchdApis rgSchEmtcUlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
345 PRIVATE RgDlEmtcSchdApis rgSchEmtcDlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
347 #ifdef RG_PHASE2_SCHED
348 PRIVATE RgDlfsSchdApis rgSchDlfsSchdTbl[RGSCH_NUM_DLFS_SCHEDULERS];
350 PRIVATE RgUlSchdInits rgSchUlSchdInits = RGSCH_ULSCHED_INITS;
351 PRIVATE RgDlSchdInits rgSchDlSchdInits = RGSCH_DLSCHED_INITS;
353 PRIVATE RgEmtcUlSchdInits rgSchEmtcUlSchdInits = RGSCH_EMTC_ULSCHED_INITS;
354 PRIVATE RgEmtcDlSchdInits rgSchEmtcDlSchdInits = RGSCH_EMTC_DLSCHED_INITS;
356 #if (defined (RG_PHASE2_SCHED) && defined (TFU_UPGRADE))
357 PRIVATE RgDlfsSchdInits rgSchDlfsSchdInits = RGSCH_DLFSSCHED_INITS;
360 typedef Void (*RgSchCmnDlAllocRbFunc) ARGS((RgSchCellCb *cell, RgSchDlSf *subFrm,
361 RgSchUeCb *ue, uint32_t bo, uint32_t *effBo, RgSchDlHqProcCb *proc,
362 RgSchCmnDlRbAllocInfo *cellWdAllocInfo));
363 typedef uint8_t (*RgSchCmnDlGetPrecInfFunc) ARGS((RgSchCellCb *cell, RgSchUeCb *ue,
364 uint8_t numLyrs, Bool bothCwEnbld));
365 PRIVATE Void rgSCHCmnDlAllocTxRbTM1 ARGS((
371 RgSchDlHqProcCb *proc,
372 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
374 PRIVATE Void rgSCHCmnDlAllocTxRbTM2 ARGS((
380 RgSchDlHqProcCb *proc,
381 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
383 PRIVATE Void rgSCHCmnDlAllocTxRbTM3 ARGS((
389 RgSchDlHqProcCb *proc,
390 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
392 PRIVATE Void rgSCHCmnDlAllocTxRbTM4 ARGS((
398 RgSchDlHqProcCb *proc,
399 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
402 PRIVATE Void rgSCHCmnDlAllocTxRbTM5 ARGS((
408 RgSchDlHqProcCb *proc,
409 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
412 PRIVATE Void rgSCHCmnDlAllocTxRbTM6 ARGS((
418 RgSchDlHqProcCb *proc,
419 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
421 PRIVATE Void rgSCHCmnDlAllocTxRbTM7 ARGS((
427 RgSchDlHqProcCb *proc,
428 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
430 PRIVATE Void rgSCHCmnDlAllocRetxRbTM1 ARGS((
436 RgSchDlHqProcCb *proc,
437 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
439 PRIVATE Void rgSCHCmnDlAllocRetxRbTM2 ARGS((
445 RgSchDlHqProcCb *proc,
446 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
448 PRIVATE Void rgSCHCmnDlAllocRetxRbTM3 ARGS((
454 RgSchDlHqProcCb *proc,
455 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
457 PRIVATE Void rgSCHCmnDlAllocRetxRbTM4 ARGS((
463 RgSchDlHqProcCb *proc,
464 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
467 PRIVATE Void rgSCHCmnDlAllocRetxRbTM5 ARGS((
473 RgSchDlHqProcCb *proc,
474 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
477 PRIVATE Void rgSCHCmnDlAllocRetxRbTM6 ARGS((
483 RgSchDlHqProcCb *proc,
484 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
486 PRIVATE Void rgSCHCmnDlAllocRetxRbTM7 ARGS((
492 RgSchDlHqProcCb *proc,
493 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
497 PRIVATE uint8_t rgSchGetN1ResCount ARGS ((
501 Bool rgSchCmnChkDataOnlyOnPcell
507 uint8_t rgSCHCmnCalcPcqiBitSz
/* Functions specific to each transmission mode for DL Tx RB Allocation.
 * Indexed by (transmission mode - 1); the NULLP at index 4 means TM5 is
 * not handled by this dispatch table. */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[7] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7};

/* Functions specific to each transmission mode for DL Retx RB Allocation.
 * Same indexing as dlAllocTxRbFunc; TM5 (index 4) unsupported. */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[7] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7};

/* 9-entry variant of the Tx dispatch table with NULLP slots for TM8/TM9.
 * NOTE(review): the same identifier is defined twice ([7] above, [9] here),
 * so these pairs are presumably selected by a build-time #ifdef that is not
 * visible in this chunk — confirm against the full file. */
RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[9] = {rgSCHCmnDlAllocTxRbTM1,
rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7, NULLP, NULLP};

/* 9-entry variant of the Retx dispatch table (see note above). */
RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[9] = {rgSCHCmnDlAllocRetxRbTM1,
rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7, NULLP, NULLP};
537 PRIVATE uint8_t rgSCHCmnDlTM3PrecInf2 ARGS((
543 PRIVATE uint8_t rgSCHCmnDlTM3PrecInf4 ARGS((
549 PRIVATE uint8_t rgSCHCmnDlTM4PrecInf2 ARGS((
555 PRIVATE uint8_t rgSCHCmnDlTM4PrecInf4 ARGS((
561 /* Functions specific to each transmission mode for DL RB Allocation*/
562 RgSchCmnDlGetPrecInfFunc getPrecInfoFunc[2][2] = {
563 {rgSCHCmnDlTM3PrecInf2, rgSCHCmnDlTM3PrecInf4},
564 {rgSCHCmnDlTM4PrecInf2, rgSCHCmnDlTM4PrecInf4}
567 PRIVATE S16 rgSCHCmnDlAlloc1CwRetxRb ARGS((
571 RgSchDlHqTbCb *tbInfo,
576 PRIVATE S16 rgSCHCmnDlAlloc2CwRetxRb ARGS((
580 RgSchDlHqProcCb *proc,
585 PRIVATE Void rgSCHCmnDlTM3TxTx ARGS((
591 RgSchDlHqProcCb *proc,
592 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
594 PRIVATE Void rgSCHCmnDlTM3TxRetx ARGS((
600 RgSchDlHqProcCb *proc,
601 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
603 PRIVATE Void rgSCHCmnDlTM3RetxRetx ARGS((
609 RgSchDlHqProcCb *proc,
610 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
613 PRIVATE Void rgSCHCmnNonDlfsUpdTyp2Alloc ARGS((
619 /* LTE_ADV_FLAG_REMOVED_START */
621 PRIVATE Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc ARGS((
628 /* LTE_ADV_FLAG_REMOVED_END */
629 PRIVATE Void rgSCHCmnDlRbInfoAddUeTx ARGS((
631 RgSchCmnDlRbAllocInfo *allocInfo,
633 RgSchDlHqProcCb *proc
635 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetx ARGS((
637 RgSchCmnDlRbAllocInfo *allocInfo,
641 PRIVATE Void rgSCHCmnDlAdd2NonSchdRetxLst ARGS((
642 RgSchCmnDlRbAllocInfo *allocInfo,
644 RgSchDlHqProcCb *proc
646 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRetxRb ARGS((
650 RgSchDlHqTbCb *reTxTb,
655 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRb ARGS((
659 RgSchDlHqProcCb *proc,
664 PRIVATE S16 rgSCHCmnDlAlloc1CwTxRb ARGS((
668 RgSchDlHqTbCb *tbInfo,
674 PRIVATE Void rgSCHCmnFillHqPTb ARGS((
676 RgSchDlRbAlloc *rbAllocInfo,
682 PRIVATE Void rgSCHCmnDlGetBestFitHole ARGS((
685 uint32_t *crntAllocMask,
688 uint8_t *allocNumRbs,
691 #ifdef RGSCH_SPS_UNUSED
692 PRIVATE uint32_t rgSCHCmnGetRaType1Mask ARGS((
698 PRIVATE uint32_t rgSCHCmnGetRaType0Mask ARGS((
702 PRIVATE uint32_t rgSCHCmnGetRaType2Mask ARGS((
708 Bool rgSCHCmnRetxAllocAvoid ARGS((
711 RgSchDlHqProcCb *proc
714 uint16_t rgSCHCmnGetSiSetId ARGS((
717 uint16_t minPeriodicity
/* 5GTF transport block sizes in bits, indexed by MCS (0..MAX_5GTF_MCS-1). */
//TODO_SID: Currently table is only for 100 Prbs. Need to modify wrt VRBG table 8.1.5.2.1-1 V5G_213
uint32_t rgSch5gtfTbSzTbl[MAX_5GTF_MCS] =
{1864, 5256, 8776, 13176, 17576, 21976, 26376, 31656, 35176, 39576, 43976, 47496, 52776, 59376, 66392};
/* 5GTF debug/statistics counters, incremented at the scheduling stages
 * suggested by their names (SR/BSR reception, UE pick, PDCCH scheduling,
 * RB allocation, finalization). NOTE(review): exact semantics are defined
 * at the increment sites elsewhere in this file — confirm there. */
uint32_t g5gtfTtiCnt = 0;
uint32_t gUl5gtfSrRecv = 0;
uint32_t gUl5gtfBsrRecv = 0;
uint32_t gUl5gtfUeSchPick = 0;
uint32_t gUl5gtfPdcchSchd = 0;
uint32_t gUl5gtfAllocAllocated = 0;
uint32_t gUl5gtfUeRbAllocDone = 0;
uint32_t gUl5gtfUeRmvFnlzZeroBo = 0;
uint32_t gUl5gtfUeFnlzReAdd = 0;
uint32_t gUl5gtfPdcchSend = 0;
uint32_t gUl5gtfRbAllocFail = 0;
uint32_t ul5gtfsidUlMarkUl = 0;
uint32_t ul5gtfsidDlSchdPass = 0;
uint32_t ul5gtfsidDlAlreadyMarkUl = 0;
uint32_t ul5gtfTotSchdCnt = 0;
/* Beta offset applied to CQI carried on PUSCH, indexed by the configured
 * CQI offset index. Values are stored scaled by 1000 (parts per 1000);
 * reserved indices (0 and 1) hold 0.
 * Refer 36.213 sec 8.6.3, Table 8.6.3-3. */
uint32_t rgSchCmnBetaCqiOffstTbl[16] =
{
   0,    0,    1125, 1250,
   1375, 1625, 1750, 2000,
   2250, 2500, 2875, 3125,
   3500, 4000, 5000, 6250
};
/* Beta offset for HARQ-ACK on PUSCH, indexed by the configured HARQ-ACK
 * offset index; stored scaled by 1000, reserved index 15 holds 0.
 * Refer 36.213 sec 8.6.3, Table 8.6.3-1. */
uint32_t rgSchCmnBetaHqOffstTbl[16] =
{
   2000,  2500,  3125,  4000,
   5000,  6250,  8000,  10000,
   12625, 15875, 20000, 31000,
   50000, 80000, 126000, 0
};
/* Beta offset for RI on PUSCH, indexed by the configured RI offset index;
 * stored scaled by 1000.
 * Refer 36.213 sec 8.6.3, Table 8.6.3-2.
 * Fix: the initializer was truncated after 11 of 16 values (and never
 * terminated); completed per the spec table. */
uint32_t rgSchCmnBetaRiOffstTbl[16] = {1250, 1625, 2000,
   2500, 3125, 4000, 5000, 6250, 8000, 10000, 12625,
   15875, 20000, 31000, 50000, 80000};
/* Maps the 3-bit DL differential CQI field to a signed offset:
 * codes 0..3 -> +0..+3, codes 4..7 -> -4..-1 (two's-complement style). */
S8 rgSchCmnDlCqiDiffOfst[8] = {0, 1, 2, 3, -4, -3, -2, -1};
/* Include CRS REs while calculating Efficiency */
/* Indexed by the number of antenna ports; only counts 1, 2 and 4 are
 * valid (entries for 0 and 3 are unused). */
CONSTANT PRIVATE uint8_t rgSchCmnAntIdx[5] = {0,0,1,0,2};
/* Resource elements occupied by cell-specific reference signals, indexed
 * by antenna-port count — NOTE(review): presumably per RB pair; confirm. */
CONSTANT PRIVATE uint8_t rgSchCmnNumResForCrs[5] = {0,6,12,0,16};
/* Running count of CFI switch-overs — NOTE(review): updated elsewhere in
 * this file; confirm semantics at the increment site. */
uint32_t cfiSwitchCnt ;
/* Differential CQI offsets for aperiodic reporting — NOTE(review):
 * semantics inferred from the names (UE-selected vs eNB-configured);
 * confirm against the consumers. */
S8 rgSchCmnApUeSelDiffCqi[4] = {1, 2, 3, 4};
S8 rgSchCmnApEnbConfDiffCqi[4] = {0, 1, 2, -1};
769 typedef struct rgSchCmnDlUeDciFrmtOptns
771 TfuDciFormat spfcDciFrmt; /* TM(Transmission Mode) specific DCI format.
772 * Search space : UE Specific by C-RNTI only. */
773 uint8_t spfcDciRAType; /* Resource Alloctn(RA) type for spfcDciFrmt */
774 TfuDciFormat prfrdDciFrmt; /* Preferred DCI format among the available
775 * options for TD (Transmit Diversity) */
776 uint8_t prfrdDciRAType; /* Resource Alloctn(RA) type for prfrdDciFrmt */
777 }RgSchCmnDlUeDciFrmtOptns;
780 /* DCI Format options for each Transmission Mode */
781 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[7] = {
782 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
783 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
784 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
785 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
786 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
787 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
788 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
792 /* DCI Format options for each Transmission Mode */
793 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[9] = {
794 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
795 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
796 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
797 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
798 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
799 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
800 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
805 typedef struct rgSchCmnDlImcsTbl
807 uint8_t modOdr; /* Modulation Order */
808 uint8_t iTbs; /* ITBS */
809 }RgSchCmnDlImcsTbl[29];
811 CONSTANT struct rgSchCmnMult235Info
813 uint8_t match; /* Closest number satisfying 2^a.3^b.5^c, with a bias
814 * towards the smaller number */
815 uint8_t prvMatch; /* Closest number not greater than array index
816 * satisfying 2^a.3^b.5^c */
817 } rgSchCmnMult235Tbl[110+1] = {
819 {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {6, 6}, {8, 8},
820 {9, 9}, {10, 10}, {10, 10}, {12, 12}, {12, 12}, {15, 12}, {15, 15},
821 {16, 16}, {16, 16}, {18, 18}, {18, 18}, {20, 20}, {20, 20}, {20, 20},
822 {24, 20}, {24, 24}, {25, 25}, {25, 25}, {27, 27}, {27, 27}, {30, 27},
823 {30, 30}, {30, 30}, {32, 32}, {32, 32}, {32, 32}, {36, 32}, {36, 36},
824 {36, 36}, {36, 36}, {40, 36}, {40, 40}, {40, 40}, {40, 40}, {45, 40},
825 {45, 40}, {45, 45}, {45, 45}, {48, 45}, {48, 48}, {48, 48}, {50, 50},
826 {50, 50}, {50, 50}, {54, 50}, {54, 54}, {54, 54}, {54, 54}, {54, 54},
827 {60, 54}, {60, 54}, {60, 60}, {60, 60}, {60, 60}, {64, 60}, {64, 64},
828 {64, 64}, {64, 64}, {64, 64}, {64, 64}, {72, 64}, {72, 64}, {72, 64},
829 {72, 72}, {72, 72}, {75, 72}, {75, 75}, {75, 75}, {75, 75}, {80, 75},
830 {80, 75}, {80, 80}, {81, 81}, {81, 81}, {81, 81}, {81, 81}, {81, 81},
831 {90, 81}, {90, 81}, {90, 81}, {90, 81}, {90, 90}, {90, 90}, {90, 90},
832 {90, 90}, {96, 90}, {96, 90}, {96, 96}, {96, 96}, {96, 96}, {100, 96},
833 {100, 100}, {100, 100}, {100, 100}, {100, 100}, {100, 100}, {108, 100},
834 {108, 100}, {108, 100}, {108, 108}, {108, 108}, {108, 108}
/* Backoff Indicator (BI) index -> backoff value, from 36.321 Table 7.2-1.
 * NOTE(review): units are presumably subframes/ms — confirm at use site. */
CONSTANT PRIVATE S16 rgSchCmnBiTbl[RG_SCH_CMN_NUM_BI_VAL] = {
0, 10, 20, 30,40,60,80,120,160,240,320,480,960};
841 RgSchCmnUlCqiInfo rgSchCmnUlCqiTbl[RG_SCH_CMN_UL_NUM_CQI] = {
843 {RGSCH_CMN_QM_CQI_1,RGSCH_CMN_UL_EFF_CQI_1 },
844 {RGSCH_CMN_QM_CQI_2,RGSCH_CMN_UL_EFF_CQI_2 },
845 {RGSCH_CMN_QM_CQI_3,RGSCH_CMN_UL_EFF_CQI_3 },
846 {RGSCH_CMN_QM_CQI_4,RGSCH_CMN_UL_EFF_CQI_4 },
847 {RGSCH_CMN_QM_CQI_5,RGSCH_CMN_UL_EFF_CQI_5 },
848 {RGSCH_CMN_QM_CQI_6,RGSCH_CMN_UL_EFF_CQI_6 },
849 {RGSCH_CMN_QM_CQI_7,RGSCH_CMN_UL_EFF_CQI_7 },
850 {RGSCH_CMN_QM_CQI_8,RGSCH_CMN_UL_EFF_CQI_8 },
851 {RGSCH_CMN_QM_CQI_9,RGSCH_CMN_UL_EFF_CQI_9 },
852 {RGSCH_CMN_QM_CQI_10,RGSCH_CMN_UL_EFF_CQI_10 },
853 {RGSCH_CMN_QM_CQI_11,RGSCH_CMN_UL_EFF_CQI_11 },
854 {RGSCH_CMN_QM_CQI_12,RGSCH_CMN_UL_EFF_CQI_12 },
855 {RGSCH_CMN_QM_CQI_13,RGSCH_CMN_UL_EFF_CQI_13 },
856 {RGSCH_CMN_QM_CQI_14,RGSCH_CMN_UL_EFF_CQI_14 },
857 {RGSCH_CMN_QM_CQI_15,RGSCH_CMN_UL_EFF_CQI_15 },
861 /* This table maps a (delta_offset * 2 + 2) to a (beta * 8)
862 * where beta is 10^-(delta_offset/10) rounded off to nearest 1/8
864 PRIVATE uint16_t rgSchCmnUlBeta8Tbl[29] = {
865 6, RG_SCH_CMN_UL_INVALID_BETA8, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23,
866 25, 28, 32, RG_SCH_CMN_UL_INVALID_BETA8, 40, RG_SCH_CMN_UL_INVALID_BETA8,
867 50, RG_SCH_CMN_UL_INVALID_BETA8, 64, RG_SCH_CMN_UL_INVALID_BETA8, 80,
868 RG_SCH_CMN_UL_INVALID_BETA8, 101, RG_SCH_CMN_UL_INVALID_BETA8, 127,
869 RG_SCH_CMN_UL_INVALID_BETA8, 160
/* QCI to SVC priority mapping. Index specifies the QCI. */
PRIVATE uint8_t rgSchCmnDlQciPrio[RG_SCH_CMN_MAX_QCI] = RG_SCH_CMN_QCI_TO_PRIO;
/* The configuration is efficiency measured per 1024 REs. */
/* The first element stands for when CQI is not known. */
/* This table is used to translate CQI to its corresponding */
/* allocation parameters. These are currently from 36.213. */
/* Just this table needs to be edited for modifying the */
/* resource allocation behaviour. */
/* ADD CQI to MCS mapping correction:
 * single-dimensional array is replaced by 2 dimensions for different CFI */
PRIVATE uint16_t rgSchCmnCqiPdschEff[4][16] = {RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI2,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI3};
/* Two-layer (spatial multiplexing) variant of the CQI -> PDSCH
 * efficiency table above, also indexed [CFI][CQI]. */
PRIVATE uint16_t rgSchCmn2LyrCqiPdschEff[4][16] = {RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI1,
RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI2, RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI3};
/* DL redundancy-version transmission order: RV index sequence 0,2,3,1. */
PRIVATE uint8_t rgSchCmnDlRvTbl[4] = {0, 2, 3, 1}; /* RVIdx sequence is corrected*/
/* Indexed by [DciFrmt]. DCI format payload sizes —
 * NOTE(review): left zero here and filled elsewhere; confirm at init. */
PRIVATE uint16_t rgSchCmnDciFrmtSizes[10];
/* This configuration determines the translation of a UE's CQI to its
 * PDCCH coding efficiency. May be edited based on the installation. */
PRIVATE uint16_t rgSchCmnCqiPdcchEff[16] = RG_SCH_CMN_CQI_TO_PDCCH_EFF;
918 RgSchTddUlDlSubfrmTbl rgSchTddUlDlSubfrmTbl = {
919 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME},
920 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
921 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
922 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
923 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
924 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
925 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME}
930 uint8_t rgSchTddSpsDlMaxRetxTbl[RGSCH_MAX_TDD_UL_DL_CFG] = {
942 /* Special Subframes in OFDM symbols */
943 /* ccpu00134197-MOD-Correct the number of symbols */
944 RgSchTddSplSubfrmInfoTbl rgSchTddSplSubfrmInfoTbl = {
948 {11, 1, 1, 10, 1, 1},
956 /* PHICH 'm' value Table */
957 RgSchTddPhichMValTbl rgSchTddPhichMValTbl = {
958 {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
959 {0, 1, 0, 0, 1, 0, 1, 0, 0, 1},
960 {0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
961 {1, 0, 0, 0, 0, 0, 0, 0, 1, 1},
962 {0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
963 {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
964 {1, 1, 0, 0, 0, 1, 1, 0, 0, 1}
967 /* PHICH 'K' value Table */
968 RgSchTddKPhichTbl rgSchTddKPhichTbl = {
969 {0, 0, 4, 7, 6, 0, 0, 4, 7, 6},
970 {0, 0, 4, 6, 0, 0, 0, 4, 6, 0},
971 {0, 0, 6, 0, 0, 0, 0, 6, 0, 0},
972 {0, 0, 6, 6, 6, 0, 0, 0, 0, 0},
973 {0, 0, 6, 6, 0, 0, 0, 0, 0, 0},
974 {0, 0, 6, 0, 0, 0, 0, 0, 0, 0},
975 {0, 0, 4, 6, 6, 0, 0, 4, 7, 0}
978 /* Uplink association index 'K' value Table */
979 RgSchTddUlAscIdxKDashTbl rgSchTddUlAscIdxKDashTbl = {
980 {0, 0, 6, 4, 0, 0, 0, 6, 4, 0},
981 {0, 0, 4, 0, 0, 0, 0, 4, 0, 0},
982 {0, 0, 4, 4, 4, 0, 0, 0, 0, 0},
983 {0, 0, 4, 4, 0, 0, 0, 0, 0, 0},
984 {0, 0, 4, 0, 0, 0, 0, 0, 0, 0},
985 {0, 0, 7, 7, 5, 0, 0, 7, 7, 0}
989 /* PUSCH 'K' value Table */
990 RgSchTddPuschTxKTbl rgSchTddPuschTxKTbl = {
991 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
992 {0, 6, 0, 0, 4, 0, 6, 0, 0, 4},
993 {0, 0, 0, 4, 0, 0, 0, 0, 4, 0},
994 {4, 0, 0, 0, 0, 0, 0, 0, 4, 4},
995 {0, 0, 0, 0, 0, 0, 0, 0, 4, 4},
996 {0, 0, 0, 0, 0, 0, 0, 0, 4, 0},
997 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
/* PDSCH to PUCCH Table for DL HARQ feedback, derived from the downlink
 * association set index 'K' table. Rows are indexed by TDD UL/DL
 * configuration (0..6), columns by subframe number (0..9).
 * Fix: the initializer was missing its closing brace — added; values
 * unchanged. */
uint8_t rgSchTddPucchTxTbl[7][10] = {
   {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
   {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
   {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
   {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
   {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
   {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
   {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
};
1012 /* Table to fetch the next DL sf idx for applying the
1013 new CFI. The next Dl sf Idx at which the new CFI
1014 is applied is always the starting Sf of the next ACK/NACK
1017 Ex: In Cfg-2, sf4 and sf9 are the only subframes at which
1018 a new ACK/NACK bundle of DL subframes can start
1020 D S U D D D S U D D D S U D D D S U D D
1023 dlSf Array for Cfg-2:
1024 sfNum: 0 1 3 4 5 6 8 9 0 1 3 4 5 6 8 9
sfIdx: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1027 If CFI changes at sf0, nearest DL SF bundle >= 4 TTI is sf4
1028 So at sf4 the new CFI can be applied. To arrive at sf4 from
1029 sf0, the sfIdx has to be increased by 3 */
/* DL subframe-index increment for applying a new CFI, per TDD UL/DL
 * configuration (rows 0..6) and subframe number (columns 0..9); see the
 * ACK/NACK-bundle explanation in the comment block above.
 * Fix: the initializer was missing its closing brace — added; values
 * unchanged. */
uint8_t rgSchTddPdcchSfIncTbl[7][10] = {
/* A/N Bundl: 0,1,5,6*/   {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
/* A/N Bundl: 0,4,5,9*/   {2, 2, 0, 0, 3, 2, 2, 0, 0, 3},
/* A/N Bundl: 4,9*/       {3, 6, 0, 5, 4, 3, 6, 0, 5, 4},
/* A/N Bundl: 1,7,9*/     {4, 3, 0, 0, 0, 4, 5, 4, 6, 5},
/* A/N Bundl: 0,6*/       {4, 3, 0, 0, 6, 5, 4, 7, 6, 5},
/* A/N Bundl: 9*/         {8, 7, 0, 6, 5, 4, 12, 11, 10, 9},
/* A/N Bundl: 0,1,5,6,9*/ {2, 1, 0, 0, 0, 2, 2, 0, 0, 3}
};
1042 /* combine compilation fixes */
1044 /* subframe offset values to be used when twoIntervalsConfig is enabled in UL
1046 RgSchTddSfOffTbl rgSchTddSfOffTbl = {
1047 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1048 {0, 0, 1, -1, 0, 0, 0, 1, -1, 0},
1049 {0, 0, 5, 0, 0, 0, 0, -5, 0, 0},
1050 {0, 0, 1, 1, -2, 0, 0, 0, 0, 0},
1051 {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
1052 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1053 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1057 /* Table to determine when uplink SPS configured grants should
 * explicitly be reserved in a subframe. When entries are same
1059 * as that of Msg3SubfrmTbl, indicates competition with msg3.
1060 * As of now, this is same as Msg3SubfrmTbl (leaving out uldlcfg 2),
1061 * except that all 255s are now zeros. */
1062 RgSchTddSpsUlRsrvTbl rgSchTddSpsUlRsrvTbl = {
1063 {0, 0, 0, 6, 8, 0, 0, 0, 6, 8},
1064 {0, 0, 6, 9, 0, 0, 0, 6, 9, 0},
1065 {0, 0, 10, 0, 0, 0, 0, 10, 0, 0},
1066 {0, 0, 0, 0, 8, 0, 7, 7, 14, 0},
1067 {0, 0, 0, 9, 0, 0, 7, 15, 0, 0},
1068 {0, 0, 10, 0, 0, 0, 16, 0, 0, 0},
1069 {0, 0, 0, 0, 8, 0, 0, 0, 9, 0}
1072 /* Inverse DL Assoc Set index Table */
1073 RgSchTddInvDlAscSetIdxTbl rgSchTddInvDlAscSetIdxTbl = {
1074 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1075 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1076 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1077 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1078 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1079 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1080 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1083 #endif /* (LTEMAC_SPS ) */
/* Number of uplink subframes per TDD UL/DL configuration (index 0..6). */
PRIVATE uint8_t rgSchTddNumUlSf[] = {6, 4, 2, 3, 2, 1, 5};

/* Uplink HARQ processes table, per TDD UL/DL configuration.
 * (Fix: the "Downlink"/"Uplink" comments on these two tables were
 * swapped — the values here match the UL HARQ counts of 36.213 Tbl 8-1.) */
RgSchTddUlNumHarqProcTbl rgSchTddUlNumHarqProcTbl = { 7, 4, 2, 3, 2, 1, 6};

/* Downlink HARQ processes table, per TDD UL/DL configuration
 * (maximum DL HARQ processes per 36.213 Table 7-1). */
RgSchTddDlNumHarqProcTbl rgSchTddDlNumHarqProcTbl = { 4, 7, 10, 9, 12, 15, 6};
1094 /* Downlink association index set 'K' value Table */
1095 RgSchTddDlAscSetIdxKTbl rgSchTddDlAscSetIdxKTbl = {
1096 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1098 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1100 { {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}} },
1102 { {0, {0}}, {0, {0}}, {3, {7, 6, 11}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1104 { {0, {0}}, {0, {0}}, {4, {12, 8, 7, 11}}, {4, {6, 5, 4, 7}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1106 { {0, {0}}, {0, {0}}, {9, {13, 12, 9, 8, 7, 5, 4, 11, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1108 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1111 /* ccpu132282-ADD-the table rgSchTddDlAscSetIdxKTbl is rearranged in
1112 * decreasing order of Km, this is used to calculate the NCE used for
1113 * calculating N1Pucch Resource for Harq*/
1114 RgSchTddDlAscSetIdxKTbl rgSchTddDlHqPucchResCalTbl = {
1115 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1117 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1119 { {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}} },
1121 { {0, {0}}, {0, {0}}, {3, {11, 7, 6}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1123 { {0, {0}}, {0, {0}}, {4, {12, 11, 8, 7}}, {4, {7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1125 { {0, {0}}, {0, {0}}, {9, {13, 12, 11, 9, 8, 7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1127 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1130 /* Minimum number of Ack/Nack feeback information to be
1131 stored for each UL-DL configuration */
1132 RgSchTddANFdbkMapTbl rgSchTddANFdbkMapTbl = {4, 4, 2, 3, 2, 1, 5};
1134 /* Uplink switch points and number of UL subframes Table */
1135 RgSchTddMaxUlSubfrmTbl rgSchTddMaxUlSubfrmTbl = {
1136 {2,3,3}, {2,2,2}, {2,1,1}, {1,3,0}, {1,2,0}, {1,1,0}, {2,3,2}
1139 /* Uplink switch points and number of DL subframes Table */
1140 RgSchTddMaxDlSubfrmTbl rgSchTddMaxDlSubfrmTbl = {
1141 {2,2,2}, {2,3,3}, {2,4,4}, {1,7,0}, {1,8,0}, {1,9,0}, {2,2,3}
1144 /* Number of UL subframes present before a particular subframe */
1145 RgSchTddNumUlSubfrmTbl rgSchTddNumUlSubfrmTbl = {
1146 {0, 0, 1, 2, 3, 3, 3, 4, 5, 6},
1147 {0, 0, 1, 2, 2, 2, 2, 3, 4, 4},
1148 {0, 0, 1, 1, 1, 1, 1, 2, 2, 2},
1149 {0, 0, 1, 2, 3, 3, 3, 3, 3, 3},
1150 {0, 0, 1, 2, 2, 2, 2, 2, 2, 2},
1151 {0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
1152 {0, 0, 1, 2, 3, 3, 3, 4, 5, 5}
1155 /* Number of DL subframes present till a particular subframe */
1156 RgSchTddNumDlSubfrmTbl rgSchTddNumDlSubfrmTbl = {
1157 {1, 2, 2, 2, 2, 3, 4, 4, 4, 4},
1158 {1, 2, 2, 2, 3, 4, 5, 5, 5, 6},
1159 {1, 2, 2, 3, 4, 5, 6, 6, 7, 8},
1160 {1, 2, 2, 2, 2, 3, 4, 5, 6, 7},
1161 {1, 2, 2, 2, 3, 4, 5, 6, 7, 8},
1162 {1, 2, 2, 3, 4, 5, 6, 7, 8, 9},
1163 {1, 2, 2, 2, 2, 3, 4, 4, 4, 5}
1167 /* Nearest possible UL subframe Index from UL subframe
1168 * DL Index < UL Index */
1169 RgSchTddLowDlSubfrmIdxTbl rgSchTddLowDlSubfrmIdxTbl = {
1170 {0, 1, 1, 1, 1, 5, 6, 6, 6, 6},
1171 {0, 1, 1, 1, 4, 5, 6, 6, 6, 9},
1172 {0, 1, 1, 3, 4, 5, 6, 6, 8, 9},
1173 {0, 1, 1, 1, 1, 5, 6, 7, 8, 9},
1174 {0, 1, 1, 1, 4, 5, 6, 7, 8, 9},
1175 {0, 1, 1, 3, 4, 5, 6, 7, 8, 9},
1176 {0, 1, 1, 1, 1, 5, 6, 6, 6, 9}
1179 /* Nearest possible DL subframe Index from UL subframe
1180 * DL Index > UL Index
1181 * 10 represents Next SFN low DL Idx */
1182 RgSchTddHighDlSubfrmIdxTbl rgSchTddHighDlSubfrmIdxTbl = {
1183 {0, 1, 5, 5, 5, 5, 6, 10, 10, 10},
1184 {0, 1, 4, 4, 4, 5, 6, 9, 9, 9},
1185 {0, 1, 3, 3, 4, 5, 6, 8, 8, 9},
1186 {0, 1, 5, 5, 5, 5, 6, 7, 8, 9},
1187 {0, 1, 4, 4, 4, 5, 6, 7, 8, 9},
1188 {0, 1, 3, 3, 4, 5, 6, 7, 8, 9},
1189 {0, 1, 5, 5, 5, 5, 6, 9, 9, 9}
1192 /* RACH Message3 related information */
1193 RgSchTddMsg3SubfrmTbl rgSchTddMsg3SubfrmTbl = {
1194 {7, 6, 255, 255, 255, 7, 6, 255, 255, 255},
1195 {7, 6, 255, 255, 8, 7, 6, 255, 255, 8},
1196 {7, 6, 255, 9, 8, 7, 6, 255, 9, 8},
1197 {12, 11, 255, 255, 255, 7, 6, 6, 6, 13},
1198 {12, 11, 255, 255, 8, 7, 6, 6, 14, 13},
1199 {12, 11, 255, 9, 8, 7, 6, 15, 14, 13},
1200 {7, 6, 255, 255, 255, 7, 6, 255, 255, 8}
1203 /* ccpu00132341-DEL Removed rgSchTddRlsDlSubfrmTbl and used Kset table for
1204 * releasing DL HARQs */
1206 /* DwPTS Scheduling Changes Start */
1207 /* Provides the number of Cell Reference Signals in DwPTS
1209 PRIVATE uint8_t rgSchCmnDwptsCrs[2][3] = {/* [Spl Sf cfg][Ant Port] */
1210 {4, 8, 16}, /* Spl Sf cfg 1,2,3,6,7,8 */
1211 {6, 12, 20}, /* Spl Sf cfg 4 */
1214 PRIVATE S8 rgSchCmnSplSfDeltaItbs[9] = RG_SCH_DWPTS_ITBS_ADJ;
1215 /* DwPTS Scheduling Changes End */
1219 PRIVATE uint32_t rgSchCmnBsrTbl[64] = {
1220 0, 10, 12, 14, 17, 19, 22, 26,
1221 31, 36, 42, 49, 57, 67, 78, 91,
1222 107, 125, 146, 171, 200, 234, 274, 321,
1223 376, 440, 515, 603, 706, 826, 967, 1132,
1224 1326, 1552, 1817, 2127, 2490, 2915, 3413, 3995,
1225 4677, 5476, 6411, 7505, 8787, 10287, 12043, 14099,
1226 16507, 19325, 22624, 26487, 31009, 36304, 42502, 49759,
1227 58255, 68201, 79846, 93479, 109439, 128125, 150000, 220000
1230 PRIVATE uint32_t rgSchCmnExtBsrTbl[64] = {
1231 0, 10, 13, 16, 19, 23, 29, 35,
1232 43, 53, 65, 80, 98, 120, 147, 181,
1233 223, 274, 337, 414, 509, 625, 769, 945,
1234 1162, 1429, 1757, 2161, 2657, 3267, 4017, 4940,
1235 6074, 7469, 9185, 11294, 13888, 17077, 20999, 25822,
1236 31752, 39045, 48012, 59039, 72598, 89272, 109774, 134986,
1237 165989, 204111, 250990, 308634, 379519, 466683, 573866, 705666,
1238 867737, 1067031, 1312097, 1613447, 1984009, 2439678, 3000000, 3100000
1241 uint8_t rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_MAX_CP][RG_SCH_CMN_UL_NUM_CQI];
1243 RgSchTbSzTbl rgTbSzTbl = {
1245 {16, 32, 56, 88, 120, 152, 176, 208, 224, 256, 288, 328, 344, 376, 392, 424, 456, 488, 504, 536, 568, 600, 616, 648, 680, 712, 744, 776, 776, 808, 840, 872, 904, 936, 968, 1000, 1032, 1032, 1064, 1096, 1128, 1160, 1192, 1224, 1256, 1256, 1288, 1320, 1352, 1384, 1416, 1416, 1480, 1480, 1544, 1544, 1608, 1608, 1608, 1672, 1672, 1736, 1736, 1800, 1800, 1800, 1864, 1864, 1928, 1928, 1992, 1992, 2024, 2088, 2088, 2088, 2152, 2152, 2216, 2216, 2280, 2280, 2280, 2344, 2344, 2408, 2408, 2472, 2472, 2536, 2536, 2536, 2600, 2600, 2664, 2664, 2728, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 2984, 2984, 3112},
1246 {24, 56, 88, 144, 176, 208, 224, 256, 328, 344, 376, 424, 456, 488, 520, 568, 600, 632, 680, 712, 744, 776, 808, 872, 904, 936, 968, 1000, 1032, 1064, 1128, 1160, 1192, 1224, 1256, 1288, 1352, 1384, 1416, 1416, 1480, 1544, 1544, 1608, 1608, 1672, 1736, 1736, 1800, 1800, 1864, 1864, 1928, 1992, 1992, 2024, 2088, 2088, 2152, 2152, 2216, 2280, 2280, 2344, 2344, 2408, 2472, 2472, 2536, 2536, 2600, 2600, 2664, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008},
1247 {32, 72, 144, 176, 208, 256, 296, 328, 376, 424, 472, 520, 568, 616, 648, 696, 744, 776, 840, 872, 936, 968, 1000, 1064, 1096, 1160, 1192, 1256, 1288, 1320, 1384, 1416, 1480, 1544, 1544, 1608, 1672, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2024, 2088, 2088, 2152, 2216, 2216, 2280, 2344, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2664, 2728, 2792, 2856, 2856, 2856, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968},
1248 {40, 104, 176, 208, 256, 328, 392, 440, 504, 568, 616, 680, 744, 808, 872, 904, 968, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 1992, 2024, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6200, 6456, 6456},
1249 {56, 120, 208, 256, 328, 408, 488, 552, 632, 696, 776, 840, 904, 1000, 1064, 1128, 1192, 1288, 1352, 1416, 1480, 1544, 1608, 1736, 1800, 1864, 1928, 1992, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2600, 2664, 2728, 2792, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992},
1250 {72, 144, 224, 328, 424, 504, 600, 680, 776, 872, 968, 1032, 1128, 1224, 1320, 1384, 1480, 1544, 1672, 1736, 1864, 1928, 2024, 2088, 2216, 2280, 2344, 2472, 2536, 2664, 2728, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4264, 4392, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9528},
1251 {328, 176, 256, 392, 504, 600, 712, 808, 936, 1032, 1128, 1224, 1352, 1480, 1544, 1672, 1736, 1864, 1992, 2088, 2216, 2280, 2408, 2472, 2600, 2728, 2792, 2984, 2984, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3880, 4008, 4136, 4136, 4264, 4392, 4584, 4584, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448},
1252 {104, 224, 328, 472, 584, 712, 840, 968, 1096, 1224, 1320, 1480, 1608, 1672, 1800, 1928, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536},
1253 {120, 256, 392, 536, 680, 808, 968, 1096, 1256, 1384, 1544, 1672, 1800, 1928, 2088, 2216, 2344, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15264},
1254 {136, 296, 456, 616, 776, 936, 1096, 1256, 1416, 1544, 1736, 1864, 2024, 2216, 2344, 2536, 2664, 2856, 2984, 3112, 3368, 3496, 3624, 3752, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 5160, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568},
1255 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080},
1256 {176, 376, 584, 776, 1000, 1192, 1384, 1608, 1800, 2024, 2216, 2408, 2600, 2792, 2984, 3240, 3496, 3624, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152},
1257 {208, 440, 680, 904, 1128, 1352, 1608, 1800, 2024, 2280, 2472, 2728, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 23688, 24496, 24496, 24496, 24496, 25456},
1258 {224, 488, 744, 1000, 1256, 1544, 1800, 2024, 2280, 2536, 2856, 3112, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 26416, 27376, 27376, 27376, 27376, 28336, 28336},
1259 {256, 552, 840, 1128, 1416, 1736, 1992, 2280, 2600, 2856, 3112, 3496, 3752, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 5992, 6200, 6456, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704},
1260 {280, 600, 904, 1224, 1544, 1800, 2152, 2472, 2728, 3112, 3368, 3624, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 6200, 6456, 6712, 6968, 7224, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008},
1261 {328, 632, 968, 1288, 1608, 1928, 2280, 2600, 2984, 3240, 3624, 3880, 4264, 4584, 4968, 5160, 5544, 5992, 6200, 6456, 6712, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 35160},
1262 {336, 696, 1064, 1416, 1800, 2152, 2536, 2856, 3240, 3624, 4008, 4392, 4776, 5160, 5352, 5736, 6200, 6456, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 39232},
1263 {376, 776, 1160, 1544, 1992, 2344, 2792, 3112, 3624, 4008, 4392, 4776, 5160, 5544, 5992, 6200, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14112, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816},
1264 {408, 840, 1288, 1736, 2152, 2600, 2984, 3496, 3880, 4264, 4776, 5160, 5544, 5992, 6456, 6968, 7224, 7736, 8248, 8504, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19080, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888},
1265 {440, 904, 1384, 1864, 2344, 2792, 3240, 3752, 4136, 4584, 5160, 5544, 5992, 6456, 6968, 7480, 7992, 8248, 8760, 9144, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 20616, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 48936, 51024, 51024, 51024},
1266 {488, 1000, 1480, 1992, 2472, 2984, 3496, 4008, 4584, 4968, 5544, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 9912, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056},
1267 {520, 1064, 1608, 2152, 2664, 3240, 3752, 4264, 4776, 5352, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 10296, 10680, 11448, 11832, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 19080, 19080, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256},
1268 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776},
1269 {584, 1192, 1800, 2408, 2984, 3624, 4264, 4968, 5544, 5992, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 22920, 23688, 24496, 25456, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592},
1270 {616, 1256, 1864, 2536, 3112, 3752, 4392, 5160, 5736, 6200, 6968, 7480, 8248, 8760, 9528, 10296, 10680, 11448, 12216, 12576, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592, 68808, 68808, 68808, 71112},
1271 {712, 1480, 2216, 2984, 3752, 4392, 5160, 5992, 6712, 7480, 8248, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 14688, 15264, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376}
1274 {32, 88, 152, 208, 256, 328, 376, 424, 488, 536, 600, 648, 712, 776, 808, 872, 936, 1000, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2088, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 5992, 6200},
1275 {56, 144, 208, 256, 344, 424, 488, 568, 632, 712, 776, 872, 936, 1000, 1064, 1160, 1224, 1288, 1384, 1416, 1544, 1608, 1672, 1736, 1800, 1864, 1992, 2024, 2088, 2152, 2280, 2344, 2408, 2472, 2536, 2600, 2728, 2792, 2856, 2856, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5352, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992},
1276 {72, 176, 256, 328, 424, 520, 616, 696, 776, 872, 968, 1064, 1160, 1256, 1320, 1416, 1544, 1608, 1672, 1800, 1864, 1992, 2088, 2152, 2216, 2344, 2408, 2536, 2600, 2664, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3368, 3496, 3624, 3624, 3752, 3880, 4008, 4008, 4136, 4264, 4264, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912},
1277 {104, 208, 328, 440, 568, 680, 808, 904, 1032, 1160, 1256, 1384, 1480, 1608, 1736, 1864, 1992, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2856, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4392, 4584, 4776, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12576, 12960, 12960},
1278 {120, 256, 408, 552, 696, 840, 1000, 1128, 1288, 1416, 1544, 1736, 1864, 1992, 2152, 2280, 2408, 2600, 2728, 2856, 2984, 3112, 3240, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840},
1279 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19080},
1280 {176, 392, 600, 808, 1032, 1224, 1480, 1672, 1864, 2088, 2280, 2472, 2728, 2984, 3112, 3368, 3496, 3752, 4008, 4136, 4392, 4584, 4776, 4968, 5160, 5352, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7992, 8248, 8248, 8504, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920},
1281 {224, 472, 712, 968, 1224, 1480, 1672, 1928, 2216, 2472, 2664, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376},
1282 {256, 536, 808, 1096, 1384, 1672, 1928, 2216, 2536, 2792, 3112, 3368, 3624, 3880, 4264, 4584, 4776, 4968, 5352, 5544, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576},
1283 {296, 616, 936, 1256, 1544, 1864, 2216, 2536, 2856, 3112, 3496, 3752, 4136, 4392, 4776, 5160, 5352, 5736, 5992, 6200, 6712, 6968, 7224, 7480, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160},
1284 {328, 680, 1032, 1384, 1736, 2088, 2472, 2792, 3112, 3496, 3880, 4264, 4584, 4968, 5352, 5736, 5992, 6200, 6712, 6968, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 21384, 21384, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888},
1285 {376, 776, 1192, 1608, 2024, 2408, 2792, 3240, 3624, 4008, 4392, 4776, 5352, 5736, 5992, 6456, 6968, 7224, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816},
1286 {440, 904, 1352, 1800, 2280, 2728, 3240, 3624, 4136, 4584, 4968, 5544, 5992, 6456, 6712, 7224, 7736, 8248, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 17568, 17568, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024},
1287 {488, 1000, 1544, 2024, 2536, 3112, 3624, 4136, 4584, 5160, 5736, 6200, 6712, 7224, 7736, 8248, 8760, 9144, 9912, 10296, 10680, 11448, 11832, 12216, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336},
1288 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776},
1289 {600, 1224, 1800, 2472, 3112, 3624, 4264, 4968, 5544, 6200, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 23688, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808},
1290 {632, 1288, 1928, 2600, 3240, 3880, 4584, 5160, 5992, 6456, 7224, 7736, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 68808, 71112, 71112, 71112, 71112},
1291 {696, 1416, 2152, 2856, 3624, 4392, 5160, 5736, 6456, 7224, 7992, 8760, 9528, 10296, 10680, 11448, 12216, 12960, 13536, 14688, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 71112, 73712, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 78704},
1292 {776, 1544, 2344, 3112, 4008, 4776, 5544, 6200, 7224, 7992, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 27376, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936},
1293 {840, 1736, 2600, 3496, 4264, 5160, 5992, 6968, 7736, 8504, 9528, 10296, 11064, 12216, 12960, 13536, 14688, 15264, 16416, 16992, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800},
1294 {904, 1864, 2792, 3752, 4584, 5544, 6456, 7480, 8248, 9144, 10296, 11064, 12216, 12960, 14112, 14688, 15840, 16992, 17568, 18336, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 42368, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 97896, 101840, 101840, 101840},
1295 {1000, 1992, 2984, 4008, 4968, 5992, 6968, 7992, 9144, 9912, 11064, 12216, 12960, 14112, 15264, 15840, 16992, 18336, 19080, 19848, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 31704, 32856, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136},
1296 {1064, 2152, 3240, 4264, 5352, 6456, 7480, 8504, 9528, 10680, 11832, 12960, 14112, 15264, 16416, 16992, 18336, 19080, 20616, 21384, 22920, 23688, 24496, 25456, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816},
1297 {1128, 2280, 3496, 4584, 5736, 6968, 7992, 9144, 10296, 11448, 12576, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 45352, 45352, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840,101840,101840,101840,105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496},
1298 {1192, 2408, 3624, 4968, 5992, 7224, 8504, 9912, 11064, 12216, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 42368, 43816, 45352, 46888, 46888, 48936, 51024, 51024, 52752, 52752, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208},
1299 {1256, 2536, 3752, 5160, 6200, 7480, 8760, 10296, 11448, 12576, 14112, 15264, 16416, 17568, 19080, 20616, 21384, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 43816, 43816, 45352, 46888, 48936, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040,115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208, 137792, 137792, 137792, 142248},
1300 {1480, 2984, 4392, 5992, 7480, 8760, 10296, 11832, 13536, 14688, 16416, 17568, 19080, 20616, 22152, 23688, 25456, 26416, 28336, 29296, 30576, 32856, 34008, 35160, 36696, 37888, 40576, 40576, 42368, 43816, 45352, 46888, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 63776, 63776, 66592, 68808, 68808, 71112, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 81176, 84760, 84760, 87936, 87936, 90816, 90816, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 128496, 128496, 128496, 133208, 133208, 133208, 137792, 137792, 137792, 142248, 142248, 142248, 146856, 146856,149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776}
/* UL MCS index table: each entry is a {modulation order, I-TBS} pair.
 * Modulation order steps 2 (QPSK) -> 4 (16QAM) -> 6 (64QAM) as the MCS
 * index grows; presumably mirrors 3GPP TS 36.213 Table 8.6.1-1 — TODO
 * confirm against the spec before relying on exact index boundaries. */
1303 RgSchUlIMcsTbl rgUlIMcsTbl = {
1304 {2, 0}, {2, 1}, {2, 2}, {2, 3}, {2, 4}, {2, 5},
1305 {2, 6}, {2, 7}, {2, 8}, {2, 9}, {2, 10},
1306 {4, 10}, {4, 11}, {4, 12}, {4, 13}, {4, 14},
1307 {4, 15}, {4, 16}, {4, 17}, {4, 18}, {4, 19},
1308 {6, 19}, {6, 20}, {6, 21}, {6, 22}, {6, 23},
1309 {6, 24}, {6, 25}, {6, 26}
/* UE category capability table, one row per UE category (1..8); the
 * column meanings are documented in the comment below. Values look like
 * 3GPP TS 36.306 Tables 4.1-1 / 4.1-2 — TODO confirm against the spec. */
1311 RgSchUeCatTbl rgUeCatTbl = {
1312 /*Column1:Maximum number of bits of an UL-SCH
1313 transport block transmitted within a TTI
1315 Column2:Maximum number of bits of a DLSCH
1316 transport block received within a TTI
1318 Column3:Total number of soft channel bits
1320 Column4:Support for 64QAM in UL
1322 Column5:Maximum number of DL-SCH transport
1323 block bits received within a TTI
1325 Column6:Maximum number of supported layers for
1326 spatial multiplexing in DL
1328 {5160, {10296,0}, 250368, FALSE, 10296, 1},
1329 {25456, {51024,0}, 1237248, FALSE, 51024, 2},
1330 {51024, {75376,0}, 1237248, FALSE, 102048, 2},
1331 {51024, {75376,0}, 1827072, FALSE, 150752, 2},
1332 {75376, {149776,0}, 3667200, TRUE, 299552, 4},
1333 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1334 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1335 {149776,{299856,0}, 35982720,TRUE, 2998560, 8}
1338 /* [ccpu00138532]-ADD-The below table stores the min HARQ RTT time
1339 in Downlink for TDD and FDD. Indices 0 to 6 map to tdd UL DL config 0-6.
1340 Index 7 maps to FDD */
1341 uint8_t rgSchCmnHarqRtt[8] = {4,7,10,9,12,15,6,8};
1342 /* CFI switchover window lengths: the number of entries equals 7 TDD UL/DL config indexes + 1 FDD index */
1343 uint8_t rgSchCfiSwitchOvrWinLen[] = {7, 4, 2, 3, 2, 1, 6, 8};
1345 /* EffTbl is calculated for single layer and two layers.
1346 * CqiToTbs is calculated for single layer and two layers */
/* Per-CFI (1..4) efficiency tables for normal CP, indexed by layer count
 * per codeword; filled at init by the rgSCHCmnCompEff family below. */
1347 RgSchCmnTbSzEff rgSchCmnNorCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1348 RgSchCmnTbSzEff rgSchCmnNorCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1349 /* UL efficiency values for normal and extended CP */
1350 RgSchCmnTbSzEff rgSchCmnNorUlEff[1],rgSchCmnExtUlEff[1];
/* CQI -> TBS-index mapping tables (normal CP), one per CFI. */
1351 RgSchCmnCqiToTbs rgSchCmnNorCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1352 RgSchCmnCqiToTbs rgSchCmnNorCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
/* Lookup: [layer index][cyclic prefix][CFI] -> CQI-to-TBS table pointer. */
1353 RgSchCmnCqiToTbs *rgSchCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
/* Same tables for extended CP. */
1354 RgSchCmnTbSzEff rgSchCmnExtCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1355 RgSchCmnTbSzEff rgSchCmnExtCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1356 RgSchCmnCqiToTbs rgSchCmnExtCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1357 RgSchCmnCqiToTbs rgSchCmnExtCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1358 /* Include CRS REs while calculating Efficiency */
/* Lookup: [layer index][cyclic prefix][antenna config][CFI] -> efficiency table. */
1359 RgSchCmnTbSzEff *rgSchCmnEffTbl[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_ANT_CONF][RG_SCH_CMN_MAX_CFI];
1360 RgSchCmnTbSzEff *rgSchCmnUlEffTbl[RG_SCH_CMN_MAX_CP];
1362 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3, 1};
1364 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA sub-frames from RA preamble */
/* NOTE(review): duplicate of the 5-entry definition above; presumably the
 * FDD branch of an elided #ifdef/#else pair — confirm before editing. */
1365 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3};
/* Init-function tables for the UL/DL/DLFS scheduler algorithms, defined
 * in the per-algorithm scheduler source files. The EMTC entries are
 * presumably guarded by an elided #ifdef (EMTC build) — TODO confirm. */
1368 EXTERN RgUlSchdInits rgSchUlSchdInits;
1369 EXTERN RgDlSchdInits rgSchDlSchdInits;
1370 EXTERN RgDlfsSchdInits rgSchDlfsSchdInits;
1372 EXTERN RgEmtcUlSchdInits rgSchEmtcUlSchdInits;
1373 EXTERN RgEmtcDlSchdInits rgSchEmtcDlSchdInits;
/* Forward declarations of PRIVATE helper routines of the common scheduler:
 * RACH / PDCCH-order handling, CCCH SDU / Msg3 / RAR allocation, UL/DL RB
 * allocation, group power control, refresh-queue and timer handling.
 * NOTE(review): several declaration lines are elided in this extract
 * (parameter lists and closing "));" are missing in places) — declarations
 * below are preserved verbatim. */
1377 PRIVATE S16 rgSCHCmnUeIdleExdThrsld ARGS((
1381 RgSchUeCb* rgSCHCmnGetHoUe ARGS((
1385 PRIVATE Void rgSCHCmnDelDedPreamble ARGS((
1389 RgSchUeCb* rgSCHCmnGetPoUe ARGS((
1392 CmLteTimingInfo timingInfo
1394 PRIVATE Void rgSCHCmnDelRachInfo ARGS((
1398 PRIVATE S16 rgSCHCmnUlRbAllocForPoHoUe ARGS((
1404 PRIVATE Void rgSCHCmnHdlHoPo ARGS((
1406 CmLListCp *raRspLst,
1407 RgSchRaReqInfo *raReq
1409 PRIVATE Void rgSCHCmnAllocPoHoGrnt ARGS((
1411 CmLListCp *raRspLst,
1413 RgSchRaReqInfo *raReq
1415 PRIVATE Void rgSCHCmnFillPdcchOdr2Sf ARGS((
1422 PRIVATE Void rgSCHCmnDlAdd2PdcchOdrQ ARGS((
1426 PRIVATE Void rgSCHCmnDlRmvFrmPdcchOdrQ ARGS((
1430 PRIVATE Void rgSCHCmnUpdNxtPrchMskIdx ARGS((
1433 PRIVATE Void rgSCHCmnUpdRachParam ARGS((
1436 PRIVATE S16 rgSCHCmnAllocPOParam ARGS((
1442 uint8_t *prachMskIdx
1444 PRIVATE Void rgSCHCmnGenPdcchOrder ARGS((
1448 PRIVATE Void rgSCHCmnCfgRachDedPrm ARGS((
1453 PRIVATE Void rgSCHCmnHdlUlInactUes ARGS((
1456 PRIVATE Void rgSCHCmnHdlDlInactUes ARGS((
1459 PRIVATE Void rgSCHCmnUlInit ARGS((Void
1461 PRIVATE Void rgSCHCmnDlInit ARGS((Void
1463 PRIVATE Void rgSCHCmnInitDlRbAllocInfo ARGS((
1464 RgSchCmnDlRbAllocInfo *allocInfo
1466 PRIVATE Void rgSCHCmnUpdUlCompEffBsr ARGS((
1470 PRIVATE Void rgSCHCmnUlSetAllUnSched ARGS((
1471 RgSchCmnUlRbAllocInfo *allocInfo
1473 PRIVATE Void rgSCHCmnUlUpdSf ARGS((
1475 RgSchCmnUlRbAllocInfo *allocInfo,
1478 PRIVATE Void rgSCHCmnUlHndlAllocRetx ARGS((
1480 RgSchCmnUlRbAllocInfo *allocInfo,
1485 PRIVATE Void rgSCHCmnGrpPwrCntrlPucch ARGS((
1489 PRIVATE Void rgSCHCmnGrpPwrCntrlPusch ARGS((
1493 PRIVATE Void rgSCHCmnDelUeFrmRefreshQ ARGS((
1497 PRIVATE S16 rgSCHCmnTmrExpiry ARGS((
1498 PTR cb, /* Pointer to timer control block */
1499 S16 tmrEvnt /* Timer Event */
1501 PRIVATE S16 rgSCHCmnTmrProc ARGS((
1504 PRIVATE Void rgSCHCmnAddUeToRefreshQ ARGS((
1509 PRIVATE Void rgSCHCmnDlCcchRetx ARGS((
1511 RgSchCmnDlRbAllocInfo *allocInfo
1513 PRIVATE Void rgSCHCmnUpdUeMimoInfo ARGS((
1517 RgSchCmnCell *cellSchd
1519 PRIVATE Void rgSCHCmnUpdUeUlCqiInfo ARGS((
1523 RgSchCmnUe *ueSchCmn,
1524 RgSchCmnCell *cellSchd,
1528 PRIVATE Void rgSCHCmnDlCcchSduRetx ARGS((
1530 RgSchCmnDlRbAllocInfo *allocInfo
1532 PRIVATE Void rgSCHCmnDlCcchSduTx ARGS((
1534 RgSchCmnDlRbAllocInfo *allocInfo
1536 PRIVATE S16 rgSCHCmnCcchSduAlloc ARGS((
1539 RgSchCmnDlRbAllocInfo *allocInfo
1541 PRIVATE S16 rgSCHCmnCcchSduDedAlloc ARGS((
1545 PRIVATE S16 rgSCHCmnNonDlfsCcchSduRbAlloc ARGS((
1551 PRIVATE Void rgSCHCmnInitVars ARGS((
1555 /*ccpu00117180 - DEL - Moved rgSCHCmnUpdVars to .x as its access is now */
1556 PRIVATE Void rgSCHCmnUlRbAllocForLst ARGS((
1562 CmLListCp *nonSchdLst,
1565 PRIVATE S16 rgSCHCmnUlRbAllocForUe ARGS((
1572 PRIVATE Void rgSCHCmnMsg3GrntReq ARGS((
1576 RgSchUlHqProcCb *hqProc,
1577 RgSchUlAlloc **ulAllocRef,
1578 uint8_t *hqProcIdRef
1580 PRIVATE Void rgSCHCmnDlCcchRarAlloc ARGS((
1583 PRIVATE Void rgSCHCmnDlCcchTx ARGS((
1585 RgSchCmnDlRbAllocInfo *allocInfo
1587 PRIVATE Void rgSCHCmnDlBcchPcch ARGS((
1589 RgSchCmnDlRbAllocInfo *allocInfo,
1590 RgInfSfAlloc *subfrmAlloc
/* Forward declarations (continued): timing-window checks, common-channel
 * and RA-response allocation, PBCH-overlap RB/TB-size adjustment, RIV
 * computation, TDD-specific initialization helpers, and MIMO rank
 * computation per Tx mode.
 * NOTE(review): parts of these declarations (parameter lists, closing
 * "));") are elided in this extract — lines below are preserved verbatim. */
1592 Bool rgSCHCmnChkInWin ARGS((
1593 CmLteTimingInfo frm,
1594 CmLteTimingInfo start,
1597 Bool rgSCHCmnChkPastWin ARGS((
1598 CmLteTimingInfo frm,
1601 PRIVATE Void rgSCHCmnClcAlloc ARGS((
1604 RgSchClcDlLcCb *lch,
1606 RgSchCmnDlRbAllocInfo *allocInfo
1609 PRIVATE Void rgSCHCmnClcRbAlloc ARGS((
1620 PRIVATE S16 rgSCHCmnMsg4Alloc ARGS((
1623 RgSchCmnDlRbAllocInfo *allocInfo
1625 PRIVATE S16 rgSCHCmnMsg4DedAlloc ARGS((
1629 PRIVATE Void rgSCHCmnDlRaRsp ARGS((
1631 RgSchCmnDlRbAllocInfo *allocInfo
1633 PRIVATE S16 rgSCHCmnRaRspAlloc ARGS((
1639 RgSchCmnDlRbAllocInfo *allocInfo
1641 PRIVATE Void rgSCHCmnUlUeDelAllocs ARGS((
1645 PRIVATE Void rgSCHCmnDlSetUeAllocLmt ARGS((
1650 PRIVATE S16 rgSCHCmnDlRgrCellCfg ARGS((
1655 PRIVATE Void rgSCHCmnUlAdapRetx ARGS((
1656 RgSchUlAlloc *alloc,
1657 RgSchUlHqProcCb *proc
1659 PRIVATE Void rgSCHCmnUlUpdAllocRetx ARGS((
1663 PRIVATE Void rgSCHCmnUlSfReTxAllocs ARGS((
1667 /* Fix: syed Adaptive Msg3 Retx crash. */
1669 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg ARGS
1673 RgrUeRecfg *ueRecfg,
1677 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg ARGS
1687 * DL RB allocation specific functions
1690 PRIVATE Void rgSCHCmnDlRbAlloc ARGS((
1692 RgSchCmnDlRbAllocInfo *allocInfo
1694 PRIVATE Void rgSCHCmnNonDlfsRbAlloc ARGS((
1696 RgSchCmnDlRbAllocInfo *allocInfo
1698 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc ARGS((
1700 RgSchDlRbAlloc *cmnAllocInfo));
1703 PRIVATE Void rgSCHCmnNonDlfsPbchRbAllocAdj ARGS((
1705 RgSchDlRbAlloc *cmnAllocInfo,
1706 uint8_t pbchSsRsSym,
1709 /* Added function to adjust TBSize*/
1710 PRIVATE Void rgSCHCmnNonDlfsPbchTbSizeAdj ARGS((
1711 RgSchDlRbAlloc *allocInfo,
1712 uint8_t numOvrlapgPbchRb,
1713 uint8_t pbchSsRsSym,
1718 /* Added function to find num of overlapping PBCH rb*/
1719 PRIVATE Void rgSCHCmnFindNumPbchOvrlapRbs ARGS((
1722 RgSchDlRbAlloc *allocInfo,
1723 uint8_t *numOvrlapgPbchRb
1726 PRIVATE uint8_t rgSCHCmnFindNumAddtlRbsAvl ARGS((
1729 RgSchDlRbAlloc *allocInfo
1733 PRIVATE Void rgSCHCmnFindCodeRate ARGS((
1736 RgSchDlRbAlloc *allocInfo,
1742 PRIVATE Void rgSCHCmnNonDlfsMsg4Alloc ARGS((
1744 RgSchCmnMsg4RbAlloc *msg4AllocInfo,
1747 PRIVATE S16 rgSCHCmnNonDlfsMsg4RbAlloc ARGS((
1753 PRIVATE S16 rgSCHCmnNonDlfsUeRbAlloc ARGS((
1757 uint8_t *isDlBwAvail
1760 PRIVATE uint32_t rgSCHCmnCalcRiv ARGS(( uint8_t bw,
1766 PRIVATE Void rgSCHCmnUpdHqAndDai ARGS((
1767 RgSchDlHqProcCb *hqP,
1769 RgSchDlHqTbCb *tbCb,
1772 PRIVATE S16 rgSCHCmnUlCalcAvailBw ARGS((
1774 RgrCellCfg *cellCfg,
1776 uint8_t *rbStartRef,
1779 PRIVATE S16 rgSCHCmnDlKdashUlAscInit ARGS((
1782 PRIVATE S16 rgSCHCmnDlANFdbkInit ARGS((
1785 PRIVATE S16 rgSCHCmnDlNpValInit ARGS((
1788 PRIVATE S16 rgSCHCmnDlCreateRachPrmLst ARGS((
1791 PRIVATE S16 rgSCHCmnDlCpyRachInfo ARGS((
1793 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
1796 PRIVATE S16 rgSCHCmnDlRachInfoInit ARGS((
1799 PRIVATE S16 rgSCHCmnDlPhichOffsetInit ARGS((
1804 PRIVATE Void rgSCHCmnFindUlCqiUlTxAnt ARGS
1810 PRIVATE RgSchCmnRank rgSCHCmnComputeRank ARGS
1813 uint32_t *pmiBitMap,
1817 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode3 ARGS
1822 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode3 ARGS
1827 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode4 ARGS
1832 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode4 ARGS
1837 PRIVATE uint8_t rgSCHCmnCalcWcqiFrmSnr ARGS
1844 /* comcodsepa : start */
/* NOTE(review): intermediate lines of this function (switch headers,
 * braces, default cases, ANSI/K&R #ifdef guards) are elided in this
 * extract; code lines below are preserved verbatim. */
1847 * @brief This function computes efficiency and stores in a table.
1851 * Function: rgSCHCmnCompEff
1852 * Purpose: this function computes the efficiency as number of
1853 * bytes per 1024 symbols. The CFI table is also filled
1854 * with the same information such that comparison is valid
1856 * Invoked by: Scheduler
1858 * @param[in] uint8_t noPdcchSym
1859 * @param[in] uint8_t cpType
1860 * @param[in] uint8_t txAntIdx
1861 * @param[in] RgSchCmnTbSzEff* effTbl
1866 PRIVATE Void rgSCHCmnCompEff
1871 RgSchCmnTbSzEff *effTbl
1874 PRIVATE Void rgSCHCmnCompEff(noPdcchSym, cpType, txAntIdx, effTbl)
1878 RgSchCmnTbSzEff *effTbl;
1883 uint8_t resOfCrs; /* Effective REs occupied by CRS */
1889 case RG_SCH_CMN_NOR_CP:
1892 case RG_SCH_CMN_EXT_CP:
1896 /* Generate a log error. This case should never be executed */
1900 /* Depending on the Tx Antenna Index, deduct the
1901 * Resource elements for the CRS */
1905 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
1908 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
1911 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
1914 /* Generate a log error. This case should never be executed */
/* Usable REs per RB = (symbols left after PDCCH) * subcarriers per RB,
 * minus the CRS REs determined above. */
1917 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
/* Average the per-RB-count efficiency (bits per 1024 REs, from the
 * single-layer table rgTbSzTbl[0]) over all RB counts for each TBS index. */
1918 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1921 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1923 /* This line computes the coding efficiency per 1024 REs */
1924 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1926 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
/* NOTE(review): intermediate lines of this function are elided in this
 * extract; code lines below are preserved verbatim. */
1931 * @brief This function computes efficiency and stores in a table.
1935 * Function: rgSCHCmnCompUlEff
1936 * Purpose: this function computes the efficiency as number of
1937 * bytes per 1024 symbols. The CFI table is also filled
1938 * with the same information such that comparison is valid
1940 * Invoked by: Scheduler
1942 * @param[in] uint8_t noUlRsSym
1943 * @param[in] uint8_t cpType
1944 * (stale doc fixed: this function takes no txAntIdx parameter — see its signature below)
1945 * @param[in] RgSchCmnTbSzEff* effTbl
1950 PRIVATE Void rgSCHCmnCompUlEff
1954 RgSchCmnTbSzEff *effTbl
1957 PRIVATE Void rgSCHCmnCompUlEff(noUlRsSym, cpType, effTbl)
1960 RgSchCmnTbSzEff *effTbl;
1970 case RG_SCH_CMN_NOR_CP:
1973 case RG_SCH_CMN_EXT_CP:
1977 /* Generate a log error. This case should never be executed */
/* UL has no CRS deduction: usable REs per RB = (symbols left after UL RS)
 * * subcarriers per RB. */
1981 noResPerRb = ((noSymPerRb - noUlRsSym) * RB_SCH_CMN_NUM_SCS_PER_RB);
/* Average efficiency (bits per 1024 REs) over all RB counts per TBS index. */
1982 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1985 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1987 /* This line computes the coding efficiency per 1024 REs */
1988 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
1990 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
/* NOTE(review): intermediate lines of this function are elided in this
 * extract; code lines below are preserved verbatim. */
1996 * @brief This function computes efficiency for 2 layers and stores in a table.
2000 * Function: rgSCHCmn2LyrCompEff
2001 * Purpose: this function computes the efficiency as number of
2002 * bytes per 1024 symbols. The CFI table is also filled
2003 * with the same information such that comparison is valid
2005 * Invoked by: Scheduler
2007 * @param[in] uint8_t noPdcchSym
2008 * @param[in] uint8_t cpType
2009 * @param[in] uint8_t txAntIdx
2010 * @param[in] RgSchCmnTbSzEff* effTbl2Lyr
2015 PRIVATE Void rgSCHCmn2LyrCompEff
2020 RgSchCmnTbSzEff *effTbl2Lyr
2023 PRIVATE Void rgSCHCmn2LyrCompEff(noPdcchSym, cpType, txAntIdx, effTbl2Lyr)
2027 RgSchCmnTbSzEff *effTbl2Lyr;
2032 uint8_t resOfCrs; /* Effective REs occupied by CRS */
2038 case RG_SCH_CMN_NOR_CP:
2041 case RG_SCH_CMN_EXT_CP:
2045 /* Generate a log error. This case should never be executed */
2049 /* Depending on the Tx Antenna Index, deduct the
2050 * Resource elements for the CRS */
2054 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
2057 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
2060 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
2063 /* Generate a log error. This case should never be executed */
2067 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
/* Same averaging as rgSCHCmnCompEff, but using the two-layer TB size
 * table rgTbSzTbl[1]. */
2068 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
2070 (*effTbl2Lyr)[i] = 0;
2071 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
2073 /* This line computes the coding efficiency per 1024 REs */
2074 (*effTbl2Lyr)[i] += (rgTbSzTbl[1][i][j] * 1024) / (noResPerRb * (j+1));
2076 (*effTbl2Lyr)[i] /= RG_SCH_CMN_NUM_RBS;
/* NOTE(review): intermediate lines of this function are elided in this
 * extract; code lines below are preserved verbatim. */
2083 * @brief This function initializes the rgSchCmnDciFrmtSizes table.
2087 * Function: rgSCHCmnGetDciFrmtSizes
2088 * Purpose: This function determines the sizes of all
2089 * the available DCI Formats. The order of
2090 * bits addition for each format is in accordance
2092 * Invoked by: rgSCHCmnRgrCellCfg
2098 PRIVATE Void rgSCHCmnGetDciFrmtSizes
2103 PRIVATE Void rgSCHCmnGetDciFrmtSizes(cell)
2109 /* DCI Format 0 size determination */
2110 rgSchCmnDciFrmtSizes[0] = 1 +
2112 rgSCHUtlLog32bitNbase2((cell->bwCfg.ulTotalBw * \
2113 (cell->bwCfg.ulTotalBw + 1))/2) +
2123 /* DCI Format 1 size determination */
2124 rgSchCmnDciFrmtSizes[1] = 1 +
2125 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2130 4 + 2 + /* HqProc Id and DAI */
2136 /* DCI Format 1A size determination */
2137 rgSchCmnDciFrmtSizes[2] = 1 + /* Flag for format0/format1a differentiation */
2138 1 + /* Localized/distributed VRB assignment flag */
2141 3 + /* Harq process Id */
2143 4 + /* Harq process Id */
2144 2 + /* UL Index or DAI */
2146 1 + /* New Data Indicator */
2149 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2150 (cell->bwCfg.dlTotalBw + 1))/2);
2151 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
2152 Since VRB is local */
2154 /* DCI Format 1B size determination */
2155 rgSchCmnDciFrmtSizes[3] = 1 +
2156 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2157 (cell->bwCfg.dlTotalBw + 1))/2) +
2167 ((cell->numTxAntPorts == 4)? 4:2) +
2170 /* DCI Format 1C size determination */
2171 /* Approximation: NDLVrbGap1 ~= Nprb for DL */
/* NOTE(review): likely operator-precedence bug below — "a? 0:1 + a? x:y"
 * parses as "a ? 0 : ((1 + a) ? x : y)" in C, so the intended sum of the
 * gap-flag bit and the RIV field size is not what is computed. Verify
 * against the unelided source and TS 36.212 before changing. */
2172 rgSchCmnDciFrmtSizes[4] = (cell->bwCfg.dlTotalBw < 50)? 0:1 +
2173 (cell->bwCfg.dlTotalBw < 50)?
2174 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/2 * \
2175 (cell->bwCfg.dlTotalBw/2 + 1))/2)) :
2176 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/4 * \
2177 (cell->bwCfg.dlTotalBw/4 + 1))/2)) +
2180 /* DCI Format 1D size determination */
2181 rgSchCmnDciFrmtSizes[5] = 1 +
2182 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2183 (cell->bwCfg.dlTotalBw + 1))/2) +
2192 ((cell->numTxAntPorts == 4)? 4:2) +
2195 /* DCI Format 2 size determination */
2196 rgSchCmnDciFrmtSizes[6] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2197 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2205 ((cell->numTxAntPorts == 4)? 6:3);
2207 /* DCI Format 2A size determination */
2208 rgSchCmnDciFrmtSizes[7] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2209 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2217 ((cell->numTxAntPorts == 4)? 2:0);
2219 /* DCI Format 3 size determination: same size as Format 0 */
2220 rgSchCmnDciFrmtSizes[8] = rgSchCmnDciFrmtSizes[0];
2222 /* DCI Format 3A size determination: same size as Format 0 */
2223 rgSchCmnDciFrmtSizes[9] = rgSchCmnDciFrmtSizes[0];
/* NOTE(review): brace/else lines of this function are elided in this
 * extract; code lines below are preserved verbatim. */
2230 * @brief This function initializes the cmnCell->dciAggrLvl table.
2234 * Function: rgSCHCmnGetCqiDciFrmt2AggrLvl
2235 * Purpose: This function determines the Aggregation level
2236 * for each CQI level against each DCI format.
2237 * Invoked by: rgSCHCmnRgrCellCfg
2243 PRIVATE Void rgSCHCmnGetCqiDciFrmt2AggrLvl
2248 PRIVATE Void rgSCHCmnGetCqiDciFrmt2AggrLvl(cell)
2252 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Walk every (CQI, DCI format) pair; rgSchCmnDciFrmtSizes holds 10
 * entries (indices 0-9), hence the inner loop bound of 10. */
2257 for (i = 0; i < RG_SCH_CMN_MAX_CQI; i++)
2259 for (j = 0; j < 10; j++)
2261 uint32_t pdcchBits; /* Actual number of phy bits needed for a given DCI Format
2262 * for a given CQI Level */
2263 pdcchBits = (rgSchCmnDciFrmtSizes[j] * 1024)/rgSchCmnCqiPdcchEff[i];
/* Pick the smallest aggregation level (2/4/8/16 CCEs) whose capacity
 * covers pdcchBits; thresholds 192/384/768 presumably reflect per-level
 * PDCCH bit capacity — TODO confirm against the CCE sizing used here. */
2265 if (pdcchBits < 192)
2267 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL2;
2270 if (pdcchBits < 384)
2272 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL4;
2275 if (pdcchBits < 768)
2277 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL8;
2280 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL16;
2287 * @brief This function initializes all the data for the scheduler.
2291 * Function: rgSCHCmnDlInit
2292 * Purpose: This function initializes the following information:
2293 * 1. Efficiency table
2294 * 2. CQI to table index - It is one row for upto 3 RBs
2295 * and another row for greater than 3 RBs
2296 * currently extended prefix is compiled out.
2297 * Invoked by: MAC initialization code (may be ActvInit)
2303 PRIVATE Void rgSCHCmnDlInit
2307 PRIVATE Void rgSCHCmnDlInit()
2314 RgSchCmnTbSzEff *effTbl;
2315 RgSchCmnCqiToTbs *tbsTbl;
2318 /* 0 corresponds to Single layer case, 1 corresponds to 2 layers case*/
2319 /* Init Efficiency table for normal cyclic prefix */
2320 /*Initialize Efficiency table for Layer Index 0 */
2321 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2322 /*Initialize Efficiency table for each of the CFI indices. The
2323 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2324 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[0];
2325 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[0];
2326 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[0];
2327 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[0];
2328 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2329 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[0];
2330 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[0];
2331 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[0];
2332 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[0];
2333 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2334 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[0];
2335 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[0];
2336 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[0];
2337 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[0];
2339 /*Initialize CQI to TBS table for Layer Index 0 for Normal CP */
2340 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[0];
2341 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[0];
2342 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[0];
2343 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[0];
2345 /*Initialize Efficiency table for Layer Index 1 */
2346 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2347 /*Initialize Efficiency table for each of the CFI indices. The
2348 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2349 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[1];
2350 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[1];
2351 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[1];
2352 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[1];
2353 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2354 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[1];
2355 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[1];
2356 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[1];
2357 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[1];
2358 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2359 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[1];
2360 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[1];
2361 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[1];
2362 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[1];
2364 /*Initialize CQI to TBS table for Layer Index 1 for Normal CP */
2365 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[1];
2366 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[1];
2367 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[1];
2368 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[1];
2370 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2372 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2374 /* EfficiencyTbl calculation incase of 2 layers for normal CP */
2375 rgSCHCmnCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx,\
2376 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i]);
2377 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), RG_SCH_CMN_NOR_CP, idx, \
2378 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i]);
2382 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2384 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2386 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i];
2387 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][i];
2388 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2389 (j >= 0) && (k > 0); --j)
2391 /* ADD CQI to MCS mapping correction
2392 * single dimensional array is replaced by 2 dimensions for different CFI*/
2393 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2395 (*tbsTbl)[k--] = (uint8_t)j;
2402 /* effTbl,tbsTbl calculation incase of 2 layers for normal CP */
2403 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i];
2404 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][i];
2405 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2406 (j >= 0) && (k > 0); --j)
2408 /* ADD CQI to MCS mapping correction
2409 * single dimensional array is replaced by 2 dimensions for different CFI*/
2410 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2412 (*tbsTbl)[k--] = (uint8_t)j;
2422 /* Efficiency Table for Extended CP */
2423 /*Initialize Efficiency table for Layer Index 0 */
2424 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2425 /*Initialize Efficiency table for each of the CFI indices. The
2426 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2427 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[0];
2428 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[0];
2429 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[0];
2430 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[0];
2431 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2432 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[0];
2433 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[0];
2434 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[0];
2435 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[0];
2436 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2437 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[0];
2438 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[0];
2439 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[0];
2440 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[0];
2442 /*Initialize CQI to TBS table for Layer Index 0 for Extended CP */
2443 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[0];
2444 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[0];
2445 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[0];
2446 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[0];
2448 /*Initialize Efficiency table for Layer Index 1 */
2449 /*Initialize Efficiency table for each of the CFI indices. The
2450 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2451 /*Initialize Efficency table for Tx Antenna Port Index 0 */
2452 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[1];
2453 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[1];
2454 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[1];
2455 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[1];
2456 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2457 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[1];
2458 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[1];
2459 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[1];
2460 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[1];
2461 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2462 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[1];
2463 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[1];
2464 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[1];
2465 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[1];
2467 /*Initialize CQI to TBS table for Layer Index 1 for Extended CP */
2468 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[1];
2469 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[1];
2470 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[1];
2471 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[1];
2472 /* Activate this code when extended cp is supported */
2473 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2475 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2477 /* EfficiencyTbl calculation in case of 2 layers for extended CP */
2478 rgSCHCmnCompEff( (uint8_t)(i + 1 ), (uint8_t)RG_SCH_CMN_EXT_CP, idx,\
2479 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i]);
2480 rgSCHCmn2LyrCompEff((uint8_t)(i + 1), (uint8_t) RG_SCH_CMN_EXT_CP,idx, \
2481 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i]);
2485 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2487 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2489 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i];
2490 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][i];
2491 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2492 (j >= 0) && (k > 0); --j)
2494 /* ADD CQI to MCS mapping correction
2495 * single dimensional array is replaced by 2 dimensions for different CFI*/
2496 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2498 (*tbsTbl)[k--] = (uint8_t)j;
2505 /* effTbl,tbsTbl calculation incase of 2 layers for extended CP */
2506 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i];
2507 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][i];
2508 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2509 (j >= 0) && (k > 0); --j)
2511 /* ADD CQI to MCS mapping correction
2512 * single dimensional array is replaced by 2 dimensions for different CFI*/
2513 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2515 (*tbsTbl)[k--] = (uint8_t)j;
2528 * @brief This function initializes all the data for the scheduler.
2532 * Function: rgSCHCmnUlInit
2533 * Purpose: This function initializes the following information:
2534 * 1. Efficiency table
2535 * 2. CQI to table index - It is one row for upto 3 RBs
2536 * and another row for greater than 3 RBs
2537 * currently extended prefix is compiled out.
2538 * Invoked by: MAC initialization code; may be ActvInit
2544 PRIVATE Void rgSCHCmnUlInit
2548 PRIVATE Void rgSCHCmnUlInit()
2551 uint8_t *mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_NOR_CP][0];
2552 RgSchCmnTbSzEff *effTbl = &rgSchCmnNorUlEff[0];
2553 CONSTANT RgSchCmnUlCqiInfo *cqiTbl = &rgSchCmnUlCqiTbl[0];
2557 /* Initaializing new variable added for UL eff */
2558 rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP] = &rgSchCmnNorUlEff[0];
2559 /* Reason behind using 3 as the number of symbols to rule out for
2560 * efficiency table computation would be that we are using 2 symbols for
2561 * DMRS(1 in each slot) and 1 symbol for SRS*/
2562 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_NOR_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP]);
2564 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2565 i >= 0 && j > 0; --i)
2567 if ((*effTbl)[i] <= cqiTbl[j].eff)
2569 mapTbl[j--] = (uint8_t)i;
2576 effTbl = &rgSchCmnExtUlEff[0];
2577 mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_EXT_CP][0];
2579 /* Initaializing new variable added for UL eff */
2580 rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP] = &rgSchCmnExtUlEff[0];
2581 /* Reason behind using 3 as the number of symbols to rule out for
2582 * efficiency table computation would be that we are using 2 symbols for
2583 * DMRS(1 in each slot) and 1 symbol for SRS*/
2584 rgSCHCmnCompUlEff(3,RG_SCH_CMN_EXT_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP]);
2586 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2587 i >= 0 && j > 0; --i)
2589 if ((*effTbl)[i] <= cqiTbl[j].eff)
2591 mapTbl[j--] = (uint8_t)i;
2603 * @brief This function initializes all the data for the scheduler.
2607 * Function: rgSCHCmnInit
2608 * Purpose: This function initializes the following information:
2609 * 1. Efficiency table
2610 * 2. CQI to table index - It is one row for upto 3 RBs
2611 * and another row for greater than 3 RBs
2612 * currently extended prefix is compiled out.
2613 * Invoked by: MAC initialization code; may be ActvInit
2631 rgSCHEmtcCmnDlInit();
2632 rgSCHEmtcCmnUlInit();
2638 /* Init the function pointers */
2639 rgSchCmnApis.rgSCHRgrUeCfg = rgSCHCmnRgrUeCfg;
2640 rgSchCmnApis.rgSCHRgrUeRecfg = rgSCHCmnRgrUeRecfg;
2641 rgSchCmnApis.rgSCHFreeUe = rgSCHCmnUeDel;
2642 rgSchCmnApis.rgSCHRgrCellCfg = rgSCHCmnRgrCellCfg;
2643 rgSchCmnApis.rgSCHRgrCellRecfg = rgSCHCmnRgrCellRecfg;
2644 rgSchCmnApis.rgSCHFreeCell = rgSCHCmnCellDel;
2645 rgSchCmnApis.rgSCHRgrLchCfg = rgSCHCmnRgrLchCfg;
2646 rgSchCmnApis.rgSCHRgrLcgCfg = rgSCHCmnRgrLcgCfg;
2647 rgSchCmnApis.rgSCHRgrLchRecfg = rgSCHCmnRgrLchRecfg;
2648 rgSchCmnApis.rgSCHRgrLcgRecfg = rgSCHCmnRgrLcgRecfg;
2649 rgSchCmnApis.rgSCHFreeDlLc = rgSCHCmnFreeDlLc;
2650 rgSchCmnApis.rgSCHFreeLcg = rgSCHCmnLcgDel;
2651 rgSchCmnApis.rgSCHRgrLchDel = rgSCHCmnRgrLchDel;
2652 rgSchCmnApis.rgSCHActvtUlUe = rgSCHCmnActvtUlUe;
2653 rgSchCmnApis.rgSCHActvtDlUe = rgSCHCmnActvtDlUe;
2654 rgSchCmnApis.rgSCHHdlUlTransInd = rgSCHCmnHdlUlTransInd;
2655 rgSchCmnApis.rgSCHDlDedBoUpd = rgSCHCmnDlDedBoUpd;
2656 rgSchCmnApis.rgSCHUlRecMsg3Alloc = rgSCHCmnUlRecMsg3Alloc;
2657 rgSchCmnApis.rgSCHUlCqiInd = rgSCHCmnUlCqiInd;
2658 rgSchCmnApis.rgSCHPucchDeltaPwrInd = rgSCHPwrPucchDeltaInd;
2659 rgSchCmnApis.rgSCHUlHqProcForUe = rgSCHCmnUlHqProcForUe;
2661 rgSchCmnApis.rgSCHUpdUlHqProc = rgSCHCmnUpdUlHqProc;
2663 rgSchCmnApis.rgSCHUpdBsrShort = rgSCHCmnUpdBsrShort;
2664 rgSchCmnApis.rgSCHUpdBsrTrunc = rgSCHCmnUpdBsrTrunc;
2665 rgSchCmnApis.rgSCHUpdBsrLong = rgSCHCmnUpdBsrLong;
2666 rgSchCmnApis.rgSCHUpdPhr = rgSCHCmnUpdPhr;
2667 rgSchCmnApis.rgSCHUpdExtPhr = rgSCHCmnUpdExtPhr;
2668 rgSchCmnApis.rgSCHContResUlGrant = rgSCHCmnContResUlGrant;
2669 rgSchCmnApis.rgSCHSrRcvd = rgSCHCmnSrRcvd;
2670 rgSchCmnApis.rgSCHFirstRcptnReq = rgSCHCmnFirstRcptnReq;
2671 rgSchCmnApis.rgSCHNextRcptnReq = rgSCHCmnNextRcptnReq;
2672 rgSchCmnApis.rgSCHFirstHqFdbkAlloc = rgSCHCmnFirstHqFdbkAlloc;
2673 rgSchCmnApis.rgSCHNextHqFdbkAlloc = rgSCHCmnNextHqFdbkAlloc;
2674 rgSchCmnApis.rgSCHDlProcAddToRetx = rgSCHCmnDlProcAddToRetx;
2675 rgSchCmnApis.rgSCHDlCqiInd = rgSCHCmnDlCqiInd;
2677 rgSchCmnApis.rgSCHUlProcAddToRetx = rgSCHCmnEmtcUlProcAddToRetx;
2680 rgSchCmnApis.rgSCHSrsInd = rgSCHCmnSrsInd;
2682 rgSchCmnApis.rgSCHDlTARpt = rgSCHCmnDlTARpt;
2683 rgSchCmnApis.rgSCHDlRlsSubFrm = rgSCHCmnDlRlsSubFrm;
2684 rgSchCmnApis.rgSCHUeReset = rgSCHCmnUeReset;
2686 rgSchCmnApis.rgSCHHdlCrntiCE = rgSCHCmnHdlCrntiCE;
2687 rgSchCmnApis.rgSCHDlProcAck = rgSCHCmnDlProcAck;
2688 rgSchCmnApis.rgSCHDlRelPdcchFbk = rgSCHCmnDlRelPdcchFbk;
2689 rgSchCmnApis.rgSCHUlSpsRelInd = rgSCHCmnUlSpsRelInd;
2690 rgSchCmnApis.rgSCHUlSpsActInd = rgSCHCmnUlSpsActInd;
2691 rgSchCmnApis.rgSCHUlCrcFailInd = rgSCHCmnUlCrcFailInd;
2692 rgSchCmnApis.rgSCHUlCrcInd = rgSCHCmnUlCrcInd;
2694 rgSchCmnApis.rgSCHDrxStrtInActvTmrInUl = rgSCHCmnDrxStrtInActvTmrInUl;
2695 rgSchCmnApis.rgSCHUpdUeDataIndLcg = rgSCHCmnUpdUeDataIndLcg;
2697 for (idx = 0; idx < RGSCH_NUM_SCHEDULERS; ++idx)
2699 rgSchUlSchdInits[idx](&rgSchUlSchdTbl[idx]);
2700 rgSchDlSchdInits[idx](&rgSchDlSchdTbl[idx]);
2703 for (idx = 0; idx < RGSCH_NUM_EMTC_SCHEDULERS; ++idx)
2705 rgSchEmtcUlSchdInits[idx](&rgSchEmtcUlSchdTbl[idx]);
2706 rgSchEmtcDlSchdInits[idx](&rgSchEmtcDlSchdTbl[idx]);
2709 #if (defined (RG_PHASE2_SCHED) && defined(TFU_UPGRADE))
2710 for (idx = 0; idx < RGSCH_NUM_DLFS_SCHEDULERS; ++idx)
2712 rgSchDlfsSchdInits[idx](&rgSchDlfsSchdTbl[idx]);
2716 rgSchCmnApis.rgSCHRgrSCellUeCfg = rgSCHCmnRgrSCellUeCfg;
2717 rgSchCmnApis.rgSCHRgrSCellUeDel = rgSCHCmnRgrSCellUeDel;
2724 * @brief This function is a wrapper to call scheduler specific API.
2728 * Function: rgSCHCmnDlRlsSubFrm
2729 * Purpose: Releases scheduler Information from DL SubFrm.
2733 * @param[in] RgSchCellCb *cell
2734 * @param[out] CmLteTimingInfo frm
2739 Void rgSCHCmnDlRlsSubFrm
2745 Void rgSCHCmnDlRlsSubFrm(cell, frm)
2747 CmLteTimingInfo frm;
2750 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2754 /* Get the pointer to the subframe */
2755 sf = rgSCHUtlSubFrmGet(cell, frm);
2757 rgSCHUtlSubFrmPut(cell, sf);
2760 /* Re-initialize DLFS specific information for the sub-frame */
2761 cellSch->apisDlfs->rgSCHDlfsReinitSf(cell, sf);
2769 * @brief This function is the starting function for DL allocation.
2773 * Function: rgSCHCmnDlCmnChAlloc
2774 * Purpose: Scheduling for downlink. It performs allocation in the order
2775 * of priority with BCCH/PCH first, CCCH, Random Access and TA.
2777 * Invoked by: Scheduler
2779 * @param[in] RgSchCellCb* cell
2780 * @param[out] RgSchCmnDlRbAllocInfo* allocInfo
2785 PRIVATE Void rgSCHCmnDlCcchRarAlloc
2790 PRIVATE Void rgSCHCmnDlCcchRarAlloc(cell)
2794 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2797 rgSCHCmnDlCcchRetx(cell, &cellSch->allocInfo);
2798 /* LTE_ADV_FLAG_REMOVED_START */
2799 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2801 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2803 /*eNodeB need to blank the subframe */
2807 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2812 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2814 /* LTE_ADV_FLAG_REMOVED_END */
2818 /*Added these function calls for processing CCCH SDU arriving
2819 * after guard timer expiry.Functions differ from above two functions
2820 * in using ueCb instead of raCb.*/
2821 rgSCHCmnDlCcchSduRetx(cell, &cellSch->allocInfo);
2822 /* LTE_ADV_FLAG_REMOVED_START */
2823 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2825 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2827 /*eNodeB need to blank the subframe */
2831 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2836 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2838 /* LTE_ADV_FLAG_REMOVED_END */
2842 if(cellSch->ul.msg3SchdIdx != RGSCH_INVALID_INFO)
2844 /* Do not schedule msg3 if there is a CFI change ongoing */
2845 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2847 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2851 /* LTE_ADV_FLAG_REMOVED_START */
2852 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2854 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2856 /*eNodeB need to blank the subframe */
2860 /* Do not schedule msg3 if there is a CFI change ongoing */
2861 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2863 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2869 /* Do not schedule msg3 if there is a CFI change ongoing */
2870 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2872 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2875 /* LTE_ADV_FLAG_REMOVED_END */
2883 * @brief Scheduling for CCCH SDU.
2887 * Function: rgSCHCmnCcchSduAlloc
2888 * Purpose: Scheduling for CCCH SDU
2890 * Invoked by: Scheduler
2892 * @param[in] RgSchCellCb* cell
2893 * @param[in] RgSchUeCb* ueCb
2894 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2899 PRIVATE S16 rgSCHCmnCcchSduAlloc
2903 RgSchCmnDlRbAllocInfo *allocInfo
2906 PRIVATE S16 rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)
2909 RgSchCmnDlRbAllocInfo *allocInfo;
2912 RgSchDlRbAlloc *rbAllocInfo;
2913 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2914 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2917 /* Return if subframe BW exhausted */
2918 if (allocInfo->ccchSduAlloc.ccchSduDlSf->bw <=
2919 allocInfo->ccchSduAlloc.ccchSduDlSf->bwAssigned)
2921 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2922 "bw<=bwAssigned for UEID:%d",ueCb->ueId);
2926 if (rgSCHDhmGetCcchSduHqProc(ueCb, cellSch->dl.time, &(ueDl->proc)) != ROK)
2928 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2929 "rgSCHDhmGetCcchSduHqProc failed UEID:%d",ueCb->ueId);
2933 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
2934 rbAllocInfo->dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2936 if (rgSCHCmnCcchSduDedAlloc(cell, ueCb) != ROK)
2938 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
2939 rgSCHDhmRlsHqpTb(ueDl->proc, 0, FALSE);
2940 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2941 "rgSCHCmnCcchSduDedAlloc failed UEID:%d",ueCb->ueId);
2944 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduTxLst, &ueDl->proc->reqLnk);
2945 ueDl->proc->reqLnk.node = (PTR)ueDl->proc;
2946 allocInfo->ccchSduAlloc.ccchSduDlSf->schdCcchUe++;
2951 * @brief This function scheduler for downlink CCCH messages.
2955 * Function: rgSCHCmnDlCcchSduTx
2956 * Purpose: Scheduling for downlink CCCH
2958 * Invoked by: Scheduler
2960 * @param[in] RgSchCellCb *cell
2961 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2966 PRIVATE Void rgSCHCmnDlCcchSduTx
2969 RgSchCmnDlRbAllocInfo *allocInfo
2972 PRIVATE Void rgSCHCmnDlCcchSduTx(cell, allocInfo)
2974 RgSchCmnDlRbAllocInfo *allocInfo;
2979 RgSchCmnDlUe *ueCmnDl;
2980 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2982 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
2985 node = cell->ccchSduUeLst.first;
2988 if(cellSch->dl.maxCcchPerDlSf &&
2989 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2995 ueCb = (RgSchUeCb *)(node->node);
2996 ueCmnDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2998 /* Fix : syed postpone scheduling for this
2999 * until msg4 is done */
3000 /* Fix : syed RLC can erroneously send CCCH SDU BO
3001 * twice. Hence an extra guard to avoid if already
3002 * scheduled for RETX */
3003 if ((!(ueCb->dl.dlInactvMask & RG_HQENT_INACTIVE)) &&
3006 if ((rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)) != ROK)
3013 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"ERROR!! THIS SHOULD "
3014 "NEVER HAPPEN for UEID:%d", ueCb->ueId);
3024 * @brief This function scheduler for downlink CCCH messages.
3028 * Function: rgSCHCmnDlCcchTx
3029 * Purpose: Scheduling for downlink CCCH
3031 * Invoked by: Scheduler
3033 * @param[in] RgSchCellCb *cell
3034 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3039 PRIVATE Void rgSCHCmnDlCcchTx
3042 RgSchCmnDlRbAllocInfo *allocInfo
3045 PRIVATE Void rgSCHCmnDlCcchTx(cell, allocInfo)
3047 RgSchCmnDlRbAllocInfo *allocInfo;
3052 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3053 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
3056 node = cell->raInfo.toBeSchdLst.first;
3059 if(cellSch->dl.maxCcchPerDlSf &&
3060 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3067 raCb = (RgSchRaCb *)(node->node);
3069 /* Address allocation for this UE for MSG 4 */
3070 /* Allocation for Msg4 */
3071 if ((rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)) != ROK)
3082 * @brief This function scheduler for downlink CCCH messages.
3086 * Function: rgSCHCmnDlCcchSduRetx
3087 * Purpose: Scheduling for downlink CCCH
3089 * Invoked by: Scheduler
3091 * @param[in] RgSchCellCb *cell
3092 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3097 PRIVATE Void rgSCHCmnDlCcchSduRetx
3100 RgSchCmnDlRbAllocInfo *allocInfo
3103 PRIVATE Void rgSCHCmnDlCcchSduRetx(cell, allocInfo)
3105 RgSchCmnDlRbAllocInfo *allocInfo;
3108 RgSchDlRbAlloc *rbAllocInfo;
3110 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3112 RgSchDlHqProcCb *hqP;
3115 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
3118 node = cellSch->dl.ccchSduRetxLst.first;
3121 if(cellSch->dl.maxCcchPerDlSf &&
3122 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3129 hqP = (RgSchDlHqProcCb *)(node->node);
3132 /* DwPts Scheduling Changes Start */
3134 if (rgSCHCmnRetxAvoidTdd(allocInfo->ccchSduAlloc.ccchSduDlSf,
3140 /* DwPts Scheduling Changes End */
3142 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3146 ueCb = (RgSchUeCb*)(hqP->hqE->ue);
3147 ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
3149 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
3150 /* Fill RB Alloc Info */
3151 rbAllocInfo->dlSf = dlSf;
3152 rbAllocInfo->tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3153 rbAllocInfo->rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3154 /* Fix : syed iMcs setting did not correspond to RETX */
3155 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3156 rbAllocInfo->tbInfo[0].imcs);
3157 rbAllocInfo->rnti = ueCb->ueId;
3158 rbAllocInfo->tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3159 /* Fix : syed Copying info in entirety without depending on stale TX information */
3160 rbAllocInfo->tbInfo[0].tbCb = &hqP->tbInfo[0];
3161 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
3162 /* Fix : syed Assigning proc to scratchpad */
3165 retxBw += rbAllocInfo->rbsReq;
3167 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduRetxLst, \
3169 hqP->reqLnk.node = (PTR)hqP;
3173 dlSf->bwAssigned += retxBw;
3179 * @brief This function scheduler for downlink CCCH messages.
3183 * Function: rgSCHCmnDlCcchRetx
3184 * Purpose: Scheduling for downlink CCCH
3186 * Invoked by: Scheduler
3188 * @param[in] RgSchCellCb *cell
3189 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3194 PRIVATE Void rgSCHCmnDlCcchRetx
3197 RgSchCmnDlRbAllocInfo *allocInfo
3200 PRIVATE Void rgSCHCmnDlCcchRetx(cell, allocInfo)
3202 RgSchCmnDlRbAllocInfo *allocInfo;
3206 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3208 RgSchDlHqProcCb *hqP;
3210 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
3213 node = cellSch->dl.msg4RetxLst.first;
3216 if(cellSch->dl.maxCcchPerDlSf &&
3217 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3223 hqP = (RgSchDlHqProcCb *)(node->node);
3227 /* DwPts Scheduling Changes Start */
3229 if (rgSCHCmnRetxAvoidTdd(allocInfo->msg4Alloc.msg4DlSf,
3235 /* DwPts Scheduling Changes End */
3237 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3241 raCb = (RgSchRaCb*)(hqP->hqE->raCb);
3242 /* Fill RB Alloc Info */
3243 raCb->rbAllocInfo.dlSf = dlSf;
3244 raCb->rbAllocInfo.tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3245 raCb->rbAllocInfo.rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3246 /* Fix : syed iMcs setting did not correspond to RETX */
3247 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3248 raCb->rbAllocInfo.tbInfo[0].imcs);
3249 raCb->rbAllocInfo.rnti = raCb->tmpCrnti;
3250 raCb->rbAllocInfo.tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3251 /* Fix; syed Copying info in entirety without depending on stale TX information */
3252 raCb->rbAllocInfo.tbInfo[0].tbCb = &hqP->tbInfo[0];
3253 raCb->rbAllocInfo.tbInfo[0].schdlngForTb = TRUE;
3255 retxBw += raCb->rbAllocInfo.rbsReq;
3257 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4RetxLst, \
3259 hqP->reqLnk.node = (PTR)hqP;
3263 dlSf->bwAssigned += retxBw;
3269 * @brief This function implements scheduler DL allocation for
3270 * for broadcast (on PDSCH) and paging.
3274 * Function: rgSCHCmnDlBcchPcch
3275 * Purpose: This function implements scheduler for DL allocation
3276 * for broadcast (on PDSCH) and paging.
3278 * Invoked by: Scheduler
3280 * @param[in] RgSchCellCb* cell
3286 PRIVATE Void rgSCHCmnDlBcchPcch
3289 RgSchCmnDlRbAllocInfo *allocInfo,
3290 RgInfSfAlloc *subfrmAlloc
3293 PRIVATE Void rgSCHCmnDlBcchPcch(cell, allocInfo, subfrmAlloc)
3295 RgSchCmnDlRbAllocInfo *allocInfo;
3296 RgInfSfAlloc *subfrmAlloc;
3299 CmLteTimingInfo frm;
3301 RgSchClcDlLcCb *pcch;
3305 RgSchClcDlLcCb *bcch, *bch;
3306 #endif/*RGR_SI_SCH*/
3310 frm = cell->crntTime;
3312 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
3313 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
3314 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
3316 // RGSCH_SUBFRAME_INDEX(frm);
3317 //RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
3320 /* Compute the subframe for which allocation is being made */
3321 /* essentially, we need pointer to the dl frame for this subframe */
3322 sf = rgSCHUtlSubFrmGet(cell, frm);
3326 bch = rgSCHDbmGetBcchOnBch(cell);
3327 #if (ERRCLASS & ERRCLS_DEBUG)
3330 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on BCH is not configured");
3334 if (bch->boLst.first != NULLP)
3336 bo = (RgSchClcBoRpt *)(bch->boLst.first->node);
3337 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3339 sf->bch.tbSize = bo->bo;
3340 cmLListDelFrm(&bch->boLst, bch->boLst.first);
3341 /* ccpu00117052 - MOD - Passing double pointer
3342 for proper NULLP assignment*/
3343 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(*bo));
3344 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, bch->lcId,TRUE);
3349 if ((frm.sfn % 4 == 0) && (frm.subframe == 0))
3354 allocInfo->bcchAlloc.schdFirst = FALSE;
3355 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
3356 #if (ERRCLASS & ERRCLS_DEBUG)
3359 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3363 if (bcch->boLst.first != NULLP)
3365 bo = (RgSchClcBoRpt *)(bcch->boLst.first->node);
3367 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3369 allocInfo->bcchAlloc.schdFirst = TRUE;
3370 /* Time to perform allocation for this BCCH transmission */
3371 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3375 if(!allocInfo->bcchAlloc.schdFirst)
3378 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
3379 #if (ERRCLASS & ERRCLS_DEBUG)
3382 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3386 lnk = bcch->boLst.first;
3387 while (lnk != NULLP)
3389 bo = (RgSchClcBoRpt *)(lnk->node);
3391 valid = rgSCHCmnChkInWin(frm, bo->timeToTx, bo->maxTimeToTx);
3395 bo->i = RGSCH_CALC_SF_DIFF(frm, bo->timeToTx);
3396 /* Time to perform allocation for this BCCH transmission */
3397 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3402 valid = rgSCHCmnChkPastWin(frm, bo->maxTimeToTx);
3405 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
3406 /* ccpu00117052 - MOD - Passing double pointer
3407 for proper NULLP assignment*/
3408 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo,
3409 sizeof(RgSchClcBoRpt));
3415 rgSCHDlSiSched(cell, allocInfo, subfrmAlloc);
3416 #endif/*RGR_SI_SCH*/
3418 pcch = rgSCHDbmGetPcch(cell);
3422 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"PCCH on DLSCH is not configured");
3426 if (pcch->boLst.first != NULLP)
3428 bo = (RgSchClcBoRpt *)(pcch->boLst.first->node);
3430 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3432 /* Time to perform allocation for this PCCH transmission */
3433 rgSCHCmnClcAlloc(cell, sf, pcch, RGSCH_P_RNTI, allocInfo);
3441 * Fun: rgSCHCmnChkInWin
3443 * Desc: This function checks if frm occurs in window
3445 * Ret: TRUE - if in window
3450 * File: rg_sch_cmn.c
3454 Bool rgSCHCmnChkInWin
3456 CmLteTimingInfo frm,
3457 CmLteTimingInfo start,
3461 Bool rgSCHCmnChkInWin(frm, start, end)
3462 CmLteTimingInfo frm;
3463 CmLteTimingInfo start;
3464 CmLteTimingInfo end;
3470 if (end.sfn > start.sfn)
3472 if (frm.sfn > start.sfn
3473 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3475 if (frm.sfn < end.sfn
3477 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3479 || (frm.sfn == end.sfn && frm.slot <= start.slot))
3486 /* Testing for wrap around, sfn wraparound check should be enough */
3487 else if (end.sfn < start.sfn)
3489 if (frm.sfn > start.sfn
3490 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3496 if (frm.sfn < end.sfn
3497 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3503 else /* start.sfn == end.sfn */
3505 if (frm.sfn == start.sfn
3506 && (frm.slot >= start.slot
3507 && frm.slot <= end.slot))
3514 } /* end of rgSCHCmnChkInWin*/
3518 * Fun: rgSCHCmnChkPastWin
3520 * Desc: This function checks if frm has gone past window edge
3522 * Ret: TRUE - if past window edge
3527 * File: rg_sch_cmn.c
3531 Bool rgSCHCmnChkPastWin
3533 CmLteTimingInfo frm,
3537 Bool rgSCHCmnChkPastWin(frm, end)
3538 CmLteTimingInfo frm;
3539 CmLteTimingInfo end;
3542 CmLteTimingInfo refFrm = end;
3546 RGSCH_INCR_FRAME(refFrm.sfn);
3547 RGSCH_INCR_SUB_FRAME(end, 1);
3548 pastWin = rgSCHCmnChkInWin(frm, end, refFrm);
3551 } /* end of rgSCHCmnChkPastWin*/
3554 * @brief This function implements allocation of the resources for common
3555 * channels BCCH, PCCH.
3559 * Function: rgSCHCmnClcAlloc
3560 * Purpose: This function implements selection of number of RBs based
3561 * the allowed grant for the service. It is also responsible
3562 * for selection of MCS for the transmission.
3564 * Invoked by: Scheduler
3566 * @param[in] RgSchCellCb *cell,
3567 * @param[in] RgSchDlSf *sf,
3568 * @param[in] RgSchClcDlLcCb *lch,
3569 * @param[in] uint16_t rnti,
3570 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3575 PRIVATE Void rgSCHCmnClcAlloc
3579 RgSchClcDlLcCb *lch,
3581 RgSchCmnDlRbAllocInfo *allocInfo
3584 PRIVATE Void rgSCHCmnClcAlloc(cell, sf, lch, rnti, allocInfo)
3587 RgSchClcDlLcCb *lch;
3589 RgSchCmnDlRbAllocInfo *allocInfo;
3592 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3599 uint8_t cfi = cellDl->currCfi;
3603 bo = (RgSchClcBoRpt *)(lch->boLst.first->node);
3607 /* rgSCHCmnClcRbAllocForFxdTb(cell, bo->bo, cellDl->ccchCqi, &rb);*/
3608 if(cellDl->bitsPerRb==0)
3610 while ((rgTbSzTbl[0][0][rb]) < (tbs*8))
3618 rb = RGSCH_CEIL((tbs*8), cellDl->bitsPerRb);
3620 /* DwPTS Scheduling Changes Start */
3622 if(sf->sfType == RG_SCH_SPL_SF_DATA)
3624 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
3626 /* Calculate the less RE's because of DwPTS */
3627 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
3629 /* Increase number of RBs in Spl SF to compensate for lost REs */
3630 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
3633 /* DwPTS Scheduling Changes End */
3634 /*ccpu00115595- end*/
3635 /* additional check to see if required RBs
3636 * exceeds the available */
3637 if (rb > sf->bw - sf->bwAssigned)
3639 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"BW allocation "
3640 "failed for CRNTI:%d",rnti);
3644 /* Update the subframe Allocated BW field */
3645 sf->bwAssigned = sf->bwAssigned + rb;
3646 /* Fill in the BCCH/PCCH transmission info to the RBAllocInfo struct */
3647 if (rnti == RGSCH_SI_RNTI)
3649 allocInfo->bcchAlloc.rnti = rnti;
3650 allocInfo->bcchAlloc.dlSf = sf;
3651 allocInfo->bcchAlloc.tbInfo[0].bytesReq = tbs;
3652 allocInfo->bcchAlloc.rbsReq = rb;
3653 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
3654 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
3655 /* Nprb indication at PHY for common Ch */
3656 allocInfo->bcchAlloc.nPrb = bo->nPrb;
3660 allocInfo->pcchAlloc.rnti = rnti;
3661 allocInfo->pcchAlloc.dlSf = sf;
3662 allocInfo->pcchAlloc.tbInfo[0].bytesReq = tbs;
3663 allocInfo->pcchAlloc.rbsReq = rb;
3664 allocInfo->pcchAlloc.tbInfo[0].imcs = mcs;
3665 allocInfo->pcchAlloc.tbInfo[0].noLyr = 1;
3666 allocInfo->pcchAlloc.nPrb = bo->nPrb;
3673 * @brief This function implements PDCCH allocation for common channels.
3677 * Function: rgSCHCmnCmnPdcchAlloc
3678 * Purpose: This function implements allocation of PDCCH for a UE.
3679 * 1. This uses index 0 of PDCCH table for efficiency.
3680 * 2. Uses the candidate PDCCH count for the aggr level.
3681 * 3. Look for availability for each candidate and choose
3682 * the first one available.
3684 * Invoked by: Scheduler
3686 * @param[in] RgSchCellCb *cell
3687 * @param[in] RgSchDlSf *sf
3688 * @return RgSchPdcch *
3689 * -# NULLP when unsuccessful
3693 RgSchPdcch *rgSCHCmnCmnPdcchAlloc
3699 RgSchPdcch *rgSCHCmnCmnPdcchAlloc(cell, subFrm)
3704 CmLteAggrLvl aggrLvl;
3705 RgSchPdcchInfo *pdcchInfo;
3707 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3708 uint8_t numCce; /*store num CCEs based on
3709 aggregation level */
3711 aggrLvl = cellSch->dl.cmnChAggrLvl;
3713 pdcchInfo = &(subFrm->pdcchInfo);
3715 /* Updating the no. of nCce in pdcchInfo, in case if CFI
3718 if(subFrm->nCce != pdcchInfo->nCce)
3720 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3723 if(cell->nCce != pdcchInfo->nCce)
3725 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
3731 case CM_LTE_AGGR_LVL4:
3734 case CM_LTE_AGGR_LVL8:
3737 case CM_LTE_AGGR_LVL16:
3744 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3747 pdcch->isSpsRnti = FALSE;
3749 /* Increment the CCE used counter in the current subframe */
3750 subFrm->cceCnt += numCce;
3751 pdcch->pdcchSearchSpace = RG_SCH_CMN_SEARCH_SPACE;
3756 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3757 subFrm->isCceFailure = TRUE;
3759 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
3760 "PDCCH ERR: NO PDDCH AVAIL IN COMMON SEARCH SPACE aggr:%u",
/* NOTE(review): embedded original numbering is non-contiguous here; braces,
 * #ifdef ANSI/#else alternates and local declarations are elided in this excerpt. */
3767 * @brief This function implements bandwidth allocation for common channels.
3771 * Function: rgSCHCmnClcRbAlloc
3772 * Purpose: This function implements bandwith allocation logic
3773 * for common control channels.
3775 * Invoked by: Scheduler
3777 * @param[in] RgSchCellCb* cell
3778 * @param[in] uint32_t bo
3779 * @param[in] uint8_t cqi
3780 * @param[in] uint8_t *rb
3781 * @param[in] uint32_t *tbs
3782 * @param[in] uint8_t *mcs
3783 * @param[in] RgSchDlSf *sf
3789 Void rgSCHCmnClcRbAlloc
3802 Void rgSCHCmnClcRbAlloc(cell, bo, cqi, rb, tbs, mcs, iTbs, isSpsBo)
3815 PRIVATE Void rgSCHCmnClcRbAlloc
3826 PRIVATE Void rgSCHCmnClcRbAlloc(cell, bo, cqi, rb, tbs, mcs, sf)
3835 #endif /* LTEMAC_SPS */
3838 RgSchCmnTbSzEff *effTbl;
3841 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3842 uint8_t cfi = cellSch->dl.currCfi;
/* Map the common-channel CQI to an iTBS via the per-CFI lookup table,
 * then derive the starting MCS from that iTBS. */
3845 /* first get the CQI to MCS table and determine the number of RBs */
3846 effTbl = (RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]);
3847 iTbsVal = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[cqi];
3848 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3850 /* Efficiency is number of bits per 1024 REs */
3851 eff = (*effTbl)[iTbsVal];
/* bo is treated as bytes: bo * 8 gives bits, scaled by 1024 to match the
 * per-1024-RE efficiency units above. */
3853 /* Get the number of REs needed for this bo */
3854 noRes = ((bo * 8 * 1024) / eff );
3856 /* Get the number of RBs needed for this transmission */
3857 /* Number of RBs = No of REs / No of REs per RB */
3858 tmpRb = RGSCH_CEIL(noRes, cellSch->dl.noResPerRb[cfi]);
3859 /* KWORK_FIX: added check to see if rb has crossed maxRb*/
3860 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
/* Clamp the estimate to the per-UE DL bandwidth limit before refining. */
3861 if (tmpRb > cellSch->dl.maxDlBwPerUe)
3863 tmpRb = cellSch->dl.maxDlBwPerUe;
/* Grow the RB count until the TBS (bytes) covers bo, still bounded by
 * maxDlBwPerUe; rgTbSzTbl yields bits, hence the /8. */
3865 while ((rgTbSzTbl[0][iTbsVal][tmpRb-1]/8) < bo &&
3866 (tmpRb < cellSch->dl.maxDlBwPerUe))
3869 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
/* Final outputs: TBS in bytes, RB count, and MCS recomputed from iTBS. */
3871 *tbs = rgTbSzTbl[0][iTbsVal][tmpRb-1]/8;
3872 *rb = (uint8_t)tmpRb;
3873 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
/* NOTE(review): embedded original numbering is non-contiguous; braces and
 * return statements between visible lines are elided in this excerpt. */
3881 * @brief Scheduling for MSG4.
3885 * Function: rgSCHCmnMsg4Alloc
3886 * Purpose: Scheduling for MSG4
3888 * Invoked by: Scheduler
3890 * @param[in] RgSchCellCb* cell
3891 * @param[in] RgSchRaCb* raCb
3892 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3897 PRIVATE S16 rgSCHCmnMsg4Alloc
3901 RgSchCmnDlRbAllocInfo *allocInfo
3904 PRIVATE S16 rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)
3907 RgSchCmnDlRbAllocInfo *allocInfo;
3910 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3913 /* SR_RACH_STATS : MSG4 TO BE TXED */
/* Bail out early if the target MSG4 DL subframe has no bandwidth left. */
3915 /* Return if subframe BW exhausted */
3916 if (allocInfo->msg4Alloc.msg4DlSf->bw <=
3917 allocInfo->msg4Alloc.msg4DlSf->bwAssigned)
3919 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId ,
/* Acquire a HARQ process for MSG4; failure aborts the allocation. */
3924 if (rgSCHDhmGetMsg4HqProc(raCb, cellSch->dl.time) != ROK)
3926 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3927 "rgSCHDhmGetMsg4HqProc failed");
3931 raCb->rbAllocInfo.dlSf = allocInfo->msg4Alloc.msg4DlSf;
/* Dedicated RB allocation for MSG4; on failure the HARQ process obtained
 * above is released so it is not leaked. */
3933 if (rgSCHCmnMsg4DedAlloc(cell, raCb) != ROK)
3935 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
3936 rgSCHDhmRlsHqpTb(raCb->dlHqE->msg4Proc, 0, FALSE);
3937 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3938 "rgSCHCmnMsg4DedAlloc failed.");
/* Success: queue the MSG4 HARQ process on the tx list and count the
 * scheduled CCCH UE in this subframe. */
3941 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4TxLst, &raCb->dlHqE->msg4Proc->reqLnk);
3942 raCb->dlHqE->msg4Proc->reqLnk.node = (PTR)raCb->dlHqE->msg4Proc;
3943 allocInfo->msg4Alloc.msg4DlSf->schdCcchUe++;
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * switch/case bodies, #ifdef alternates and returns are elided in this excerpt. */
3950 * @brief This function implements PDCCH allocation for an UE.
3954 * Function: PdcchAlloc
3955 * Purpose: This function implements allocation of PDCCH for an UE.
3956 * 1. Get the aggregation level for the CQI of the UE.
3957 * 2. Get the candidate PDCCH count for the aggr level.
3958 * 3. Look for availability for each candidate and choose
3959 * the first one available.
3961 * Invoked by: Scheduler
3966 * @param[in] dciFrmt
3967 * @return RgSchPdcch *
3968 * -# NULLP when unsuccessful
3972 RgSchPdcch *rgSCHCmnPdcchAlloc
3978 TfuDciFormat dciFrmt,
3982 RgSchPdcch *rgSCHCmnPdcchAlloc(cell, subFrm, cqi, dciFrmt, isDtx)
3987 TfuDciFormat dciFrmt;
3991 CmLteAggrLvl aggrLvl;
3992 RgSchPdcchInfo *pdcchInfo;
/* Aggregation level is currently hard-coded to level 2 (per the TODO),
 * overriding the CQI/DCI-format based table lookup. */
3996 /* 3.1 consider the selected DCI format size in determining the
3997 * aggregation level */
3998 //TODO_SID Need to update. Currently using 4 aggregation level
3999 aggrLvl = CM_LTE_AGGR_LVL2;//cellSch->dciAggrLvl[cqi][dciFrmt];
/* DCI format 1A for a UE flagged allocCmnUlPdcch goes through the common
 * search space instead of the UE-specific one. */
4002 if((dciFrmt == TFU_DCI_FORMAT_1A) &&
4003 ((ue) && (ue->allocCmnUlPdcch)) )
4005 pdcch = rgSCHCmnCmnPdcchAlloc(cell, subFrm);
4006 /* Since CRNTI Scrambled */
4009 pdcch->dciNumOfBits = ue->dciSize.cmnSize[dciFrmt];
4010 // prc_trace_format_string(PRC_TRACE_GROUP_PS, PRC_TRACE_INFO_LOW,"Forcing alloc in CMN search spc size %d fmt %d \n",
4011 // pdcch->dciNumOfBits, dciFrmt);
/* Bump the aggregation level one step (2->4->8->16) for extra decoding
 * redundancy, unless already at the maximum (LVL16). */
4017 /* Incrementing aggrLvl by 1 if it not AGGR_LVL8(MAX SIZE)
4018 * inorder to increse the redudancy bits for better decoding of UE */
4021 if (aggrLvl != CM_LTE_AGGR_LVL16)
4025 case CM_LTE_AGGR_LVL2:
4026 aggrLvl = CM_LTE_AGGR_LVL4;
4028 case CM_LTE_AGGR_LVL4:
4029 aggrLvl = CM_LTE_AGGR_LVL8;
4031 case CM_LTE_AGGR_LVL8:
4032 aggrLvl = CM_LTE_AGGR_LVL16;
4041 pdcchInfo = &subFrm->pdcchInfo;
/* Re-initialise the subframe's CCE bookkeeping if the CCE count changed
 * (e.g. due to a CFI change); two variants are visible behind elided #ifs. */
4043 /* Updating the no. of nCce in pdcchInfo, in case if CFI
4046 if(subFrm->nCce != pdcchInfo->nCce)
4048 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
4051 if(cell->nCce != pdcchInfo->nCce)
4053 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Not enough CCEs for the chosen aggregation level: flag the CCE failure
 * for this subframe and give up. */
4057 if (pdcchInfo->nCce < (1 << (aggrLvl - 1)))
4059 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
4060 subFrm->isCceFailure = TRUE;
4061 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
4062 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
/* Try to grab a free PDCCH candidate at this aggregation level. */
4068 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
4070 /* SR_RACH_STATS : Reset isTBMsg4 */
4071 pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4= FALSE;
4072 pdcch->dci.u.format0Info.isSrGrant = FALSE;
4074 pdcch->isSpsRnti = FALSE;
4076 /* Increment the CCE used counter in the current subframe */
4077 subFrm->cceCnt += aggrLvl;
4078 pdcch->pdcchSearchSpace = RG_SCH_UE_SPECIFIC_SEARCH_SPACE;
/* DCI size: secondary-cell vs primary-cell UE branches both currently use
 * the fixed 5GTF DCI A1/B1 size (table lookups commented out). */
4082 if (ue->cell != cell)
4084 /* Secondary Cell */
4085 //pdcch->dciNumOfBits = ue->dciSize.noUlCcSize[dciFrmt];
4086 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
4091 //pdcch->dciNumOfBits = ue->dciSize.dedSize[dciFrmt];
4092 //TODO_SID Need to update dci size.
4093 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
/* No UE context (elided branch): fall back to the cell-level DCI size. */
4099 pdcch->dciNumOfBits = cell->dciSize.size[dciFrmt];
4104 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
4105 subFrm->isCceFailure = TRUE;
4107 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
4108 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * declarations and #ifdef LTEMAC_SPS framing are elided in this excerpt. */
4115 * @brief This function implements BW allocation for CCCH SDU
4119 * Function: rgSCHCmnCcchSduDedAlloc
4120 * Purpose: Downlink bandwidth Allocation for CCCH SDU.
4122 * Invoked by: Scheduler
4124 * @param[in] RgSchCellCb* cell
4125 * @param[out] RgSchUeCb *ueCb
4130 PRIVATE S16 rgSCHCmnCcchSduDedAlloc
4136 PRIVATE S16 rgSCHCmnCcchSduDedAlloc(cell, ueCb)
4141 RgSchDlHqEnt *hqE = NULLP;
4143 RgSchDlRbAlloc *rbAllocinfo = NULLP;
4144 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4148 uint8_t cfi = cellDl->currCfi;
4152 rbAllocinfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
/* Effective BO = pending CCCH SDU bytes plus the CCCH SDU header overhead. */
4154 effBo = ueCb->dlCcchInfo.bo + RGSCH_CCCH_SDU_HDRSIZE;
/* Two call variants of rgSCHCmnClcRbAlloc behind an elided #ifdef
 * LTEMAC_SPS (non-SPS passes sf; SPS variant passes &iTbs and FALSE). */
4157 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
4158 &rbAllocinfo->tbInfo[0].bytesReq,
4159 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
4160 #else /* LTEMAC_SPS */
4161 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
4162 &rbAllocinfo->tbInfo[0].bytesReq,\
4163 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
4165 #endif /* LTEMAC_SPS */
/* If the request exceeds the remaining subframe bandwidth, fall back. */
4168 /* Cannot exceed the total number of RBs in the cell */
4169 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
4170 rbAllocinfo->dlSf->bwAssigned)))
4172 /* Check if atleast one allocation was possible.
4173 This may be the case where the Bw is very less and
4174 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
4175 if (rbAllocinfo->dlSf->bwAssigned == 0)
/* Empty subframe: take the whole bandwidth and raise iTbs until the
 * TBS covers effBo (note: ++iTbs pre-increments before the table read). */
4177 numRb = rbAllocinfo->dlSf->bw;
4178 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
4179 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
4183 rbAllocinfo->rbsReq = numRb;
4184 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
/* Special (DwPTS) subframes carry fewer REs; recompute the TB size. */
4185 /* DwPTS Scheduling Changes Start */
4187 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4189 rbAllocinfo->tbInfo[0].bytesReq =
4190 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1,cfi);
4193 /* DwPTS Scheduling Changes End */
4194 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
/* Commit: account the assigned bandwidth and fill the allocation record
 * (HARQ TB, RNTI, single layer). */
4202 /* Update the subframe Allocated BW field */
4203 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4204 rbAllocinfo->rbsReq;
4205 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
4206 rbAllocinfo->tbInfo[0].tbCb = &hqE->ccchSduProc->tbInfo[0];
4207 rbAllocinfo->rnti = ueCb->ueId;
4208 rbAllocinfo->tbInfo[0].noLyr = 1;
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * declarations and #ifdef framing are elided in this excerpt. Mirrors
 * rgSCHCmnCcchSduDedAlloc but for MSG4 (tmpCrnti, contention resolution). */
4215 * @brief This function implements BW allocation for MSG4
4219 * Function: rgSCHCmnMsg4DedAlloc
4220 * Purpose: Downlink bandwidth Allocation for MSG4.
4222 * Invoked by: Scheduler
4224 * @param[in] RgSchCellCb* cell
4225 * @param[out] RgSchRaCb *raCb
4230 PRIVATE S16 rgSCHCmnMsg4DedAlloc
4236 PRIVATE S16 rgSCHCmnMsg4DedAlloc(cell, raCb)
4242 RgSchDlRbAlloc *rbAllocinfo = &raCb->rbAllocInfo;
4246 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4247 uint8_t cfi = cellDl->currCfi;
/* Effective BO = CCCH BO + MSG4 header + contention-resolution ID size. */
4251 effBo = raCb->dlCcchInfo.bo + RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE;
4254 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
4255 &rbAllocinfo->tbInfo[0].bytesReq,\
4256 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
4257 #else /* LTEMAC_SPS */
4258 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
4259 &rbAllocinfo->tbInfo[0].bytesReq,\
4260 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
4262 #endif /* LTEMAC_SPS */
/* If the request exceeds the remaining subframe bandwidth, fall back. */
4265 /* Cannot exceed the total number of RBs in the cell */
4266 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
4267 rbAllocinfo->dlSf->bwAssigned)))
4269 /* Check if atleast one allocation was possible.
4270 This may be the case where the Bw is very less and
4271 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
4272 if (rbAllocinfo->dlSf->bwAssigned == 0)
/* Empty subframe: take the full bandwidth and raise iTbs until the TBS
 * covers effBo (++iTbs pre-increments before the table read). */
4274 numRb = rbAllocinfo->dlSf->bw;
4275 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
4276 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
4280 rbAllocinfo->rbsReq = numRb;
4281 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
/* Special (DwPTS) subframes carry fewer REs; recompute the TB size. */
4282 /* DwPTS Scheduling Changes Start */
4284 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4286 rbAllocinfo->tbInfo[0].bytesReq =
4287 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1, cfi);
4290 /* DwPTS Scheduling Changes End */
4291 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
/* Commit: account the assigned bandwidth; MSG4 is addressed with the
 * temporary C-RNTI and uses the msg4Proc HARQ TB, single layer. */
4299 /* Update the subframe Allocated BW field */
4300 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4301 rbAllocinfo->rbsReq;
4302 rbAllocinfo->rnti = raCb->tmpCrnti;
4303 rbAllocinfo->tbInfo[0].tbCb = &raCb->dlHqE->msg4Proc->tbInfo[0];
4304 rbAllocinfo->tbInfo[0].schdlngForTb = TRUE;
4305 rbAllocinfo->tbInfo[0].noLyr = 1;
/* NOTE(review): TDD variant of rgSCHCmnDlRaRsp (an FDD variant with the same
 * name follows, presumably behind elided #ifdef LTE_TDD framing). Embedded
 * numbering is non-contiguous; braces and declarations are elided. */
4312 * @brief This function implements scheduling for RA Response.
4316 * Function: rgSCHCmnDlRaRsp
4317 * Purpose: Downlink scheduling for RA responses.
4319 * Invoked by: Scheduler
4321 * @param[in] RgSchCellCb* cell
4326 PRIVATE Void rgSCHCmnDlRaRsp
4329 RgSchCmnDlRbAllocInfo *allocInfo
4332 PRIVATE Void rgSCHCmnDlRaRsp(cell, allocInfo)
4334 RgSchCmnDlRbAllocInfo *allocInfo;
4337 CmLteTimingInfo frm;
4338 CmLteTimingInfo schFrm;
4344 RgSchTddRachRspLst *rachRsp;
4345 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
/* Allocation targets current time advanced by the DL scheduling delta. */
4350 frm = cell->crntTime;
4351 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4353 /* Compute the subframe for which allocation is being made */
4354 /* essentially, we need pointer to the dl frame for this subframe */
4355 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4357 /* Get the RACH Response scheduling related information
4358 * for the subframe with RA index */
4359 raIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][frm.subframe]-1;
4361 rachRsp = &cell->rachRspLst[raIdx];
/* Walk every (SFN offset, subframe) pair that maps RACH occasions to this
 * response subframe, then every RA-RNTI within each subframe. */
4363 for(sfnIdx = 0; sfnIdx < rachRsp->numRadiofrms; sfnIdx++)
4365 /* For all scheduled RACH Responses in SFNs */
4367 RG_SCH_CMN_DECR_FRAME(schFrm.sfn, rachRsp->rachRsp[sfnIdx].sfnOffset);
4368 /* For all scheduled RACH Responses in subframes */
4370 subfrmIdx < rachRsp->rachRsp[sfnIdx].numSubfrms; subfrmIdx++)
4372 schFrm.subframe = rachRsp->rachRsp[sfnIdx].subframe[subfrmIdx];
4373 /* compute the last RA RNTI used in the previous subframe */
4374 raIdx = (((schFrm.sfn % cell->raInfo.maxRaSize) * \
4375 RGSCH_NUM_SUB_FRAMES * RGSCH_MAX_RA_RNTI_PER_SUBFRM) \
4378 /* For all RA RNTIs within a subframe */
4380 for(i=0; (i < RGSCH_MAX_RA_RNTI_PER_SUBFRM) && \
4381 (noRaRnti < RGSCH_MAX_TDD_RA_RSP_ALLOC); i++)
4383 rarnti = (schFrm.subframe + RGSCH_NUM_SUB_FRAMES*i + 1);
4384 rntiIdx = (raIdx + RGSCH_NUM_SUB_FRAMES*i);
/* Only RA-RNTIs with pending RA requests get an allocation attempt;
 * a failed attempt means DL resources are exhausted. */
4386 if (cell->raInfo.raReqLst[rntiIdx].first != NULLP)
4388 /* compute the next RA RNTI */
4389 if (rgSCHCmnRaRspAlloc(cell, subFrm, rntiIdx,
4390 rarnti, noRaRnti, allocInfo) != ROK)
4392 /* The resources are exhausted */
/* NOTE(review): FDD variant of rgSCHCmnDlRaRsp (the TDD variant above shares
 * the name, presumably separated by elided #ifdef LTE_TDD framing). Embedded
 * numbering is non-contiguous; braces and declarations are elided. */
4406 * @brief This function implements scheduling for RA Response.
4410 * Function: rgSCHCmnDlRaRsp
4411 * Purpose: Downlink scheduling for RA responses.
4413 * Invoked by: Scheduler
4415 * @param[in] RgSchCellCb* cell
4416 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4421 PRIVATE Void rgSCHCmnDlRaRsp //FDD
4424 RgSchCmnDlRbAllocInfo *allocInfo
4427 PRIVATE Void rgSCHCmnDlRaRsp(cell, allocInfo)
4429 RgSchCmnDlRbAllocInfo *allocInfo;
4432 CmLteTimingInfo frm;
4433 CmLteTimingInfo winStartFrm;
4435 uint8_t winStartIdx;
4439 RgSchCmnCell *sched;
4440 uint8_t i,noRaRnti=0;
/* Allocation targets current time advanced by the DL scheduling delta. */
4442 frm = cell->crntTime;
4443 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4445 /* Compute the subframe for which allocation is being made */
4446 /* essentially, we need pointer to the dl frame for this subframe */
4447 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4448 sched = RG_SCH_CMN_GET_CELL(cell);
4450 /* ccpu00132523 - Window Start calculated by considering RAR window size,
4451 * RAR Wait period, Subframes occupied for respective preamble format*/
4452 winGap = (sched->dl.numRaSubFrms-1) + (cell->rachCfg.raWinSize-1)
4453 +RGSCH_RARSP_WAIT_PERIOD;
4455 /* Window starting occasion is retrieved using the gap and tried to
4456 * fit to the size of raReqLst array*/
4457 RGSCHDECRFRMCRNTTIME(frm, winStartFrm, winGap);
4459 //5G_TODO TIMING update. Need to check
/* Ring index into raReqLst: parity of the SFN selects the half, offset by
 * the slot within the frame. */
4460 winStartIdx = (winStartFrm.sfn & 1) * RGSCH_MAX_RA_RNTI+ winStartFrm.slot;
/* Scan the RAR window; at most RG_SCH_CMN_MAX_CMN_PDCCH RA-RNTIs are
 * served per subframe. */
4462 for(i = 0; ((i < cell->rachCfg.raWinSize) && (noRaRnti < RG_SCH_CMN_MAX_CMN_PDCCH)); i++)
4464 raIdx = (winStartIdx + i) % RGSCH_RAREQ_ARRAY_SIZE;
4466 if (cell->raInfo.raReqLst[raIdx].first != NULLP)
/* Backoff Indicator header is budgeted only for the first window entry
 * (i == 0 makes !i evaluate to 1). */
4468 allocInfo->raRspAlloc[noRaRnti].biEstmt = \
4469 (!i * RGSCH_ONE_BIHDR_SIZE);
4470 rarnti = raIdx % RGSCH_MAX_RA_RNTI+ 1;
4471 if (rgSCHCmnRaRspAlloc(cell, subFrm, raIdx,
4472 rarnti, noRaRnti, allocInfo) != ROK)
4474 /* The resources are exhausted */
4477 /* ccpu00132523- If all the RAP ids are not scheduled then need not
4478 * proceed for next RA RNTIs*/
4479 if(allocInfo->raRspAlloc[noRaRnti].numRapids < cell->raInfo.raReqLst[raIdx].count)
4483 noRaRnti++; /* Max of RG_SCH_CMN_MAX_CMN_PDCCH RARNTIs
4484 for response allocation */
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * declarations, #ifdef framing and returns are elided in this excerpt. */
4493 * @brief This function allocates the resources for an RARNTI.
4497 * Function: rgSCHCmnRaRspAlloc
4498 * Purpose: Allocate resources to a RARNTI.
4499 * 0. Allocate PDCCH for sending the response.
4500 * 1. Locate the number of RA requests pending for the RARNTI.
4501 * 2. Compute the size of data to be built.
4502 * 3. Using common channel CQI, compute the number of RBs.
4504 * Invoked by: Scheduler
4506 * @param[in] RgSchCellCb *cell,
4507 * @param[in] RgSchDlSf *subFrm,
4508 * @param[in] uint16_t rarnti,
4509 * @param[in] uint8_t noRaRnti
4510 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4515 PRIVATE S16 rgSCHCmnRaRspAlloc
4522 RgSchCmnDlRbAllocInfo *allocInfo
4525 PRIVATE S16 rgSCHCmnRaRspAlloc(cell,subFrm,raIndex,rarnti,noRaRnti,allocInfo)
4531 RgSchCmnDlRbAllocInfo *allocInfo;
4534 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4535 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4539 /*ccpu00116700,ccpu00116708- Corrected the wrong type for mcs*/
4542 /* RACH handling related changes */
4543 Bool isAlloc = FALSE;
/* CAUTION: function-local static -- schdNumRapid accumulates scheduled
 * RAPID count across calls; reset logic sits in an elided line (see the
 * ccpu00132523 note below). Not reentrant/thread-safe by design. */
4544 static uint8_t schdNumRapid = 0;
4545 uint8_t remNumRapid = 0;
4550 uint8_t cfi = cellDl->currCfi;
4557 /* ccpu00132523: Resetting the schdRap Id count in every scheduling subframe*/
/* Guard: no bandwidth left in this subframe. */
4564 if (subFrm->bw == subFrm->bwAssigned)
4566 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4567 "bw == bwAssigned RARNTI:%d",rarnti);
/* Guard: no RA requests actually pending for this index. */
4571 reqLst = &cell->raInfo.raReqLst[raIndex];
4572 if (reqLst->count == 0)
4574 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4575 "reqLst Count=0 RARNTI:%d",rarnti);
4578 remNumRapid = reqLst->count;
/* Cap total RARs so Msg3 grants fit in the corresponding UL subframe. */
4581 /* Limit number of rach rsps to maxMsg3PerUlsf */
4582 if ( schdNumRapid+remNumRapid > cellUl->maxMsg3PerUlSf )
4584 remNumRapid = cellUl->maxMsg3PerUlSf-schdNumRapid;
/* Size the RAR PDU (plus any BI subheader estimated by the caller) and
 * map it to an allowed common-channel TB size / PRB / MCS. */
4590 /* Try allocating for as many RAPIDs as possible */
4591 /* BI sub-header size to the tbSize requirement */
4592 noBytes = RGSCH_GET_RAR_BYTES(remNumRapid) +\
4593 allocInfo->raRspAlloc[noRaRnti].biEstmt;
4594 if ((allwdTbSz = rgSCHUtlGetAllwdCchTbSz(noBytes*8, &nPrb, &mcs)) == -1)
/* Derive the RB count: linear search when bitsPerRb is unset, else a
 * straight ceiling division. */
4600 /* rgSCHCmnClcRbAllocForFxdTb(cell, allwdTbSz/8, cellDl->ccchCqi, &rb);*/
4601 if(cellDl->bitsPerRb==0)
4603 while ((rgTbSzTbl[0][0][rb]) <(uint32_t) allwdTbSz)
4611 rb = RGSCH_CEIL(allwdTbSz, cellDl->bitsPerRb);
/* DwPTS special subframe: compensate for REs lost versus a normal SF. */
4613 /* DwPTS Scheduling Changes Start */
4615 if (subFrm->sfType == RG_SCH_SPL_SF_DATA)
4617 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
4619 /* Calculate the less RE's because of DwPTS */
4620 lostRe = rb * (cellDl->noResPerRb[cfi] -
4621 cellDl->numReDwPts[cfi]);
4623 /* Increase number of RBs in Spl SF to compensate for lost REs */
4624 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
4627 /* DwPTS Scheduling Changes End */
4629 /*ccpu00115595- end*/
4630 if (rb > subFrm->bw - subFrm->bwAssigned)
4635 /* Allocation succeeded for 'remNumRapid' */
/* NOTE(review): printf on the scheduler fast path -- presumably debug-only;
 * consider gating behind the RLOG machinery. */
4638 printf("\n!!!RAR alloc noBytes:%u,allwdTbSz:%u,tbs:%u,rb:%u\n",
4639 noBytes,allwdTbSz,tbs,rb);
4644 RLOG_ARG0(L_INFO,DBG_CELLID,cell->cellId,"BW alloc Failed");
4648 subFrm->bwAssigned = subFrm->bwAssigned + rb;
/* Publish the allocation for RAR building: RNTI, TB size, RBs, target SF,
 * MCS, RA index, RAPID count, PRBs, layers, and 5GTF VRB groups. */
4650 /* Fill AllocInfo structure */
4651 allocInfo->raRspAlloc[noRaRnti].rnti = rarnti;
4652 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].bytesReq = tbs;
4653 allocInfo->raRspAlloc[noRaRnti].rbsReq = rb;
4654 allocInfo->raRspAlloc[noRaRnti].dlSf = subFrm;
4655 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].imcs = mcs;
4656 allocInfo->raRspAlloc[noRaRnti].raIndex = raIndex;
4657 /* RACH changes for multiple RAPID handling */
4658 allocInfo->raRspAlloc[noRaRnti].numRapids = remNumRapid;
4659 allocInfo->raRspAlloc[noRaRnti].nPrb = nPrb;
4660 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].noLyr = 1;
4661 allocInfo->raRspAlloc[noRaRnti].vrbgReq = RGSCH_CEIL(nPrb,MAX_5GTF_VRBG_SIZE);
4662 schdNumRapid += remNumRapid;
4666 /***********************************************************
4668 * Func : rgSCHCmnUlAllocFillRbInfo
4670 * Desc : Fills the start RB and the number of RBs for
4671 * uplink allocation.
4679 **********************************************************/
4681 Void rgSCHCmnUlAllocFillRbInfo
4688 Void rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc)
4691 RgSchUlAlloc *alloc;
4694 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4695 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4696 uint8_t cfi = cellDl->currCfi;
4699 alloc->grnt.rbStart = (alloc->sbStart * cellUl->sbSize) +
4700 cell->dynCfiCb.bwInfo[cfi].startRb;
4702 /* Num RBs = numSbAllocated * sbSize - less RBs in the last SB */
4703 alloc->grnt.numRb = (alloc->numSb * cellUl->sbSize);
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * declarations and returns are elided in this excerpt. */
4709 * @brief Grant request for Msg3.
4713 * Function : rgSCHCmnMsg3GrntReq
4715 * This is invoked by downlink scheduler to request allocation
4718 * - Attempt to allocate msg3 in the current msg3 subframe
4719 * Allocation attempt based on whether preamble is from group A
4720 * and the value of MESSAGE_SIZE_GROUP_A
4721 * - Link allocation with passed RNTI and msg3 HARQ process
4722 * - Set the HARQ process ID (*hqProcIdRef)
4724 * @param[in] RgSchCellCb *cell
4725 * @param[in] CmLteRnti rnti
4726 * @param[in] Bool preamGrpA
4727 * @param[in] RgSchUlHqProcCb *hqProc
4728 * @param[out] RgSchUlAlloc **ulAllocRef
4729 * @param[out] uint8_t *hqProcIdRef
4733 PRIVATE Void rgSCHCmnMsg3GrntReq
4738 RgSchUlHqProcCb *hqProc,
4739 RgSchUlAlloc **ulAllocRef,
4740 uint8_t *hqProcIdRef
4743 PRIVATE Void rgSCHCmnMsg3GrntReq(cell, rnti, preamGrpA, hqProc,
4744 ulAllocRef, hqProcIdRef)
4748 RgSchUlHqProcCb *hqProc;
4749 RgSchUlAlloc **ulAllocRef;
4750 uint8_t *hqProcIdRef;
4753 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4754 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
4756 RgSchUlAlloc *alloc;
/* NULLP out-param signals failure unless an allocation succeeds below. */
4761 *ulAllocRef = NULLP;
4763 /* Fix: ccpu00120610 Use remAllocs from subframe during msg3 allocation */
4764 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Subband count and MCS depend on the preamble group (A vs B). */
4768 if (preamGrpA == FALSE)
4770 numSb = cellUl->ra.prmblBNumSb;
4771 iMcs = cellUl->ra.prmblBIMcs;
4775 numSb = cellUl->ra.prmblANumSb;
4776 iMcs = cellUl->ra.prmblAIMcs;
4779 if ((hole = rgSCHUtlUlHoleFirst(sf)) != NULLP)
/* First allocation in this UL subframe: re-seed the (single, zero-start)
 * hole with the subband count for the current CFI. */
4781 if(*sf->allocCountRef == 0)
4783 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4784 /* Reinitialize the hole */
4785 if (sf->holeDb->count == 1 && (hole->start == 0)) /* Sanity check of holeDb */
/* NOTE(review): hole->num is assigned twice with the same value here --
 * the duplicate (one likely from a merge) is harmless but redundant. */
4787 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4788 /* Re-Initialize available subbands because of CFI change*/
4789 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4793 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4794 "Error! holeDb sanity check failed RNTI:%d",rnti);
4797 if (numSb <= hole->num)
/* Carve the allocation out of the hole and fill the UL grant fields. */
4800 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
4801 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
4802 alloc->grnt.iMcs = iMcs;
4803 alloc->grnt.iMcsCrnt = iMcs;
4804 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
4805 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
4806 /* To include the length and ModOrder in DataRecp Req.*/
4807 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
4808 RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
4809 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
4810 alloc->grnt.nDmrs = 0;
4811 alloc->grnt.hop = 0;
4812 alloc->grnt.delayBit = 0;
4813 alloc->grnt.isRtx = FALSE;
4814 *ulAllocRef = alloc;
/* Bind the Msg3 HARQ process to this UL subframe and start a new
 * transmission with maxMsg3Tx - 1 remaining retransmissions. */
4815 *hqProcIdRef = (cellUl->msg3SchdHqProcIdx);
4816 hqProc->procId = *hqProcIdRef;
4817 hqProc->ulSfIdx = (cellUl->msg3SchdIdx);
4820 alloc->pdcch = FALSE;
4821 alloc->forMsg3 = TRUE;
4822 alloc->hqProc = hqProc;
4823 rgSCHUhmNewTx(hqProc, (uint8_t)(cell->rachCfg.maxMsg3Tx - 1), alloc);
4824 //RLOG_ARG4(L_DEBUG,DBG_CELLID,cell->cellId,
4826 "\nRNTI:%d MSG3 ALLOC proc(%lu)procId(%d)schdIdx(%d)\n",
4828 ((PTR)alloc->hqProc),
4829 alloc->hqProc->procId,
4830 alloc->hqProc->ulSfIdx);
4831 RLOG_ARG2(L_DEBUG,DBG_CELLID,cell->cellId,
4832 "alloc(%p)maxMsg3Tx(%d)",
4834 cell->rachCfg.maxMsg3Tx);
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * declarations and #ifdef EMTC framing are elided in this excerpt. */
4843 * @brief This function determines the allocation limits and
4844 * parameters that aid in DL scheduling.
4848 * Function: rgSCHCmnDlSetUeAllocLmt
4849 * Purpose: This function determines the Maximum RBs
4850 * a UE is eligible to get based on softbuffer
4851 * limitation and cell->>>maxDlBwPerUe. The Codeword
4852 * specific parameters like iTbs, eff and noLyrs
4853 * are also set in this function. This function
4854 * is called while UE configuration and UeDlCqiInd.
4856 * Invoked by: Scheduler
4858 * @param[in] RgSchCellCb *cellCb
4859 * @param[in] RgSchCmnDlUe *ueDl
4864 PRIVATE Void rgSCHCmnDlSetUeAllocLmt
4871 PRIVATE Void rgSCHCmnDlSetUeAllocLmt(cell, ueDl, isEmtcUe)
4879 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4880 uint8_t cfi = cellSch->dl.currCfi;
/* EMTC UEs use the EMTC-specific CQI->iTBS tables; the structure of the
 * two branches is otherwise identical: per codeword (0/1) and per layer
 * count (1/2), fill iTbs and the matching spectral efficiency. */
4884 if(TRUE == isEmtcUe)
4886 /* ITbs for CW0 for 1 Layer Tx */
4887 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4888 [ueDl->mimoInfo.cwInfo[0].cqi];
4889 /* ITbs for CW0 for 2 Layer Tx */
4890 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4891 [ueDl->mimoInfo.cwInfo[0].cqi];
4892 /* Eff for CW0 for 1 Layer Tx */
4893 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4894 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4895 /* Eff for CW0 for 2 Layer Tx */
4896 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4897 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4899 /* ITbs for CW1 for 1 Layer Tx */
4900 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4901 [ueDl->mimoInfo.cwInfo[1].cqi];
4902 /* ITbs for CW1 for 2 Layer Tx */
4903 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4904 [ueDl->mimoInfo.cwInfo[1].cqi];
4905 /* Eff for CW1 for 1 Layer Tx */
4906 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4907 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4908 /* Eff for CW1 for 2 Layer Tx */
4909 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4910 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
/* Non-EMTC path: same fill, using the regular CQI->iTBS tables. */
4915 /* ITbs for CW0 for 1 Layer Tx */
4916 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4917 [ueDl->mimoInfo.cwInfo[0].cqi];
4918 /* ITbs for CW0 for 2 Layer Tx */
4919 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4920 [ueDl->mimoInfo.cwInfo[0].cqi];
4921 /* Eff for CW0 for 1 Layer Tx */
4922 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4923 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4924 /* Eff for CW0 for 2 Layer Tx */
4925 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4926 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4928 /* ITbs for CW1 for 1 Layer Tx */
4929 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4930 [ueDl->mimoInfo.cwInfo[1].cqi];
4931 /* ITbs for CW1 for 2 Layer Tx */
4932 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4933 [ueDl->mimoInfo.cwInfo[1].cqi];
4934 /* Eff for CW1 for 1 Layer Tx */
4935 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4936 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4937 /* Eff for CW1 for 2 Layer Tx */
4938 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4939 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4943 // ueDl->laCb.cqiBasediTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0] * 100;
/* Layer split: CW0 gets floor(RI/2) layers (min 1), CW1 the remainder. */
4945 /* Assigning noLyrs to each CW assuming optimal Spatial multiplexing
4947 (ueDl->mimoInfo.ri/2 == 0)? (ueDl->mimoInfo.cwInfo[0].noLyr = 1) : \
4948 (ueDl->mimoInfo.cwInfo[0].noLyr = ueDl->mimoInfo.ri/2);
4949 ueDl->mimoInfo.cwInfo[1].noLyr = ueDl->mimoInfo.ri - ueDl->mimoInfo.cwInfo[0].noLyr;
4950 /* rg002.101:ccpu00102106: correcting DL harq softbuffer limitation logic.
4951 * The maxTbSz is the maximum number of PHY bits a harq process can
4952 * hold. Hence we limit our allocation per harq process based on this.
4953 * Earlier implementation we misinterpreted the maxTbSz to be per UE
4954 * per TTI, but in fact it is per Harq per TTI. */
4955 /* rg002.101:ccpu00102106: cannot exceed the harq Tb Size
4956 * and harq Soft Bits limit.*/
4958 /* Considering iTbs corresponding to 2 layer transmission for
4959 * codeword0(approximation) and the maxLayers supported by
4960 * this UE at this point of time. */
4961 RG_SCH_CMN_TBS_TO_MODODR(ueDl->mimoInfo.cwInfo[0].iTbs[1], modOrder);
4963 /* Bits/modOrder gives #REs, #REs/noResPerRb gives #RBs */
4964 /* rg001.301 -MOD- [ccpu00119213] : avoiding wraparound */
4965 maxRb = ((ueDl->maxSbSz)/(cellSch->dl.noResPerRb[cfi] * modOrder *\
4966 ueDl->mimoInfo.ri));
/* Frequency-selective scheduling: round DOWN before the min(), then round
 * the final value UP to an RBG multiple. */
4967 if (cellSch->dl.isDlFreqSel)
4969 /* Rounding off to left nearest multiple of RBG size */
4970 maxRb -= maxRb % cell->rbgSize;
4972 ueDl->maxRb = RGSCH_MIN(maxRb, cellSch->dl.maxDlBwPerUe);
4973 if (cellSch->dl.isDlFreqSel)
4975 /* Rounding off to right nearest multiple of RBG size */
4976 if (ueDl->maxRb % cell->rbgSize)
4978 ueDl->maxRb += (cell->rbgSize -
4979 (ueDl->maxRb % cell->rbgSize));
/* Pick the more efficient codeword as the "better" CW; with RI < 2 only
 * CW0 exists so index 0 is forced. */
4983 /* Set the index of the cwInfo, which is better in terms of
4984 * efficiency. If RI<2, only 1 CW, hence btrCwIdx shall be 0 */
4985 if (ueDl->mimoInfo.ri < 2)
4987 ueDl->mimoInfo.btrCwIdx = 0;
4991 if (ueDl->mimoInfo.cwInfo[0].eff[ueDl->mimoInfo.cwInfo[0].noLyr-1] <\
4992 ueDl->mimoInfo.cwInfo[1].eff[ueDl->mimoInfo.cwInfo[1].noLyr-1])
4994 ueDl->mimoInfo.btrCwIdx = 1;
4998 ueDl->mimoInfo.btrCwIdx = 0;
/* NOTE(review): embedded original numbering is non-contiguous; braces and
 * declarations are elided. The @brief below appears copied from
 * rgSCHCmnDlSetUeAllocLmt; this function actually toggles forced
 * transmit-diversity based on iTBS divergence. */
5008 * @brief This function updates TX Scheme.
5012 * Function: rgSCHCheckAndSetTxScheme
5013 * Purpose: This function determines the Maximum RBs
5014 * a UE is eligible to get based on softbuffer
5015 * limitation and cell->>>maxDlBwPerUe. The Codeword
5016 * specific parameters like iTbs, eff and noLyrs
5017 * are also set in this function. This function
5018 * is called while UE configuration and UeDlCqiInd.
5020 * Invoked by: Scheduler
5022 * @param[in] RgSchCellCb *cell
5023 * @param[in] RgSchUeCb *ue
5028 PRIVATE Void rgSCHCheckAndSetTxScheme
5034 PRIVATE Void rgSCHCheckAndSetTxScheme(cell, ue)
5039 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
5040 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue ,cell);
5041 uint8_t cfi = cellSch->dl.currCfi;
5043 uint8_t cqiBasediTbs;
/* maxiTbs = iTBS at the highest CQI for this CFI; cqiBasediTbs is the
 * link-adaptation average (stored x100, hence /100). */
5047 maxiTbs = (*(RgSchCmnCqiToTbs*)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
5048 [RG_SCH_CMN_MAX_CQI - 1];
5049 cqiBasediTbs = (ueDl->laCb[0].cqiBasediTbs)/100;
5050 actualiTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0];
/* Force transmit diversity when the in-use iTBS is both low and lags the
 * CQI-based estimate by more than the threshold. */
5052 if((actualiTbs < RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR) && (cqiBasediTbs >
5053 actualiTbs) && ((cqiBasediTbs - actualiTbs) > RG_SCH_TXSCHEME_CHNG_THRSHD))
5055 RG_SCH_CMN_SET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
/* Once the in-use iTBS catches up to the maximum, lift the forcing. */
5058 if(actualiTbs >= maxiTbs)
5060 RG_SCH_CMN_UNSET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
/* NOTE(review): embedded original numbering is non-contiguous; braces,
 * declarations and #ifdef framing are elided in this excerpt. */
5067 * @brief This function determines the allocation limits and
5068 * parameters that aid in DL scheduling.
5072 * Function: rgSCHCmnDlSetUeAllocLmtLa
5073 * Purpose: This function determines the Maximum RBs
5074 * a UE is eligible to get based on softbuffer
5075 * limitation and cell->>>maxDlBwPerUe. The Codeword
5076 * specific parameters like iTbs, eff and noLyrs
5077 * are also set in this function. This function
5078 * is called while UE configuration and UeDlCqiInd.
5080 * Invoked by: Scheduler
5082 * @param[in] RgSchCellCb *cell
5083 * @param[in] RgSchUeCb *ue
5088 Void rgSCHCmnDlSetUeAllocLmtLa
5094 Void rgSCHCmnDlSetUeAllocLmtLa(cell, ue)
5101 uint8_t reportediTbs;
5102 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
5103 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
5104 uint8_t cfi = cellSch->dl.currCfi;
5109 maxiTbs = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[RG_SCH_CMN_MAX_CQI - 1];
/* Run link adaptation only when a fresh CQI report is pending. */
5110 if(ueDl->cqiFlag == TRUE)
5112 for(cwIdx=0; cwIdx < RG_SCH_CMN_MAX_CW_PER_UE; cwIdx++)
/* 5GTF: the UE-reported MCS is used directly as the reported iTBS. */
5116 /* Calcluating the reported iTbs for code word 0 */
5117 reportediTbs = ue->ue5gtfCb.mcs;
5119 iTbsNew = (S32) reportediTbs;
/* First report seeds the x100 fixed-point average directly. */
5121 if(!ueDl->laCb[cwIdx].notFirstCqi)
5123 /* This is the first CQI report from UE */
5124 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
5125 ueDl->laCb[cwIdx].notFirstCqi = TRUE;
/* Reports differing from the in-use iTBS by more than 5 are treated as
 * outliers; after 10 consecutive outliers the LA state is re-seeded. */
5127 else if ((RG_ITBS_DIFF(reportediTbs, ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0])) > 5)
5129 /* Ignore this iTBS report and mark that last iTBS report was */
5130 /* ignored so that subsequently we reset the LA algorithm */
5131 ueDl->laCb[cwIdx].lastiTbsIgnored = TRUE;
5132 ueDl->laCb[cwIdx].numLastiTbsIgnored++;
5133 if( ueDl->laCb[cwIdx].numLastiTbsIgnored > 10)
5135 /* CQI reported by UE is not catching up. Reset the LA algorithm */
5136 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
5137 ueDl->laCb[cwIdx].deltaiTbs = 0;
5138 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
5139 ueDl->laCb[cwIdx].numLastiTbsIgnored = 0;
/* Normal update: 20/80 exponential moving average in x100 fixed point;
 * the anchor switches to the in-use iTBS right after an ignored report. */
5144 if (ueDl->laCb[cwIdx].lastiTbsIgnored != TRUE)
5146 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
5147 (80 * ueDl->laCb[cwIdx].cqiBasediTbs))/100;
5151 /* Reset the LA as iTbs in use caught up with the value */
5152 /* reported by UE. */
5153 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
5154 (80 * ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] * 100))/100;
5155 ueDl->laCb[cwIdx].deltaiTbs = 0;
5156 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
/* Final iTBS = average + outer-loop delta, clamped to the valid range
 * and the cell's configured DL iTBS ceiling; fed back as the 5GTF MCS. */
5160 iTbsNew = (ueDl->laCb[cwIdx].cqiBasediTbs + ueDl->laCb[cwIdx].deltaiTbs)/100;
5162 RG_SCH_CHK_ITBS_RANGE(iTbsNew, maxiTbs);
5164 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] = RGSCH_MIN(iTbsNew, cell->thresholds.maxDlItbs);
5165 //ueDl->mimoInfo.cwInfo[cwIdx].iTbs[1] = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
5167 ue->ue5gtfCb.mcs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
/* NOTE(review): printf on the scheduler path -- presumably debug-only. */
5169 printf("reportediTbs[%d] cqiBasediTbs[%d] deltaiTbs[%d] iTbsNew[%d] mcs[%d] cwIdx[%d]\n",
5170 reportediTbs, ueDl->laCb[cwIdx].cqiBasediTbs, ueDl->laCb[cwIdx].deltaiTbs,
5171 iTbsNew, ue->ue5gtfCb.mcs, cwIdx);
/* Only TM3/TM4 UEs have a second codeword to process (elided control). */
5175 if((ue->mimoInfo.txMode != RGR_UE_TM_3) && (ue->mimoInfo.txMode != RGR_UE_TM_4))
5180 ueDl->cqiFlag = FALSE;
5187 /***********************************************************
5189 * Func : rgSCHCmnDlHqPResetTemp
5191 * Desc : Reset whatever variables were temporarily used
5192 * during UE scheduling.
5200 **********************************************************/
5202 Void rgSCHCmnDlHqPResetTemp
5204 RgSchDlHqProcCb *hqP
5207 Void rgSCHCmnDlHqPResetTemp(hqP)
5208 RgSchDlHqProcCb *hqP;
5213 /* Fix: syed having a hqP added to Lists for RB assignment rather than
5214 * a UE, as adding UE was limiting handling some scenarios */
/* Detach this HARQ process from the per-TTI RB-assignment request list
 * and the scheduled list by clearing its link nodes. */
5215 hqP->reqLnk.node = (PTR)NULLP;
5216 hqP->schdLstLnk.node = (PTR)NULLP;
5219 } /* rgSCHCmnDlHqPResetTemp */
5221 /***********************************************************
5223 * Func : rgSCHCmnDlUeResetTemp
5225 * Desc : Reset whatever variables were temporarily used
5226 * during UE scheduling.
5234 **********************************************************/
5236 Void rgSCHCmnDlUeResetTemp
5239 RgSchDlHqProcCb *hqP
5242 Void rgSCHCmnDlUeResetTemp(ue, hqP)
5244 RgSchDlHqProcCb *hqP;
5247 RgSchDlRbAlloc *allocInfo;
5248 RgSchCmnDlUe *cmnUe = RG_SCH_CMN_GET_DL_UE(ue,hqP->hqE->cell);
5254 /* Fix : syed check for UE's existence was useless.
5255 * Instead we need to check that reset is done only for the
5256 * information of a scheduled harq proc, which is cmnUe->proc.
5257 * Reset should not be done for non-scheduled hqP */
5258 if((cmnUe->proc == hqP) || (cmnUe->proc == NULLP))
5260 cmnUe->proc = NULLP;
5261 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, hqP->hqE->cell);
/* Preserve the LAA control block across the memset below; the
 * allocation info is wiped wholesale, then rnti/laaCb restored. */
5263 tmpCb = allocInfo->laaCb;
5265 memset(allocInfo, 0, sizeof(RgSchDlRbAlloc));
5266 allocInfo->rnti = ue->ueId;
5268 allocInfo->laaCb = tmpCb;
5270 /* Fix: syed moving this to a common function for both scheduled
5271 * and non-scheduled UEs */
5272 cmnUe->outStndAlloc = 0;
/* Also clear the HARQ proc's temporary list links. */
5274 rgSCHCmnDlHqPResetTemp(hqP);
5277 } /* rgSCHCmnDlUeResetTemp */
5279 /***********************************************************
5281 * Func : rgSCHCmnUlUeResetTemp
5283 * Desc : Reset whatever variables were temporarily used
5284 * during UE scheduling.
5292 **********************************************************/
5294 Void rgSCHCmnUlUeResetTemp
5300 Void rgSCHCmnUlUeResetTemp(cell, ue)
5305 RgSchCmnUlUe *cmnUlUe = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Wipe the UE's temporary UL allocation state used during scheduling. */
5308 memset(&cmnUlUe->alloc, 0, sizeof(cmnUlUe->alloc));
5311 } /* rgSCHCmnUlUeResetTemp */
5316 * @brief This function fills the PDCCH information from dlProc.
5320 * Function: rgSCHCmnFillPdcch
5321 * Purpose: This function fills in the PDCCH information
5322 * obtained from the RgSchDlRbAlloc
5323 * during common channel scheduling(P, SI, RA - RNTI's).
5325 * Invoked by: Downlink Scheduler
5327 * @param[out] RgSchPdcch* pdcch
5328 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5333 Void rgSCHCmnFillPdcch
5337 RgSchDlRbAlloc *rbAllocInfo
5340 Void rgSCHCmnFillPdcch(cell, pdcch, rbAllocInfo)
5343 RgSchDlRbAlloc *rbAllocInfo;
5348 /* common channel pdcch filling,
5349 * only 1A and Local is supported */
5350 pdcch->rnti = rbAllocInfo->rnti;
5351 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
/* Dispatch on the DCI format chosen by the allocator; common channels
 * (P/SI/RA-RNTI) carry no UE HARQ context, so HARQ id/NDI are fixed. */
5352 switch(rbAllocInfo->dciFormat)
5354 #ifdef RG_5GTF /* ANOOP: ToDo: DCI format B1/B2 filling */
5355 case TFU_DCI_FORMAT_B1:
/* 5GTF format B1: grant fields come from tbInfo[0].cmnGrnt; HARQ id,
 * NDI and most beam/CSI-RS fields are hardcoded to 0 for common channels. */
5358 pdcch->dci.u.formatB1Info.formatType = 0;
5359 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].cmnGrnt.xPDSCHRange;
5360 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].cmnGrnt.rbAssign;
5361 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = 0;
5362 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5363 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = 0;
5364 //pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].ndi;
5365 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].cmnGrnt.rv;
5366 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5367 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5368 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5369 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5370 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5371 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5372 //TODO_SID: Need to update
5373 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5374 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5375 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5376 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5377 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5378 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5379 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].cmnGrnt.SCID;
5380 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5381 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5382 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5384 break; /* case TFU_DCI_FORMAT_B1: */
5387 case TFU_DCI_FORMAT_B2:
/* NOTE(review): B2 filling is not implemented here — falls through to
 * break with no fields populated. Confirm whether B2 is ever selected
 * for common channels. */
5389 //printf(" RG_5GTF:: Pdcch filling with DCI format B2\n");
5391 break; /* case TFU_DCI_FORMAT_B2: */
5394 case TFU_DCI_FORMAT_1A:
/* Format 1A: compact grant, localized VRBs, resource block assignment
 * encoded as RIV from the RA type-2 allocation. */
5395 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
5397 /*Nprb indication at PHY for common Ch
5398 *setting least significant bit of tpc field to 1 if
5399 nPrb=3 and 0 otherwise. */
5400 if (rbAllocInfo->nPrb == 3)
5402 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 1;
5406 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 0;
5408 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
5409 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
5410 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
5411 rbAllocInfo->tbInfo[0].imcs;
/* Common channel: new transmission semantics — NDI and RV fixed to 0. */
5412 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = 0;
5413 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = 0;
5415 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
5417 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5418 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5419 rbAllocInfo->allocInfo.raType2.rbStart,
5420 rbAllocInfo->allocInfo.raType2.numRb);
5423 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = \
5426 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5427 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5430 break; /* case TFU_DCI_FORMAT_1A: */
5431 case TFU_DCI_FORMAT_1:
/* Format 1: RA type-0 bitmap allocation; the 32-bit dlAllocBitMask is
 * split big-endian into the four resAllocMap bytes. */
5432 pdcch->dci.u.format1Info.tpcCmd = 0;
5433 /* Avoiding this check,as we dont support Type1 RA */
5435 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
5438 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5439 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5440 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5442 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5443 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5445 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5446 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5448 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5449 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
5453 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5454 pdcch->dci.u.format1Info.allocInfo.ndi = 0;
5455 pdcch->dci.u.format1Info.allocInfo.mcs = rbAllocInfo->tbInfo[0].imcs;
5456 pdcch->dci.u.format1Info.allocInfo.rv = 0;
5458 pdcch->dci.u.format1Info.dai = 1;
/* Default: allocator handed us a DCI format this filler cannot build. */
5462 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Allocator's icorrect "
5463 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
5471 * @brief This function finds whether the subframe is special subframe or not.
5475 * Function: rgSCHCmnIsSplSubfrm
5476 * Purpose: This function finds the subframe index of the special subframe
5477 * and finds whether the current DL index matches it or not.
5479 * Invoked by: Scheduler
5481 * @param[in] uint8_t splfrmCnt
5482 * @param[in] uint8_t curSubfrmIdx
5483 * @param[in] uint8_t periodicity
5484 * @param[in] RgSchTddSubfrmInfo *subfrmInfo
5489 PRIVATE Bool rgSCHCmnIsSplSubfrm
5492 uint8_t curSubfrmIdx,
5493 uint8_t periodicity,
5494 RgSchTddSubfrmInfo *subfrmInfo
5497 PRIVATE Bool rgSCHCmnIsSplSubfrm(splfrmCnt, curSubfrmIdx, periodicity, subfrmInfo)
5499 uint8_t curSubfrmIdx;
5500 uint8_t periodicity;
5501 RgSchTddSubfrmInfo *subfrmInfo;
/* dlSfCnt: number of DL subframes elapsed before the candidate special
 * subframe; splfrmIdx: computed index of the special subframe. */
5504 uint8_t dlSfCnt = 0;
5505 uint8_t splfrmIdx = 0;
/* 5 ms switch-point periodicity: two special subframes per radio frame,
 * so odd/even counts straddle the two half-frames. */
5510 if(periodicity == RG_SCH_CMN_5_MS_PRD)
5514 dlSfCnt = ((splfrmCnt-1)/2) *\
5515 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5516 dlSfCnt = dlSfCnt + subfrmInfo->numFrmHf1;
5520 dlSfCnt = (splfrmCnt/2) * \
5521 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
/* 10 ms periodicity: one special subframe per frame in half-frame 1. */
5526 dlSfCnt = splfrmCnt * subfrmInfo->numFrmHf1;
5528 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1 +\
5529 (periodicity*splfrmCnt - dlSfCnt);
5533 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1;
/* TRUE iff the current DL index is the computed special subframe. */
5536 if(splfrmIdx == curSubfrmIdx)
5545 * @brief This function updates DAI or UL index.
5549 * Function: rgSCHCmnUpdHqAndDai
5550 * Purpose: Updates the DAI based on UL-DL Configuration
5551 * index and UE. It also updates the HARQ feedback
5552 * time and 'm' index.
5556 * @param[in] RgDlHqProcCb *hqP
5557 * @param[in] RgSchDlSf *subFrm
5558 * @param[in] RgSchDlHqTbCb *tbCb
5559 * @param[in] uint8_t tbAllocIdx
5564 PRIVATE Void rgSCHCmnUpdHqAndDai
5566 RgSchDlHqProcCb *hqP,
5568 RgSchDlHqTbCb *tbCb,
5572 PRIVATE Void rgSCHCmnUpdHqAndDai(hqP, subFrm, tbCb,tbAllocIdx)
5573 RgSchDlHqProcCb *hqP;
5575 RgSchDlHqTbCb *tbCb;
5579 RgSchUeCb *ue = hqP->hqE->ue;
5584 /* set the time at which UE shall send the feedback
5585 * for this process */
/* Feedback timing taken from the passed subframe's dlFdbkInfo. */
5586 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5587 subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5588 tbCb->fdbkTime.subframe = subFrm->dlFdbkInfo.subframe;
5589 tbCb->m = subFrm->dlFdbkInfo.m;
5593 /* set the time at which UE shall send the feedback
5594 * for this process */
/* Alternate path: timing taken from the HARQ proc's own subframe. */
5595 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5596 hqP->subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5597 tbCb->fdbkTime.subframe = hqP->subFrm->dlFdbkInfo.subframe;
5598 tbCb->m = hqP->subFrm->dlFdbkInfo.m;
5601 /* ccpu00132340-MOD- DAI need to be updated for first TB only*/
5602 if(ue && !tbAllocIdx)
5604 Bool havePdcch = (tbCb->hqP->pdcch ? TRUE : FALSE);
5607 dlDai = rgSCHCmnUpdDai(ue, &tbCb->fdbkTime, tbCb->m, havePdcch,tbCb->hqP,
5610 {/* Non SPS occasions */
/* Propagate the freshly computed DL DAI into the PDCCH and HARQ proc. */
5611 tbCb->hqP->pdcch->dlDai = dlDai;
5612 /* hqP->ulDai is used for N1 resource filling
5613 * when SPS occasions present in a bundle */
5614 tbCb->hqP->ulDai = tbCb->dai;
5615 tbCb->hqP->dlDai = dlDai;
5619 /* Updating pucchFdbkIdx for both PUCCH or PUSCH
5621 tbCb->pucchFdbkIdx = tbCb->hqP->ulDai;
5628 * @brief This function updates DAI or UL index.
5632 * Function: rgSCHCmnUpdDai
5633 * Purpose: Updates the DAI in the ack-nack info, a valid
5634 * ue should be passed
5638 * @param[in] RgDlHqProcCb *hqP
5639 * @param[in] RgSchDlSf *subFrm
5640 * @param[in] RgSchDlHqTbCb *tbCb
5641 * @return uint8_t dlDai
5645 uint8_t rgSCHCmnUpdDai
5648 CmLteTimingInfo *fdbkTime,
5651 RgSchDlHqProcCb *hqP,
5655 uint8_t rgSCHCmnUpdDai(ue, fdbkTime, m, havePdcch,tbCb,servCellId,hqP,ulDai)
5657 CmLteTimingInfo *fdbkTime;
5660 RgSchDlHqProcCb *hqP;
5664 RgSchTddANInfo *anInfo;
5665 uint8_t servCellIdx;
5666 uint8_t ackNackFdbkArrSize;
/* Resolve the serving-cell index for the HARQ proc's cell; falls back
 * to the PCell index (and the UE's cell fdbk array) for SPS occasions. */
5673 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5674 hqP->hqE->cell->cellId,
5677 servCellIdx = RGSCH_PCELL_INDEX;
5679 ackNackFdbkArrSize = hqP->hqE->cell->ackNackFdbkArrSize;
5681 {/* SPS on primary cell */
5682 servCellIdx = RGSCH_PCELL_INDEX;
5683 ackNackFdbkArrSize = ue->cell->ackNackFdbkArrSize;
5687 anInfo = rgSCHUtlGetUeANFdbkInfo(ue, fdbkTime,servCellIdx);
5689 /* If no ACK/NACK feedback already present, create a new one */
5692 anInfo = &ue->cellInfo[servCellIdx]->anInfo[ue->cellInfo[servCellIdx]->nextFreeANIdx];
5693 anInfo->sfn = fdbkTime->sfn;
5694 anInfo->subframe = fdbkTime->subframe;
5695 anInfo->latestMIdx = m;
5696 /* Fixing DAI value - ccpu00109162 */
5697 /* Handle TDD case as in MIMO definition of the function */
5703 anInfo->isSpsOccasion = FALSE;
5704 /* set the free Index to store Ack/Nack Information*/
5705 ue->cellInfo[servCellIdx]->nextFreeANIdx = (ue->cellInfo[servCellIdx]->nextFreeANIdx + 1) %
/* Existing entry: bump the UL (and, with PDCCH, DL) DAI counters. */
5711 anInfo->latestMIdx = m;
5712 /* Fixing DAI value - ccpu00109162 */
5713 /* Handle TDD case as in MIMO definition of the function */
5714 anInfo->ulDai = anInfo->ulDai + 1;
5717 anInfo->dlDai = anInfo->dlDai + 1;
5721 /* ignoring the Scell check,
5722 * for primary cell this field is unused*/
5725 anInfo->n1ResTpcIdx = hqP->tpc;
5729 {/* As this not required for release pdcch */
5730 *ulDai = anInfo->ulDai;
/* Return the accumulated DL DAI for this feedback occasion. */
5733 return (anInfo->dlDai);
5736 #endif /* ifdef LTE_TDD */
5738 uint32_t rgHqRvRetxCnt[4][2];
5739 uint32_t rgUlrate_grant;
5742 * @brief This function fills the HqP TB with rbAllocInfo.
5746 * Function: rgSCHCmnFillHqPTb
5747 * Purpose: This function fills in the HqP TB with rbAllocInfo.
5749 * Invoked by: the DCI-format-specific PDCCH fillers (rgSCHCmnFillHqPPdcchDciFrmt1/1A/1B/B1B2)
5751 * @param[in] RgSchCellCb* cell
5752 * @param[in] RgSchDlRbAlloc *rbAllocInfo,
5753 * @param[in] uint8_t tbAllocIdx
5754 * @param[in] RgSchPdcch *pdcch
5760 Void rgSCHCmnFillHqPTb
5763 RgSchDlRbAlloc *rbAllocInfo,
5768 Void rgSCHCmnFillHqPTb(cell, rbAllocInfo, tbAllocIdx, pdcch)
5770 RgSchDlRbAlloc *rbAllocInfo;
5776 PRIVATE Void rgSCHCmnFillHqPTb
5779 RgSchDlRbAlloc *rbAllocInfo,
5784 PRIVATE Void rgSCHCmnFillHqPTb(cell, rbAllocInfo, tbAllocIdx, pdcch)
5786 RgSchDlRbAlloc *rbAllocInfo;
5790 #endif /* LTEMAC_SPS */
5792 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
5793 RgSchDlTbAllocInfo *tbAllocInfo = &rbAllocInfo->tbInfo[tbAllocIdx];
5794 RgSchDlHqTbCb *tbInfo = tbAllocInfo->tbCb;
5795 RgSchDlHqProcCb *hqP = tbInfo->hqP;
5798 /*ccpu00120365-ADD-if tb is disabled, set mcs=0,rv=1.
5799 * Relevant for DCI format 2 & 2A as per 36.213-7.1.7.2
5801 if ( tbAllocInfo->isDisabled)
5804 tbInfo->dlGrnt.iMcs = 0;
5805 tbInfo->dlGrnt.rv = 1;
5807 /* Fill for TB retransmission */
5808 else if (tbInfo->txCntr > 0)
5811 tbInfo->timingInfo = cmnCellDl->time;
/* DTX feedback: retransmit with same MCS; count per-RV retx stats.
 * Otherwise advance the RV cyclically through rgSchCmnDlRvTbl. */
5813 if ((tbInfo->isAckNackDtx == TFU_HQFDB_DTX))
5815 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5816 rgHqRvRetxCnt[tbInfo->dlGrnt.rv][tbInfo->tbIdx]++;
5820 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[++(tbInfo->ccchSchdInfo.rvIdx) & 0x03];
5823 /* fill the scheduler information of hqProc */
5824 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5825 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx,hqP->tbInfo,tbInfo->tbIdx );
5826 rgSCHDhmHqTbRetx(hqP->hqE, tbInfo->timingInfo, hqP, tbInfo->tbIdx);
5828 /* Fill for TB transmission */
5831 /* Fill the HqProc */
/* New transmission: initial RV, fresh size/timing, update cell byte count. */
5832 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5833 tbInfo->tbSz = tbAllocInfo->bytesAlloc;
5834 tbInfo->timingInfo = cmnCellDl->time;
5836 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[0];
5837 /* fill the scheduler information of hqProc */
5838 tbInfo->ccchSchdInfo.rvIdx = 0;
5839 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5840 /* DwPts Scheduling Changes Start */
5841 /* DwPts Scheduling Changes End */
5842 cell->measurements.dlBytesCnt += tbAllocInfo->bytesAlloc;
5845 /*ccpu00120365:-ADD-only add to subFrm list if tb is not disabled */
5846 if ( tbAllocInfo->isDisabled == FALSE )
5848 /* Set the number of transmitting SM layers for this TB */
5849 tbInfo->numLyrs = tbAllocInfo->noLyr;
5850 /* Set the TB state as WAITING to indicate TB has been
5851 * considered for transmission */
5852 tbInfo->state = HQ_TB_WAITING;
5853 hqP->subFrm = rbAllocInfo->dlSf;
5854 tbInfo->hqP->pdcch = pdcch;
5855 //tbInfo->dlGrnt.numRb = rbAllocInfo->rbsAlloc;
5856 rgSCHUtlDlHqPTbAddToTx(hqP->subFrm, hqP, tbInfo->tbIdx);
5862 * @brief This function fills the PDCCH DCI format B1/B2 information from dlProc.
5866 * Function: rgSCHCmnFillHqPPdcchDciFrmtB1B2
5867 * Purpose: This function fills in the PDCCH information
5868 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5869 * for dedicated service scheduling. It also
5870 * obtains TPC to be filled in from the power module.
5871 * Assign the PDCCH to HQProc.
5873 * Invoked by: Downlink Scheduler
5875 * @param[in] RgSchCellCb* cell
5876 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5877 * @param[in] RgDlHqProc* hqP
5878 * @param[out] RgSchPdcch *pdcch
5879 * @param[in] uint8_t tpc
5884 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmtB1B2
5887 RgSchDlRbAlloc *rbAllocInfo,
5888 RgSchDlHqProcCb *hqP,
5893 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, pdcch, tpc)
5895 RgSchDlRbAlloc *rbAllocInfo;
5896 RgSchDlHqProcCb *hqP;
/* First commit TB-level grant state into the HARQ proc, then build the
 * 5GTF B1/B2 DCI payload from that TB's dlGrnt. */
5903 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5904 //Currently hardcoding values here.
5905 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
5906 switch(rbAllocInfo->dciFormat)
5908 case TFU_DCI_FORMAT_B1:
/* B1 (single-codeword): HARQ id/MCS/NDI/RV from the scheduled TB;
 * beam/CSI-RS/SRS fields are hardcoded placeholders (see TODO_SIDs). */
5910 pdcch->dci.u.formatB1Info.formatType = 0;
5911 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5912 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5913 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5914 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5915 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5916 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5917 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5918 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5919 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5920 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5921 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5922 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5923 //TODO_SID: Need to update
5924 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5925 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5926 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5927 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5928 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5929 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5930 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5931 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5932 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5933 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5936 case TFU_DCI_FORMAT_B2:
/* B2: same field layout as B1 but formatType=1 and 2-layer antenna
 * port configuration (AntPorts_numLayers=4). */
5938 pdcch->dci.u.formatB2Info.formatType = 1;
5939 pdcch->dci.u.formatB2Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5940 pdcch->dci.u.formatB2Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5941 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5942 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5943 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5944 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5945 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5946 pdcch->dci.u.formatB2Info.CSI_BSI_BRI_Req = 0;
5947 pdcch->dci.u.formatB2Info.CSIRS_BRRS_TxTiming = 0;
5948 pdcch->dci.u.formatB2Info.CSIRS_BRRS_SymbIdx = 0;
5949 pdcch->dci.u.formatB2Info.CSIRS_BRRS_ProcInd = 0;
5950 pdcch->dci.u.formatB2Info.xPUCCH_TxTiming = 0;
5951 //TODO_SID: Need to update
5952 pdcch->dci.u.formatB2Info.freqResIdx_xPUCCH = 0;
5953 pdcch->dci.u.formatB2Info.beamSwitch = 0;
5954 pdcch->dci.u.formatB2Info.SRS_Config = 0;
5955 pdcch->dci.u.formatB2Info.SRS_Symbol = 0;
5956 //TODO_SID: Need to check.Currently setting 4(2 layer, ports(8,9) w/o OCC).
5957 pdcch->dci.u.formatB2Info.AntPorts_numLayers = 4;
5958 pdcch->dci.u.formatB2Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5959 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5960 pdcch->dci.u.formatB2Info.tpcCmd = 1; //tpc;
5961 pdcch->dci.u.formatB2Info.DL_PCRS = 0;
/* Default: unexpected DCI format for this filler. */
5965 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId," 5GTF_ERROR Allocator's icorrect "
5966 "dciForamt Fill RNTI:%d",rbAllocInfo->rnti);
5973 extern uint32_t totPcellSCell;
5974 extern uint32_t addedForScell;
5975 extern uint32_t addedForScell1;
5976 extern uint32_t addedForScell2;
5978 * @brief This function fills the PDCCH information from dlProc.
5982 * Function: rgSCHCmnFillHqPPdcch
5983 * Purpose: This function fills in the PDCCH information
5984 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5985 * for dedicated service scheduling. It also
5986 * obtains TPC to be filled in from the power module.
5987 * Assign the PDCCH to HQProc.
5989 * Invoked by: Downlink Scheduler
5991 * @param[in] RgSchCellCb* cell
5992 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5993 * @param[in] RgDlHqProc* hqP
5998 Void rgSCHCmnFillHqPPdcch
6001 RgSchDlRbAlloc *rbAllocInfo,
6002 RgSchDlHqProcCb *hqP
6005 Void rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP)
6007 RgSchDlRbAlloc *rbAllocInfo;
6008 RgSchDlHqProcCb *hqP;
6011 RgSchCmnDlCell *cmnCell = RG_SCH_CMN_GET_DL_CELL(cell);
6012 RgSchPdcch *pdcch = rbAllocInfo->pdcch;
/* TPC is fetched from the power module; SCell handling differs
 * (branch body partially compiled out by ifdefs). */
6019 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6026 tpc = rgSCHPwrPucchTpcForUe(cell, hqP->hqE->ue);
6028 /* Fix: syed moving this to a common function for both scheduled
6029 * and non-scheduled UEs */
6031 pdcch->ue = hqP->hqE->ue;
/* PRB accounting: CSG-member PRBs are excluded from the non-CSG count. */
6032 if (hqP->hqE->ue->csgMmbrSta == FALSE)
6034 cmnCell->ncsgPrbCnt += rbAllocInfo->rbsAlloc;
6036 cmnCell->totPrbCnt += rbAllocInfo->rbsAlloc;
/* Per-UE TeNB statistics for codeword 0: PRB usage, iTbs sums/counts
 * and throughput in bits (bytesAlloc << 3). */
6039 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlPrbUsg +=
6040 rbAllocInfo->rbsAlloc;
6041 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw0iTbs +=
6042 rbAllocInfo->tbInfo[0].iTbs;
6043 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw0iTbs ++;
6044 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
6045 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6048 totPcellSCell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6049 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6051 addedForScell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6052 addedForScell1 += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6054 printf (" Hqp %d cell %d addedForScell %lu addedForScell1 %lu sfn:sf %d:%d \n",
6056 hqP->hqE->cell->cellId,
6060 cell->crntTime.slot);
/* Per-cell TeNB statistics, codeword 0. */
6064 hqP->hqE->cell->tenbStats->sch.dlPrbUsage[0] +=
6065 rbAllocInfo->rbsAlloc;
6066 hqP->hqE->cell->tenbStats->sch.dlSumCw0iTbs +=
6067 rbAllocInfo->tbInfo[0].iTbs;
6068 hqP->hqE->cell->tenbStats->sch.dlNumCw0iTbs ++;
6069 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
6070 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
/* Second codeword, only when TB1 was actually scheduled. */
6071 if (rbAllocInfo->tbInfo[1].schdlngForTb)
6073 hqP->hqE->cell->tenbStats->sch.dlSumCw1iTbs +=
6074 rbAllocInfo->tbInfo[1].iTbs;
6075 hqP->hqE->cell->tenbStats->sch.dlNumCw1iTbs ++;
6076 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw1iTbs +=
6077 rbAllocInfo->tbInfo[1].iTbs;
6078 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw1iTbs ++;
6079 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
6080 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6084 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6086 addedForScell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6087 addedForScell2 += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6089 printf (" Hqp %d cell %d addedForScell %lu addedForScell2 %lu \n",
6091 hqP->hqE->cell->cellId,
6096 totPcellSCell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6100 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
6101 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6104 printf ("add DL TPT is %lu sfn:sf %d:%d \n", hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt ,
6106 cell->crntTime.slot);
/* Finally stamp the PDCCH and dispatch to the format-specific filler. */
6112 pdcch->rnti = rbAllocInfo->rnti;
6113 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
6114 /* Update subframe and pdcch info in HqTb control block */
6115 switch(rbAllocInfo->dciFormat)
6118 case TFU_DCI_FORMAT_B1:
6119 case TFU_DCI_FORMAT_B2:
6121 // printf(" RG_5GTF:: Pdcch filling with DCI format B1/B2\n");
6122 rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, \
6128 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6129 "Allocator's incorrect dciForamt Fill for RNTI:%d",rbAllocInfo->rnti);
6136 * @brief This function fills the PDCCH DCI format 1 information from dlProc.
6140 * Function: rgSCHCmnFillHqPPdcchDciFrmt1
6141 * Purpose: This function fills in the PDCCH information
6142 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6143 * for dedicated service scheduling. It also
6144 * obtains TPC to be filled in from the power module.
6145 * Assign the PDCCH to HQProc.
6147 * Invoked by: Downlink Scheduler
6149 * @param[in] RgSchCellCb* cell
6150 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6151 * @param[in] RgDlHqProc* hqP
6152 * @param[out] RgSchPdcch *pdcch
6153 * @param[in] uint8_t tpc
6159 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1
6162 RgSchDlRbAlloc *rbAllocInfo,
6163 RgSchDlHqProcCb *hqP,
6168 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1(cell, rbAllocInfo, hqP, pdcch, tpc)
6170 RgSchDlRbAlloc *rbAllocInfo;
6171 RgSchDlHqProcCb *hqP;
6178 RgSchTddANInfo *anInfo;
6182 /* For activation or reactivation,
6183 * Harq ProcId should be 0 */
6184 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Commit TB state first, then populate the format-1 DCI fields. */
6188 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6189 pdcch->dci.u.format1Info.tpcCmd = tpc;
6190 /* Avoiding this check,as we dont support Type1 RA */
6192 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* RA type-0 bitmap, split big-endian into four bytes. */
6195 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
6196 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
6197 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6199 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
6200 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6202 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
6203 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6205 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
6206 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a fresh TB forces HARQ proc id 0;
 * otherwise the proc's own id is signalled. */
6211 if ((!(hqP->tbInfo[0].txCntr)) &&
6212 (cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6213 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6214 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV)))
6217 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
6221 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
6224 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
6227 pdcch->dci.u.format1Info.allocInfo.ndi =
6228 rbAllocInfo->tbInfo[0].tbCb->ndi;
6229 pdcch->dci.u.format1Info.allocInfo.mcs =
6230 rbAllocInfo->tbInfo[0].imcs;
6231 pdcch->dci.u.format1Info.allocInfo.rv =
6232 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
/* TDD: DAI comes from the UE's ACK/NACK feedback bookkeeping. */
6234 if(hqP->hqE->ue != NULLP)
6237 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6238 hqP->hqE->cell->cellId,
6241 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6242 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6244 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6245 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6250 pdcch->dci.u.format1Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6254 /* Fixing DAI value - ccpu00109162 */
6255 pdcch->dci.u.format1Info.dai = RG_SCH_MAX_DAI_IDX;
6261 /* always 0 for RACH */
6262 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
6264 /* Fixing DAI value - ccpu00109162 */
6265 pdcch->dci.u.format1Info.dai = 1;
6274 * @brief This function fills the PDCCH DCI format 1A information from dlProc.
6278 * Function: rgSCHCmnFillHqPPdcchDciFrmt1A
6279 * Purpose: This function fills in the PDCCH information
6280 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6281 * for dedicated service scheduling. It also
6282 * obtains TPC to be filled in from the power module.
6283 * Assign the PDCCH to HQProc.
6285 * Invoked by: Downlink Scheduler
6287 * @param[in] RgSchCellCb* cell
6288 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6289 * @param[in] RgDlHqProc* hqP
6290 * @param[out] RgSchPdcch *pdcch
6291 * @param[in] uint8_t tpc
6296 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1A
6299 RgSchDlRbAlloc *rbAllocInfo,
6300 RgSchDlHqProcCb *hqP,
6305 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1A(cell, rbAllocInfo, hqP, pdcch, tpc)
6307 RgSchDlRbAlloc *rbAllocInfo;
6308 RgSchDlHqProcCb *hqP;
6315 RgSchTddANInfo *anInfo;
6319 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Commit TB state, then fill the compact format-1A DCI. */
6323 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6324 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
6325 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = tpc;
6326 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
6327 rbAllocInfo->tbInfo[0].imcs;
6328 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = TRUE;
/* SPS activation/reactivation on a fresh TB signals HARQ proc id 0. */
6330 if ((!(hqP->tbInfo[0].txCntr)) &&
6331 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6332 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6333 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6336 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val = 0;
6340 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val
6344 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val =
6347 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = \
6348 rbAllocInfo->tbInfo[0].tbCb->ndi;
6349 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = \
6350 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6351 /* As of now, we do not support Distributed allocations */
6352 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
6353 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
/* RB assignment encoded as RIV from the RA type-2 allocation. */
6354 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
6356 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
6357 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
6358 rbAllocInfo->allocInfo.raType2.rbStart,
6359 rbAllocInfo->allocInfo.raType2.numRb);
/* TDD: DAI from the UE's ACK/NACK feedback bookkeeping. */
6361 if(hqP->hqE->ue != NULLP)
6364 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6365 hqP->hqE->cell->cellId,
6367 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6368 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6370 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6371 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6374 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
6377 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val =
6378 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6382 /* Fixing DAI value - ccpu00109162 */
6383 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = RG_SCH_MAX_DAI_IDX;
6384 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6385 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6392 /* always 0 for RACH */
6393 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres
6396 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
6397 /* Fixing DAI value - ccpu00109162 */
6398 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
6406 * @brief This function fills the PDCCH DCI format 1B information from dlProc.
6410 * Function: rgSCHCmnFillHqPPdcchDciFrmt1B
6411 * Purpose: This function fills in the PDCCH information
6412 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6413 * for dedicated service scheduling. It also
6414 * obtains TPC to be filled in from the power module.
6415 * Assign the PDCCH to HQProc.
6417 * Invoked by: Downlink Scheduler
6419 * @param[in] RgSchCellCb* cell
6420 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6421 * @param[in] RgDlHqProc* hqP
6422 * @param[out] RgSchPdcch *pdcch
6423 * @param[in] uint8_t tpc
6428 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1B
6431 RgSchDlRbAlloc *rbAllocInfo,
6432 RgSchDlHqProcCb *hqP,
6437 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1B(cell, rbAllocInfo, hqP, pdcch, tpc)
6439 RgSchDlRbAlloc *rbAllocInfo;
6440 RgSchDlHqProcCb *hqP;
6447 RgSchTddANInfo *anInfo;
6451 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Commit TB state, then fill format-1B (closed-loop single-layer
 * precoding) DCI: like 1A plus PMI confirmation/TPMI fields. */
6455 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6456 pdcch->dci.u.format1bInfo.tpcCmd = tpc;
6457 pdcch->dci.u.format1bInfo.allocInfo.mcs = \
6458 rbAllocInfo->tbInfo[0].imcs;
/* SPS activation/reactivation on a fresh TB signals HARQ proc id 0. */
6460 if ((!(hqP->tbInfo[0].txCntr)) &&
6461 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6462 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6463 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6466 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = 0;
6470 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
6473 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
6475 pdcch->dci.u.format1bInfo.allocInfo.ndi = \
6476 rbAllocInfo->tbInfo[0].tbCb->ndi;
6477 pdcch->dci.u.format1bInfo.allocInfo.rv = \
6478 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6479 /* As of now, we do not support Distributed allocations */
6480 pdcch->dci.u.format1bInfo.allocInfo.isLocal = TRUE;
6481 pdcch->dci.u.format1bInfo.allocInfo.nGap2.pres = NOTPRSNT;
6482 pdcch->dci.u.format1bInfo.allocInfo.alloc.type =
6484 pdcch->dci.u.format1bInfo.allocInfo.alloc.u.riv =
6485 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
6486 rbAllocInfo->allocInfo.raType2.rbStart,
6487 rbAllocInfo->allocInfo.raType2.numRb);
6488 /* Fill precoding Info */
/* precIdxInfo packs PMI confirmation (high nibble) and TPMI (low nibble). */
6489 pdcch->dci.u.format1bInfo.allocInfo.pmiCfm = \
6490 rbAllocInfo->mimoAllocInfo.precIdxInfo >> 4;
6491 pdcch->dci.u.format1bInfo.allocInfo.tPmi = \
6492 rbAllocInfo->mimoAllocInfo.precIdxInfo & 0x0F;
/* TDD: DAI from the UE's ACK/NACK feedback bookkeeping. */
6494 if(hqP->hqE->ue != NULLP)
6497 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6498 hqP->hqE->cell->cellId,
6500 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6501 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6503 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6504 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6509 pdcch->dci.u.format1bInfo.dai =
6510 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6514 pdcch->dci.u.format1bInfo.dai = RG_SCH_MAX_DAI_IDX;
6515 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6516 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6527 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
6531 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
6532 * Purpose: This function fills in the PDCCH information
6533 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6534 * for dedicated service scheduling. It also
6535 * obtains TPC to be filled in from the power module.
6536 * Assign the PDCCH to HQProc.
6538 * Invoked by: Downlink Scheduler
6540 * @param[in] RgSchCellCb* cell
6541 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6542 * @param[in] RgDlHqProc* hqP
6543 * @param[out] RgSchPdcch *pdcch
6544 * @param[in] uint8_t tpc
/* Fills DCI format 2 fields (TPC, RA type-0 bitmap, HARQ proc id, per-TB
 * NDI/MCS/RV, transport-block swap flag, precoding and TDD DAI) into the
 * PDCCH from the HARQ proc and RB allocation.
 * NOTE(review): ANSI prototype and K&R definition are interleaved in this
 * extract; the full file presumably guards them with #ifdef ANSI. */
6549 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2
6552 RgSchDlRbAlloc *rbAllocInfo,
6553 RgSchDlHqProcCb *hqP,
6558 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2(cell, rbAllocInfo, hqP, pdcch, tpc)
6560 RgSchDlRbAlloc *rbAllocInfo;
6561 RgSchDlHqProcCb *hqP;
6568 RgSchTddANInfo *anInfo;
6572 /* ccpu00119023-ADD-For activation or reactivation,
6573 * Harq ProcId should be 0 */
6574 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Link TB-0 to this PDCCH; TB-1 as well when it is either scheduled or
 * explicitly disabled. */
6578 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6579 /*ccpu00120365:-ADD-call also if tb is disabled */
6580 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6581 rbAllocInfo->tbInfo[1].isDisabled)
6583 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6585 pdcch->dci.u.format2Info.tpcCmd = tpc;
6586 /* Avoiding this check,as we dont support Type1 RA */
6588 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Serialize the 32-bit RA type-0 RBG bitmap into 4 bytes, MSB first. */
6591 pdcch->dci.u.format2Info.allocInfo.isAllocType0 = TRUE;
6592 pdcch->dci.u.format2Info.allocInfo.resAllocMap[0] =
6593 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6595 pdcch->dci.u.format2Info.allocInfo.resAllocMap[1] =
6596 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6598 pdcch->dci.u.format2Info.allocInfo.resAllocMap[2] =
6599 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6601 pdcch->dci.u.format2Info.allocInfo.resAllocMap[3] =
6602 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a fresh transmission uses HARQ id 0. */
6607 if ((!(hqP->tbInfo[0].txCntr)) &&
6608 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6609 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6610 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6613 pdcch->dci.u.format2Info.allocInfo.harqProcId = 0;
6617 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6620 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6622 /* Initialize the TB info for both the TBs */
/* Defaults (mcs 0, rv 1) mark a TB as not scheduled; scheduled TBs are
 * overwritten below, indexed by the TB's own tbIdx. */
6623 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].mcs = 0;
6624 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].rv = 1;
6625 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].mcs = 0;
6626 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].rv = 1;
6627 /* Fill tbInfo for scheduled TBs */
6628 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6629 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6630 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6631 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6632 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6633 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6634 /* If we reach this function. It is safely assumed that
6635 * rbAllocInfo->tbInfo[0] always has non default valid values.
6636 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6637 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6639 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6640 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6641 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6642 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6643 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6644 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6646 pdcch->dci.u.format2Info.allocInfo.transSwap =
6647 rbAllocInfo->mimoAllocInfo.swpFlg;
6648 pdcch->dci.u.format2Info.allocInfo.precoding =
6649 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD DAI lookup from the UE's A/N feedback info (CA variant presumably
 * under an #ifdef in the full file). */
6651 if(hqP->hqE->ue != NULLP)
6655 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6656 hqP->hqE->cell->cellId,
6658 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6659 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6661 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6662 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6667 pdcch->dci.u.format2Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
/* Missing A/N info: fall back to max DAI and log. */
6671 pdcch->dci.u.format2Info.dai = RG_SCH_MAX_DAI_IDX;
6672 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6673 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6683 * @brief This function fills the PDCCH DCI format 2A information from dlProc.
6687 * Function: rgSCHCmnFillHqPPdcchDciFrmt2A
6688 * Purpose: This function fills in the PDCCH information
6689 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6690 * for dedicated service scheduling. It also
6691 * obtains TPC to be filled in from the power module.
6692 * Assign the PDCCH to HQProc.
6694 * Invoked by: Downlink Scheduler
6696 * @param[in] RgSchCellCb* cell
6697 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6698 * @param[in] RgDlHqProc* hqP
6699 * @param[out] RgSchPdcch *pdcch
6700 * @param[in] uint8_t tpc
/* Fills DCI format 2A fields into the PDCCH; mirrors the format 2 filler
 * above (TPC, RA type-0 bitmap, HARQ proc id, per-TB NDI/MCS/RV, TB swap,
 * precoding, TDD DAI) but writes into format2AInfo.
 * NOTE(review): ANSI prototype and K&R definition are interleaved in this
 * extract; presumably selected by #ifdef ANSI in the full file. */
6705 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2A
6708 RgSchDlRbAlloc *rbAllocInfo,
6709 RgSchDlHqProcCb *hqP,
6714 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2A(cell, rbAllocInfo, hqP, pdcch, tpc)
6716 RgSchDlRbAlloc *rbAllocInfo;
6717 RgSchDlHqProcCb *hqP;
6723 RgSchTddANInfo *anInfo;
6727 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Link TB-0; TB-1 too when scheduled or explicitly disabled. */
6731 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6732 /*ccpu00120365:-ADD-call also if tb is disabled */
6733 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6734 rbAllocInfo->tbInfo[1].isDisabled)
6737 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6740 pdcch->dci.u.format2AInfo.tpcCmd = tpc;
6741 /* Avoiding this check,as we dont support Type1 RA */
6743 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Serialize the 32-bit RA type-0 RBG bitmap into 4 bytes, MSB first. */
6746 pdcch->dci.u.format2AInfo.allocInfo.isAllocType0 = TRUE;
6747 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[0] =
6748 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6750 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[1] =
6751 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6753 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[2] =
6754 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6756 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[3] =
6757 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on a fresh transmission uses HARQ id 0. */
6762 if ((!(hqP->tbInfo[0].txCntr)) &&
6763 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6764 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6765 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6768 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = 0;
6772 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6775 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6777 /* Initialize the TB info for both the TBs */
/* Defaults (mcs 0, rv 1) mark an unscheduled TB; scheduled TBs are
 * overwritten below, indexed by their tbIdx. */
6778 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].mcs = 0;
6779 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].rv = 1;
6780 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].mcs = 0;
6781 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].rv = 1;
6782 /* Fill tbInfo for scheduled TBs */
6783 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6784 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6785 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6786 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6787 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6788 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6789 /* If we reach this function. It is safely assumed that
6790 * rbAllocInfo->tbInfo[0] always has non default valid values.
6791 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6793 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6795 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6796 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6797 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6798 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6799 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6800 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6803 pdcch->dci.u.format2AInfo.allocInfo.transSwap =
6804 rbAllocInfo->mimoAllocInfo.swpFlg;
6805 pdcch->dci.u.format2AInfo.allocInfo.precoding =
6806 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD DAI lookup from the UE's A/N feedback info (CA variant presumably
 * under an #ifdef in the full file). */
6808 if(hqP->hqE->ue != NULLP)
6811 uint8_t servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6812 hqP->hqE->cell->cellId,
6814 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6815 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6817 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6818 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6823 pdcch->dci.u.format2AInfo.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
/* Missing A/N info: fall back to max DAI and log. */
6827 pdcch->dci.u.format2AInfo.dai = RG_SCH_MAX_DAI_IDX;
6828 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6829 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6841 * @brief init of Sch vars.
6845 * Function: rgSCHCmnInitVars
6846 Purpose: Initialization of various UL subframe indices
6848 * @param[in] RgSchCellCb *cell
/* Resets every per-cell UL scheduling index (current, scheduling, msg3,
 * reception-request, HARQ-feedback and retransmission indices) to
 * RGSCH_INVALID_INFO; rgSCHCmnUpdVars recomputes them each TTI. */
6853 PRIVATE Void rgSCHCmnInitVars
6858 PRIVATE Void rgSCHCmnInitVars(cell)
6862 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6865 cellUl->idx = RGSCH_INVALID_INFO;
6866 cellUl->schdIdx = RGSCH_INVALID_INFO;
6867 cellUl->schdHqProcIdx = RGSCH_INVALID_INFO;
6868 cellUl->msg3SchdIdx = RGSCH_INVALID_INFO;
/* emtcMsg3SchdIdx is presumably guarded by an EMTC #ifdef in the full file. */
6870 cellUl->emtcMsg3SchdIdx = RGSCH_INVALID_INFO;
6872 cellUl->msg3SchdHqProcIdx = RGSCH_INVALID_INFO;
6873 cellUl->rcpReqIdx = RGSCH_INVALID_INFO;
6874 cellUl->hqFdbkIdx[0] = RGSCH_INVALID_INFO;
6875 cellUl->hqFdbkIdx[1] = RGSCH_INVALID_INFO;
6876 cellUl->reTxIdx[0] = RGSCH_INVALID_INFO;
6877 cellUl->reTxIdx[1] = RGSCH_INVALID_INFO;
6878 /* Stack Crash problem for TRACE5 Changes. Added the return below */
6885 * @brief Per-TTI update of the scheduler's UL subframe index variables.
6889 * Function: rgSCHCmnUpdVars
6890 * Purpose: Updation of Sch vars per TTI.
6892 * @param[in] RgSchCellCb *cell
/* Per-TTI recomputation of the cell's UL scheduling indices from the
 * current (SFN, slot) time: current index, PUSCH scheduling index (with
 * control/PUSCH deltas), msg3 scheduling index, reception-request index
 * and HARQ feedback/retx indices; finally refreshes RACH parameters. */
6897 Void rgSCHCmnUpdVars
6902 Void rgSCHCmnUpdVars(cell)
6906 CmLteTimingInfo timeInfo;
6907 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Absolute subframe number, wrapped into the UL subframe ring. */
6911 idx = (cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot);
6912 cellUl->idx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
/* NOTE(review): raw printf debug output in a per-TTI hot path — consider
 * removing or demoting to the RLOG facility. */
6914 printf("idx %d cellUl->idx %d RGSCH_NUM_SUB_FRAMES_5G %d time(%d %d) \n",idx,cellUl->idx ,RGSCH_NUM_SUB_FRAMES_5G,cell->crntTime.sfn,cell->crntTime.slot);
6916 /* Need to scheduler for after SCHED_DELTA */
6917 /* UL allocation has been advanced by 1 subframe
6918 * so that we do not wrap around and send feedback
6919 * before the data is even received by the PHY */
6920 /* Introduced timing delta for UL control */
6921 idx = (cellUl->idx + TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA);
6922 cellUl->schdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6924 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6925 TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA)
6926 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6928 /* ccpu00127193 filling schdTime for logging and enhancement purpose*/
6929 cellUl->schdTime = timeInfo;
6931 /* msg3 scheduling two subframes after general scheduling */
6932 idx = (cellUl->idx + RG_SCH_CMN_DL_DELTA + RGSCH_RARSP_MSG3_DELTA);
6933 cellUl->msg3SchdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6935 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6936 RG_SCH_CMN_DL_DELTA+ RGSCH_RARSP_MSG3_DELTA)
6937 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6939 idx = (cellUl->idx + TFU_RECPREQ_DLDELTA);
6941 cellUl->rcpReqIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6943 /* Downlink harq feedback is sometime after data reception / harq failure */
6944 /* Since feedback happens prior to scheduling being called, we add 1 to */
6945 /* take care of getting the correct subframe for feedback */
/* RG_SCH_CMN_UL_NUM_SF is added before the modulo to keep the subtraction
 * non-negative. */
6946 idx = (cellUl->idx - TFU_CRCIND_ULDELTA + RG_SCH_CMN_UL_NUM_SF);
6948 printf("Finally setting cellUl->hqFdbkIdx[0] = %d TFU_CRCIND_ULDELTA %d RG_SCH_CMN_UL_NUM_SF %d\n",idx,TFU_CRCIND_ULDELTA,RG_SCH_CMN_UL_NUM_SF);
6950 cellUl->hqFdbkIdx[0] = (idx % (RG_SCH_CMN_UL_NUM_SF));
6952 idx = ((cellUl->schdIdx) % (RG_SCH_CMN_UL_NUM_SF));
6954 cellUl->reTxIdx[0] = (uint8_t) idx;
6956 printf("cellUl->hqFdbkIdx[0] %d cellUl->reTxIdx[0] %d \n",cellUl->hqFdbkIdx[0], cellUl->reTxIdx[0] );
6958 /* RACHO: update cmn sched specific RACH variables,
6959 * mainly the prachMaskIndex */
6960 rgSCHCmnUpdRachParam(cell);
6969 * @brief To get uplink subframe index associated with current PHICH
6974 * Function: rgSCHCmnGetPhichUlSfIdx
6975 * Purpose: Gets uplink subframe index associated with current PHICH
6976 * transmission based on SFN and subframe no
6978 * @param[in] CmLteTimingInfo *timeInfo
6979 * @param[in] RgSchCellCb *cell
/* Returns the UL subframe index tied to the current PHICH transmission:
 * resolves the DL subframe for timeInfo, applies its stored PHICH
 * (sfn, subframe) offsets, and counts UL subframes since SFN 0 using the
 * TDD UL-subframe table, modulo the cell's UL subframe ring size.
 * Returns RGSCH_INVALID_INFO when no PHICH offset is recorded. */
6984 uint8_t rgSCHCmnGetPhichUlSfIdx
6986 CmLteTimingInfo *timeInfo,
6990 uint8_t rgSCHCmnGetPhichUlSfIdx(timeInfo, cell)
6991 CmLteTimingInfo *timeInfo;
6995 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6997 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
7004 dlsf = rgSCHUtlSubFrmGet(cell, *timeInfo);
7006 if(dlsf->phichOffInfo.sfnOffset == RGSCH_INVALID_INFO)
7008 return (RGSCH_INVALID_INFO);
7010 subframe = dlsf->phichOffInfo.subframe;
/* Add RGSCH_MAX_SFN before the modulo so the SFN subtraction cannot go
 * negative across an SFN wrap. */
7012 sfn = (RGSCH_MAX_SFN + timeInfo->sfn -
7013 dlsf->phichOffInfo.sfnOffset) % RGSCH_MAX_SFN;
7015 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
7016 * wrap case such that idx will be proper*/
7017 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7018 numUlSf = ((numUlSf * sfn) + rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subframe]) - 1;
7019 idx = numUlSf % (cellUl->numUlSubfrms);
7025 * @brief To get uplink subframe index.
7030 * Function: rgSCHCmnGetUlSfIdx
7031 * Purpose: Gets uplink subframe index based on SFN and subframe number.
7033 * @param[in] CmLteTimingInfo *timeInfo
7034 * @param[in] uint8_t ulDlCfgIdx
/* Returns the UL subframe index for the given (SFN, subframe): counts UL
 * subframes since SFN 0 via the TDD UL-subframe table and wraps by the
 * cell's UL subframe ring size. Same counting scheme as
 * rgSCHCmnGetPhichUlSfIdx but without the PHICH offset adjustment. */
7039 uint8_t rgSCHCmnGetUlSfIdx
7041 CmLteTimingInfo *timeInfo,
7045 uint8_t rgSCHCmnGetUlSfIdx(timeInfo, cell)
7046 CmLteTimingInfo *timeInfo;
7050 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
7051 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
7056 /* ccpu00130980: numUlSf(uint16_t) parameter added to avoid integer
7057 * wrap case such that idx will be proper*/
7058 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7059 numUlSf = ((numUlSf * timeInfo->sfn) + \
7060 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->subframe]) - 1;
7061 idx = numUlSf % (cellUl->numUlSubfrms);
7069 * @brief To get uplink hq index.
7074 * Function: rgSCHCmnGetUlHqProcIdx
7075 * Purpose: Gets uplink subframe index based on SFN and subframe number.
7077 * @param[in] CmLteTimingInfo *timeInfo
7078 * @param[in] uint8_t ulDlCfgIdx
/* Returns the UL HARQ process id for the given time. Two variants are
 * visible in this extract (presumably split by an FDD/TDD #ifdef in the
 * full file): a simple absolute-subframe modulo for the 5G/FDD path, and
 * a TDD path that counts UL subframes across an SFN cycle using the
 * UL/DL configuration tables. */
7083 uint8_t rgSCHCmnGetUlHqProcIdx
7085 CmLteTimingInfo *timeInfo,
7089 uint8_t rgSCHCmnGetUlHqProcIdx(timeInfo, cell)
7090 CmLteTimingInfo *timeInfo;
/* Simple path: absolute subframe number modulo the number of UL HARQ procs. */
7098 numUlSf = (timeInfo->sfn * RGSCH_NUM_SUB_FRAMES_5G + timeInfo->slot);
7099 procId = numUlSf % RGSCH_NUM_UL_HQ_PROC;
7101 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
7102 /*ccpu00130639 - MOD - To get correct UL HARQ Proc IDs for all UL/DL Configs*/
7103 uint8_t numUlSfInSfn;
7104 S8 sfnCycle = cell->tddHqSfnCycle;
7105 uint8_t numUlHarq = rgSchTddUlNumHarqProcTbl[ulDlCfgIdx]
7107 /* TRACE 5 Changes */
7109 /* Calculate the number of UL SF in one SFN */
7110 numUlSfInSfn = RGSCH_NUM_SUB_FRAMES -
7111 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7113 /* Check for the SFN wrap around case */
7114 if(cell->crntTime.sfn == 1023 && timeInfo->sfn == 0)
7118 else if(cell->crntTime.sfn == 0 && timeInfo->sfn == 1023)
7120 /* sfnCycle decremented by 1 */
/* Adding (numUlHarq - 1) then taking modulo decrements without going
 * negative. */
7121 sfnCycle = (sfnCycle + numUlHarq-1) % numUlHarq;
7123 /* Calculate the total number of UL sf */
7124 /* -1 is done since uplink sf are counted from 0 */
7125 numUlSf = numUlSfInSfn * (timeInfo->sfn + (sfnCycle*1024)) +
7126 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->slot] - 1;
7128 procId = numUlSf % numUlHarq;
7134 /* UL_ALLOC_CHANGES */
7135 /***********************************************************
7137 * Func : rgSCHCmnUlFreeAlloc
7139 * Desc : Free an allocation - invokes UHM and releases
7140 * alloc for the scheduler
7141 * Doesn't need the subframe as an argument
7149 **********************************************************/
/* Frees a UL allocation: releases the allocation back to the scheduler
 * and frees its UHM HARQ process. Special case: a msg3 allocation whose
 * HARQ retransmissions are exhausted without a CRC pass also tears down
 * the RA control block (releasing the RNTI for non-handover UEs). */
7151 Void rgSCHCmnUlFreeAlloc
7157 Void rgSCHCmnUlFreeAlloc(cell, alloc)
7159 RgSchUlAlloc *alloc;
7162 RgSchUlHqProcCb *hqProc;
7166 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
7167 if ((alloc->hqProc->remTx == 0) &&
7168 (alloc->hqProc->rcvdCrcInd == FALSE) &&
7171 RgSchRaCb *raCb = alloc->raCb;
7172 rgSCHUhmFreeProc(alloc->hqProc, cell);
7173 rgSCHUtlUlAllocRelease(alloc);
7174 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: cache hqProc first because the alloc is released before
 * the HARQ process is freed. */
7179 hqProc = alloc->hqProc;
7180 rgSCHUtlUlAllocRelease(alloc);
7181 rgSCHUhmFreeProc(hqProc, cell);
7186 /***********************************************************
7188 * Func : rgSCHCmnUlFreeAllocation
7190 * Desc : Free an allocation - invokes UHM and releases
7191 * alloc for the scheduler
7199 **********************************************************/
/* Variant of rgSCHCmnUlFreeAlloc that releases the allocation against an
 * explicit UL subframe (sf) via rgSCHUtlUlAllocRls. Same msg3
 * max-retransmission special case: delete the RA control block when
 * retransmissions are exhausted without a CRC pass. */
7201 Void rgSCHCmnUlFreeAllocation
7208 Void rgSCHCmnUlFreeAllocation(cell, sf, alloc)
7211 RgSchUlAlloc *alloc;
7214 RgSchUlHqProcCb *hqProc;
7219 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
7220 if ((alloc->hqProc->remTx == 0) &&
7221 (alloc->hqProc->rcvdCrcInd == FALSE) &&
7224 RgSchRaCb *raCb = alloc->raCb;
7225 rgSCHUhmFreeProc(alloc->hqProc, cell);
7226 rgSCHUtlUlAllocRls(sf, alloc);
7227 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: free the HARQ process, then release the alloc on sf. */
7232 hqProc = alloc->hqProc;
7233 rgSCHUhmFreeProc(hqProc, cell);
7235 /* re-setting the PRB count while freeing the allocations */
7238 rgSCHUtlUlAllocRls(sf, alloc);
7244 * @brief This function implements PDCCH allocation for an UE
7245 * in the currently running subframe.
7249 * Function: rgSCHCmnPdcchAllocCrntSf
7250 * Purpose: This function determines current DL subframe
7251 * and UE DL CQI to call the actual pdcch allocator
7253 * Note that this function is called only
7254 * when PDCCH request needs to be made during
7255 * uplink scheduling.
7257 * Invoked by: Scheduler
7259 * @param[in] RgSchCellCb *cell
7260 * @param[in] RgSchUeCb *ue
7261 * @return RgSchPdcch *
7262 * -# NULLP when unsuccessful
/* Allocates a PDCCH for the UE in the DL subframe currently considered
 * for UL scheduling (current time advanced by TFU_ULCNTRL_DLDELTA).
 * Uses the common search space while allocCmnUlPdcch is set (sizing the
 * DCI from the common DCI-0 size), else the UE search space keyed on the
 * UE's codeword-0 CQI. Returns NULLP on failure. */
7265 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf
7271 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf(cell, ue)
7276 CmLteTimingInfo frm = cell->crntTime;
7277 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7279 RgSchPdcch *pdcch = NULLP;
7281 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
7282 sf = rgSCHUtlSubFrmGet(cell, frm);
7285 if (ue->allocCmnUlPdcch)
7287 pdcch = rgSCHCmnCmnPdcchAlloc(cell, sf);
7288 /* Since CRNTI Scrambled */
7291 pdcch->dciNumOfBits = ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
/* 5GTF path: UE-specific allocation requests DCI format A1. */
7297 //pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, y, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_0, FALSE);
7298 pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_A1, FALSE);
7303 /***********************************************************
7305 * Func : rgSCHCmnUlAllocFillNdmrs
7307 * Desc : Determines and fills N_dmrs for a UE uplink
7312 * Notes: N_dmrs determination is straightforward, so
7313 * it is configured per subband
7317 **********************************************************/
/* Fills the grant's N_dmrs from the per-subband DMRS table, indexed by
 * the allocation's starting subband. */
7319 Void rgSCHCmnUlAllocFillNdmrs
7321 RgSchCmnUlCell *cellUl,
7325 Void rgSCHCmnUlAllocFillNdmrs(cellUl, alloc)
7326 RgSchCmnUlCell *cellUl;
7327 RgSchUlAlloc *alloc;
7330 alloc->grnt.nDmrs = cellUl->dmrsArr[alloc->sbStart];
7334 /***********************************************************
7336 * Func : rgSCHCmnUlAllocLnkHqProc
7338 * Desc : Links a new allocation for an UE with the
7339 * appropriate HARQ process of the UE.
7347 **********************************************************/
/* Links a UL allocation to the UE's HARQ process: adaptive
 * retransmission setup for retx, otherwise registers a new transmission
 * with UHM using the HARQ entity's max-retx limit. */
7349 Void rgSCHCmnUlAllocLnkHqProc
7352 RgSchUlAlloc *alloc,
7353 RgSchUlHqProcCb *proc,
7357 Void rgSCHCmnUlAllocLnkHqProc(ue, alloc, proc, isRetx)
7359 RgSchUlAlloc *alloc;
7360 RgSchUlHqProcCb *proc;
7367 rgSCHCmnUlAdapRetx(alloc, proc);
7371 #ifdef LTE_L2_MEAS /* L2_COUNTERS */
7374 rgSCHUhmNewTx(proc, (((RgUeUlHqCb*)proc->hqEnt)->maxHqRetx), alloc);
7380 * @brief This function releases a PDCCH in the subframe that is
7381 * currently being allocated for.
7385 * Function: rgSCHCmnPdcchRlsCrntSf
7386 * Purpose: This function determines current DL subframe
7387 * which is considered for PDCCH allocation,
7388 * and then calls the actual function that
7389 * releases a PDCCH in a specific subframe.
7390 * Note that this function is called only
7391 * when PDCCH release needs to be made during
7392 * uplink scheduling.
7394 * Invoked by: Scheduler
7396 * @param[in] RgSchCellCb *cell
7397 * @param[in] RgSchPdcch *pdcch
/* Returns a PDCCH to the pool of the DL subframe currently considered
 * for UL scheduling (current time advanced by TFU_ULCNTRL_DLDELTA) —
 * counterpart of rgSCHCmnPdcchAllocCrntSf. */
7401 Void rgSCHCmnPdcchRlsCrntSf
7407 Void rgSCHCmnPdcchRlsCrntSf(cell, pdcch)
7412 CmLteTimingInfo frm = cell->crntTime;
7416 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
7417 sf = rgSCHUtlSubFrmGet(cell, frm);
7418 rgSCHUtlPdcchPut(cell, &sf->pdcchInfo, pdcch);
7421 /***********************************************************
7423 * Func : rgSCHCmnUlFillPdcchWithAlloc
7425 * Desc : Fills a PDCCH with format 0 information.
7433 **********************************************************/
/* Fills a UL-grant PDCCH from the allocation: RNTI, DCI format from the
 * grant, then the 5GTF format A1/A2 payload (xPUSCH range, RB
 * assignment, HARQ id, MCS, NDI, UCI, SCID, PMI, TPC). Fields set to
 * literal 0 below are hard-coded defaults for features not in use.
 * Unknown formats are logged as errors. */
7435 Void rgSCHCmnUlFillPdcchWithAlloc
7438 RgSchUlAlloc *alloc,
7442 Void rgSCHCmnUlFillPdcchWithAlloc(pdcch, alloc, ue)
7444 RgSchUlAlloc *alloc;
7451 pdcch->rnti = alloc->rnti;
7452 //pdcch->dci.dciFormat = TFU_DCI_FORMAT_A2;
7453 pdcch->dci.dciFormat = alloc->grnt.dciFrmt;
7455 //Currently hardcoding values here.
7456 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
7457 switch(pdcch->dci.dciFormat)
7459 case TFU_DCI_FORMAT_A1:
/* formatType 0 distinguishes A1 from A2 (which uses 1 below). */
7461 pdcch->dci.u.formatA1Info.formatType = 0;
7462 pdcch->dci.u.formatA1Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
7463 pdcch->dci.u.formatA1Info.xPUSCH_TxTiming = 0;
7464 pdcch->dci.u.formatA1Info.RBAssign = alloc->grnt.rbAssign;
7465 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
7466 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
7467 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
7468 pdcch->dci.u.formatA1Info.CSI_BSI_BRI_Req = 0;
7469 pdcch->dci.u.formatA1Info.CSIRS_BRRS_TxTiming = 0;
7470 pdcch->dci.u.formatA1Info.CSIRS_BRRS_SymbIdx = 0;
7471 pdcch->dci.u.formatA1Info.CSIRS_BRRS_ProcInd = 0;
7472 pdcch->dci.u.formatA1Info.numBSI_Reports = 0;
7473 pdcch->dci.u.formatA1Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
7474 pdcch->dci.u.formatA1Info.beamSwitch = 0;
7475 pdcch->dci.u.formatA1Info.SRS_Config = 0;
7476 pdcch->dci.u.formatA1Info.SRS_Symbol = 0;
7477 pdcch->dci.u.formatA1Info.REMapIdx_DMRS_PCRS_numLayers = 0;
7478 pdcch->dci.u.formatA1Info.SCID = alloc->grnt.SCID;
7479 pdcch->dci.u.formatA1Info.PMI = alloc->grnt.PMI;
7480 pdcch->dci.u.formatA1Info.UL_PCRS = 0;
7481 pdcch->dci.u.formatA1Info.tpcCmd = alloc->grnt.tpc;
7484 case TFU_DCI_FORMAT_A2:
/* A2 mirrors A1 field-for-field except formatType = 1. */
7486 pdcch->dci.u.formatA2Info.formatType = 1;
7487 pdcch->dci.u.formatA2Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
7488 pdcch->dci.u.formatA2Info.xPUSCH_TxTiming = 0;
7489 pdcch->dci.u.formatA2Info.RBAssign = alloc->grnt.rbAssign;
7490 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
7491 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
7492 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
7493 pdcch->dci.u.formatA2Info.CSI_BSI_BRI_Req = 0;
7494 pdcch->dci.u.formatA2Info.CSIRS_BRRS_TxTiming = 0;
7495 pdcch->dci.u.formatA2Info.CSIRS_BRRS_SymbIdx = 0;
7496 pdcch->dci.u.formatA2Info.CSIRS_BRRS_ProcInd = 0;
7497 pdcch->dci.u.formatA2Info.numBSI_Reports = 0;
7498 pdcch->dci.u.formatA2Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
7499 pdcch->dci.u.formatA2Info.beamSwitch = 0;
7500 pdcch->dci.u.formatA2Info.SRS_Config = 0;
7501 pdcch->dci.u.formatA2Info.SRS_Symbol = 0;
7502 pdcch->dci.u.formatA2Info.REMapIdx_DMRS_PCRS_numLayers = 0;
7503 pdcch->dci.u.formatA2Info.SCID = alloc->grnt.SCID;
7504 pdcch->dci.u.formatA2Info.PMI = alloc->grnt.PMI;
7505 pdcch->dci.u.formatA2Info.UL_PCRS = 0;
7506 pdcch->dci.u.formatA2Info.tpcCmd = alloc->grnt.tpc;
7510 RLOG1(L_ERROR," 5GTF_ERROR UL Allocator's icorrect "
7511 "dciForamt Fill RNTI:%d",alloc->rnti);
7519 /***********************************************************
7521 * Func : rgSCHCmnUlAllocFillTpc
7523 * Desc : Determines and fills TPC for an UE allocation.
7531 **********************************************************/
/* Fills the grant's TPC command from the power-control module's PUSCH
 * TPC decision for this UE. */
7533 Void rgSCHCmnUlAllocFillTpc
7540 Void rgSCHCmnUlAllocFillTpc(cell, ue, alloc)
7543 RgSchUlAlloc *alloc;
7546 alloc->grnt.tpc = rgSCHPwrPuschTpcForUe(cell, ue);
7551 /***********************************************************
7553 * Func : rgSCHCmnAddUeToRefreshQ
7555 * Desc : Adds a UE to refresh queue, so that the UE is
7556 * periodically triggered to refresh it's GBR and
7565 **********************************************************/
/* Arms the UE's periodic refresh timer on the common scheduler's timer
 * queue, with event RG_SCH_CMN_EVNT_UE_REFRESH, so the UE's GBR (and
 * related quotas) are periodically replenished. */
7567 PRIVATE Void rgSCHCmnAddUeToRefreshQ
7574 PRIVATE Void rgSCHCmnAddUeToRefreshQ(cell, ue, wait)
7580 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
7582 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
/* Build the timer registration argument from scratch before arming. */
7586 memset(&arg, 0, sizeof(arg));
7587 arg.tqCp = &sched->tmrTqCp;
7588 arg.tq = sched->tmrTq;
7589 arg.timers = &ueSchd->tmr;
7593 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
7600 * @brief Perform UE reset procedure.
7604 * Function : rgSCHCmnUlUeReset
7606 * This functions performs BSR resetting and
7607 * triggers UL specific scheduler
7608 * to Perform UE reset procedure.
7610 * @param[in] RgSchCellCb *cell
7611 * @param[in] RgSchUeCb *ue
/* UL side of the UE reset procedure: clears BSR state (total BSR,
 * non-GBR backlog, effective AMBR back to configured AMBR), walks the
 * UE's UL allocation list, resets every LCG's reported BS and effective
 * GBR/MBR, deletes outstanding UL allocations, clears the SR-grant flag,
 * and finally delegates to the UL-specific scheduler's reset hook. */
7615 PRIVATE Void rgSCHCmnUlUeReset
7621 PRIVATE Void rgSCHCmnUlUeReset(cell, ue)
7626 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7627 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
7629 RgSchCmnLcg *lcgCmn;
7631 RgSchCmnAllocRecord *allRcd;
7633 ue->ul.minReqBytes = 0;
7634 ue->ul.totalBsr = 0;
7636 ue->ul.nonGbrLcgBs = 0;
7637 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk the UE's outstanding UL allocation records. */
7639 node = ueUl->ulAllocLst.first;
7642 allRcd = (RgSchCmnAllocRecord *)node->node;
/* Reset every logical channel group back to its configured rates. */
7646 for(lcgCnt = 0; lcgCnt < RGSCH_MAX_LCG_PER_UE; lcgCnt++)
7648 lcgCmn = RG_SCH_CMN_GET_UL_LCG(&ue->ul.lcgArr[lcgCnt]);
7650 lcgCmn->reportedBs = 0;
7651 lcgCmn->effGbr = lcgCmn->cfgdGbr;
7652 lcgCmn->effDeltaMbr = lcgCmn->deltaMbr;
7654 rgSCHCmnUlUeDelAllocs(cell, ue);
7656 ue->isSrGrant = FALSE;
7658 cellSchd->apisUl->rgSCHUlUeReset(cell, ue);
7660 /* Stack Crash problem for TRACE5 changes. Added the return below */
7666 * @brief RESET UL CQI and DL CQI&RI to conservative values
7667 * for a reestablishing UE.
7671 * Function : rgSCHCmnResetRiCqi
7673 * RESET UL CQI and DL CQI&RI to conservative values
7674 * for a reestablishing UE
7676 * @param[in] RgSchCellCb *cell
7677 * @param[in] RgSchUeCb *ue
/* For a re-establishing UE: resets UL CQI info and forces DL CQI of both
 * codewords to the conservative CCCH CQI with rank 1, constrains TM3/4/6
 * UEs to transmit diversity accordingly, recomputes DL allocation
 * limits, and (when aperiodic CQI is configured) requests an early
 * aperiodic CQI report by zeroing its trigger weight. */
7681 PRIVATE Void rgSCHCmnResetRiCqi
7687 PRIVATE Void rgSCHCmnResetRiCqi(cell, ue)
7692 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7693 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
7694 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7695 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
7698 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
7699 cell->isCpUlExtend);
/* Conservative DL starting point: CCCH CQI on both codewords, rank 1. */
7701 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
7702 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
7703 ueDl->mimoInfo.ri = 1;
7704 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
7705 (ue->mimoInfo.txMode == RGR_UE_TM_6))
7707 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
7709 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7711 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* Two alloc-limit calls visible; presumably split by an EMTC #ifdef. */
7714 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
7716 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
7720 /* Request for an early Aper CQI in case of reest */
7721 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
7722 if(acqiCb && acqiCb->aCqiCfg.pres)
7724 acqiCb->aCqiTrigWt = 0;
7732 * @brief Perform UE reset procedure.
7736 * Function : rgSCHCmnDlUeReset
7738 * This functions performs BO resetting and
7739 * triggers DL specific scheduler
7740 * to Perform UE reset procedure.
7742 * @param[in] RgSchCellCb *cell
7743 * @param[in] RgSchUeCb *ue
/* DL side of the UE reset procedure: removes the UE from the PDCCH
 * order queue (if linked), removes it from the TA list or stops a
 * running TA timer, delegates to the DL-specific scheduler's reset hook,
 * and resets its SCell DL state. */
7747 PRIVATE Void rgSCHCmnDlUeReset
7753 PRIVATE Void rgSCHCmnDlUeReset(cell, ue)
7758 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7759 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
7760 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7763 if (ueDl->rachInfo.poLnk.node != NULLP)
7765 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
7768 /* Fix: syed Remove from TA List if this UE is there.
7769 * If TA Timer is running. Stop it */
7770 if (ue->dlTaLnk.node)
7772 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
/* Null the link node so a later reset does not double-delete. */
7773 ue->dlTaLnk.node = (PTR)NULLP;
7775 else if (ue->taTmr.tmrEvnt != TMR_NONE)
7777 rgSCHTmrStopTmr(cell, ue->taTmr.tmrEvnt, ue);
7780 cellSchd->apisDl->rgSCHDlUeReset(cell, ue);
/* SCell reset is presumably guarded by an LTE_ADV #ifdef in the full file. */
7784 rgSCHSCellDlUeReset(cell,ue);
7790 * @brief Perform UE reset procedure.
7794 * Function : rgSCHCmnUeReset
7796 * This functions triggers specific scheduler
7797 * to Perform UE reset procedure.
7799 * @param[in] RgSchCellCb *cell
7800 * @param[in] RgSchUeCb *ue
/* Full UE reset entry point: removes RACH associations, resets power
 * control, runs the UL and DL resets, forces DCI0/1A allocation from the
 * common search space, resets RI/CQI for every configured serving cell,
 * triggers an aperiodic CQI on the PCell if configured, and informs MAC
 * to reset the UE's HARQ entity. */
7806 Void rgSCHCmnUeReset
7812 Void rgSCHCmnUeReset(cell, ue)
7819 RgInfResetHqEnt hqEntRstInfo;
7821 /* RACHO: remove UE from pdcch, handover and rapId assoc Qs */
7822 rgSCHCmnDelRachInfo(cell, ue);
7824 rgSCHPwrUeReset(cell, ue);
7826 rgSCHCmnUlUeReset(cell, ue);
7827 rgSCHCmnDlUeReset(cell, ue);
7830 /* Making allocCmnUlPdcch TRUE to allocate DCI0/1A from Common search space.
7831 As because multiple cells are added hence 2 bits CqiReq is there
7832 This flag will be set to FALSE once we will get Scell READY */
7833 ue->allocCmnUlPdcch = TRUE;
7836 /* Fix : syed RESET UL CQI and DL CQI&RI to conservative values
7837 * for a reestablishing UE */
7838 /*Reset Cqi Config for all the configured cells*/
7839 for (idx = 0;idx < CM_LTE_MAX_CELLS; idx++)
7841 if (ue->cellInfo[idx] != NULLP)
7843 rgSCHCmnResetRiCqi(ue->cellInfo[idx]->cell, ue);
7846 /*After Reset Trigger APCQI for Pcell*/
7847 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7848 if(pCellInfo->acqiCb.aCqiCfg.pres)
7850 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
7853 /* sending HqEnt reset to MAC */
7854 hqEntRstInfo.cellId = cell->cellId;
7855 hqEntRstInfo.crnti = ue->ueId;
7857 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
7858 RgSchMacRstHqEnt(&pst,&hqEntRstInfo);
7864 * @brief UE out of MeasGap or AckNackReptn.
7868 * Function : rgSCHCmnActvtUlUe
7870 * This functions triggers specific scheduler
7871 * to start considering it for scheduling.
7873 * @param[in] RgSchCellCb *cell
7874 * @param[in] RgSchUeCb *ue
/* Re-activates a UE for UL scheduling (e.g. after measurement gap or
 * ACK/NACK repetition) by delegating to the UL scheduler's hook. */
7880 Void rgSCHCmnActvtUlUe
7886 Void rgSCHCmnActvtUlUe(cell, ue)
7891 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7893 /* : take care of this in UL retransmission */
7894 cellSchd->apisUl->rgSCHUlActvtUe(cell, ue);
7899 * @brief UE out of MeasGap or AckNackReptn.
7903 * Function : rgSCHCmnActvtDlUe
7905 * This functions triggers specific scheduler
7906 * to start considering it for scheduling.
7908 * @param[in] RgSchCellCb *cell
7909 * @param[in] RgSchUeCb *ue
7915 Void rgSCHCmnActvtDlUe
7921 Void rgSCHCmnActvtDlUe(cell, ue)
7926 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Delegate activation to the cell-specific DL scheduler via its API table */
7928 cellSchd->apisDl->rgSCHDlActvtUe(cell, ue);
7933 * @brief This API is invoked to indicate scheduler of a CRC indication.
7937 * Function : rgSCHCmnHdlUlTransInd
7938 * This API is invoked to indicate scheduler of a CRC indication.
7940 * @param[in] RgSchCellCb *cell
7941 * @param[in] RgSchUeCb *ue
7942 * @param[in] CmLteTimingInfo timingInfo
7947 Void rgSCHCmnHdlUlTransInd
7951 CmLteTimingInfo timingInfo
7954 Void rgSCHCmnHdlUlTransInd(cell, ue, timingInfo)
7957 CmLteTimingInfo timingInfo;
7961 /* Update the latest UL dat/sig transmission time */
7962 RGSCHCPYTIMEINFO(timingInfo, ue->ul.ulTransTime);
7963 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
7965 /* Some UL Transmission from this UE.
7966 * Activate this UE if it was inactive */
/* Clear the PDCCH-order inactivity bit in both DL and UL inactivity masks */
7967 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7968 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7976 * @brief Compute the minimum Rank based on Codebook subset
7977 * restriction configuration for 4 Tx Ports and Tx Mode 4.
7981 * Function : rgSCHCmnComp4TxMode4
7983 * Depending on BitMap set at CBSR during Configuration
7984 * - return the least possible Rank
7987 * @param[in] uint32_t *pmiBitMap
7988 * @return RgSchCmnRank
7991 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode4
7996 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode4(pmiBitMap)
7997 uint32_t *pmiBitMap;
8000 uint32_t bitMap0, bitMap1;
8001 bitMap0 = pmiBitMap[0];
8002 bitMap1 = pmiBitMap[1];
8003 if((bitMap1) & 0xFFFF)
8005 return (RG_SCH_CMN_RANK_1);
8007 else if((bitMap1>>16) & 0xFFFF)
8009 return (RG_SCH_CMN_RANK_2);
8011 else if((bitMap0) & 0xFFFF)
8013 return (RG_SCH_CMN_RANK_3);
8015 else if((bitMap0>>16) & 0xFFFF)
8017 return (RG_SCH_CMN_RANK_4);
8021 return (RG_SCH_CMN_RANK_1);
8027 * @brief Compute the minimum Rank based on Codebook subset
8028 * restriction configuration for 2 Tx Ports and Tx Mode 4.
8032 * Function : rgSCHCmnComp2TxMode4
8034 * Depending on BitMap set at CBSR during Configuration
8035 * - return the least possible Rank
8038 * @param[in] uint32_t *pmiBitMap
8039 * @return RgSchCmnRank
8042 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode4
8047 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode4(pmiBitMap)
8048 uint32_t *pmiBitMap;
8052 bitMap0 = pmiBitMap[0];
8053 if((bitMap0>>26)& 0x0F)
8055 return (RG_SCH_CMN_RANK_1);
8057 else if((bitMap0>>30) & 3)
8059 return (RG_SCH_CMN_RANK_2);
8063 return (RG_SCH_CMN_RANK_1);
8068 * @brief Compute the minimum Rank based on Codebook subset
8069 * restriction configuration for 4 Tx Ports and Tx Mode 3.
8073 * Function : rgSCHCmnComp4TxMode3
8075 * Depending on BitMap set at CBSR during Configuration
8076 * - return the least possible Rank
8079 * @param[in] uint32_t *pmiBitMap
8080 * @return RgSchCmnRank
8083 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode3
8088 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode3(pmiBitMap)
8089 uint32_t *pmiBitMap;
8093 bitMap0 = pmiBitMap[0];
8094 if((bitMap0>>28)& 1)
8096 return (RG_SCH_CMN_RANK_1);
8098 else if((bitMap0>>29) &1)
8100 return (RG_SCH_CMN_RANK_2);
8102 else if((bitMap0>>30) &1)
8104 return (RG_SCH_CMN_RANK_3);
8106 else if((bitMap0>>31) &1)
8108 return (RG_SCH_CMN_RANK_4);
8112 return (RG_SCH_CMN_RANK_1);
8117 * @brief Compute the minimum Rank based on Codebook subset
8118 * restriction configuration for 2 Tx Ports and Tx Mode 3.
8122 * Function : rgSCHCmnComp2TxMode3
8124 * Depending on BitMap set at CBSR during Configuration
8125 * - return the least possible Rank
8128 * @param[in] uint32_t *pmiBitMap
8129 * @return RgSchCmnRank
8132 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode3
8137 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode3(pmiBitMap)
8138 uint32_t *pmiBitMap;
8142 bitMap0 = pmiBitMap[0];
8143 if((bitMap0>>30)& 1)
8145 return (RG_SCH_CMN_RANK_1);
8147 else if((bitMap0>>31) &1)
8149 return (RG_SCH_CMN_RANK_2);
8153 return (RG_SCH_CMN_RANK_1);
8158 * @brief Compute the minimum Rank based on Codebook subset
8159 * restriction configuration.
8163 * Function : rgSCHCmnComputeRank
8165 * Depending on Num Tx Ports and Transmission mode
8166 * - return the least possible Rank
8169 * @param[in] RgrTxMode txMode
8170 * @param[in] uint32_t *pmiBitMap
8171 * @param[in] uint8_t numTxPorts
8172 * @return RgSchCmnRank
8175 PRIVATE RgSchCmnRank rgSCHCmnComputeRank
8178 uint32_t *pmiBitMap,
8182 PRIVATE RgSchCmnRank rgSCHCmnComputeRank(txMode, pmiBitMap, numTxPorts)
8184 uint32_t *pmiBitMap;
8189 if (numTxPorts ==2 && txMode == RGR_UE_TM_3)
8191 return (rgSCHCmnComp2TxMode3(pmiBitMap));
8193 else if (numTxPorts ==4 && txMode == RGR_UE_TM_3)
8195 return (rgSCHCmnComp4TxMode3(pmiBitMap));
8197 else if (numTxPorts ==2 && txMode == RGR_UE_TM_4)
8199 return (rgSCHCmnComp2TxMode4(pmiBitMap));
8201 else if (numTxPorts ==4 && txMode == RGR_UE_TM_4)
8203 return (rgSCHCmnComp4TxMode4(pmiBitMap));
8207 return (RG_SCH_CMN_RANK_1);
8214 * @brief Harq Entity Deinitialization for CMN SCH.
8218 * Function : rgSCHCmnDlDeInitHqEnt
8220 * Harq Entity Deinitialization for CMN SCH
8222 * @param[in] RgSchCellCb *cell
8223 * @param[in] RgSchDlHqEnt *hqE
8226 /*KWORK_FIX:Changed function return type to void */
8228 Void rgSCHCmnDlDeInitHqEnt
8234 Void rgSCHCmnDlDeInitHqEnt(cell, hqE)
8239 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8240 RgSchDlHqProcCb *hqP;
/* Let the cell-specific DL scheduler release its per-HqEnt state first */
8245 ret = cellSchd->apisDl->rgSCHDlUeHqEntDeInit(cell, hqE);
8246 /* Free only If the Harq proc are created*/
8251 for(cnt = 0; cnt < hqE->numHqPrcs; cnt++)
8253 hqP = &hqE->procs[cnt];
8254 if ((RG_SCH_CMN_GET_DL_HQP(hqP)))
/* Free the common-scheduler control block attached to this HARQ process */
8256 rgSCHUtlFreeSBuf(cell->instIdx,
8257 (Data**)(&(hqP->sch)), (sizeof(RgSchCmnDlHqProc)));
8261 rgSCHLaaDeInitDlHqProcCb (cell, hqE);
8268 * @brief Harq Entity initialization for CMN SCH.
8272 * Function : rgSCHCmnDlInitHqEnt
8274 * Harq Entity initialization for CMN SCH
8276 * @param[in] RgSchCellCb *cell
8277 * @param[in] RgSchUeCb *ue
8283 S16 rgSCHCmnDlInitHqEnt
8289 S16 rgSCHCmnDlInitHqEnt(cell, hqEnt)
8291 RgSchDlHqEnt *hqEnt;
8295 RgSchDlHqProcCb *hqP;
8298 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Allocate a common-scheduler control block for every DL HARQ process */
8300 for(cnt = 0; cnt < hqEnt->numHqPrcs; cnt++)
8302 hqP = &hqEnt->procs[cnt];
8303 if (rgSCHUtlAllocSBuf(cell->instIdx,
8304 (Data**)&(hqP->sch), (sizeof(RgSchCmnDlHqProc))) != ROK)
/* EMTC UEs are initialized through the EMTC-specific DL scheduler APIs */
8310 if((cell->emtcEnable) &&(hqEnt->ue->isEmtcUe))
8312 if(ROK != cellSchd->apisEmtcDl->rgSCHDlUeHqEntInit(cell, hqEnt))
8321 if(ROK != cellSchd->apisDl->rgSCHDlUeHqEntInit(cell, hqEnt))
8328 } /* rgSCHCmnDlInitHqEnt */
8331 * @brief This function computes distribution of refresh period
8335 * Function: rgSCHCmnGetRefreshDist
8336 * Purpose: This function computes distribution of refresh period
8337 * This is required to align set of UEs refresh
8338 * around the different consecutive subframe.
8340 * Invoked by: rgSCHCmnGetRefreshPerDist
8342 * @param[in] RgSchCellCb *cell
8343 * @param[in] RgSchUeCb *ue
8348 PRIVATE uint8_t rgSCHCmnGetRefreshDist
8354 PRIVATE uint8_t rgSCHCmnGetRefreshDist(cell, ue)
8361 Inst inst = cell->instIdx;
/* Find the first refresh offset slot whose UE group is not yet full */
8364 for(refOffst = 0; refOffst < RGSCH_MAX_REFRESH_OFFSET; refOffst++)
8366 if(cell->refreshUeCnt[refOffst] < RGSCH_MAX_REFRESH_GRPSZ)
8368 cell->refreshUeCnt[refOffst]++;
8369 ue->refreshOffset = refOffst;
8370 /* printf("UE[%d] refresh offset[%d]. Cell refresh ue count[%d].\n", ue->ueId, refOffst, cell->refreshUeCnt[refOffst]); */
8375 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Allocation of refresh distribution failed\n"));
8376 /* We should not enter here normally, but incase of failure, allocating from last offset*/
8377 cell->refreshUeCnt[refOffst-1]++;
8378 ue->refreshOffset = refOffst-1;
8380 return (refOffst-1);
8383 * @brief This function computes initial Refresh Wait Period.
8387 * Function: rgSCHCmnGetRefreshPer
8388 * Purpose: This function computes initial Refresh Wait Period.
8389 * This is required to align multiple UEs refresh
8390 * around the same time.
8392 * Invoked by: rgSCHCmnGetRefreshPer
8394 * @param[in] RgSchCellCb *cell
8395 * @param[in] RgSchUeCb *ue
8396 * @param[in] uint32_t *waitPer
8401 PRIVATE Void rgSCHCmnGetRefreshPer
8408 PRIVATE Void rgSCHCmnGetRefreshPer(cell, ue, waitPer)
8414 uint32_t refreshPer;
8415 uint32_t crntSubFrm;
/* Refresh period in subframes; current absolute subframe from SFN/slot */
8418 refreshPer = RG_SCH_CMN_REFRESH_TIME * RG_SCH_CMN_REFRESH_TIMERES;
8419 crntSubFrm = cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot;
8420 /* Fix: syed align multiple UEs to refresh at same time */
8421 *waitPer = refreshPer - (crntSubFrm % refreshPer);
/* Convert remaining subframes into timer resolution units, rounding up */
8422 *waitPer = RGSCH_CEIL(*waitPer, RG_SCH_CMN_REFRESH_TIMERES);
/* Stagger this UE by its per-cell refresh-offset distribution slot */
8423 *waitPer = *waitPer + rgSCHCmnGetRefreshDist(cell, ue);
8431 * @brief UE initialisation for scheduler.
8435 * Function : rgSCHCmnRgrSCellUeCfg
8437 * This functions intialises UE specific scheduler
8438 * information for SCELL
8439 * 0. Perform basic validations
8440 * 1. Allocate common sched UE cntrl blk
8441 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
8443 * 4. Perform DLFS cfg
8445 * @param[in] RgSchCellCb *cell
8446 * @param[in] RgSchUeCb *ue
8447 * @param[out] RgSchErrInfo *err
8453 S16 rgSCHCmnRgrSCellUeCfg
8457 RgrUeSecCellCfg *sCellInfoCfg,
8461 S16 rgSCHCmnRgrSCellUeCfg(sCell, ue, sCellInfoCfg, err)
8464 RgrUeSecCellCfg *sCellInfoCfg;
8471 RgSchCmnAllocRecord *allRcd;
8472 RgSchDlRbAlloc *allocInfo;
8473 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
8475 RgSchCmnUlUe *ueUlPcell;
8476 RgSchCmnUe *pCellUeSchCmn;
8477 RgSchCmnUe *ueSchCmn;
8479 RgSchCmnDlUe *pCellUeDl;
8481 Inst inst = ue->cell->instIdx;
/* Map cellId into the UE's cell-index table (masked to table bounds) */
8483 uint32_t idx = (uint8_t)((sCell->cellId - rgSchCb[sCell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
8485 pCellUeSchCmn = RG_SCH_CMN_GET_UE(ue,ue->cell);
8486 pCellUeDl = &pCellUeSchCmn->dl;
8488 /* 1. Allocate Common sched control block */
8489 if((rgSCHUtlAllocSBuf(sCell->instIdx,
8490 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
8492 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Memory allocation FAILED\n"));
8493 err->errCause = RGSCHERR_SCH_CFG;
8496 ueSchCmn = RG_SCH_CMN_GET_UE(ue,sCell);
8498 /*2. Perform UEs downlink configuration */
8499 ueDl = &ueSchCmn->dl;
/* Inherit MIMO settings from the PCell's common-scheduler UE block */
8502 ueDl->mimoInfo = pCellUeDl->mimoInfo;
8504 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
8505 (ue->mimoInfo.txMode == RGR_UE_TM_6))
8507 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_NO_PMI);
8509 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
8511 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_RI_1);
8513 RGSCH_ARRAY_BOUND_CHECK(sCell->instIdx, rgUeCatTbl, pCellUeSchCmn->cmn.ueCat);
8514 ueDl->maxTbBits = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlTbBits;
8517 ri = RGSCH_MIN(ri, sCell->numTxAntPorts);
/* CAT-6/7 UEs with 4 layers use the alternate (larger) max TB size entry */
8518 if(((CM_LTE_UE_CAT_6 == pCellUeSchCmn->cmn.ueCat )
8519 ||(CM_LTE_UE_CAT_7 == pCellUeSchCmn->cmn.ueCat))
8522 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[1];
8526 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[0];
8529 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
8531 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
8532 rgSchTddDlNumHarqProcTbl[sCell->ulDlCfgIdx]);
8534 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
8535 RGSCH_NUM_DL_HQ_PROC);
8538 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, ue->isEmtcUe);
8540 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, FALSE);
8544 /* ambrCfgd config moved to ueCb.dl, as it's not needed for per cell wise*/
8546 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, sCell);
8547 allocInfo->rnti = ue->ueId;
8549 /* Initializing the lastCfi value to current cfi value */
8550 ueDl->lastCfi = cellSchd->dl.currCfi;
8552 if ((cellSchd->apisDl->rgSCHRgrSCellDlUeCfg(sCell, ue, err)) != ROK)
8554 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Spec Sched DL UE CFG FAILED\n"));
8558 /* TODO: enhance for DLFS RB Allocation for SCELLs in future dev */
8560 /* DLFS UE Config */
8561 if (cellSchd->dl.isDlFreqSel)
8563 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeCfg(sCell, ue, sCellInfoCfg, err)) != ROK)
8565 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS UE config FAILED\n"));
8570 /* TODO: Do UL SCELL CFG during UL CA dev */
8572 ueUl = RG_SCH_CMN_GET_UL_UE(ue, sCell);
8574 /* TODO_ULCA: SRS for SCELL needs to be handled in the below function call */
8575 rgSCHCmnUpdUeUlCqiInfo(sCell, ue, ueUl, ueSchCmn, cellSchd,
8576 sCell->isCpUlExtend);
8578 ret = rgSCHUhmHqEntInit(sCell, ue);
8581 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL UHM HARQ Ent Init "
8582 "Failed for CRNTI:%d", ue->ueId);
/* UL HARQ entity on the SCell mirrors the PCell's retransmission limit */
8586 ueUlPcell = RG_SCH_CMN_GET_UL_UE(ue, ue->cell);
8587 /* Initialize uplink HARQ related information for UE */
8588 ueUl->hqEnt.maxHqRetx = ueUlPcell->hqEnt.maxHqRetx;
8589 cmLListInit(&ueUl->hqEnt.free);
8590 cmLListInit(&ueUl->hqEnt.inUse);
8591 for(i=0; i < ueUl->hqEnt.numHqPrcs; i++)
8593 ueUl->hqEnt.hqProcCb[i].hqEnt = (void*)(&ueUl->hqEnt);
8594 ueUl->hqEnt.hqProcCb[i].procId = i;
8595 ueUl->hqEnt.hqProcCb[i].ulSfIdx = RGSCH_INVALID_INFO;
8596 ueUl->hqEnt.hqProcCb[i].alloc = NULLP;
8598 /* ccpu00139513- Initializing SPS flags*/
8599 ueUl->hqEnt.hqProcCb[i].isSpsActvnHqP = FALSE;
8600 ueUl->hqEnt.hqProcCb[i].isSpsOccnHqP = FALSE;
8602 cmLListAdd2Tail(&ueUl->hqEnt.free, &ueUl->hqEnt.hqProcCb[i].lnk);
8603 ueUl->hqEnt.hqProcCb[i].lnk.node = (PTR)&ueUl->hqEnt.hqProcCb[i];
8606 /* Allocate UL BSR allocation tracking List */
8607 cmLListInit(&ueUl->ulAllocLst);
8609 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8611 if((rgSCHUtlAllocSBuf(sCell->instIdx,
8612 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8614 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL Memory allocation FAILED"
8615 "for CRNTI:%d",ue->ueId);
8616 err->errCause = RGSCHERR_SCH_CFG;
8619 allRcd->allocTime = sCell->crntTime;
8620 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8621 allRcd->lnk.node = (PTR)allRcd;
8624 /* After initialising UL part, do power related init */
8625 ret = rgSCHPwrUeSCellCfg(sCell, ue, sCellInfoCfg);
8628 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Could not do "
8629 "power config for UE CRNTI:%d",ue->ueId);
/* EMTC UEs go through the EMTC-specific UL scheduler configuration path */
8634 if(TRUE == ue->isEmtcUe)
8636 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
8638 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
8639 "for CRNTI:%d",ue->ueId);
8646 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
8648 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
8649 "for CRNTI:%d",ue->ueId);
8654 ue->ul.isUlCaEnabled = TRUE;
8658 } /* rgSCHCmnRgrSCellUeCfg */
8662 * @brief UE initialisation for scheduler.
8666 * Function : rgSCHCmnRgrSCellUeDel
8668 * This functions Delete UE specific scheduler
8669 * information for SCELL
8671 * @param[in] RgSchCellCb *cell
8672 * @param[in] RgSchUeCb *ue
8678 S16 rgSCHCmnRgrSCellUeDel
8680 RgSchUeCellInfo *sCellInfo,
8684 S16 rgSCHCmnRgrSCellUeDel(sCellInfo, ue)
8685 RgSchUeCellInfo *sCellInfo;
8689 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
8690 Inst inst = ue->cell->instIdx;
/* Tear down DL scheduler state, then UL allocations, for this SCell */
8693 cellSchd->apisDl->rgSCHRgrSCellDlUeDel(sCellInfo, ue);
8696 rgSCHCmnUlUeDelAllocs(sCellInfo->cell, ue);
/* EMTC UEs are released through the EMTC-specific UL scheduler API */
8699 if(TRUE == ue->isEmtcUe)
8701 cellSchd->apisEmtcUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
8706 cellSchd->apisUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
8709 /* DLFS UE Config */
8710 if (cellSchd->dl.isDlFreqSel)
8712 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeDel(sCellInfo->cell, ue)) != ROK)
8714 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS Scell del FAILED\n"));
/* Finally release the common-scheduler UE control block for this SCell */
8719 rgSCHUtlFreeSBuf(sCellInfo->cell->instIdx,
8720 (Data**)(&(sCellInfo->sch)), (sizeof(RgSchCmnUe)));
8724 } /* rgSCHCmnRgrSCellUeDel */
8730 * @brief Handles 5gtf configuration for a UE
8734 * Function : rgSCHCmn5gtfUeCfg
8740 * @param[in] RgSchCellCb *cell
8741 * @param[in] RgSchUeCb *ue
8742 * @param[in] RgrUeCfg *cfg
8748 S16 rgSCHCmn5gtfUeCfg
8755 S16 rgSCHCmn5gtfUeCfg(cell, ue, cfg)
8762 RgSchUeGrp *ue5gtfGrp;
/* Copy the 5GTF UE parameters from the configuration into the UE block */
8763 ue->ue5gtfCb.grpId = cfg->ue5gtfCfg.grpId;
8764 ue->ue5gtfCb.BeamId = cfg->ue5gtfCfg.BeamId;
8765 ue->ue5gtfCb.numCC = cfg->ue5gtfCfg.numCC;
8766 ue->ue5gtfCb.mcs = cfg->ue5gtfCfg.mcs;
8767 ue->ue5gtfCb.maxPrb = cfg->ue5gtfCfg.maxPrb;
8769 ue->ue5gtfCb.cqiRiPer = 100;
8770 /* 5gtf TODO: CQIs to start from (10,0)*/
8771 ue->ue5gtfCb.nxtCqiRiOccn.sfn = 10;
8772 ue->ue5gtfCb.nxtCqiRiOccn.slot = 0;
8773 ue->ue5gtfCb.rank = 1;
8775 printf("\nschd cfg at mac,%u,%u,%u,%u,%u\n",ue->ue5gtfCb.grpId,ue->ue5gtfCb.BeamId,ue->ue5gtfCb.numCC,
8776 ue->ue5gtfCb.mcs,ue->ue5gtfCb.maxPrb);
8778 ue5gtfGrp = &(cell->cell5gtfCb.ueGrp5gConf[ue->ue5gtfCb.BeamId]);
8780 /* TODO_5GTF: Currently handling 1 group only. Need to update when multi group
8781 scheduling comes into picture */
/* Reject the configuration if this beam id is already claimed in the group */
8782 if(ue5gtfGrp->beamBitMask & (1 << ue->ue5gtfCb.BeamId))
8784 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8785 "5GTF_ERROR Invalid beam id CRNTI:%d",cfg->crnti);
8788 ue5gtfGrp->beamBitMask |= (1 << ue->ue5gtfCb.BeamId);
8795 * @brief UE initialisation for scheduler.
8799 * Function : rgSCHCmnRgrUeCfg
8801 * This functions intialises UE specific scheduler
8803 * 0. Perform basic validations
8804 * 1. Allocate common sched UE cntrl blk
8805 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
8807 * 4. Perform DLFS cfg
8809 * @param[in] RgSchCellCb *cell
8810 * @param[in] RgSchUeCb *ue
8811 * @param[int] RgrUeCfg *ueCfg
8812 * @param[out] RgSchErrInfo *err
8818 S16 rgSCHCmnRgrUeCfg
8826 S16 rgSCHCmnRgrUeCfg(cell, ue, ueCfg, err)
8833 RgSchDlRbAlloc *allocInfo;
8835 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8836 RgSchCmnUe *ueSchCmn;
8840 RgSchCmnAllocRecord *allRcd;
/* Map cellId into the UE's cell-index table (masked to table bounds) */
8842 uint32_t idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
8843 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
8846 /* 1. Allocate Common sched control block */
8847 if((rgSCHUtlAllocSBuf(cell->instIdx,
8848 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
8850 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8851 "Memory allocation FAILED for CRNTI:%d",ueCfg->crnti);
8852 err->errCause = RGSCHERR_SCH_CFG;
8855 ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
8856 ue->dl.ueDlCqiCfg = ueCfg->ueDlCqiCfg;
8857 pCellInfo->acqiCb.aCqiCfg = ueCfg->ueDlCqiCfg.aprdCqiCfg;
/* RGR category enum is 1-based; internal ueCat index is 0-based */
8858 if(ueCfg->ueCatEnum > 0 )
8860 /*KWORK_FIX removed NULL chk for ueSchCmn*/
8861 ueSchCmn->cmn.ueCat = ueCfg->ueCatEnum - 1;
8865 ueSchCmn->cmn.ueCat = 0; /* Assuming enum values correctly set */
8867 cmInitTimers(&ueSchCmn->cmn.tmr, 1);
8869 /*2. Perform UEs downlink configuration */
8870 ueDl = &ueSchCmn->dl;
8871 /* RACHO : store the rapId assigned for HandOver UE.
8872 * Append UE to handover list of cmnCell */
8873 if (ueCfg->dedPreambleId.pres == PRSNT_NODEF)
8875 rgSCHCmnDelDedPreamble(cell, ueCfg->dedPreambleId.val);
8876 ueDl->rachInfo.hoRapId = ueCfg->dedPreambleId.val;
8877 cmLListAdd2Tail(&cellSchd->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
8878 ueDl->rachInfo.hoLnk.node = (PTR)ue;
8881 rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd);
/* Force transmit diversity where the TX mode cannot yet be fully used */
8883 if (ueCfg->txMode.pres == TRUE)
8885 if ((ueCfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8886 (ueCfg->txMode.txModeEnum == RGR_UE_TM_6))
8888 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8890 if (ueCfg->txMode.txModeEnum == RGR_UE_TM_3)
8892 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8895 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
8896 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
8899 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
/* CAT-6/7 UEs with 4 layers use the alternate (larger) max TB size entry */
8900 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
8901 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8904 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8908 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
8911 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
8913 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8914 rgSchTddDlNumHarqProcTbl[cell->ulDlCfgIdx]);
8916 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8917 RGSCH_NUM_DL_HQ_PROC);
8920 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
8922 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
8924 /* if none of the DL and UL AMBR are configured then fail the configuration
8926 if((ueCfg->ueQosCfg.dlAmbr == 0) && (ueCfg->ueQosCfg.ueBr == 0))
8928 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"UL Ambr and DL Ambr are"
8929 "configured as 0 for CRNTI:%d",ueCfg->crnti);
8930 err->errCause = RGSCHERR_SCH_CFG;
/* Scale configured AMBR to the scheduler's refresh-period budget */
8934 ue->dl.ambrCfgd = (ueCfg->ueQosCfg.dlAmbr * RG_SCH_CMN_REFRESH_TIME)/100;
8936 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
8937 allocInfo->rnti = ue->ueId;
8939 /* Initializing the lastCfi value to current cfi value */
8940 ueDl->lastCfi = cellSchd->dl.currCfi;
8942 if(cell->emtcEnable && ue->isEmtcUe)
8944 if ((cellSchd->apisEmtcDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8946 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8947 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8955 if ((cellSchd->apisDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8957 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8958 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8965 /* 3. Initialize ul part */
8966 ueUl = &ueSchCmn->ul;
8968 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
8969 cell->isCpUlExtend);
8971 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8972 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8974 ue->ul.cfgdAmbr = (ueCfg->ueQosCfg.ueBr * RG_SCH_CMN_REFRESH_TIME)/100;
8975 ue->ul.effAmbr = ue->ul.cfgdAmbr;
8976 RGSCHCPYTIMEINFO(cell->crntTime, ue->ul.ulTransTime);
8978 /* Allocate UL BSR allocation tracking List */
8979 cmLListInit(&ueUl->ulAllocLst);
8981 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8983 if((rgSCHUtlAllocSBuf(cell->instIdx,
8984 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord) != ROK))
8986 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation FAILED"
8987 "for CRNTI:%d",ueCfg->crnti);
8988 err->errCause = RGSCHERR_SCH_CFG;
8991 allRcd->allocTime = cell->crntTime;
8992 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8993 allRcd->lnk.node = (PTR)allRcd;
8995 /* Allocate common sch cntrl blocks for LCGs */
8996 for (cnt=0; cnt<RGSCH_MAX_LCG_PER_UE; cnt++)
8998 ret = rgSCHUtlAllocSBuf(cell->instIdx,
8999 (Data**)&(ue->ul.lcgArr[cnt].sch), (sizeof(RgSchCmnLcg)));
9002 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9003 "SCH struct alloc failed for CRNTI:%d",ueCfg->crnti);
9004 err->errCause = RGSCHERR_SCH_CFG;
9008 /* After initialising UL part, do power related init */
9009 ret = rgSCHPwrUeCfg(cell, ue, ueCfg);
9012 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
9013 "power config for UE CRNTI:%d",ueCfg->crnti);
9017 ret = rgSCHCmnSpsUeCfg(cell, ue, ueCfg, err);
9020 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
9021 "SPS config for CRNTI:%d",ueCfg->crnti);
9024 #endif /* LTEMAC_SPS */
9027 if(TRUE == ue->isEmtcUe)
9029 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
9031 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
9032 "for CRNTI:%d",ueCfg->crnti);
9039 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
9041 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
9042 "for CRNTI:%d",ueCfg->crnti);
9047 /* DLFS UE Config */
9048 if (cellSchd->dl.isDlFreqSel)
9050 if ((cellSchd->apisDlfs->rgSCHDlfsUeCfg(cell, ue, ueCfg, err)) != ROK)
9052 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "DLFS UE config FAILED"
9053 "for CRNTI:%d",ueCfg->crnti);
9058 /* Fix: syed align multiple UEs to refresh at same time */
9059 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
9060 /* Start UE Qos Refresh Timer */
9061 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
9063 rgSCHCmn5gtfUeCfg(cell, ue, ueCfg);
9067 } /* rgSCHCmnRgrUeCfg */
9070 * @brief UE TX mode reconfiguration handler.
9074 * Function : rgSCHCmnDlHdlTxModeRecfg
9076 * This functions updates UE specific scheduler
9077 * information upon UE reconfiguration.
9079 * @param[in] RgSchUeCb *ue
9080 * @param[in] RgrUeRecfg *ueRecfg
9085 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg
9089 RgrUeRecfg *ueRecfg,
9093 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, numTxPorts)
9096 RgrUeRecfg *ueRecfg;
/* NOTE(review): two alternate signatures follow; presumably selected by a
 * preprocessor conditional (e.g. TFU_UPGRADE) not visible here — confirm */
9101 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg
9108 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg)
9111 RgrUeRecfg *ueRecfg;
9115 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
9117 if (ueRecfg->txMode.pres != PRSNT_NODEF)
9121 /* ccpu00140894- Starting Timer for TxMode Transition Completion*/
9122 ue->txModeTransCmplt =FALSE;
9123 rgSCHTmrStartTmr (ue->cell, ue, RG_SCH_TMR_TXMODE_TRNSTN, RG_SCH_TXMODE_TRANS_TIMER);
9124 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_CMPLT)
9126 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell,
9127 RG_SCH_CMN_TD_TXMODE_RECFG);
9128 /* MS_WORKAROUND for ccpu00123186 MIMO Fix Start: need to set FORCE TD bitmap based on TX mode */
9129 ueDl->mimoInfo.ri = 1;
9130 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
9131 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
9133 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
9135 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
9137 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
9139 /* MIMO Fix End: need to set FORCE TD bitmap based on TX mode */
9142 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_START)
9144 /* start afresh forceTD masking */
9145 RG_SCH_CMN_INIT_FORCE_TD(ue, cell, 0);
9146 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_TXMODE_RECFG);
9147 /* Intialize MIMO related parameters of UE */
/* Recompute RI from the reconfigured CBSR bitmap for TM3/TM4; else RI=1 */
9150 if(ueRecfg->txMode.pres)
9152 if((ueRecfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
9153 (ueRecfg->txMode.txModeEnum ==RGR_UE_TM_4))
9155 if(ueRecfg->ueCodeBookRstRecfg.pres)
9158 rgSCHCmnComputeRank(ueRecfg->txMode.txModeEnum,
9159 ueRecfg->ueCodeBookRstRecfg.pmiBitMap, numTxPorts);
9163 ueDl->mimoInfo.ri = 1;
9168 ueDl->mimoInfo.ri = 1;
9173 ueDl->mimoInfo.ri = 1;
9176 ueDl->mimoInfo.ri = 1;
9177 #endif /* TFU_UPGRADE */
9178 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
9179 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
9181 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
9183 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
9185 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
9190 /***********************************************************
9192 * Func : rgSCHCmnUpdUeMimoInfo
9194 * Desc : Updates UL and DL Ue Information
9202 **********************************************************/
9204 PRIVATE Void rgSCHCmnUpdUeMimoInfo
9209 RgSchCmnCell *cellSchd
9212 PRIVATE Void rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd)
9216 RgSchCmnCell *cellSchd;
9220 if(ueCfg->txMode.pres)
9222 if((ueCfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
9223 (ueCfg->txMode.txModeEnum ==RGR_UE_TM_4))
9225 if(ueCfg->ueCodeBookRstCfg.pres)
9228 rgSCHCmnComputeRank(ueCfg->txMode.txModeEnum,
9229 ueCfg->ueCodeBookRstCfg.pmiBitMap, cell->numTxAntPorts);
9233 ueDl->mimoInfo.ri = 1;
9238 ueDl->mimoInfo.ri = 1;
9243 ueDl->mimoInfo.ri = 1;
9247 ueDl->mimoInfo.ri = 1;
9248 #endif /*TFU_UPGRADE */
9249 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
9250 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
9254 /***********************************************************
9256 * Func : rgSCHCmnUpdUeUlCqiInfo
9258 * Desc : Updates UL and DL Ue Information
9266 **********************************************************/
9268 PRIVATE Void rgSCHCmnUpdUeUlCqiInfo
9273 RgSchCmnUe *ueSchCmn,
9274 RgSchCmnCell *cellSchd,
9278 PRIVATE Void rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd, isEcp)
9282 RgSchCmnUe *ueSchCmn;
9283 RgSchCmnCell *cellSchd;
9290 if(ue->srsCb.srsCfg.type == RGR_SCH_SRS_SETUP)
9292 if(ue->ul.ulTxAntSel.pres)
9294 ueUl->crntUlCqi[ue->srsCb.selectedAnt] = cellSchd->ul.dfltUlCqi;
9295 ueUl->validUlCqi = ueUl->crntUlCqi[ue->srsCb.selectedAnt];
9299 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
9300 ueUl->validUlCqi = ueUl->crntUlCqi[0];
9302 ue->validTxAnt = ue->srsCb.selectedAnt;
9306 ueUl->validUlCqi = cellSchd->ul.dfltUlCqi;
9310 ueUl->ulLaCb.cqiBasediTbs = rgSchCmnUlCqiToTbsTbl[isEcp]
9311 [ueUl->validUlCqi] * 100;
9312 ueUl->ulLaCb.deltaiTbs = 0;
9316 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
9317 #endif /*TFU_UPGRADE */
9318 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
9319 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
9321 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
9325 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
9330 /***********************************************************
9332 * Func : rgSCHCmnUpdUeCatCfg
9334 * Desc : Updates UL and DL Ue Information
9342 **********************************************************/
9344 PRIVATE Void rgSCHCmnUpdUeCatCfg
9350 PRIVATE Void rgSCHCmnUpdUeCatCfg(ue, cell)
9355 RgSchDlHqEnt *hqE = NULLP;
9356 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
9357 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
9358 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
9359 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
9362 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
9364 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
9367 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
9368 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
9369 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
9370 && (RG_SCH_MAX_TX_LYRS_4 == ri))
9372 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
9376 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
9379 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
9381 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
9383 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
9387 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
9389 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
9390 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
9395 * @brief UE reconfiguration for scheduler.
9399 * Function : rgSCHCmnRgrUeRecfg
9401 * This functions updates UE specific scheduler
9402 * information upon UE reconfiguration.
9404 * @param[in] RgSchCellCb *cell
9405 * @param[in] RgSchUeCb *ue
9406 * @param[in] RgrUeRecfg *ueRecfg
9407 * @param[out] RgSchErrInfo *err
/* NOTE(review): return statements are elided from this extract; presumably
 * returns ROK on success and RFAILED with err->errCause set on failure --
 * TODO confirm against the full source. */
9413 S16 rgSCHCmnRgrUeRecfg
9417 RgrUeRecfg *ueRecfg,
/* K&R-style alternate definition kept for pre-ANSI build configuration */
9421 S16 rgSCHCmnRgrUeRecfg(cell, ue, ueRecfg, err)
9424 RgrUeRecfg *ueRecfg;
9428 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
9431 /* Basic validations */
/* Tx mode reconfig: the TFU_UPGRADE variant also passes numTxAntPorts */
9432 if (ueRecfg->ueRecfgTypes & RGR_UE_TXMODE_RECFG)
9435 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, cell->numTxAntPorts);
9437 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg);
9438 #endif /* TFU_UPGRADE */
/* CSG (closed subscriber group) membership status update */
9440 if(ueRecfg->ueRecfgTypes & RGR_UE_CSG_PARAM_RECFG)
9442 ue->csgMmbrSta = ueRecfg->csgMmbrSta;
9444 /* Changes for UE Category reconfiguration feature */
9445 if(ueRecfg->ueRecfgTypes & RGR_UE_UECAT_RECFG)
9447 rgSCHCmnUpdUeCatCfg(ue, cell);
/* Aperiodic CQI reconfig is stored on the primary cell's info block */
9449 if (ueRecfg->ueRecfgTypes & RGR_UE_APRD_DLCQI_RECFG)
9451 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
9452 pCellInfo->acqiCb.aCqiCfg = ueRecfg->aprdDlCqiRecfg;
/* Periodic CQI reconfig: only RGR_PRD_CQI_MOD10/MOD20 reporting modes are
 * accepted; anything else fails the reconfiguration */
9455 if (ueRecfg->ueRecfgTypes & RGR_UE_PRD_DLCQI_RECFG)
9457 if ((ueRecfg->prdDlCqiRecfg.pres == TRUE)
9458 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD10)
9459 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD20))
9461 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unsupported periodic CQI "
9462 "reporting mode %d for old CRNIT:%d",
9463 (int)ueRecfg->prdDlCqiRecfg.prdModeEnum,ueRecfg->oldCrnti);
9464 err->errCause = RGSCHERR_SCH_CFG;
9467 ue->dl.ueDlCqiCfg.prdCqiCfg = ueRecfg->prdDlCqiRecfg;
/* UL power control reconfiguration */
9471 if (ueRecfg->ueRecfgTypes & RGR_UE_ULPWR_RECFG)
9473 if (rgSCHPwrUeRecfg(cell, ue, ueRecfg) != ROK)
9475 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9476 "Power Reconfiguration Failed for OLD CRNTI:%d",ueRecfg->oldCrnti);
/* QoS (AMBR) reconfiguration: both AMBRs zero is an invalid config */
9481 if (ueRecfg->ueRecfgTypes & RGR_UE_QOS_RECFG)
9483 /* Uplink Sched related Initialization */
9484 if ((ueRecfg->ueQosRecfg.dlAmbr == 0) && (ueRecfg->ueQosRecfg.ueBr == 0))
9486 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Ul Ambr and DL Ambr "
9487 "configured as 0 for OLD CRNTI:%d",ueRecfg->oldCrnti);
9488 err->errCause = RGSCHERR_SCH_CFG;
/* AMBRs are prorated over the scheduler refresh period (per-cent units) */
9491 ue->ul.cfgdAmbr = (ueRecfg->ueQosRecfg.ueBr * \
9492 RG_SCH_CMN_REFRESH_TIME)/100;
9493 /* Downlink Sched related Initialization */
9494 ue->dl.ambrCfgd = (ueRecfg->ueQosRecfg.dlAmbr * \
9495 RG_SCH_CMN_REFRESH_TIME)/100;
9496 /* Fix: syed Update the effAmbr and effUeBR fields w.r.t the
9497 * new QOS configuration */
9498 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
9499 /* Fix: syed align multiple UEs to refresh at same time */
9500 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
9501 rgSCHCmnApplyUeRefresh(cell, ue);
9502 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
/* Dispatch the spec-scheduler UL/DL reconfig: EMTC APIs for eMTC UEs,
 * legacy APIs otherwise */
9505 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
9507 if ((cellSchCmn->apisEmtcUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9509 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9510 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9513 if ((cellSchCmn->apisEmtcDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9515 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9516 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9523 if ((cellSchCmn->apisUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9525 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9526 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9529 if ((cellSchCmn->apisDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9531 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9532 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9536 /* DLFS UE Config */
9537 if (cellSchCmn->dl.isDlFreqSel)
9539 if ((cellSchCmn->apisDlfs->rgSCHDlfsUeRecfg(cell, ue, \
9540 ueRecfg, err)) != ROK)
9542 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9543 "DLFS UE re-config FAILED for CRNTI:%d",ue->ueId);
9549 /* Invoke re-configuration on SPS module */
9550 if (rgSCHCmnSpsUeRecfg(cell, ue, ueRecfg, err) != ROK)
9552 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9553 "DL SPS ReCFG FAILED for UE CRNTI:%d", ue->ueId);
9559 } /* rgSCHCmnRgrUeRecfg*/
9561 /***********************************************************
9563 * Func : rgSCHCmnUlUeDelAllocs
9565 * Desc : Deletion of all UE allocations.
/* Frees the UL allocation of every UL HARQ process owned by the UE,
 * clears any stale SPS pointers to those allocations, and unlinks the
 * process from the adaptive-retransmission list. */
9573 **********************************************************/
9575 PRIVATE Void rgSCHCmnUlUeDelAllocs
9581 PRIVATE Void rgSCHCmnUlUeDelAllocs(cell, ue)
9586 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
9587 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
9590 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Walk all UL HARQ processes of this UE */
9593 for (i = 0; i < ueUl->hqEnt.numHqPrcs; ++i)
9595 RgSchUlHqProcCb *proc = rgSCHUhmGetUlHqProc(cell, ue, i);
9598 /* proc can't be NULL here */
9606 /* Added Insure Fixes Of reading Dangling memory.NULLed crntAlloc */
/* If UL SPS still references this allocation, NULL the references so
 * they are not read after the allocation is freed below */
9608 if(proc->alloc == ulSpsUe->ulSpsSchdInfo.crntAlloc)
9610 ulSpsUe->ulSpsSchdInfo.crntAlloc = NULLP;
9611 ulSpsUe->ulSpsSchdInfo.crntAllocSf = NULLP;
/* Two build variants of the free call; the second form's trailing
 * arguments are elided in this extract */
9615 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
9616 proc->alloc,ue->isEmtcUe);
9618 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
9621 /* PHY probably needn't be intimated since
9622 * whatever intimation it needs happens at the last minute
9625 /* Fix: syed Adaptive Msg3 Retx crash. Remove the harqProc
9626 * from adaptive retx List. */
9627 if (proc->reTxLnk.node)
9630 //TODO_SID: Need to take care
9631 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
9632 proc->reTxLnk.node = (PTR)NULLP;
9640 /***********************************************************
9642 * Func : rgSCHCmnDelUeFrmRefreshQ
9644 * Desc : Removes a UE from the refresh queue, so that the UE is
9645 * no longer periodically triggered to refresh it's GBR and
/* (Original header said "Adds a UE"; the body and name show removal.) */
9654 **********************************************************/
9656 PRIVATE Void rgSCHCmnDelUeFrmRefreshQ
9662 PRIVATE Void rgSCHCmnDelUeFrmRefreshQ(cell, ue)
9667 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
9669 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
9672 #ifdef RGL_SPECIFIC_CHANGES
/* Keep per-offset refresh group counters consistent on removal */
9673 if(ue->refreshOffset < RGSCH_MAX_REFRESH_GRPSZ)
9675 if(cell->refreshUeCnt[ue->refreshOffset])
9677 cell->refreshUeCnt[ue->refreshOffset]--;
/* Build the timer-queue argument; presumably passed to a timer
 * deregistration call that is elided from this extract -- TODO confirm */
9683 memset(&arg, 0, sizeof(arg));
9684 arg.tqCp = &sched->tmrTqCp;
9685 arg.tq = sched->tmrTq;
9686 arg.timers = &ueSchd->tmr;
9690 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
9696 /***********************************************************
9698 * Func : rgSCHCmnUeCcchSduDel
9700 * Desc : Clear CCCH SDU scheduling context.
/* Handles three mutually-exclusive states of the CCCH SDU:
 *  1) still queued in the cell Tx list -> unlink;
 *  2) HARQ process allocated -> release stale PDCCH, then either pull it
 *     from the retx list or from the scheduled subframe and release TB0. */
9708 **********************************************************/
9710 PRIVATE Void rgSCHCmnUeCcchSduDel
9716 PRIVATE Void rgSCHCmnUeCcchSduDel(cell, ueCb)
9721 RgSchDlHqEnt *hqE = NULLP;
9722 RgSchDlHqProcCb *ccchSduHqP = NULLP;
9723 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
9726 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
9731 ccchSduHqP = hqE->ccchSduProc;
9732 if(ueCb->ccchSduLnk.node != NULLP)
9734 /* Remove the ccchSduProc if it is in the Tx list */
9735 cmLListDelFrm(&(cell->ccchSduUeLst), &(ueCb->ccchSduLnk));
9736 ueCb->ccchSduLnk.node = NULLP;
9738 else if(ccchSduHqP != NULLP)
9740 /* Fix for crash due to stale pdcch. Release ccch pdcch*/
9741 if(ccchSduHqP->pdcch)
9743 cmLListDelFrm(&ccchSduHqP->subFrm->pdcchInfo.pdcchs,
9744 &ccchSduHqP->pdcch->lnk);
/* Return the PDCCH to the cell's free pool */
9745 cmLListAdd2Tail(&cell->pdcchLst, &ccchSduHqP->pdcch->lnk);
9746 ccchSduHqP->pdcch = NULLP;
9748 if(ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node != NULLP)
9750 /* Remove the ccchSduProc if it is in the retx list */
9751 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
9752 &ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk);
9753 /* ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node = NULLP; */
9754 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
9756 else if ((ccchSduHqP->subFrm != NULLP) &&
9757 (ccchSduHqP->hqPSfLnk.node != NULLP))
/* Scheduled in a subframe: remove from Tx and release TB0 */
9759 rgSCHUtlDlHqPTbRmvFrmTx(ccchSduHqP->subFrm,
9760 ccchSduHqP, 0, FALSE);
9761 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
9771 * @brief UE deletion for scheduler.
9775 * Function : rgSCHCmnUeDel
9777 * This functions deletes all scheduler information
9778 * pertaining to an UE.
/* Teardown order (as visible here): CCCH SDU context, refresh queue,
 * UL HARQ allocations, RACH info, spec-scheduler UL context, SCells,
 * spec-scheduler DL context, power, SPS, DL subframe HARQ, DLFS, UL
 * allocation records, LCG blocks, and finally the common UE block. */
9780 * @param[in] RgSchCellCb *cell
9781 * @param[in] RgSchUeCb *ue
9791 Void rgSCHCmnUeDel(cell, ue)
9796 RgSchDlHqEnt *hqE = NULLP;
9797 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
9799 RgSchCmnAllocRecord *allRcd;
9801 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
/* Nothing to clean up if common scheduler config never completed */
9804 if (RG_SCH_CMN_GET_UE(ue,cell) == NULLP)
9806 /* Common scheduler config has not happened yet */
9809 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
9812 /* UE Free can be triggered before MSG4 done when dlHqE is not updated */
9816 rgSCHEmtcCmnUeCcchSduDel(cell, ue);
9821 rgSCHCmnUeCcchSduDel(cell, ue);
9824 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
9826 rgSCHCmnUlUeDelAllocs(cell, ue);
9828 rgSCHCmnDelRachInfo(cell, ue);
/* UL spec-scheduler context: eMTC vs legacy dispatch */
9831 if(TRUE == ue->isEmtcUe)
9833 cellSchCmn->apisEmtcUl->rgSCHFreeUlUe(cell, ue);
9838 cellSchCmn->apisUl->rgSCHFreeUlUe(cell, ue);
/* Secondary cells: index 0 is the primary cell, hence start at 1 */
9843 for(idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
9845 if(ue->cellInfo[idx] != NULLP)
9847 rgSCHSCellDelUeSCell(cell,ue,idx);
9854 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
9856 cellSchCmn->apisEmtcDl->rgSCHFreeDlUe(cell, ue);
9861 cellSchCmn->apisDl->rgSCHFreeDlUe(cell, ue);
9863 rgSCHPwrUeDel(cell, ue);
9866 rgSCHCmnSpsUeDel(cell, ue);
9867 #endif /* LTEMAC_SPS*/
9870 rgSchCmnDlSfHqDel(ue, cell);
9872 /* DLFS UE delete */
9873 if (cellSchCmn->dl.isDlFreqSel)
9875 cellSchCmn->apisDlfs->rgSCHDlfsUeDel(cell, ue);
/* Drain and free all UL allocation records of the UE */
9877 node = ueUl->ulAllocLst.first;
9879 /* ccpu00117052 - MOD - Passing double pointer in all the places of
9880 rgSCHUtlFreeSBuf function call for proper NULLP assignment*/
9883 allRcd = (RgSchCmnAllocRecord *)node->node;
9885 cmLListDelFrm(&ueUl->ulAllocLst, &allRcd->lnk);
9886 rgSCHUtlFreeSBuf(cell->instIdx,
9887 (Data**)(&allRcd), (sizeof(RgSchCmnAllocRecord)));
/* Free the per-LCG scheduler blocks */
9890 for(cnt = 0; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
9892 if (ue->ul.lcgArr[cnt].sch != NULLP)
9894 rgSCHUtlFreeSBuf(cell->instIdx,
9895 (Data**)(&(ue->ul.lcgArr[cnt].sch)), (sizeof(RgSchCmnLcg)));
9899 /* Fix : syed Moved hqEnt deinit to rgSCHCmnDlDeInitHqEnt */
/* Map cellId to the UE's cell index, then free the common UE block */
9900 idx = (uint8_t)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId) & (CM_LTE_MAX_CELLS - 1));
9901 rgSCHUtlFreeSBuf(cell->instIdx,
9902 (Data**)(&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch))), (sizeof(RgSchCmnUe)));
9904 } /* rgSCHCmnUeDel */
9908 * @brief This function handles the common code rate configurations
9909 * done as part of RgrCellCfg/RgrCellRecfg.
9913 * Function: rgSCHCmnDlCnsdrCmnRt
9914 * Purpose: This function handles the common code rate configurations
9915 * done as part of RgrCellCfg/RgrCellRecfg.
/* Derives, from the configured (or default) BCCH/PCCH/RA code rate:
 * bits-per-RB, the ITbs for 2-RB and 3-RB common-channel grants, and
 * from the PDCCH code rate the common-channel aggregation level. */
9917 * Invoked by: Scheduler
9919 * @param[in] RgSchCellCb *cell
9920 * @param[in] RgrDlCmnCodeRateCfg *dlCmnCodeRate
9925 PRIVATE S16 rgSCHCmnDlCnsdrCmnRt
9928 RgrDlCmnCodeRateCfg *dlCmnCodeRate
9931 PRIVATE S16 rgSCHCmnDlCnsdrCmnRt(cell, dlCmnCodeRate)
9933 RgrDlCmnCodeRateCfg *dlCmnCodeRate;
9936 RgSchCmnCell *cellDl = RG_SCH_CMN_GET_CELL(cell);
9938 uint32_t bitsPer2Rb;
9939 uint32_t bitsPer3Rb;
9944 /* code rate is bits per 1024 phy bits, since modl'n scheme is 2. it is
9945 * bits per 1024/2 REs */
/* noResPerRb[3] is REs per RB at CFI 3 (worst-case control region) */
9946 if (dlCmnCodeRate->bcchPchRaCodeRate != 0)
9948 bitsPerRb = ((dlCmnCodeRate->bcchPchRaCodeRate * 2) *
9949 cellDl->dl.noResPerRb[3])/1024;
9953 bitsPerRb = ((RG_SCH_CMN_DEF_BCCHPCCH_CODERATE * 2) *
9954 cellDl->dl.noResPerRb[3])/1024;
9956 /* Store bitsPerRb in cellDl->dl to use later to determine
9957 * Number of RBs for UEs with SI-RNTI, P-RNTI and RA-RNTI */
9958 cellDl->dl.bitsPerRb = bitsPerRb;
9959 /* ccpu00115595 end*/
9960 /* calculate the ITbs for 2 RBs. Initialize ITbs to MAX value */
/* Find the largest ITbs (< 9) whose TB size fits in bitsPer2Rb */
9963 bitsPer2Rb = bitsPerRb * rbNum;
9964 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer2Rb))
9967 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs2Rbs = 0) :
9968 (cellDl->dl.cmnChITbs.iTbs2Rbs = i-1);
9970 /* calculate the ITbs for 3 RBs. Initialize ITbs to MAX value */
9973 bitsPer3Rb = bitsPerRb * rbNum;
9974 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer3Rb))
9977 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs3Rbs = 0) :
9978 (cellDl->dl.cmnChITbs.iTbs3Rbs = i-1);
/* Count the DCI payload bits used for common-channel PDCCH sizing;
 * some intervening fields are elided from this extract */
9981 pdcchBits = 1 + /* Flag for format0/format1a differentiation */
9982 1 + /* Localized/distributed VRB assignment flag */
9985 3 + /* Harq process Id */
9987 4 + /* Harq process Id */
9988 2 + /* UL Index or DAI */
9990 1 + /* New Data Indicator */
9993 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
9994 (cell->bwCfg.dlTotalBw + 1))/2);
9995 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
9996 Since VRB is local */
9997 /* For TDD consider DAI */
9999 /* Convert the pdcchBits to actual pdcchBits required for transmission */
10000 if (dlCmnCodeRate->pdcchCodeRate != 0)
10002 pdcchBits = (pdcchBits * 1024)/dlCmnCodeRate->pdcchCodeRate;
10003 if (pdcchBits <= 288) /* 288 : Num of pdcch bits for aggrLvl=4 */
10005 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
10007 else /* 576 : Num of pdcch bits for aggrLvl=8 */
10009 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL8;
/* No PDCCH code rate configured: default to aggregation level 4 */
10014 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
10016 if (dlCmnCodeRate->ccchCqi == 0)
10022 cellDl->dl.ccchCqi = dlCmnCodeRate->ccchCqi;
10029 * @brief This function handles the configuration of cell for the first
10030 * time by the scheduler.
10034 * Function: rgSCHCmnDlRgrCellCfg
10035 * Purpose: Configuration received is stored into the data structures
10036 * Also, update the scheduler with the number of frames of
10037 * RACH preamble transmission.
/* TDD variant (the FDD variant follows under #else LTE_TDD). Sets up
 * special-subframe handling (DwPTS/UpPTS), per-subframe CCE counts,
 * RACH response scheduling, HARQ feedback tables, CQI->TBS/eff tables,
 * CFI state, per-CFI REs-per-RB, and DLFS/power configuration. */
10039 * Invoked by: BO and Scheduler
10041 * @param[in] RgSchCellCb* cell
10042 * @param[in] RgrCellCfg* cfg
10047 PRIVATE S16 rgSCHCmnDlRgrCellCfg
10054 PRIVATE S16 rgSCHCmnDlRgrCellCfg(cell, cfg, err)
10060 RgSchCmnCell *cellSch;
10063 uint8_t numPdcchSym;
10064 uint8_t noSymPerSlot;
10065 uint8_t maxDlSubfrms = cell->numDlSubfrms;
10066 uint8_t splSubfrmIdx = cfg->spclSfCfgIdx;
10067 uint8_t swPtCnt = 0;
10069 RgSchTddSubfrmInfo subfrmInfo = rgSchTddMaxUlSubfrmTbl[cell->ulDlCfgIdx];
10072 uint8_t antPortIdx;
10082 cellSch = RG_SCH_CMN_GET_CELL(cell);
10083 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->\
10084 rachCfg.preambleFormat];
10085 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
10086 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
10088 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
10089 3 TTI (MAX L1+L2 processing delay at the UE) */
10090 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
10091 rgSchCmnHarqRtt[cell->ulDlCfgIdx] + 3;
10092 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10093 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
/* 0 means "not configured": fall back to the compile-time default */
10094 if (cfg->maxUePerDlSf == 0)
10096 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10098 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
10104 if (cell->bwCfg.dlTotalBw <= 10)
10114 /* DwPTS Scheduling Changes Start */
10115 cellSch->dl.splSfCfg = splSubfrmIdx;
/* Whether DL data may be scheduled in DwPTS depends on the special
 * subframe config index and the CP length (36.213 Section 7.1.7) */
10117 if (cfg->isCpDlExtend == TRUE)
10119 if((0 == splSubfrmIdx) || (4 == splSubfrmIdx) ||
10120 (7 == splSubfrmIdx) || (8 == splSubfrmIdx)
10123 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
10127 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
10132 /* Refer to 36.213 Section 7.1.7 */
10133 if((0 == splSubfrmIdx) || (5 == splSubfrmIdx))
10135 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
10139 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
10142 /* DwPTS Scheduling Changes End */
10144 splSfCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
10145 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
/* Classify each DL subframe (special vs normal) and compute its CCE count */
10147 for (sfCount = 0; sfCount < maxDlSubfrms; sfCount++)
10149 sf = cell->subFrms[sfCount];
10150 /* Sfcount matches the first special subframe occurs at Index 0
10151 * or subsequent special subframes */
10152 if(subfrmInfo.switchPoints == 1)
10154 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
10155 RG_SCH_CMN_10_MS_PRD, &subfrmInfo);
10159 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
10160 RG_SCH_CMN_5_MS_PRD, &subfrmInfo);
10162 if(isSplfrm == TRUE)
10165 /* DwPTS Scheduling Changes Start */
10166 if (cell->splSubfrmCfg.isDlDataAllowed == TRUE)
10168 sf->sfType = RG_SCH_SPL_SF_DATA;
10172 sf->sfType = RG_SCH_SPL_SF_NO_DATA;
10174 /* DwPTS Scheduling Changes End */
10178 /* DwPTS Scheduling Changes Start */
10179 if (sf->sfNum != 0)
10181 sf->sfType = RG_SCH_DL_SF;
10185 sf->sfType = RG_SCH_DL_SF_0;
10187 /* DwPTS Scheduling Changes End */
10190 /* Calculate the number of CCEs per subframe in the cell */
10191 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][sf->sfNum];
10192 if(cell->dynCfiCb.isDynCfiEnb == TRUE)
10194 /* In case if Dynamic CFI feature is enabled, default CFI
10195 * value 1 is used */
10196 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][1];
10200 if (sf->sfType == RG_SCH_SPL_SF_DATA)
10202 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
10206 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi)];
10211 /* Intialize the RACH response scheduling related infromation */
10212 if(rgSCHCmnDlRachInfoInit(cell) != ROK)
10217 /* Allocate PRACH preamble list */
10218 rgSCHCmnDlCreateRachPrmLst(cell);
10220 /* Initialize PHICH offset information */
10221 rgSCHCmnDlPhichOffsetInit(cell);
10223 /* Update the size of HARQ ACK/NACK feedback table */
10224 /* The array size is increased by 2 to have enough free indices, where other
10225 * indices are busy waiting for HARQ feedback */
10226 cell->ackNackFdbkArrSize = rgSchTddANFdbkMapTbl[cell->ulDlCfgIdx] + 2;
10228 /* Initialize expected HARQ ACK/NACK feedback time */
10229 rgSCHCmnDlANFdbkInit(cell);
10231 /* Initialize UL association set index */
10232 if(cell->ulDlCfgIdx != 0)
10234 rgSCHCmnDlKdashUlAscInit(cell);
/* DwPTS/UpPTS symbol counts from 36.211 special-subframe tables, chosen
 * by DL and UL CP lengths */
10237 if (cfg->isCpDlExtend == TRUE)
10239 cp = RG_SCH_CMN_EXT_CP;
10241 cell->splSubfrmCfg.dwPts =
10242 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlDwPts;
10244 if ( cell->splSubfrmCfg.dwPts == 0 )
10246 cell->isDwPtsCnted = FALSE;
10250 cell->isDwPtsCnted = TRUE;
10253 if(cfg->isCpUlExtend == TRUE)
10255 cell->splSubfrmCfg.upPts =
10256 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlExtUpPts;
10260 cell->splSubfrmCfg.upPts =
10261 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlNorUpPts;
10266 cp = RG_SCH_CMN_NOR_CP;
10268 cell->splSubfrmCfg.dwPts =
10269 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlDwPts;
10270 cell->isDwPtsCnted = TRUE;
10272 if(cfg->isCpUlExtend == TRUE)
10274 cell->splSubfrmCfg.upPts =
10275 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlExtUpPts;
10279 cell->splSubfrmCfg.upPts =
10280 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlNorUpPts;
10284 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
10285 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++,cfiIdx++)
10287 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
10288 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
10289 [cell->numTxAntPorts]][cfiIdx];
10290 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
10291 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
10292 [cell->numTxAntPorts]][cfiIdx];
10295 /* Initializing the values of CFI parameters */
10296 if(cell->dynCfiCb.isDynCfiEnb)
10298 /* If DCFI is enabled, current CFI value will start from 1 */
10299 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
10303 /* If DCFI is disabled, current CFI value is set as default max allowed CFI value */
10304 cellSch->dl.currCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
10305 cellSch->dl.newCfi = cellSch->dl.currCfi;
10308 /* Include CRS REs while calculating Efficiency
10309 * The number of Resource Elements occupied by CRS depends on Number of
10310 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
10311 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
10312 * details of the same. Please note that PDCCH overlap symbols would not
10313 * considered in CRS REs deduction */
10314 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
10316 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
10317 - numPdcchSym) *RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
10320 /* DwPTS Scheduling Changes Start */
/* antPortIdx: 0 -> 1 port, 1 -> 2 ports, 2 -> 4 ports */
10321 antPortIdx = (cell->numTxAntPorts == 1)? 0:
10322 ((cell->numTxAntPorts == 2)? 1: 2);
10324 if (cp == RG_SCH_CMN_NOR_CP)
10326 splSfIdx = (splSubfrmIdx == 4)? 1: 0;
10330 splSfIdx = (splSubfrmIdx == 3)? 1: 0;
10333 numCrs = rgSchCmnDwptsCrs[splSfIdx][antPortIdx];
/* REs available per RB in DwPTS, per CFI */
10335 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI-1; cfi++)
10337 /* If CFI is 2 and Ant Port is 4, don't consider the sym 1 CRS REs */
10338 if (antPortIdx == 2 && cfi == 2)
10342 cellSch->dl.numReDwPts[cfi] = ((cell->splSubfrmCfg.dwPts - cfi)*
10343 RB_SCH_CMN_NUM_SCS_PER_RB) - numCrs;
10345 /* DwPTS Scheduling Changes End */
10347 if (cfg->maxDlBwPerUe == 0)
10349 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
10353 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
10355 if (cfg->maxDlRetxBw == 0)
10357 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
10361 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
10363 /* Fix: MUE_PERTTI_DL*/
10364 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10365 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10366 if (cfg->maxUePerDlSf == 0)
10368 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10370 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
10371 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
10372 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
10374 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10375 "Invalid configuration !: "
10376 "maxCcchPerDlSf %u > maxUePerDlSf %u",
10377 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
10381 else if (!cfg->maxCcchPerDlSf)
10383 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
10384 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
10385 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
10386 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
10387 * FLE crash in PHY as PHY has limit of 16 max*/
10388 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
10392 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
10394 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
10399 /*ccpu00118273 - ADD - start */
10400 cmLListInit(&cellSch->dl.msg4RetxLst);
10402 cmLListInit(&cellSch->dl.ccchSduRetxLst);
10405 #ifdef RG_PHASE2_SCHED
10406 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
10408 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
10410 if (cfg->dlfsCfg.isDlFreqSel)
10412 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
10418 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
10421 /* Power related configuration */
10422 ret = rgSCHPwrCellCfg(cell, cfg);
10428 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
10429 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
10430 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
10431 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
10432 cellSch->dl.msg4pAVal = cfg->msg4pAVal;
10435 #else /* LTE_TDD */
10437 * @brief This function handles the configuration of cell for the first
10438 * time by the scheduler.
10442 * Function: rgSCHCmnDlRgrCellCfg
10443 * Purpose: Configuration received is stored into the data structures
10444 * Also, update the scheduler with the number of frames of
10445 * RACH preamble transmission.
/* FDD variant of rgSCHCmnDlRgrCellCfg (compiled when LTE_TDD is not
 * defined). Mirrors the TDD variant minus special-subframe/DwPTS logic. */
10447 * Invoked by: BO and Scheduler
10449 * @param[in] RgSchCellCb* cell
10450 * @param[in] RgrCellCfg* cfg
10451 * @param[in] RgSchErrInfo* err
10456 PRIVATE S16 rgSCHCmnDlRgrCellCfg
10463 PRIVATE S16 rgSCHCmnDlRgrCellCfg(cell, cfg, err)
10470 RgSchCmnCell *cellSch;
10472 uint8_t numPdcchSym;
10473 uint8_t noSymPerSlot;
10478 cellSch = RG_SCH_CMN_GET_CELL(cell);
10480 /* Initialize the parameters with the ones received in the */
10481 /* configuration. */
10483 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA
10484 * sub-frames from preamble format */
10485 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->rachCfg.preambleFormat];
10487 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
10488 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
10490 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
10491 3 TTI (MAX L1+L2 processing delay at the UE) */
/* Index 7 of rgSchCmnHarqRtt is the FDD HARQ RTT entry */
10492 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
10493 rgSchCmnHarqRtt[7] + 3;
10495 if (cell->bwCfg.dlTotalBw <= 10)
10506 if (cell->isCpDlExtend == TRUE)
10508 cp = RG_SCH_CMN_EXT_CP;
10513 cp = RG_SCH_CMN_NOR_CP;
10517 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
/* The emtcCqiToTbsTbl assignments are under an EMTC build flag whose
 * #ifdef lines are elided from this extract */
10518 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, cfiIdx++)
10520 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
10522 cellSch->dl.emtcCqiToTbsTbl[0][cfi] = rgSchEmtcCmnCqiToTbs[0][cp][cfiIdx];
10524 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
10525 [cell->numTxAntPorts]][cfiIdx];
10526 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
10528 cellSch->dl.emtcCqiToTbsTbl[1][cfi] = rgSchEmtcCmnCqiToTbs[1][cp][cfiIdx];
10530 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
10531 [cell->numTxAntPorts]][cfiIdx];
10534 /* Initializing the values of CFI parameters */
10535 if(cell->dynCfiCb.isDynCfiEnb)
10537 /* If DCFI is enabled, current CFI value will start from 1 */
10538 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
10542 /* If DCFI is disabled, current CFI value is set as default CFI value */
10543 cellSch->dl.currCfi = cellSch->cfiCfg.cfi;
10544 cellSch->dl.newCfi = cellSch->dl.currCfi;
10547 /* Include CRS REs while calculating Efficiency
10548 * The number of Resource Elements occupied by CRS depends on Number of
10549 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
10550 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
10551 * details of the same. Please note that PDCCH overlap symbols would not
10552 * considered in CRS REs deduction */
10553 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
10555 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
10556 - numPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
/* 0 means "not configured": fall back to compile-time defaults */
10559 if (cfg->maxDlBwPerUe == 0)
10561 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
10565 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
10567 if (cfg->maxDlRetxBw == 0)
10569 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
10573 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
10576 /* Fix: MUE_PERTTI_DL*/
10577 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10578 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10579 if (cfg->maxUePerDlSf == 0)
10581 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10583 /* Fix: MUE_PERTTI_DL syed validating Cell Configuration */
10584 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
10586 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10587 "FAILED MaxUePerDlSf(%u) < MaxDlUeNewTxPerTti(%u)",
10588 cellSch->dl.maxUePerDlSf,
10589 cellSch->dl.maxUeNewTxPerTti);
10592 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
10593 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
10595 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid configuration !: "
10596 "maxCcchPerDlSf %u > maxUePerDlSf %u",
10597 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
10601 else if (!cfg->maxCcchPerDlSf)
10603 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
10604 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
10605 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
10606 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
10607 * FLE crash in PHY as PHY has limit of 16 max*/
10608 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
10612 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
10616 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
10620 cmLListInit(&cellSch->dl.msg4RetxLst);
10622 cmLListInit(&cellSch->dl.ccchSduRetxLst);
10625 #ifdef RG_PHASE2_SCHED
10626 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
10628 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
10630 if (cfg->dlfsCfg.isDlFreqSel)
10632 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
10638 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
10641 /* Power related configuration */
10642 ret = rgSCHPwrCellCfg(cell, cfg);
10648 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
10649 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
10650 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
10651 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
10652 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
10655 #endif /* LTE_TDD */
10657 /***********************************************************
10659 * Func : rgSCHCmnUlCalcReqRbCeil
10661 * Desc : Calculate RB required to satisfy 'bytes' for
10663 * Returns number of RBs such that requirement
10664 * is necessarily satisfied (does a 'ceiling'
10667 * Ret : Required RBs (uint8_t)
/* Converts 'bytes' to bits, scales by 1024/eff (spectral efficiency for
 * the given CQI, in units of bits per 1024 REs) to get REs, then divides
 * by REs-per-RB for this UL cell -- both divisions rounded up. */
10673 **********************************************************/
10675 uint8_t rgSCHCmnUlCalcReqRbCeil
10679 RgSchCmnUlCell *cellUl
10682 uint8_t rgSCHCmnUlCalcReqRbCeil(bytes, cqi, cellUl)
10685 RgSchCmnUlCell *cellUl;
10688 uint32_t numRe = RGSCH_CEIL((bytes * 8) * 1024, rgSchCmnUlCqiTbl[cqi].eff);
10689 return ((uint8_t)RGSCH_CEIL(numRe, RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl)));
10692 /***********************************************************
10694 * Func : rgSCHCmnPrecompMsg3Vars
10696 * Desc : Precomputes the following for msg3 allocation:
10697 * 1. numSb and Imcs for msg size A
10698 * 2. numSb and Imcs otherwise
10702 * Notes: The corresponding vars in cellUl struct is filled
/* Results land in cellUl->ra: prmblANumSb/prmblAIMcs for preamble group A
 * (minimum Msg3 grant) and prmblBNumSb/prmblBIMcs for group B (msgSzA). */
10707 **********************************************************/
10709 PRIVATE S16 rgSCHCmnPrecompMsg3Vars
10711 RgSchCmnUlCell *cellUl,
10718 PRIVATE S16 rgSCHCmnPrecompMsg3Vars(cellUl, ccchCqi, msgSzA, sbSize, isEcp)
10719 RgSchCmnUlCell *cellUl;
10731 uint16_t msg3GrntSz = 0;
/* Cap CQI so that only modulation supported for Msg3 is assumed */
10734 if (ccchCqi > cellUl->max16qamCqi)
10736 ccchCqi = cellUl->max16qamCqi;
10738 /* #ifndef RG_SCH_CMN_EXP_CP_SUP For ECP Pick the index 1 */
10740 ccchTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
10741 ccchMcs = rgSCHCmnUlGetIMcsFrmITbs(ccchTbs, CM_LTE_UE_CAT_1);
10743 /* MCS should fit in 4 bits in RAR */
10749 /* Limit the ccchMcs to 15 as it
10750 * can be inferred from 36.213, section 6.2 that msg3 imcs
10752 * Since, UE doesn't exist right now, we use CAT_1 for ue
/* Step ccchCqi down until the derived IMCS fits the RAR's 4-bit field */
10754 while((ccchMcs = (rgSCHCmnUlGetIMcsFrmITbs(
10755 rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi],CM_LTE_UE_CAT_1))
10757 RG_SCH_CMN_MAX_MSG3_IMCS)
10762 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ccchCqi];
10764 if (msgSzA < RGSCH_MIN_MSG3_GRNT_SZ)
/* --- Group B: size the grant for msgSzA --- */
10768 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(msgSzA, ccchCqi, cellUl), sbSize);
10770 numRb = numSb * sbSize;
10771 msg3GrntSz = 8 * msgSzA;
/* Grow subband count until the TB size covers the Msg3 grant in bits */
10773 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
10776 numRb = numSb * sbSize;
/* PUSCH RB counts must be products of 2/3/5: round numSb up to one */
10778 while (rgSchCmnMult235Tbl[numSb].match != numSb)
10782 /* Reversed(Corrected) the assignment for preamble-GrpA
10783 * Refer- TG36.321- section- 5.1.2*/
10784 cellUl->ra.prmblBNumSb = numSb;
10785 cellUl->ra.prmblBIMcs = ccchMcs;
/* --- Group A: size the grant for the minimum Msg3 grant size --- */
10786 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(RGSCH_MIN_MSG3_GRNT_SZ, \
10790 numRb = numSb * sbSize;
10791 msg3GrntSz = 8 * RGSCH_MIN_MSG3_GRNT_SZ;
10792 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
10795 numRb = numSb * sbSize;
10797 while (rgSchCmnMult235Tbl[numSb].match != numSb)
10801 /* Reversed(Corrected) the assignment for preamble-GrpA
10802 * Refer- TG36.321- section- 5.1.2*/
10803 cellUl->ra.prmblANumSb = numSb;
10804 cellUl->ra.prmblAIMcs = ccchMcs;
/* Debug flag/counter gating PUCCH detail prints -- usage is not visible
 * in this extract; presumably checked around the CA_DBG printf below --
 * TODO confirm against the full source */
10808 uint32_t gPrntPucchDet=0;
10811 /***********************************************************
10813 * Func : rgSCHCmnUlCalcAvailBw
10815 * Desc : Calculates bandwidth available for PUSCH scheduling.
/* TDD variant: excludes PUCCH RBs (format 2 'n2Rb', mixed RB, and
 * format 1 RBs derived from n1Max per 36.211 Section 5.4.3) from both
 * band edges; returns the PUSCH start RB and available RB count. */
10817 * Ret : S16 (ROK/RFAILED)
10823 **********************************************************/
10825 PRIVATE S16 rgSCHCmnUlCalcAvailBw
10828 RgrCellCfg *cellCfg,
10830 uint8_t *rbStartRef,
10831 uint8_t *bwAvailRef
10834 PRIVATE S16 rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, rbStartRef, bwAvailRef)
10836 RgrCellCfg *cellCfg;
10838 uint8_t *rbStartRef;
10839 uint8_t *bwAvailRef;
10843 uint8_t ulBw = cell->bwCfg.ulTotalBw;
10844 uint8_t n2Rb = cell->pucchCfg.resourceSize;
10845 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
10846 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
10847 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
10854 uint8_t exclRb; /* RBs to exclude */
10856 uint8_t puschRbStart;
10857 /* To avoid PUCCH and PUSCH collision issue */
10861 /* Maximum value of M as per Table 10.1-1 */
10862 uint8_t M[RGSCH_MAX_TDD_UL_DL_CFG] = {1, 2, 4, 3, 4, 9, 1};
10865 if (cell->isCpUlExtend)
10870 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
10872 /* Considering the max no. of CCEs for PUSCH BW calculation
10873 * based on min mi value */
10874 if (cell->ulDlCfgIdx == 0 || cell->ulDlCfgIdx == 6)
10883 totalCce = cell->dynCfiCb.cfi2NCceTbl[mi][cfi];
/* Worst-case PUCCH format 1 resource index (36.213 Section 10.1) */
10885 P = rgSCHCmnGetPValFrmCCE(cell, totalCce-1);
10886 n1PlusOne = cell->rgSchTddNpValTbl[P + 1];
10887 n1Max = (M[cell->ulDlCfgIdx] - 1)*n1PlusOne + (totalCce-1) + n1Pucch;
10889 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
10891 n1RbPart = (c*n1Cs)/pucchDeltaShft;
10892 n1Rb = (n1Max - n1RbPart)/ n1PerRb;
10893 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
10895 /* get the total Number of RB's to be excluded for PUSCH */
10897 if(n1Pucch < n1RbPart)
10903 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
/* PUCCH sits at both band edges: half the excluded RBs on each side */
10905 puschRbStart = exclRb/2 + 1;
10907 /* Num of PUCCH RBs = puschRbStart*2 */
10908 if (puschRbStart * 2 >= ulBw)
10910 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
10914 *rbStartRef = puschRbStart;
10915 *bwAvailRef = ulBw - puschRbStart * 2;
/* If PUCCH usage exceeds the configured cap, clamp the max CFI so fewer
 * CCEs (hence fewer PUCCH resources) are assumed next time */
10917 if(cell->pucchCfg.maxPucchRb !=0 &&
10918 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
10920 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
10927 /***********************************************************
10929 * Func : rgSCHCmnUlCalcAvailBw
10931 * Desc : Calculates bandwidth available for PUSCH scheduling.
10933 * Ret : S16 (ROK/RFAILED)
10939 **********************************************************/
/*
 * rgSCHCmnUlCalcAvailBw (non-TDD variant):
 * Derives the first RB usable for PUSCH (*rbStartRef) and the number of RBs
 * available for PUSCH (*bwAvailRef) at the given CFI, by excluding the RBs
 * reserved for PUCCH: the format-2 region (n2Rb), one mixed format-1/2 RB
 * when cyclicShift is non-zero, and the format-1 region sized from n1PucchAn
 * plus the number of CCEs at this CFI. Also caps cell->dynCfiCb.maxCfi when
 * the resulting PUCCH region exceeds the configured maxPucchRb.
 * NOTE(review): this listing has elided lines (declarations of c, n1PerRb,
 * totalCce, n1Max, n1RbPart, n1Rb, mixedRb, braces and returns are not
 * visible); comments below describe only the visible statements.
 */
10941 PRIVATE S16 rgSCHCmnUlCalcAvailBw
10944 RgrCellCfg *cellCfg,
10946 uint8_t *rbStartRef,
10947 uint8_t *bwAvailRef
10950 PRIVATE S16 rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, rbStartRef, bwAvailRef)
10952 RgrCellCfg *cellCfg;
10954 uint8_t *rbStartRef;
10955 uint8_t *bwAvailRef;
/* Snapshot the PUCCH configuration that sizes the excluded region. */
10959 uint8_t ulBw = cell->bwCfg.ulTotalBw;
10960 uint8_t n2Rb = cell->pucchCfg.resourceSize;
10961 uint8_t pucchDeltaShft = cell->pucchCfg.deltaShift;
10962 uint16_t n1Pucch = cell->pucchCfg.n1PucchAn;
10963 uint8_t n1Cs = cell->pucchCfg.cyclicShift;
10969 uint8_t exclRb; /* RBs to exclude */
10971 uint8_t puschRbStart;
10973 uint16_t numOfN3PucchRb;
10974 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* 'c' presumably selected per cyclic-prefix length here -- assignment lines
 * are not visible in this listing; TODO confirm against full source. */
10978 if (cell->isCpUlExtend)
10983 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
/* CCE count for this CFI (row 0 of the CFI-to-NCCE table). */
10985 totalCce = cell->dynCfiCb.cfi2NCceTbl[0][cfi];
/* Highest format-1 PUCCH resource index in use. */
10987 n1Max = n1Pucch + totalCce-1;
10989 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
10991 n1RbPart = (c*n1Cs)/pucchDeltaShft;
10992 n1Rb = (uint8_t)((n1Max - n1RbPart) / n1PerRb);
10993 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
10995 /* get the total Number of RB's to be excluded for PUSCH */
10997 if(n1Pucch < n1RbPart)
11003 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
11005 /*Support for PUCCH Format 3*/
11007 if (cell->isPucchFormat3Sptd)
/* One format-3 RB serves up to 5 UEs; reserve ceil(maxUePerDlSf/5) RBs. */
11009 numOfN3PucchRb = RGSCH_CEIL(cellSch->dl.maxUePerDlSf,5);
11010 exclRb = exclRb + numOfN3PucchRb;
/* PUCCH is split across both band edges, so PUSCH starts past half of it. */
11013 puschRbStart = exclRb/2 + 1;
11017 #ifndef ALIGN_64BIT
11018 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%ld:%d:%d:%d:%d:%d]\n",
11019 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
11021 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%d:%d:%d:%d:%d:%d]\n",
11022 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
/* Both band-edge PUCCH halves together must leave at least one PUSCH RB. */
11026 if (puschRbStart*2 >= ulBw)
11028 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
11032 *rbStartRef = puschRbStart;
11033 *bwAvailRef = ulBw - puschRbStart * 2;
/* If the PUCCH region at this CFI exceeds the configured ceiling, this CFI
 * (and higher) cannot be used dynamically: lower maxCfi to cfi-1. */
11035 if(cell->pucchCfg.maxPucchRb !=0 &&
11036 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
11038 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
11047 /***********************************************************
11049 * Func : rgSCHCmnUlCellInit
11051 * Desc : Uplink scheduler initialisation for cell.
11059 **********************************************************/
/*
 * rgSCHCmnUlCellInit:
 * One-time uplink scheduler initialization for a cell at RGR cell config.
 * - Validates/derives per-subframe limits (max UEs, max MSG3s, max UL BW
 *   per UE, PUSCH subband size) and stores them in the common UL cell block.
 * - Allocates the per-subframe UL allocation-info arrays and the UL subframe
 *   array, precomputes MSG3 parameters, and computes per-CFI available
 *   PUSCH bandwidth via rgSCHCmnUlCalcAvailBw.
 * - On TDD builds, additionally builds the UL-to-DL subframe mapping.
 * NOTE(review): this listing has elided lines (#ifdef TDD/EMTC/LTE_L2_MEAS
 * branches, braces, RFAILED/ROK returns, several local declarations such as
 * i, idx, ret, cfi, rbStart, bwAvail, numSb, iTbs are not visible);
 * comments describe only the visible statements.
 */
11061 PRIVATE S16 rgSCHCmnUlCellInit
11064 RgrCellCfg *cellCfg
11067 PRIVATE S16 rgSCHCmnUlCellInit(cell, cellCfg)
11069 RgrCellCfg *cellCfg;
11073 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11074 uint8_t maxUePerUlSf = cellCfg->maxUePerUlSf;
11076 /* Added configuration for maximum number of MSG3s */
11077 uint8_t maxMsg3PerUlSf = cellCfg->maxMsg3PerUlSf;
11079 uint8_t maxUlBwPerUe = cellCfg->maxUlBwPerUe;
11080 uint8_t sbSize = cellCfg->puschSubBand.size;
11085 uint8_t maxSbPerUe;
11088 uint16_t ulDlCfgIdx = cell->ulDlCfgIdx;
11089 /* [ccpu00127294]-MOD-Change the max Ul subfrms size in TDD */
11090 uint8_t maxSubfrms = 2 * rgSchTddNumUlSf[ulDlCfgIdx];
11091 uint8_t ulToDlMap[12] = {0}; /* maximum 6 Subframes in UL * 2 */
11092 uint8_t maxUlsubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
11093 [RGSCH_NUM_SUB_FRAMES-1];
/* Non-TDD path: fixed number of UL subframes tracked by the scheduler. */
11097 uint8_t maxSubfrms = RG_SCH_CMN_UL_NUM_SF;
11103 #if (defined(LTE_L2_MEAS) )
11104 Inst inst = cell->instIdx;
11105 #endif /* #if (defined(LTE_L2_MEAS) || defined(DEBUGP) */
11106 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
11109 cellUl->maxUeNewTxPerTti = cellCfg->maxUlUeNewTxPerTti;
/* Fall back to scheduler defaults when limits are left unconfigured (0). */
11110 if (maxUePerUlSf == 0)
11112 maxUePerUlSf = RG_SCH_CMN_MAX_UE_PER_UL_SF;
11115 if (maxMsg3PerUlSf == 0)
11117 maxMsg3PerUlSf = RG_SCH_CMN_MAX_MSG3_PER_UL_SF;
11119 /* fixed the problem while sending raRsp
11120 * if maxMsg3PerUlSf is greater than
11121 * RGSCH_MAX_RNTI_PER_RARNTI
11123 if(maxMsg3PerUlSf > RGSCH_MAX_RNTI_PER_RARNTI)
11125 maxMsg3PerUlSf = RGSCH_MAX_RNTI_PER_RARNTI;
11128 if(maxMsg3PerUlSf > maxUePerUlSf)
11130 maxMsg3PerUlSf = maxUePerUlSf;
11133 /*cellUl->maxAllocPerUlSf = maxUePerUlSf + maxMsg3PerUlSf;*/
11134 /*Max MSG3 should be a subset of Max UEs*/
11135 cellUl->maxAllocPerUlSf = maxUePerUlSf;
11136 cellUl->maxMsg3PerUlSf = maxMsg3PerUlSf;
11138 cellUl->maxAllocPerUlSf = maxUePerUlSf;
11140 /* Fix: MUE_PERTTI_UL syed validating Cell Configuration */
11141 if (cellUl->maxAllocPerUlSf < cellUl->maxUeNewTxPerTti)
11143 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
11144 "FAILED: MaxUePerUlSf(%u) < MaxUlUeNewTxPerTti(%u)",
11145 cellUl->maxAllocPerUlSf,
11146 cellUl->maxUeNewTxPerTti);
/* Per-subframe UL allocation-info arrays, one per slot in the alloc ring. */
11152 for(idx = 0; idx < RGSCH_SF_ALLOC_SIZE; idx++)
11154 for(idx = 0; idx < RGSCH_NUM_SUB_FRAMES; idx++)
11158 ret = rgSCHUtlAllocSBuf(inst, (Data **)&(cell->sfAllocArr[idx].
11159 ulUeInfo.ulAllocInfo), (cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc)));
11162 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation failed ");
11167 if (maxUlBwPerUe == 0)
11169 /* ccpu00139362- Setting to configured UL BW instead of MAX BW(100)*/
11170 maxUlBwPerUe = cell->bwCfg.ulTotalBw;
11172 cellUl->maxUlBwPerUe = maxUlBwPerUe;
11174 /* FOR RG_SCH_CMN_EXT_CP_SUP */
/* REs per RB: 12 subcarriers x (symbols left after DMRS/SRS overhead);
 * 14 symbols for normal CP, 12 for extended CP. */
11175 if (!cellCfg->isCpUlExtend)
11177 cellUl->ulNumRePerRb = 12 * (14 - RGSCH_UL_SYM_DMRS_SRS);
11181 cellUl->ulNumRePerRb = 12 * (12 - RGSCH_UL_SYM_DMRS_SRS);
/* Subband size must be a product of 2/3/5 factors (exact table match). */
11184 if (sbSize != rgSchCmnMult235Tbl[sbSize].match)
11186 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Invalid subband size %d", sbSize);
11189 //Setting the subband size to 4 which is size of VRBG in 5GTF
11191 sbSize = MAX_5GTF_VRBG_SIZE;
11194 maxSbPerUe = maxUlBwPerUe / sbSize;
11195 if (maxSbPerUe == 0)
11197 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnUlCellInit(): "
11198 "maxUlBwPerUe/sbSize is zero");
/* Round down to the previous valid 2/3/5-composite subband count. */
11201 cellUl->maxSbPerUe = rgSchCmnMult235Tbl[maxSbPerUe].prvMatch;
11203 /* CQI related updations */
11204 if ((!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->ulCmnCodeRate.ccchCqi))
11205 || (!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->trgUlCqi.trgCqi)))
11207 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnUlCellInit(): "
11211 cellUl->dfltUlCqi = cellCfg->ulCmnCodeRate.ccchCqi;
11213 /* Changed the logic to determine maxUlCqi.
11214 * For a 16qam UE, maxUlCqi is the CQI Index at which
11215 * efficiency is as close as possible to RG_SCH_MAX_CODE_RATE_16QAM
11216 * Refer to 36.213-8.6.1 */
11217 for (i = RG_SCH_CMN_UL_NUM_CQI - 1;i > 0; --i)
11219 RLOG_ARG2(L_INFO,DBG_CELLID,cell->cellId,
11222 rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i]);
11223 #ifdef MAC_SCH_STATS
11224 /* ccpu00128489 ADD Update mcs in hqFailStats here instead of at CRC
11225 * since CQI to MCS mapping does not change. The only exception is for
11226 * ITBS = 19 where the MCS can be 20 or 21 based on the UE cat. We
11227 * choose 20, instead of 21, ie UE_CAT_3 */
11228 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
11229 RG_SCH_CMN_UL_TBS_TO_MCS(iTbs, hqFailStats.ulCqiStat[i - 1].mcs);
/* Walk CQIs downward to find the highest CQI still mapping into 16QAM. */
11232 for (i = RG_SCH_CMN_UL_NUM_CQI - 1; i != 0; --i)
11234 /* Fix for ccpu00123912*/
11235 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
11236 if (iTbs <= RGSCH_UL_16QAM_MAX_ITBS) /* corresponds to 16QAM */
11238 RLOG_ARG1(L_INFO,DBG_CELLID,cell->cellId,
11239 "16 QAM CQI %u", i);
11240 cellUl->max16qamCqi = i;
11246 /* Precompute useful values for RA msg3 */
11247 ret = rgSCHCmnPrecompEmtcMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
11248 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
11255 /* Precompute useful values for RA msg3 */
11256 ret = rgSCHCmnPrecompMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
11257 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
11263 cellUl->sbSize = sbSize;
11266 cellUl->numUlSubfrms = maxSubfrms;
11268 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->ulSfArr,
11269 cellUl->numUlSubfrms * sizeof(RgSchUlSf));
/* On allocation failure, record zero subframes so deinit is a no-op. */
11273 cellUl->numUlSubfrms = 0;
11277 /* store the DL subframe corresponding to the PUSCH offset
11278 * in their respective UL subframe */
11279 for(i=0; i < RGSCH_NUM_SUB_FRAMES; i++)
11281 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][i] != 0)
11283 subfrm = (i + rgSchTddPuschTxKTbl[ulDlCfgIdx][i]) % \
11284 RGSCH_NUM_SUB_FRAMES;
11285 subfrm = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subfrm]-1;
11286 dlIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][i]-1;
11287 RGSCH_ARRAY_BOUND_CHECK( cell->instIdx, ulToDlMap, subfrm);
11288 ulToDlMap[subfrm] = dlIdx;
11291 /* Copy the information in the remaining UL subframes based
11292 * on number of HARQ processes */
11293 for(i=maxUlsubfrms; i < maxSubfrms; i++)
11295 subfrm = i-maxUlsubfrms;
11296 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, i);
11297 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, subfrm)
11298 ulToDlMap[i] = ulToDlMap[subfrm];
/* Compute available PUSCH BW per CFI; the result seeds dynCfiCb.bwInfo. */
11302 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++)
11305 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
11307 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
11316 cell->ulAvailBw = bwAvail;
11319 numSb = bwAvail/sbSize;
11321 cell->dynCfiCb.bwInfo[cfi].startRb = rbStart;
11322 cell->dynCfiCb.bwInfo[cfi].numSb = numSb;
/* maxCfi == 0 means no CFI satisfied the PUCCH RB ceiling: config error. */
11325 if(0 == cell->dynCfiCb.maxCfi)
11327 RLOG_ARG3(L_ERROR,DBG_CELLID,cell->cellId,
11328 "Incorrect Default CFI(%u), maxCfi(%u), maxPucchRb(%d)",
11329 cellSch->cfiCfg.cfi, cell->dynCfiCb.maxCfi,
11330 cell->pucchCfg.maxPucchRb);
/* DMRS configuration array sized to the CFI=1 subband count. */
11336 cellUl->dmrsArrSize = cell->dynCfiCb.bwInfo[1].numSb;
11337 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->dmrsArr,
11338 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11343 for (i = 0; i < cellUl->dmrsArrSize; ++i)
11345 cellUl->dmrsArr[i] = cellCfg->puschSubBand.dmrs[i];
11348 /* Init subframes */
11349 for (i = 0; i < maxSubfrms; ++i)
11351 ret = rgSCHUtlUlSfInit(cell, &cellUl->ulSfArr[i], i,
11352 cellUl->maxAllocPerUlSf);
/* Failure path: unwind subframes initialized so far, then free buffers. */
11355 for (; i != 0; --i)
11357 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[i-1]);
11359 /* ccpu00117052 - MOD - Passing double pointer
11360 for proper NULLP assignment*/
11361 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)(&(cellUl->dmrsArr)),
11362 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11364 /* ccpu00117052 - MOD - Passing double pointer
11365 for proper NULLP assignment*/
11366 rgSCHUtlFreeSBuf(cell->instIdx,
11367 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
11372 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cellUl);
11377 * @brief Scheduler processing on cell configuration.
11381 * Function : rgSCHCmnRgrCellCfg
11383 * This function does requisite initialisation
11384 * and setup for scheduler1 when a cell is
11387 * @param[in] RgSchCellCb *cell
11388 * @param[in] RgrCellCfg *cellCfg
11389 * @param[out] RgSchErrInfo *err
/*
 * rgSCHCmnRgrCellCfg:
 * Entry point for RGR cell configuration into the common scheduler.
 * Allocates the per-cell scheduler control block (cell->sc.sch), seeds CFI
 * and target-CQI settings, initializes the refresh timer queue, RACH
 * dedicated-preamble data and Np tables, then performs UL cell init, DL
 * cell config, DCI format sizing, and finally dispatches to the configured
 * UL/DL (or EMTC) scheduler APIs. Sets err->errCause = RGSCHERR_SCH_CFG on
 * every visible failure path.
 * NOTE(review): #ifdef EMTC/TDD guards, braces and RFAILED/ROK returns are
 * elided in this listing; comments describe only the visible statements.
 */
11395 S16 rgSCHCmnRgrCellCfg
11398 RgrCellCfg *cellCfg,
11402 S16 rgSCHCmnRgrCellCfg(cell, cellCfg, err)
11404 RgrCellCfg *cellCfg;
11409 RgSchCmnCell *cellSch;
11411 /* As part of RGR cell configuration, validate the CRGCellCfg
11412 * There is no trigger for crgCellCfg from SC1 */
11413 /* Removed failure check for Extended CP */
11415 if (((ret = rgSCHUtlAllocSBuf(cell->instIdx,
11416 (Data**)&(cell->sc.sch), (sizeof(RgSchCmnCell)))) != ROK))
11418 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
11419 "Memory allocation FAILED");
11420 err->errCause = RGSCHERR_SCH_CFG;
11423 cellSch = (RgSchCmnCell *)(cell->sc.sch);
11424 cellSch->cfiCfg = cellCfg->cfiCfg;
11425 cellSch->trgUlCqi.trgCqi = cellCfg->trgUlCqi.trgCqi;
11426 /* Initialize the scheduler refresh timer queues */
11427 cellSch->tmrTqCp.nxtEnt = 0;
11428 cellSch->tmrTqCp.tmrLen = RG_SCH_CMN_NUM_REFRESH_Q;
11430 /* RACHO: Initialize the RACH dedicated Preamble Information */
11431 rgSCHCmnCfgRachDedPrm(cell);
11433 /* Initialize 'Np' value for each 'p' used for
11434 * HARQ ACK/NACK reception */
11435 rgSCHCmnDlNpValInit(cell);
11438 /* Initialize 'Np' value for each 'p' used for
11439 * HARQ ACK/NACK reception */
11441 rgSCHCmnDlNpValInit(cell);
11444 /* Now perform uplink related initializations */
11445 ret = rgSCHCmnUlCellInit(cell, cellCfg);
11448 /* There is no downlink deinit to be performed */
11449 err->errCause = RGSCHERR_SCH_CFG;
11452 ret = rgSCHCmnDlRgrCellCfg(cell, cellCfg, err);
11455 err->errCause = RGSCHERR_SCH_CFG;
11458 /* DL scheduler has no initializations to make */
11459 /* As of now DL scheduler always returns ROK */
11461 rgSCHCmnGetDciFrmtSizes(cell);
11462 rgSCHCmnGetCqiDciFrmt2AggrLvl(cell);
11464 rgSCHCmnGetEmtcDciFrmtSizes(cell);
11465 rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl(cell);
11466 #endif /* EMTC_ENABLE */
/* Select and invoke UL scheduler APIs (EMTC table when emtcEnable). */
11469 if(TRUE == cellCfg->emtcEnable)
11471 cellSch->apisEmtcUl = &rgSchEmtcUlSchdTbl[0];
11472 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
11479 cellSch->apisUl = &rgSchUlSchdTbl[RG_SCH_CMN_GET_UL_SCHED_TYPE(cell)];
11480 ret = cellSch->apisUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
/* Select and invoke DL scheduler APIs (EMTC table when emtcEnable). */
11486 if(TRUE == cellCfg->emtcEnable)
11488 cellSch->apisEmtcDl = &rgSchEmtcDlSchdTbl[0];
11489 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
11496 cellSch->apisDl = &rgSchDlSchdTbl[RG_SCH_CMN_GET_DL_SCHED_TYPE(cell)];
11498 /* Perform SPS specific initialization for the cell */
11499 ret = rgSCHCmnSpsCellCfg(cell, cellCfg, err);
11505 ret = cellSch->apisDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
11510 rgSCHCmnInitVars(cell);
11513 } /* rgSCHCmnRgrCellCfg*/
11517 * @brief This function handles the reconfiguration of cell.
11521 * Function: rgSCHCmnRgrCellRecfg
11522 * Purpose: Update the reconfiguration parameters.
11524 * Invoked by: Scheduler
11526 * @param[in] RgSchCellCb* cell
/*
 * rgSCHCmnRgrCellRecfg:
 * Applies an RGR cell reconfiguration. Each reconfiguration type is gated
 * by a bit in recfg->recfgTypes: UL common code rate (revalidates CCCH CQI
 * and recomputes MSG3 parameters, restoring the old CQI on failure), DL
 * common code rate, DLFS on/off, and power reconfiguration. UL/DL specific
 * scheduler recfg hooks are dispatched through the (EMTC or regular) API
 * tables.
 * NOTE(review): braces, returns and #ifdef EMTC guards are elided in this
 * listing; comments describe only the visible statements.
 */
11531 S16 rgSCHCmnRgrCellRecfg
11534 RgrCellRecfg *recfg,
11538 S16 rgSCHCmnRgrCellRecfg(cell, recfg, err)
11540 RgrCellRecfg *recfg;
11545 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11546 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11549 if (recfg->recfgTypes & RGR_CELL_UL_CMNRATE_RECFG)
/* Keep the old CQI so it can be restored if recompute fails below. */
11551 uint8_t oldCqi = cellUl->dfltUlCqi;
11552 if (!RG_SCH_CMN_UL_IS_CQI_VALID(recfg->ulCmnCodeRate.ccchCqi))
11554 err->errCause = RGSCHERR_SCH_CFG;
11555 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnRgrCellRecfg(): "
11559 cellUl->dfltUlCqi = recfg->ulCmnCodeRate.ccchCqi;
11560 ret = rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
11561 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
/* Rollback path: restore old CQI and recompute MSG3 parameters with it. */
11564 cellUl->dfltUlCqi = oldCqi;
11565 rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
11566 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
11571 if (recfg->recfgTypes & RGR_CELL_DL_CMNRATE_RECFG)
11573 if (rgSCHCmnDlCnsdrCmnRt(cell, &recfg->dlCmnCodeRate) != ROK)
11575 err->errCause = RGSCHERR_SCH_CFG;
/* EMTC cells dispatch recfg through the EMTC scheduler API tables. */
11581 if(TRUE == cell->emtcEnable)
11583 /* Invoke UL sched for cell Recfg */
11584 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
11590 /* Invoke DL sched for cell Recfg */
11591 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
11600 /* Invoke UL sched for cell Recfg */
11601 ret = cellSch->apisUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
11607 /* Invoke DL sched for cell Recfg */
11608 ret = cellSch->apisDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
11615 if (recfg->recfgTypes & RGR_CELL_DLFS_RECFG)
11617 ret = cellSch->apisDlfs->rgSCHDlfsCellRecfg(cell, recfg, err);
11622 cellSch->dl.isDlFreqSel = recfg->dlfsRecfg.isDlFreqSel;
11625 if (recfg->recfgTypes & RGR_CELL_PWR_RECFG)
11627 ret = rgSCHPwrCellRecfg(cell, recfg);
11637 /***********************************************************
11639 * Func : rgSCHCmnUlCellDeinit
11641 * Desc : Uplink scheduler de-initialisation for cell.
11649 **********************************************************/
/*
 * rgSCHCmnUlCellDeinit:
 * Releases all uplink scheduler resources of a cell, mirroring
 * rgSCHCmnUlCellInit: the per-subframe UL allocation-info arrays, any
 * outstanding L2 measurement control blocks on cell->l2mList, the DMRS
 * configuration array, each initialized UL subframe, and finally the UL
 * subframe array itself. All frees pass double pointers so the utility can
 * NULL the caller's pointer (guards against double free).
 * NOTE(review): braces and #ifdef LTE_L2_MEAS/TDD guards are elided in this
 * listing; comments describe only the visible statements.
 */
11651 PRIVATE Void rgSCHCmnUlCellDeinit
11656 PRIVATE Void rgSCHCmnUlCellDeinit(cell)
11660 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11663 uint8_t maxSubfrms = cellUl->numUlSubfrms;
11666 CmLList *lnk = NULLP;
11667 RgSchL2MeasCb *measCb;
11671 for(ulSfIdx = 0; ulSfIdx < RGSCH_SF_ALLOC_SIZE; ulSfIdx++)
11673 for(ulSfIdx = 0; ulSfIdx < RGSCH_NUM_SUB_FRAMES; ulSfIdx++)
11676 if(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo != NULLP)
11678 /* ccpu00117052 - MOD - Passing double pointer
11679 for proper NULLP assignment*/
11680 rgSCHUtlFreeSBuf(cell->instIdx,
11681 (Data **)(&(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo)),
11682 cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc));
11684 /* ccpu00117052 - DEL - removed explicit NULLP assignment
11685 as it is done in above utility function */
11688 /* Free the memory allocated to measCb */
11689 lnk = cell->l2mList.first;
11690 while(lnk != NULLP)
11692 measCb = (RgSchL2MeasCb *)lnk->node;
11693 cmLListDelFrm(&cell->l2mList, lnk);
11695 /* ccpu00117052 - MOD - Passing double pointer
11696 for proper NULLP assignment*/
11697 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&measCb,\
11698 sizeof(RgSchL2MeasCb));
11701 if (cellUl->dmrsArr != NULLP)
11703 /* ccpu00117052 - MOD - Passing double pointer
11704 for proper NULLP assignment*/
11705 rgSCHUtlFreeSBuf(cell->instIdx,(Data **)(&(cellUl->dmrsArr)),
11706 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11708 /* De-init subframes */
11710 for (ulSfIdx = 0; ulSfIdx < maxSubfrms; ++ulSfIdx)
11712 for (ulSfIdx = 0; ulSfIdx < RG_SCH_CMN_UL_NUM_SF; ++ulSfIdx)
11715 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[ulSfIdx]);
11719 if (cellUl->ulSfArr != NULLP)
11721 /* ccpu00117052 - MOD - Passing double pointer
11722 for proper NULLP assignment*/
11723 rgSCHUtlFreeSBuf(cell->instIdx,
11724 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
11732 * @brief Scheduler processing for cell delete.
11736 * Function : rgSCHCmnCellDel
11738 * This functions de-initialises and frees memory
11739 * taken up by scheduler1 for the entire cell.
11741 * @param[in] RgSchCellCb *cell
/*
 * rgSCHCmnCellDel:
 * Tears down all common-scheduler state for a cell at cell deletion:
 * UL deinit, UL/DL scheduler-specific free hooks (EMTC variants when
 * enabled, guarded against unset API pointers), DLFS, power and SPS
 * deletion, and finally the per-cell scheduler control block itself.
 * A NULL cellSch means configuration never completed; nothing to free.
 * NOTE(review): braces, an early return and #ifdef EMTC/LTEMAC_SPS guards
 * are elided in this listing.
 */
11745 Void rgSCHCmnCellDel
11750 Void rgSCHCmnCellDel(cell)
11754 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11759 if (cellSch == NULLP)
11763 /* Perform the deinit for the UL scheduler */
11764 rgSCHCmnUlCellDeinit(cell);
11766 if(TRUE == cell->emtcEnable)
11768 if (cellSch->apisEmtcUl)
11770 cellSch->apisEmtcUl->rgSCHFreeUlCell(cell);
11774 if (cellSch->apisUl)
11776 /* api pointer checks added (here and below in
11777 * this function). pl check. - antriksh */
11778 cellSch->apisUl->rgSCHFreeUlCell(cell);
11781 /* Perform the deinit for the DL scheduler */
11782 cmLListInit(&cellSch->dl.taLst);
11783 if (cellSch->apisDl)
11785 cellSch->apisDl->rgSCHFreeDlCell(cell);
11788 if (cellSch->apisEmtcDl)
11790 rgSCHEmtcInitTaLst(&cellSch->dl);
11792 cellSch->apisEmtcDl->rgSCHFreeDlCell(cell);
11796 /* DLFS de-initialization */
11797 if (cellSch->dl.isDlFreqSel && cellSch->apisDlfs)
11799 cellSch->apisDlfs->rgSCHDlfsCellDel(cell);
11802 rgSCHPwrCellDel(cell);
11804 rgSCHCmnSpsCellDel(cell);
11807 /* ccpu00117052 - MOD - Passing double pointer
11808 for proper NULLP assignment*/
/* Finally release the scheduler control block allocated in cell config. */
11809 rgSCHUtlFreeSBuf(cell->instIdx,
11810 (Data**)(&(cell->sc.sch)), (sizeof(RgSchCmnCell)));
11812 } /* rgSCHCmnCellDel */
11816 * @brief This function validates QOS parameters for DL.
11820 * Function: rgSCHCmnValidateDlQos
11821 * Purpose: This function validates QOS parameters for DL.
11823 * Invoked by: Scheduler
11825 * @param[in] CrgLchQosCfg *dlQos
/*
 * rgSCHCmnValidateDlQos:
 * Validates DL QoS parameters of a logical channel: the QCI must lie in
 * [RG_SCH_CMN_MIN_QCI, RG_SCH_CMN_MAX_QCI], and for GBR-range QCIs the MBR
 * must be non-zero and not less than the GBR.
 * NOTE(review): braces and the RFAILED/ROK return statements are elided in
 * this listing.
 */
11830 PRIVATE S16 rgSCHCmnValidateDlQos
11832 RgrLchQosCfg *dlQos
11835 PRIVATE S16 rgSCHCmnValidateDlQos(dlQos)
11836 RgrLchQosCfg *dlQos;
11839 uint8_t qci = dlQos->qci;
11842 if ( qci < RG_SCH_CMN_MIN_QCI || qci > RG_SCH_CMN_MAX_QCI )
/* GBR bearers additionally require a consistent MBR/GBR pair. */
11847 if ((qci >= RG_SCH_CMN_GBR_QCI_START) &&
11848 (qci <= RG_SCH_CMN_GBR_QCI_END))
11850 if ((dlQos->mbr == 0) || (dlQos->mbr < dlQos->gbr))
11859 * @brief Scheduler invocation on logical channel addition.
11863 * Function : rgSCHCmnRgrLchCfg
11865 * This functions does required processing when a new
11866 * (dedicated) logical channel is added. Assumes lcg
11867 * pointer in ulLc is set.
11869 * @param[in] RgSchCellCb *cell
11870 * @param[in] RgSchUeCb *ue
11871 * @param[in] RgSchDlLcCb *dlLc
11872 * @param[int] RgrLchCfg *lcCfg
11873 * @param[out] RgSchErrInfo *err
/*
 * rgSCHCmnRgrLchCfg:
 * Scheduler processing for a new dedicated logical channel. Allocates the
 * per-LC DL service block (dlLc->sch); for non-DCCH channels validates the
 * DL QoS and stores qci/prio/gbr/mbr (gbr/mbr scaled to the scheduler
 * refresh period, RG_SCH_CMN_REFRESH_TIME/100); DCCH gets the fixed highest
 * priority. Then dispatches DL and UL LC-config hooks (EMTC variants for
 * EMTC UEs), registers the LC with the SCell module, and performs DL SPS
 * configuration when enabled.
 * NOTE(review): parameter declarations, braces, returns and #ifdef guards
 * are elided in this listing; comments describe only the visible statements.
 */
11879 S16 rgSCHCmnRgrLchCfg
11888 S16 rgSCHCmnRgrLchCfg(cell, ue, dlLc, lcCfg, err)
11898 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11901 ret = rgSCHUtlAllocSBuf(cell->instIdx,
11902 (Data**)&((dlLc)->sch), (sizeof(RgSchCmnDlSvc)));
11905 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRgrLchCfg(): "
11906 "SCH struct alloc failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11907 err->errCause = RGSCHERR_SCH_CFG;
11910 if(lcCfg->lcType != CM_LTE_LCH_DCCH)
11912 ret = rgSCHCmnValidateDlQos(&lcCfg->dlInfo.dlQos);
11915 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSchCmnCrgLcCfg(): "
11916 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11917 err->errCause = RGSCHERR_SCH_CFG;
11920 /* Perform DL service activation in the scheduler */
11921 ((RgSchCmnDlSvc *)(dlLc->sch))->qci = lcCfg->dlInfo.dlQos.qci;
11922 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = rgSchCmnDlQciPrio[lcCfg->dlInfo.dlQos.qci - 1];
/* gbr/mbr are stored per refresh period: rate * REFRESH_TIME(ms) / 100. */
11923 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcCfg->dlInfo.dlQos.gbr * \
11924 RG_SCH_CMN_REFRESH_TIME)/100;
11925 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcCfg->dlInfo.dlQos.mbr * \
11926 RG_SCH_CMN_REFRESH_TIME)/100;
11930 /*assigning highest priority to DCCH */
11931 ((RgSchCmnDlSvc *)(dlLc->sch))->prio=RG_SCH_CMN_DCCH_PRIO;
11934 dlLc->lcType=lcCfg->lcType;
/* DL LC config: EMTC scheduler for EMTC UEs, regular otherwise. */
11937 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
11939 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcCfg(cell, ue,dlLc ,lcCfg, err);
11948 ret = cellSch->apisDl->rgSCHRgrDlLcCfg(cell, ue, dlLc, lcCfg, err);
/* UL LC config follows the same EMTC/regular split. */
11956 if(TRUE == ue->isEmtcUe)
11958 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
11967 ret = cellSch->apisUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
11977 rgSCHSCellDlLcCfg(cell, ue, dlLc);
11983 if(lcCfg->dlInfo.dlSpsCfg.isSpsEnabled)
11985 /* Invoke SPS module if SPS is enabled for the service */
11986 ret = rgSCHCmnSpsDlLcCfg(cell, ue, dlLc, lcCfg, err);
11989 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "rgSchCmnRgrLchCfg(): "
11990 "SPS configuration failed for DL LC for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11991 err->errCause = RGSCHERR_SCH_CFG;
12001 * @brief Scheduler invocation on logical channel addition.
12005 * Function : rgSCHCmnRgrLchRecfg
12007 * This functions does required processing when an existing
12008 * (dedicated) logical channel is reconfigured. Assumes lcg
12009 * pointer in ulLc is set to the old value.
12010 * Independent of whether new LCG is meant to be configured,
12011 * the new LCG scheduler information is accessed and possibly modified.
12013 * @param[in] RgSchCellCb *cell
12014 * @param[in] RgSchUeCb *ue
12015 * @param[in] RgSchDlLcCb *dlLc
12016 * @param[int] RgrLchRecfg *lcRecfg
12017 * @param[out] RgSchErrInfo *err
/*
 * rgSCHCmnRgrLchRecfg:
 * Scheduler processing for reconfiguration of an existing dedicated logical
 * channel. For non-DCCH channels: re-validates DL QoS, rejects any QCI
 * change (priority change not supported), and updates the stored gbr/mbr
 * (scaled to the refresh period). DCCH keeps the fixed highest priority.
 * Then dispatches DL/UL LC-reconfig hooks (EMTC variants for EMTC UEs) and
 * handles the DL SPS reconfiguration flag (logged as unsupported).
 * NOTE(review): parameter declarations, braces, returns and #ifdef guards
 * are elided in this listing; comments describe only the visible statements.
 */
12023 S16 rgSCHCmnRgrLchRecfg
12028 RgrLchRecfg *lcRecfg,
12032 S16 rgSCHCmnRgrLchRecfg(cell, ue, dlLc, lcRecfg, err)
12036 RgrLchRecfg *lcRecfg;
12041 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12044 if(dlLc->lcType != CM_LTE_LCH_DCCH)
12046 ret = rgSCHCmnValidateDlQos(&lcRecfg->dlRecfg.dlQos);
12050 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
12051 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12052 err->errCause = RGSCHERR_SCH_CFG;
/* QCI (and hence priority) changes are rejected at reconfiguration. */
12055 if (((RgSchCmnDlSvc *)(dlLc->sch))->qci != lcRecfg->dlRecfg.dlQos.qci)
12057 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Qci, hence lc Priority change "
12058 "not supported for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12059 err->errCause = RGSCHERR_SCH_CFG;
/* gbr/mbr are stored per refresh period: rate * REFRESH_TIME(ms) / 100. */
12062 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcRecfg->dlRecfg.dlQos.gbr * \
12063 RG_SCH_CMN_REFRESH_TIME)/100;
12064 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcRecfg->dlRecfg.dlQos.mbr * \
12065 RG_SCH_CMN_REFRESH_TIME)/100;
12069 /*assigning highest priority to DCCH */
12070 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = RG_SCH_CMN_DCCH_PRIO;
/* DL/UL LC reconfig: EMTC scheduler for EMTC UEs, regular otherwise. */
12074 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
12076 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12081 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
12090 ret = cellSch->apisDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12095 ret = cellSch->apisUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
12103 if (lcRecfg->recfgTypes & RGR_DL_LC_SPS_RECFG)
12105 /* Invoke SPS module if SPS is enabled for the service */
12106 if(lcRecfg->dlRecfg.dlSpsRecfg.isSpsEnabled)
12108 ret = rgSCHCmnSpsDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12111 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"SPS re-configuration not "
12112 "supported for dlLC Ignore this CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12123 * @brief Scheduler invocation on logical channel addition.
12127 * Function : rgSCHCmnRgrLcgCfg
12129 * This function does required processing when an uplink
12130 * logical channel group (LCG) is configured. Assumes the LCG
12131 * scheduler block in ue->ul.lcgArr is set.
12133 * @param[in] RgSchCellCb *cell,
12134 * @param[in] RgSchUeCb *ue,
12135 * @param[in] RgSchLcgCb *lcg,
12136 * @param[in] RgrLcgCfg *lcgCfg,
12137 * @param[out] RgSchErrInfo *err
/*
 * rgSCHCmnRgrLcgCfg:
 * Scheduler processing for configuration of an uplink logical channel
 * group (LCG). Stores the configured GBR and the MBR-GBR delta (both
 * scaled to the scheduler refresh period), dispatches the UL LCG-config
 * hook (EMTC variant for EMTC UEs), and — when the LCG is GBR — notifies
 * MAC via rgSCHUtlBuildNSendLcgReg.
 * NOTE(review): parameter declarations, braces and returns are elided in
 * this listing; comments describe only the visible statements.
 */
12143 S16 rgSCHCmnRgrLcgCfg
12152 S16 rgSCHCmnRgrLcgCfg(cell, ue, lcg, lcgCfg, err)
12161 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12162 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCfg->ulInfo.lcgId].sch));
/* Rates are stored per refresh period: rate * REFRESH_TIME(ms) / 100. */
12165 ulLcg->cfgdGbr = (lcgCfg->ulInfo.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
12166 ulLcg->effGbr = ulLcg->cfgdGbr;
12167 ulLcg->deltaMbr = ((lcgCfg->ulInfo.mbr - lcgCfg->ulInfo.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
12168 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
12171 if(TRUE == ue->isEmtcUe)
12173 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
12182 ret = cellSch->apisUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
12188 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
12190 /* Indicate MAC that this LCG is GBR LCG */
12191 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcgCfg->ulInfo.lcgId, TRUE);
12197 * @brief Scheduler invocation on logical channel addition.
12201 * Function : rgSCHCmnRgrLcgRecfg
12203 * This function does required processing when an existing
12204 * uplink logical channel group (LCG) is reconfigured. Assumes the
12205 * LCG scheduler block in ue->ul.lcgArr is set.
12207 * @param[in] RgSchCellCb *cell,
12208 * @param[in] RgSchUeCb *ue,
12209 * @param[in] RgSchLcgCb *lcg,
12210 * @param[in] RgrLcgRecfg *reCfg,
12211 * @param[out] RgSchErrInfo *err
/*
 * rgSCHCmnRgrLcgRecfg:
 * Scheduler processing for reconfiguration of an uplink logical channel
 * group (LCG). Updates the stored GBR and MBR-GBR delta (scaled to the
 * refresh period), dispatches the UL LCG-reconfig hook (EMTC variant for
 * EMTC UEs), then (re)registers the LCG's GBR status with MAC — TRUE when
 * still GBR, FALSE when the reconfiguration made it non-GBR (RAB
 * modification case).
 * NOTE(review): parameter declarations, braces and returns are elided in
 * this listing; comments describe only the visible statements.
 */
12217 S16 rgSCHCmnRgrLcgRecfg
12222 RgrLcgRecfg *reCfg,
12226 S16 rgSCHCmnRgrLcgRecfg(cell, ue, lcg, reCfg, err)
12230 RgrLcgRecfg *reCfg;
12235 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12236 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[reCfg->ulRecfg.lcgId].sch));
/* Rates are stored per refresh period: rate * REFRESH_TIME(ms) / 100. */
12239 ulLcg->cfgdGbr = (reCfg->ulRecfg.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
12240 ulLcg->effGbr = ulLcg->cfgdGbr;
12241 ulLcg->deltaMbr = ((reCfg->ulRecfg.mbr - reCfg->ulRecfg.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
12242 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
12245 if(TRUE == ue->isEmtcUe)
12247 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
12256 ret = cellSch->apisUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
12262 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
12264 /* Indicate MAC that this LCG is GBR LCG */
12265 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, TRUE);
12269 /* In case of RAB modification */
12270 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, FALSE);
12275 /***********************************************************
12277 * Func : rgSCHCmnRgrLchDel
12279 * Desc : Scheduler handling for a (dedicated)
12280 * uplink logical channel being deleted.
12287 **********************************************************/
/*
 * rgSCHCmnRgrLchDel:
 * Scheduler handling for deletion of a dedicated uplink logical channel.
 * Simply forwards to the UL scheduler's LCH-delete hook — the EMTC variant
 * for EMTC UEs, the regular one otherwise.
 * NOTE(review): parameter declarations, braces and the return are elided
 * in this listing.
 */
12289 S16 rgSCHCmnRgrLchDel
12297 S16 rgSCHCmnRgrLchDel(cell, ue, lcId, lcgId)
12304 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12306 if(TRUE == ue->isEmtcUe)
12308 cellSch->apisEmtcUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
12313 cellSch->apisUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
12318 /***********************************************************
12320 * Func : rgSCHCmnLcgDel
12322 * Desc : Scheduler handling for a (dedicated)
12323 * uplink logical channel group being deleted.
12331 **********************************************************/
/*
 * rgSCHCmnLcgDel:
 * Scheduler handling for deletion of an uplink logical channel group.
 * De-registers a GBR LCG from MAC, invokes UL SPS LCG deletion when UL SPS
 * is enabled for the UE, zeroes the LCG's rate/BSR bookkeeping (the control
 * block itself is freed only at UE deletion), and calls the UL scheduler's
 * free-LCG hook (EMTC variant for EMTC UEs).
 * NOTE(review): braces, an early return and #ifdef LTEMAC_SPS/EMTC guards
 * are elided in this listing.
 */
12333 Void rgSCHCmnLcgDel
12340 Void rgSCHCmnLcgDel(cell, ue, lcg)
12346 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12347 RgSchCmnLcg *lcgCmn = RG_SCH_CMN_GET_UL_LCG(lcg);
/* LCG never configured in the scheduler: nothing to clean up. */
12349 if (lcgCmn == NULLP)
12354 if (RGSCH_IS_GBR_BEARER(lcgCmn->cfgdGbr))
12356 /* Indicate MAC that this LCG is GBR LCG */
12357 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcg->lcgId, FALSE);
12361 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
12363 rgSCHCmnSpsUlLcgDel(cell, ue, lcg);
12365 #endif /* LTEMAC_SPS */
12367 lcgCmn->effGbr = 0;
12368 lcgCmn->reportedBs = 0;
12369 lcgCmn->cfgdGbr = 0;
12370 /* set lcg bs to 0. Deletion of control block happens
12371 * at the time of UE deletion. */
12374 if(TRUE == ue->isEmtcUe)
12376 cellSch->apisEmtcUl->rgSCHFreeUlLcg(cell, ue, lcg);
12381 cellSch->apisUl->rgSCHFreeUlLcg(cell, ue, lcg);
12388 * @brief This function deletes a service from scheduler.
12392 * Function: rgSCHCmnFreeDlLc
12393 * Purpose: This function is made available through a FP for
12394 * making scheduler aware of a service being deleted from UE.
12396 * Invoked by: BO and Scheduler
12398 * @param[in] RgSchCellCb* cell
12399 * @param[in] RgSchUeCb* ue
12400 * @param[in] RgSchDlLcCb* svc
/*
 * rgSCHCmnFreeDlLc:
 * Removes a DL service (logical channel) from the scheduler. Dispatches the
 * DL free-LC hook (EMTC variant for EMTC UEs), de-registers the LC from the
 * SCell module, invokes DL SPS deletion when SPS was enabled for the
 * service, frees the per-LC scheduler block (svc->sch), and de-initializes
 * the LAA LCH control block. A NULL svc->sch means the LC was never
 * configured in the scheduler.
 * NOTE(review): braces, an early return and #ifdef guards are elided in
 * this listing.
 */
12405 Void rgSCHCmnFreeDlLc
12412 Void rgSCHCmnFreeDlLc(cell, ue, svc)
12418 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12419 if (svc->sch == NULLP)
12424 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
12426 cellSch->apisEmtcDl->rgSCHFreeDlLc(cell, ue, svc);
12431 cellSch->apisDl->rgSCHFreeDlLc(cell, ue, svc);
12437 rgSCHSCellDlLcDel(cell, ue, svc);
12442 /* If SPS service, invoke SPS module */
12443 if (svc->dlLcSpsCfg.isSpsEnabled)
12445 rgSCHCmnSpsDlLcDel(cell, ue, svc);
12449 /* ccpu00117052 - MOD - Passing double pointer
12450 for proper NULLP assignment*/
12451 rgSCHUtlFreeSBuf(cell->instIdx,
12452 (Data**)(&(svc->sch)), (sizeof(RgSchCmnDlSvc)));
12455 rgSCHLaaDeInitDlLchCb(cell, svc);
12464 * @brief This function Processes the Final Allocations
12465 * made by the RB Allocator against the requested
12466 * CCCH SDURetx Allocations.
12470 * Function: rgSCHCmnDlCcchSduRetxFnlz
12471 * Purpose: This function Processes the Final Allocations
12472 * made by the RB Allocator against the requested
12473 * CCCH Retx Allocations.
12474 * Scans through the scheduled list of ccchSdu retrans
12475 * fills the corresponding pdcch, adds the hqProc to
12476 * the corresponding SubFrm and removes the hqP from
12479 * Invoked by: Common Scheduler
12481 * @param[in] RgSchCellCb *cell
12482 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * rgSCHCmnDlCcchSduRetxFnlz:
 * Finalizes RB-allocator results for CCCH SDU retransmissions. For each
 * scheduled HARQ process: fills PDCCH/HARQ info, unlinks the process from
 * the cell's ccchSduRetxLst, and resets the UE's temporary DL allocation
 * state. For each non-scheduled process only the temporary allocation
 * state is reset (the process stays queued for a later attempt).
 * NOTE(review): loop braces, node-advance statements, the 'ue' assignment
 * and the return are elided in this listing; comments describe only the
 * visible statements.
 */
12487 PRIVATE Void rgSCHCmnDlCcchSduRetxFnlz
12490 RgSchCmnDlRbAllocInfo *allocInfo
12493 PRIVATE Void rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo)
12495 RgSchCmnDlRbAllocInfo *allocInfo;
12499 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12500 RgSchDlRbAlloc *rbAllocInfo;
12501 RgSchDlHqProcCb *hqP;
12504 /* Traverse through the Scheduled Retx List */
12505 node = allocInfo->ccchSduAlloc.schdCcchSduRetxLst.first;
12508 hqP = (RgSchDlHqProcCb *)(node->node);
12510 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
12512 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12514 /* Remove the HqP from cell's ccchSduRetxLst */
12515 cmLListDelFrm(&cmnCellDl->ccchSduRetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
12516 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
12518 /* Fix: syed dlAllocCb reset should be performed.
12519 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12520 rgSCHCmnDlUeResetTemp(ue, hqP);
12522 /* Fix: syed dlAllocCb reset should be performed.
12523 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled retx list: only reset temporary allocation state. */
12524 node = allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst.first;
12527 hqP = (RgSchDlHqProcCb *)(node->node);
12530 /* reset the UE allocation Information */
12531 rgSCHCmnDlUeResetTemp(ue, hqP);
/* NOTE(review): elided listing — prototype guards, while-loop headers and
 * closing braces are not visible; comments cover visible statements only. */
12537 * @brief This function Processes the Final Allocations
12538 * made by the RB Allocator against the requested
12539 * CCCH Retx Allocations.
12543 * Function: rgSCHCmnDlCcchRetxFnlz
12544 * Purpose: This function Processes the Final Allocations
12545 * made by the RB Allocator against the requested
12546 * CCCH Retx Allocations.
12547 * Scans through the scheduled list of msg4 retrans
12548 * fills the corresponding pdcch, adds the hqProc to
12549 * the corresponding SubFrm and removes the hqP from
12552 * Invoked by: Common Scheduler
12554 * @param[in] RgSchCellCb *cell
12555 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
12560 PRIVATE Void rgSCHCmnDlCcchRetxFnlz
12563 RgSchCmnDlRbAllocInfo *allocInfo
12566 PRIVATE Void rgSCHCmnDlCcchRetxFnlz(cell, allocInfo)
12568 RgSchCmnDlRbAllocInfo *allocInfo;
12572 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12573 RgSchDlRbAlloc *rbAllocInfo;
12574 RgSchDlHqProcCb *hqP;
12577 /* Traverse through the Scheduled Retx List */
12578 node = allocInfo->msg4Alloc.schdMsg4RetxLst.first;
12581 hqP = (RgSchDlHqProcCb *)(node->node);
/* Msg4 retx: allocation CB lives on the RA control block, not the UE CB. */
12582 raCb = hqP->hqE->raCb;
12583 rbAllocInfo = &raCb->rbAllocInfo;
12585 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12587 /* Remove the HqP from cell's msg4RetxLst */
12588 cmLListDelFrm(&cmnCellDl->msg4RetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
12589 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
12590 /* Fix: syed dlAllocCb reset should be performed.
12591 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12592 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
12593 rgSCHCmnDlHqPResetTemp(hqP);
12595 /* Fix: syed dlAllocCb reset should be performed.
12596 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled msg4 retx: clear the RA CB's alloc info and HQ temp state. */
12597 node = allocInfo->msg4Alloc.nonSchdMsg4RetxLst.first;
12600 hqP = (RgSchDlHqProcCb *)(node->node);
12601 raCb = hqP->hqE->raCb;
12603 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
12604 rgSCHCmnDlHqPResetTemp(hqP);
/* NOTE(review): elided listing — loop headers/braces not visible here. */
12611 * @brief This function Processes the Final Allocations
12612 * made by the RB Allocator against the requested
12613 * CCCH SDU tx Allocations.
12617 * Function: rgSCHCmnDlCcchSduTxFnlz
12618 * Purpose: This function Processes the Final Allocations
12619 * made by the RB Allocator against the requested
12620 * CCCH tx Allocations.
12621 * Scans through the scheduled list of CCCH SDU trans
12622 * fills the corresponding pdcch, adds the hqProc to
12623 * the corresponding SubFrm and removes the hqP from
12626 * Invoked by: Common Scheduler
12628 * @param[in] RgSchCellCb *cell
12629 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
12634 PRIVATE Void rgSCHCmnDlCcchSduTxFnlz
12637 RgSchCmnDlRbAllocInfo *allocInfo
12640 PRIVATE Void rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo)
12642 RgSchCmnDlRbAllocInfo *allocInfo;
12647 RgSchDlRbAlloc *rbAllocInfo;
12648 RgSchDlHqProcCb *hqP;
12649 RgSchLchAllocInfo lchSchdData;
12651 /* Traverse through the Scheduled Retx List */
12652 node = allocInfo->ccchSduAlloc.schdCcchSduTxLst.first;
12655 hqP = (RgSchDlHqProcCb *)(node->node);
12656 ueCb = hqP->hqE->ue;
12658 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
12660 /* fill the pdcch and HqProc */
12661 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12663 /* Remove the raCb from cell's toBeSchdLst */
12664 cmLListDelFrm(&cell->ccchSduUeLst, &ueCb->ccchSduLnk);
12665 ueCb->ccchSduLnk.node = (PTR)NULLP;
12667 /* Fix : Resetting this required to avoid complication
12668 * in reestablishment case */
12669 ueCb->dlCcchInfo.bo = 0;
12671 /* Indicate DHM of the CCCH LC scheduling */
/* contResCe NOTPRSNT: plain CCCH SDU tx, no contention-resolution CE
 * (contrast rgSCHCmnDlCcchTxFnlz, which sets PRSNT_NODEF for msg4). */
12672 hqP->tbInfo[0].contResCe = NOTPRSNT;
12673 lchSchdData.lcId = 0;
/* Scheduled bytes exclude the MSG4 MAC header overhead. */
12674 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
12675 (RGSCH_MSG4_HDRSIZE);
12676 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
12678 /* Fix: syed dlAllocCb reset should be performed.
12679 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12680 rgSCHCmnDlUeResetTemp(ueCb, hqP);
12682 /* Fix: syed dlAllocCb reset should be performed.
12683 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled CCCH SDU tx: release TB0 of the HARQ proc and reset. */
12684 node = allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst.first;
12687 hqP = (RgSchDlHqProcCb *)(node->node);
12688 ueCb = hqP->hqE->ue;
12690 /* Release HqProc */
12691 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
12692 /*Fix: Removing releasing of TB1 as it will not exist for CCCH SDU and hence caused a crash*/
12693 /*rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
12694 /* reset the UE allocation Information */
12695 rgSCHCmnDlUeResetTemp(ueCb, hqP);
/* NOTE(review): elided listing — loop headers/braces not visible here. */
12702 * @brief This function Processes the Final Allocations
12703 * made by the RB Allocator against the requested
12704 * CCCH tx Allocations.
12708 * Function: rgSCHCmnDlCcchTxFnlz
12709 * Purpose: This function Processes the Final Allocations
12710 * made by the RB Allocator against the requested
12711 * CCCH tx Allocations.
12712 * Scans through the scheduled list of msg4 trans
12713 * fills the corresponding pdcch, adds the hqProc to
12714 * the corresponding SubFrm and removes the hqP from
12717 * Invoked by: Common Scheduler
12719 * @param[in] RgSchCellCb *cell
12720 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
12725 PRIVATE Void rgSCHCmnDlCcchTxFnlz
12728 RgSchCmnDlRbAllocInfo *allocInfo
12731 PRIVATE Void rgSCHCmnDlCcchTxFnlz(cell, allocInfo)
12733 RgSchCmnDlRbAllocInfo *allocInfo;
12738 RgSchDlRbAlloc *rbAllocInfo;
12739 RgSchDlHqProcCb *hqP;
12740 RgSchLchAllocInfo lchSchdData;
12742 /* Traverse through the Scheduled Retx List */
12743 node = allocInfo->msg4Alloc.schdMsg4TxLst.first;
12746 hqP = (RgSchDlHqProcCb *)(node->node);
12747 raCb = hqP->hqE->raCb;
12749 rbAllocInfo = &raCb->rbAllocInfo;
12751 /* fill the pdcch and HqProc */
12752 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12753 /* MSG4 Fix Start */
12755 rgSCHRamRmvFrmRaInfoSchdLst(cell, raCb);
12758 /* Indicate DHM of the CCCH LC scheduling */
12759 lchSchdData.lcId = 0;
/* Msg4 carries both the MAC header and the contention-resolution CE;
 * both overheads are excluded from the scheduled LC bytes. */
12760 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
12761 (RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE);
12762 /* TRansmitting presence of cont Res CE across MAC-SCH interface to
12763 * identify CCCH SDU transmissions which need to be done
12765 * contention resolution CE*/
12766 hqP->tbInfo[0].contResCe = PRSNT_NODEF;
12767 /*Dont add lc if only cont res CE is being transmitted*/
12768 if(raCb->dlCcchInfo.bo)
12770 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
12775 /* Fix: syed dlAllocCb reset should be performed.
12776 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12777 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
12778 rgSCHCmnDlHqPResetTemp(hqP);
/* Non-scheduled msg4 tx: release TB0 only (TB1 does not exist for msg4). */
12780 node = allocInfo->msg4Alloc.nonSchdMsg4TxLst.first;
12783 hqP = (RgSchDlHqProcCb *)(node->node);
12784 raCb = hqP->hqE->raCb;
12786 rbAllocInfo = &raCb->rbAllocInfo;
12787 /* Release HqProc */
12788 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
12789 /*Fix: Removing releasing of TB1 as it will not exist for MSG4 and hence caused a crash*/
12790 /* rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
12791 /* reset the UE allocation Information */
12792 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
12793 rgSCHCmnDlHqPResetTemp(hqP);
/* NOTE(review): elided listing — the do{...} loop opening and some
 * braces are not visible; comments cover visible statements only. */
12800 * @brief This function calculates the BI Index to be sent in the Bi header
12804 * Function: rgSCHCmnGetBiIndex
12805 * Purpose: This function Processes utilizes the previous BI time value
12806 * calculated and the difference last BI sent time and current time. To
12807 * calculate the latest BI Index. It also considers the how many UE's
12808 * Unserved in this subframe.
12810 * Invoked by: Common Scheduler
12812 * @param[in] RgSchCellCb *cell
12813 * @param[in] uint32_t ueCount
12818 uint8_t rgSCHCmnGetBiIndex
12824 uint8_t rgSCHCmnGetBiIndex(cell, ueCount)
12829 S16 prevVal = 0; /* To Store Intermediate Value */
12830 uint16_t newBiVal = 0; /* To store Bi Value in millisecond */
12832 uint16_t timeDiff = 0;
/* If a BI was sent before, age the previous BI value by the elapsed
 * subframes since it was sent (eMTC cells use a different SF-diff macro). */
12835 if (cell->biInfo.prevBiTime != 0)
12838 if(cell->emtcEnable == TRUE)
12840 timeDiff =(RGSCH_CALC_SF_DIFF_EMTC(cell->crntTime, cell->biInfo.biTime));
12845 timeDiff =(RGSCH_CALC_SF_DIFF(cell->crntTime, cell->biInfo.biTime));
12848 prevVal = cell->biInfo.prevBiTime - timeDiff;
12854 newBiVal = RG_SCH_CMN_GET_BI_VAL(prevVal,ueCount);
12855 /* To be used next time when BI is calculated */
12857 if(cell->emtcEnable == TRUE)
12859 RGSCHCPYTIMEINFO_EMTC(cell->crntTime, cell->biInfo.biTime)
12864 RGSCHCPYTIMEINFO(cell->crntTime, cell->biInfo.biTime)
12867 /* Search the actual BI Index from table Backoff Parameters Value and
12868 * return that Index */
/* Linear scan of rgSchCmnBiTbl for the first entry exceeding newBiVal. */
12871 if (rgSchCmnBiTbl[idx] > newBiVal)
12876 }while(idx < RG_SCH_CMN_NUM_BI_VAL-1);
12877 cell->biInfo.prevBiTime = rgSchCmnBiTbl[idx];
12878 /* For 16 Entries in Table 7.2.1 36.321.880 - 3 reserved so total 13 Entries */
12879 return (idx); /* Returning reserved value from table UE treats it has 960 ms */
12880 } /* rgSCHCmnGetBiIndex */
/* NOTE(review): elided listing — many control-flow lines (braces, else
 * branches, continue/break, #ifdef LTE_TDD guards) are not visible;
 * comments below describe only the visible statements. */
12884 * @brief This function Processes the Final Allocations
12885 * made by the RB Allocator against the requested
12886 * RAR allocations. Assumption: The reuqested
12887 * allocations are always satisfied completely.
12888 * Hence no roll back.
12892 * Function: rgSCHCmnDlRaRspFnlz
12893 * Purpose: This function Processes the Final Allocations
12894 * made by the RB Allocator against the requested.
12895 * Takes care of PDCCH filling.
12897 * Invoked by: Common Scheduler
12899 * @param[in] RgSchCellCb *cell
12900 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
12905 PRIVATE Void rgSCHCmnDlRaRspFnlz
12908 RgSchCmnDlRbAllocInfo *allocInfo
12911 PRIVATE Void rgSCHCmnDlRaRspFnlz(cell, allocInfo)
12913 RgSchCmnDlRbAllocInfo *allocInfo;
12916 uint32_t rarCnt = 0;
12917 RgSchDlRbAlloc *raRspAlloc;
12918 RgSchDlSf *subFrm = NULLP;
12922 RgSchRaReqInfo *raReq;
12924 RgSchUlAlloc *ulAllocRef=NULLP;
12925 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12926 uint8_t allocRapidCnt = 0;
12928 uint32_t msg3SchdIdx = 0;
12929 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
12930 uint8_t msg3Subfrm;
/* One RAR PDCCH per common-PDCCH slot; skip slots with no PDCCH grabbed. */
12934 for (rarCnt=0; rarCnt<RG_SCH_CMN_MAX_CMN_PDCCH; rarCnt++)
12936 raRspAlloc = &allocInfo->raRspAlloc[rarCnt];
12937 /* Having likely condition first for optimization */
12938 if (!raRspAlloc->pdcch)
12944 subFrm = raRspAlloc->dlSf;
12945 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
12946 /* Corrected RACH handling for multiple RAPIDs per RARNTI */
12947 allocRapidCnt = raRspAlloc->numRapids;
12948 while (allocRapidCnt)
12950 raReq = (RgSchRaReqInfo *)(reqLst->first->node);
12951 /* RACHO: If dedicated preamble, then allocate UL Grant
12952 * (consequence of handover/pdcchOrder) and continue */
12953 if (RGSCH_IS_DEDPRM(cell, raReq->raReq.rapId))
12955 rgSCHCmnHdlHoPo(cell, &subFrm->raRsp[rarCnt].contFreeUeLst,
12957 cmLListDelFrm(reqLst, reqLst->first);
12959 /* ccpu00117052 - MOD - Passing double pointer
12960 for proper NULLP assignment*/
12961 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
12962 sizeof(RgSchRaReqInfo));
/* RACH overload backoff: drop the request without building a RAR entry. */
12966 if(cell->overLoadBackOffEnab)
12967 {/* rach Overlaod conrol is triggerd, Skipping this rach */
12968 cmLListDelFrm(reqLst, reqLst->first);
12970 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
12971 sizeof(RgSchRaReqInfo));
12974 /* Attempt to include each RA request into the RSP */
12975 /* Any failure in the procedure is considered to */
12976 /* affect futher allocations in the same TTI. When */
12977 /* a failure happens, we break out and complete */
12978 /* the processing for random access */
12979 if (rgSCHRamCreateRaCb(cell, &raCb, &err) != ROK)
12983 /* Msg3 allocation request to USM */
12984 if (raReq->raReq.rapId < cell->rachCfg.sizeRaPreambleGrpA)
12988 /*ccpu00128820 - MOD - Msg3 alloc double delete issue*/
12989 rgSCHCmnMsg3GrntReq(cell, raCb->tmpCrnti, preamGrpA, \
12990 &(raCb->msg3HqProc), &ulAllocRef, &raCb->msg3HqProcId);
/* No UL space for msg3: roll back the freshly created RA CB. */
12991 if (ulAllocRef == NULLP)
12993 rgSCHRamDelRaCb(cell, raCb, TRUE);
12996 if (raReq->raReq.cqiPres)
12998 raCb->ccchCqi = raReq->raReq.cqiIdx;
13002 raCb->ccchCqi = cellDl->ccchCqi;
13004 raCb->rapId = raReq->raReq.rapId;
13005 raCb->ta.pres = TRUE;
13006 raCb->ta.val = raReq->raReq.ta;
13007 raCb->msg3Grnt = ulAllocRef->grnt;
13008 /* Populating the tpc value received */
13009 raCb->msg3Grnt.tpc = raReq->raReq.tpc;
13010 /* PHR handling for MSG3 */
13011 ulAllocRef->raCb = raCb;
13013 /* To the crntTime, add the MIN time at which UE will
13014 * actually send MSG3 i.e DL_DELTA+6 */
13015 raCb->msg3AllocTime = cell->crntTime;
13016 RGSCH_INCR_SUB_FRAME(raCb->msg3AllocTime, RG_SCH_CMN_MIN_MSG3_RECP_INTRVL);
/* TDD path (presumably under an elided #ifdef LTE_TDD — confirm):
 * msg3 timing derived from the UL/DL configuration tables. */
13018 msg3SchdIdx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) %
13019 RGSCH_NUM_SUB_FRAMES;
13020 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
13021 special subframe */
13022 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][msg3SchdIdx] !=
13023 RG_SCH_TDD_UL_SUBFRAME)
13025 RGSCHCMNADDTOCRNTTIME(cell->crntTime,raCb->msg3AllocTime,
13026 RG_SCH_CMN_DL_DELTA)
13027 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][
13028 raCb->msg3AllocTime.slot];
13029 RGSCHCMNADDTOCRNTTIME(raCb->msg3AllocTime, raCb->msg3AllocTime,
/* Queue this RA CB into the subframe's RAR response list and free the
 * consumed RA request. */
13033 cmLListAdd2Tail(&subFrm->raRsp[rarCnt].raRspLst, &raCb->rspLnk);
13034 raCb->rspLnk.node = (PTR)raCb;
13035 cmLListDelFrm(reqLst, reqLst->first);
13037 /* ccpu00117052 - MOD - Passing double pointer
13038 for proper NULLP assignment*/
13039 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
13040 sizeof(RgSchRaReqInfo));
13042 /* SR_RACH_STATS : RAR scheduled */
13047 /* Fill subframe data members */
13048 subFrm->raRsp[rarCnt].raRnti = raRspAlloc->rnti;
13049 subFrm->raRsp[rarCnt].pdcch = raRspAlloc->pdcch;
13050 subFrm->raRsp[rarCnt].tbSz = raRspAlloc->tbInfo[0].bytesAlloc;
13051 /* Fill PDCCH data members */
13052 rgSCHCmnFillPdcch(cell, subFrm->raRsp[rarCnt].pdcch, raRspAlloc);
13055 if(cell->overLoadBackOffEnab)
13056 {/* rach Overlaod conrol is triggerd, Skipping this rach */
13057 subFrm->raRsp[rarCnt].backOffInd.pres = PRSNT_NODEF;
13058 subFrm->raRsp[rarCnt].backOffInd.val = cell->overLoadBackOffval;
13063 subFrm->raRsp[rarCnt].backOffInd.pres = NOTPRSNT;
13066 /*[ccpu00125212] Avoiding sending of empty RAR in case of RAR window
13067 is short and UE is sending unauthorised preamble.*/
13068 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
13069 if ((raRspAlloc->biEstmt) && (reqLst->count))
13071 subFrm->raRsp[0].backOffInd.pres = PRSNT_NODEF;
13072 /* Added as part of Upgrade */
13073 subFrm->raRsp[0].backOffInd.val =
13074 rgSCHCmnGetBiIndex(cell, reqLst->count);
13076 /* SR_RACH_STATS : Back Off Inds */
/* Nothing scheduled and no contention-free UEs: return the PDCCH. */
13080 else if ((subFrm->raRsp[rarCnt].raRspLst.first == NULLP) &&
13081 (subFrm->raRsp[rarCnt].contFreeUeLst.first == NULLP))
13083 /* Return the grabbed PDCCH */
13084 rgSCHUtlPdcchPut(cell, &subFrm->pdcchInfo, raRspAlloc->pdcch);
13085 subFrm->raRsp[rarCnt].pdcch = NULLP;
13086 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRaRspAlloc(): "
13087 "Not even one RaReq.");
13091 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId,
13092 "RNTI:%d Scheduled RAR @ (%u,%u) ",
13094 cell->crntTime.sfn,
13095 cell->crntTime.slot);
/* NOTE(review): elided listing — parameter list, `si`/`i` handling branch
 * and the return statement are not fully visible. Visible math matches
 * the SIB1 RV rule k = (SFN/2) mod 4, rv = ceil(3k/2) mod 4
 * (36.213 sec 7.1.7.3) — confirm against the full source. */
13101 * @brief This function computes rv.
13105 * Function: rgSCHCmnDlCalcRvForBcch
13106 * Purpose: This function computes rv.
13108 * Invoked by: Common Scheduler
13110 * @param[in] RgSchCellCb *cell
13111 * @param[in] Bool si
13112 * @param[in] uint16_t i
13117 PRIVATE uint8_t rgSCHCmnDlCalcRvForBcch
13124 PRIVATE uint8_t rgSCHCmnDlCalcRvForBcch(cell, si, i)
13131 CmLteTimingInfo frm;
/* Evaluate at transmission time: current time advanced by DL_DELTA. */
13133 frm = cell->crntTime;
13134 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
13142 k = (frm.sfn/2) % 4;
13144 rv = RGSCH_CEIL(3*k, 2) % 4;
/* NOTE(review): elided listing — #ifdef (LTEMAC_HDFDD / RGR_SI_SCH / TDD)
 * structure and several braces are not visible; the duplicate nextSfIdx
 * declarations at 13185/13188/13190 are presumably alternatives under
 * elided preprocessor guards — confirm against the full source. */
13149 * @brief This function Processes the Final Allocations
13150 * made by the RB Allocator against the requested
13151 * BCCH/PCCH allocations. Assumption: The reuqested
13152 * allocations are always satisfied completely.
13153 * Hence no roll back.
13157 * Function: rgSCHCmnDlBcchPcchFnlz
13158 * Purpose: This function Processes the Final Allocations
13159 * made by the RB Allocator against the requested.
13160 * Takes care of PDCCH filling.
13162 * Invoked by: Common Scheduler
13164 * @param[in] RgSchCellCb *cell
13165 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
13170 PRIVATE Void rgSCHCmnDlBcchPcchFnlz
13173 RgSchCmnDlRbAllocInfo *allocInfo
13176 PRIVATE Void rgSCHCmnDlBcchPcchFnlz(cell, allocInfo)
13178 RgSchCmnDlRbAllocInfo *allocInfo;
13181 RgSchDlRbAlloc *rbAllocInfo;
13185 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
13187 #ifdef LTEMAC_HDFDD
13188 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13190 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13194 /* Moving variables to available scope for optimization */
13195 RgSchClcDlLcCb *pcch;
13198 RgSchClcDlLcCb *bcch;
13201 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* ---- PCCH (paging) finalization ---- */
13205 rbAllocInfo = &allocInfo->pcchAlloc;
13206 if (rbAllocInfo->pdcch)
13208 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
13210 /* Added sfIdx calculation for TDD as well */
13212 #ifdef LTEMAC_HDFDD
13213 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13215 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13218 subFrm = rbAllocInfo->dlSf;
13219 pcch = rgSCHDbmGetPcch(cell);
13222 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnDlBcchPcchFnlz( ): "
13223 "No Pcch Present");
13227 /* Added Dl TB count for paging message transmission*/
13229 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Consume and free the first pending PCCH BO report. */
13231 bo = (RgSchClcBoRpt *)pcch->boLst.first->node;
13232 cmLListDelFrm(&pcch->boLst, &bo->boLstEnt);
13233 /* ccpu00117052 - MOD - Passing double pointer
13234 for proper NULLP assignment*/
13235 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
13236 /* Fill subframe data members */
13237 subFrm->pcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
13238 subFrm->pcch.pdcch = rbAllocInfo->pdcch;
13239 /* Fill PDCCH data members */
13240 rgSCHCmnFillPdcch(cell, subFrm->pcch.pdcch, rbAllocInfo);
13241 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, pcch->lcId, TRUE);
13242 /* ccpu00132314-ADD-Update the tx power allocation info
13243 TODO-Need to add a check for max tx power per symbol */
13244 subfrmAlloc->cmnLcInfo.pcchInfo.txPwrOffset = cellDl->pcchTxPwrOffset;
/* ---- BCCH (SIB1 / SI message) finalization ---- */
13248 rbAllocInfo = &allocInfo->bcchAlloc;
13249 if (rbAllocInfo->pdcch)
13251 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
13253 #ifdef LTEMAC_HDFDD
13254 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13256 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13259 subFrm = rbAllocInfo->dlSf;
13261 /* Fill subframe data members */
13262 subFrm->bcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
13263 subFrm->bcch.pdcch = rbAllocInfo->pdcch;
13264 /* Fill PDCCH data members */
13265 rgSCHCmnFillPdcch(cell, subFrm->bcch.pdcch, rbAllocInfo);
/* schdFirst distinguishes the first DLSCH BCCH (SIB1) from later SIs. */
13267 if(rbAllocInfo->schdFirst)
13270 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
13271 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
13273 /*Copy the SIB1 msg buff into interface buffer */
13274 SCpyMsgMsg(cell->siCb.crntSiInfo.sib1Info.sib1,
13275 rgSchCb[cell->instIdx].rgSchInit.region,
13276 rgSchCb[cell->instIdx].rgSchInit.pool,
13277 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
13278 #endif/*RGR_SI_SCH*/
13279 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
13280 rgSCHCmnDlCalcRvForBcch(cell, FALSE, 0);
/* SI-message path: use the SI scheduling context (siCtx). */
13288 i = cell->siCb.siCtx.i;
13289 /*Decrement the retransmission count */
13290 cell->siCb.siCtx.retxCntRem--;
13292 /*Copy the SI msg buff into interface buffer */
13293 if(cell->siCb.siCtx.warningSiFlag == FALSE)
13295 SCpyMsgMsg(cell->siCb.siArray[cell->siCb.siCtx.siId-1].si,
13296 rgSchCb[cell->instIdx].rgSchInit.region,
13297 rgSchCb[cell->instIdx].rgSchInit.pool,
13298 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
/* Warning SI (ETWS/CMAS-style PDU fetched from a queue — confirm). */
13302 pdu = rgSCHUtlGetWarningSiPdu(cell);
13303 RGSCH_NULL_CHECK(cell->instIdx, pdu);
13305 rgSchCb[cell->instIdx].rgSchInit.region,
13306 rgSchCb[cell->instIdx].rgSchInit.pool,
13307 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
13308 if(cell->siCb.siCtx.retxCntRem == 0)
13310 rgSCHUtlFreeWarningSiPdu(cell);
13311 cell->siCb.siCtx.warningSiFlag = FALSE;
13316 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
13317 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
13319 if(bo->retxCnt != cell->siCfg.retxCnt-1)
13324 #endif/*RGR_SI_SCH*/
13325 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
13326 rgSCHCmnDlCalcRvForBcch(cell, TRUE, i);
13329 /* Added Dl TB count for SIB1 and SI messages transmission.
13330 * This counter will be incremented only for the first transmission
13331 * (with RV 0) of these messages*/
13333 if(subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv == 0)
13335 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Drop the BO report once its retransmissions are exhausted. */
13339 if(bo->retxCnt == 0)
13341 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
13342 /* ccpu00117052 - MOD - Passing double pointer
13343 for proper NULLP assignment*/
13344 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
13346 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, bcch->lcId, sendInd);
13348 /*Fill the interface info */
13349 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, NULLD, NULLD);
13351 /* ccpu00132314-ADD-Update the tx power allocation info
13352 TODO-Need to add a check for max tx power per symbol */
13353 subfrmAlloc->cmnLcInfo.bcchInfo.txPwrOffset = cellDl->bcchTxPwrOffset;
13355 /*mBuf has been already copied above */
13356 #endif/*RGR_SI_SCH*/
/* NOTE(review): elided listing — while-loop headers and braces around the
 * three traversals are not visible. Each list is drained from the front:
 * the Mov2NonSchd* helpers presumably unlink the UE, so re-reading .first
 * advances the iteration — confirm against the full source. */
13369 * Function: rgSCHCmnUlSetAllUnSched
13372 * Invoked by: Common Scheduler
13374 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13379 PRIVATE Void rgSCHCmnUlSetAllUnSched
13381 RgSchCmnUlRbAllocInfo *allocInfo
13384 PRIVATE Void rgSCHCmnUlSetAllUnSched(allocInfo)
13385 RgSchCmnUlRbAllocInfo *allocInfo;
13391 node = allocInfo->contResLst.first;
13394 rgSCHCmnUlMov2NonSchdCntResLst(allocInfo, (RgSchUeCb *)node->node);
13395 node = allocInfo->contResLst.first;
13398 node = allocInfo->retxUeLst.first;
13401 rgSCHCmnUlMov2NonSchdRetxUeLst(allocInfo, (RgSchUeCb *)node->node);
13402 node = allocInfo->retxUeLst.first;
13405 node = allocInfo->ueLst.first;
13408 rgSCHCmnUlMov2NonSchdUeLst(allocInfo, (RgSchUeCb *)node->node);
13409 node = allocInfo->ueLst.first;
/* Appends a UE to the UL contention-resolution candidate list via the
 * reqLnk embedded in its per-cell UL alloc info. Unlike
 * rgSCHCmnUlAdd2UeLst below, no "already linked" check is visible here. */
13421 * Function: rgSCHCmnUlAdd2CntResLst
13424 * Invoked by: Common Scheduler
13426 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13427 * @param[in] RgSchUeCb *ue
13432 Void rgSCHCmnUlAdd2CntResLst
13434 RgSchCmnUlRbAllocInfo *allocInfo,
13438 Void rgSCHCmnUlAdd2CntResLst(allocInfo, ue)
13439 RgSchCmnUlRbAllocInfo *allocInfo;
13443 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,ue->cell))->alloc);
13444 cmLListAdd2Tail(&allocInfo->contResLst, &ulAllocInfo->reqLnk);
13445 ulAllocInfo->reqLnk.node = (PTR)ue;
/* Appends a UE to the UL new-transmission candidate list. The
 * reqLnk.node == NULLP guard makes the add idempotent: a UE already on
 * some request list (reqLnk in use) is not linked twice. */
13454 * Function: rgSCHCmnUlAdd2UeLst
13457 * Invoked by: Common Scheduler
13459 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13460 * @param[in] RgSchUeCb *ue
13465 Void rgSCHCmnUlAdd2UeLst
13468 RgSchCmnUlRbAllocInfo *allocInfo,
13472 Void rgSCHCmnUlAdd2UeLst(cell, allocInfo, ue)
13474 RgSchCmnUlRbAllocInfo *allocInfo;
13478 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,cell))->alloc);
13479 if (ulAllocInfo->reqLnk.node == NULLP)
13481 cmLListAdd2Tail(&allocInfo->ueLst, &ulAllocInfo->reqLnk);
13482 ulAllocInfo->reqLnk.node = (PTR)ue;
/* Entry point for UL RB allocation: delegates the new-transmission UE
 * list to rgSCHCmnUlRbAllocForLst on the subframe carried in allocInfo.
 * Only the new-tx path is visible in this excerpt. */
13492 * Function: rgSCHCmnAllocUlRb
13493 * Purpose: To do RB allocations for uplink
13495 * Invoked by: Common Scheduler
13497 * @param[in] RgSchCellCb *cell
13498 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
13502 Void rgSCHCmnAllocUlRb
13505 RgSchCmnUlRbAllocInfo *allocInfo
13508 Void rgSCHCmnAllocUlRb(cell, allocInfo)
13510 RgSchCmnUlRbAllocInfo *allocInfo;
13513 RgSchUlSf *sf = allocInfo->sf;
13515 /* Schedule for new transmissions */
13516 rgSCHCmnUlRbAllocForLst(cell, sf, allocInfo->ueLst.count,
13517 &allocInfo->ueLst, &allocInfo->schdUeLst,
13518 &allocInfo->nonSchdUeLst, (Bool)TRUE);
/* NOTE(review): elided listing — loop braces, #ifdef (TDD) guards and the
 * break on hole-exhaustion path are not fully visible. */
13522 /***********************************************************
13524 * Func : rgSCHCmnUlRbAllocForLst
13526 * Desc : Allocate for a list in cmn rb alloc information passed
13535 **********************************************************/
13537 PRIVATE Void rgSCHCmnUlRbAllocForLst
13543 CmLListCp *schdLst,
13544 CmLListCp *nonSchdLst,
13548 PRIVATE Void rgSCHCmnUlRbAllocForLst(cell, sf, count, reqLst, schdLst,
13549 nonSchdLst, isNewTx)
13554 CmLListCp *schdLst;
13555 CmLListCp *nonSchdLst;
13564 CmLteTimingInfo timeInfo;
/* Output lists are (re)initialized only when empty — presumably so retx
 * results already placed there are preserved; confirm with full source. */
13568 if(schdLst->count == 0)
13570 cmLListInit(schdLst);
13573 cmLListInit(nonSchdLst);
/* For new transmissions, record the UE count and the PUSCH timing
 * (TDD uses the k-table; the FDD alternative adds a fixed delta). */
13575 if(isNewTx == TRUE)
13577 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.numUes = (uint8_t) count;
13579 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime, timeInfo, TFU_ULCNTRL_DLDELTA);
13580 k = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][timeInfo.subframe];
13581 RG_SCH_ADD_TO_CRNT_TIME(timeInfo,
13582 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo, k);
13584 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime,cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo,
13585 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
/* Main per-UE allocation loop; `count` bounds the traversal. */
13590 for (lnk = reqLst->first; count; lnk = lnk->next, --count)
13592 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
13593 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
/* No free UL hole left: remaining UEs fall through to the tail loop
 * below and are marked non-scheduled. */
13598 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
13603 ueUl->subbandShare = ueUl->subbandRequired;
13604 if(isNewTx == TRUE)
13606 maxRb = RGSCH_MIN((ueUl->subbandRequired * MAX_5GTF_VRBG_SIZE), ue->ue5gtfCb.maxPrb);
13608 ret = rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole);
13611 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, schdLst);
13612 rgSCHCmnUlUeFillAllocInfo(cell, ue);
13616 gUl5gtfRbAllocFail++;
13617 #if defined (TENB_STATS) && defined (RG_5GTF)
13618 cell->tenbStats->sch.ul5gtfRbAllocFail++;
13620 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
13621 ue->isMsg4PdcchWithCrnti = FALSE;
13622 ue->isSrGrant = FALSE;
13625 if(isNewTx == TRUE)
13627 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
13628 ulAllocInfo[count - 1].rnti = ue->ueId;
13629 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
13630 ulAllocInfo[count - 1].numPrb = ue->ul.nPrb;
13633 ueUl->subbandShare = 0; /* This reset will take care of
13634 * all scheduler types */
/* Tail loop: any UEs left after holes ran out go to the non-schd list. */
13636 for (; count; lnk = lnk->next, --count)
13638 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
13639 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
13640 ue->isMsg4PdcchWithCrnti = FALSE;
/* NOTE(review): elided listing — the do{ opener, several braces, the
 * success/failure returns and the write-back of *numSb/*iTbs are not
 * visible; comments cover visible statements only. */
13647 /***********************************************************
13649 * Func : rgSCHCmnUlMdfyGrntForCqi
13651 * Desc : Modify UL Grant to consider presence of
13652 * CQI along with PUSCH Data.
13657 * - Scale down iTbs based on betaOffset and
13658 * size of Acqi Size.
13659 * - Optionally attempt to increase numSb by 1
13660 * if input payload size does not fit in due
13661 * to reduced tbSz as a result of iTbsNew.
13665 **********************************************************/
13667 PRIVATE S16 rgSCHCmnUlMdfyGrntForCqi
13675 uint32_t stepDownItbs,
13679 PRIVATE S16 rgSCHCmnUlMdfyGrntForCqi(cell, ue, maxRb, numSb, iTbs, hqSz, stepDownItbs, effTgt)
13686 uint32_t stepDownItbs;
13690 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(ue->cell);
13695 uint32_t remREsForPusch;
13696 uint32_t bitsPerRe;
/* Beta offsets are held as parts-per-1000 fixed point (see /1000 below). */
13698 uint32_t betaOffVal = ue->ul.betaOffstVal;
13699 uint32_t cqiRiRptSz = ue->ul.cqiRiSz;
13700 uint32_t betaOffHqVal = rgSchCmnBetaHqOffstTbl[ue->ul.betaHqOffst];
13701 uint32_t resNumSb = *numSb;
13702 uint32_t puschEff = 1000;
13705 Bool mdfyiTbsFlg = FALSE;
13706 uint8_t resiTbs = *iTbs;
/* Derive modulation order from iTbs; clamp to QPSK for non-cat-5 UEs,
 * 64QAM otherwise. */
13712 iMcs = rgSCHCmnUlGetIMcsFrmITbs(resiTbs, RG_SCH_CMN_GET_UE_CTGY(ue));
13713 RG_SCH_UL_MCS_TO_MODODR(iMcs, modOdr);
13714 if (RG_SCH_CMN_GET_UE_CTGY(ue) != CM_LTE_UE_CAT_5)
13716 modOdr = RGSCH_MIN(RGSCH_QM_QPSK, modOdr);
13720 modOdr = RGSCH_MIN(RGSCH_QM_64QAM, modOdr);
13722 nPrb = resNumSb * cellUl->sbSize;
13723 /* Restricting the minumum iTbs requried to modify to 10 */
13724 if ((nPrb >= maxRb) && (resiTbs <= 10))
13726 /* Could not accomodate ACQI */
/* Estimate REs consumed by CQI/RI and HARQ feedback, then the effective
 * bits-per-RE for the remaining PUSCH data REs. */
13729 totREs = nPrb * RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
13730 tbSz = rgTbSzTbl[0][resiTbs][nPrb-1];
13731 /* totalREs/tbSz = num of bits perRE. */
13732 cqiRiREs = (totREs * betaOffVal * cqiRiRptSz)/(1000 * tbSz); /* betaOffVal is represented
13733 as parts per 1000 */
13734 hqREs = (totREs * betaOffHqVal * hqSz)/(1000 * tbSz);
13735 if ((cqiRiREs + hqREs) < totREs)
13737 remREsForPusch = totREs - cqiRiREs - hqREs;
13738 bitsPerRe = (tbSz * 1000)/remREsForPusch; /* Multiplying by 1000 for Interger Oper */
13739 puschEff = bitsPerRe/modOdr;
13741 if (puschEff < effTgt)
13743 /* ensure resultant efficiency for PUSCH Data is within 0.93*/
13748 /* Alternate between increasing SB or decreasing iTbs until eff is met */
13749 if (mdfyiTbsFlg == FALSE)
13753 resNumSb = resNumSb + 1;
13755 mdfyiTbsFlg = TRUE;
13761 resiTbs-= stepDownItbs;
13763 mdfyiTbsFlg = FALSE;
13766 }while (1); /* Loop breaks if efficency is met
13767 or returns RFAILED if not able to meet the efficiency */
/* NOTE(review): elided listing (5GTF branch of UL alloc) — return
 * statements, braces and some #ifdef guards are not visible. */
13776 /***********************************************************
13778 * Func : rgSCHCmnUlRbAllocForUe
13780 * Desc : Do uplink RB allocation for an UE.
13784 * Notes: Note that as of now, for retx, maxRb
13785 * is not considered. Alternatives, such
13786 * as dropping retx if it crosses maxRb
13787 * could be considered.
13791 **********************************************************/
13793 PRIVATE S16 rgSCHCmnUlRbAllocForUe
13802 PRIVATE S16 rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole)
13810 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
13811 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
13812 RgSchUlAlloc *alloc = NULLP;
/* Two HARQ-proc sources appear here, presumably under elided #ifdef
 * alternatives: a direct index into hqProcCb vs. rgSCHUhmGetAvlHqProc. */
13818 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->schdHqProcIdx];
13820 RgSchUlHqProcCb *proc = NULLP;
13824 uint8_t numVrbgTemp;
13826 TfuDciFormat dciFrmt;
13831 rgSCHUhmGetAvlHqProc(cell, ue, &proc);
13834 //printf("UE [%d] HQ Proc unavailable\n", ue->ueId);
/* Rank-2 UEs use DCI format A2, otherwise A1 (5GTF formats). */
13839 if (ue->ue5gtfCb.rank == 2)
13841 dciFrmt = TFU_DCI_FORMAT_A2;
13846 dciFrmt = TFU_DCI_FORMAT_A1;
13849 /* 5gtf TODO : To pass dci frmt to this function */
13850 pdcch = rgSCHCmnPdcchAllocCrntSf(cell, ue);
13853 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
13854 "rgSCHCmnUlRbAllocForUe(): Could not get PDCCH for CRNTI:%d",ue->ueId);
13857 gUl5gtfPdcchSchd++;
13858 #if defined (TENB_STATS) && defined (RG_5GTF)
13859 cell->tenbStats->sch.ul5gtfPdcchSchd++;
13862 //TODO_SID using configured prb as of now
13863 nPrb = ue->ue5gtfCb.maxPrb;
13864 reqVrbg = nPrb/MAX_5GTF_VRBG_SIZE;
13865 iMcs = ue->ue5gtfCb.mcs; //gSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Sanity guard on per-beam VRBG bookkeeping before carving an alloc. */
13869 if((sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart > MAX_5GTF_VRBG)
13870 || (sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated > MAX_5GTF_VRBG))
13872 printf("5GTF_ERROR vrbg > 25 valstart = %d valalloc %d\n", sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart
13873 , sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated);
13878 /*TODO_SID: Workaround for alloc. Currently alloc is ulsf based. To handle multiple beams, we need a different
13879 design. Now alloc are formed based on MAX_5GTF_UE_SCH macro. */
13880 numVrbgTemp = MAX_5GTF_VRBG/MAX_5GTF_UE_SCH;
13883 alloc = rgSCHCmnUlSbAlloc(sf, numVrbgTemp,\
/* On alloc failure, the grabbed PDCCH is returned to the pool. */
13886 if (alloc == NULLP)
13888 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
13889 "rgSCHCmnUlRbAllocForUe(): Could not get UlAlloc %d CRNTI:%d",numVrbg,ue->ueId);
13890 rgSCHCmnPdcchRlsCrntSf(cell, pdcch);
13893 gUl5gtfAllocAllocated++;
13894 #if defined (TENB_STATS) && defined (RG_5GTF)
13895 cell->tenbStats->sch.ul5gtfAllocAllocated++;
/* Fill the grant: VRBG span, layers, DCI format, and advance the
 * per-beam VRBG cursor/counters. */
13897 alloc->grnt.vrbgStart = sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart;
13898 alloc->grnt.numVrbg = numVrbg;
13899 alloc->grnt.numLyr = numLyr;
13900 alloc->grnt.dciFrmt = dciFrmt;
13902 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart += numVrbg;
13903 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated += numVrbg;
13905 //rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
13907 sf->totPrb += alloc->grnt.numRb;
13908 ue->ul.nPrb = alloc->grnt.numRb;
13910 if (ue->csgMmbrSta != TRUE)
13912 cellUl->ncsgPrbCnt += alloc->grnt.numRb;
13914 cellUl->totPrbCnt += (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
13915 alloc->pdcch = pdcch;
13916 alloc->grnt.iMcs = iMcs;
13917 alloc->grnt.iMcsCrnt = iMcsCrnt;
13918 alloc->grnt.hop = 0;
13919 /* Initial Num RBs support for UCI on PUSCH */
13921 ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
13923 alloc->forMsg3 = FALSE;
13924 //RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTb5gtfSzTbl[0], (iTbs));
13926 //ueUl->alloc.allocdBytes = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
13927 /* TODO_SID Allocating based on configured MCS as of now.
13928 Currently for format A2. When doing multi grp per tti, need to update this. */
13929 ueUl->alloc.allocdBytes = (rgSch5gtfTbSzTbl[iMcs]/8) * ue->ue5gtfCb.rank;
13931 alloc->grnt.datSz = ueUl->alloc.allocdBytes;
13932 //TODO_SID Need to check mod order.
13933 RG_SCH_CMN_TBS_TO_MODODR(iMcs, alloc->grnt.modOdr);
13934 //alloc->grnt.modOdr = 6;
13935 alloc->grnt.isRtx = FALSE;
13937 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
13938 alloc->grnt.SCID = 0;
13939 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
13940 alloc->grnt.PMI = 0;
13941 alloc->grnt.uciOnxPUSCH = 0;
13942 alloc->grnt.hqProcId = proc->procId;
13944 alloc->hqProc = proc;
13945 alloc->hqProc->ulSfIdx = cellUl->schdIdx;
13947 /*commenting to retain the rnti used for transmission SPS/c-rnti */
13948 alloc->rnti = ue->ueId;
13949 ueUl->alloc.alloc = alloc;
13950 /*rntiwari-Adding the debug for generating the graph.*/
13951 /* No grant attr recorded now */
13955 /***********************************************************
13957 * Func : rgSCHCmnUlRbAllocAddUeToLst
13959 * Desc : Add UE to list (scheduled/non-scheduled list)
13960 * for UL RB allocation information.
13968 **********************************************************/
13970 Void rgSCHCmnUlRbAllocAddUeToLst
13977 Void rgSCHCmnUlRbAllocAddUeToLst(cell, ue, lst)
/* NOTE(review): this listing is elided (gaps in the embedded line numbers);
 * the K&R parameter declarations and body braces are not visible here. */
13983 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Global 5GTF counter of UEs whose UL RB allocation completed. */
13986 gUl5gtfUeRbAllocDone++;
13987 #if defined (TENB_STATS) && defined (RG_5GTF)
13988 cell->tenbStats->sch.ul5gtfUeRbAllocDone++;
/* Append the UE's UL-alloc link to the caller-supplied list and back-link
 * the UE so a list node can be mapped back to its RgSchUeCb. */
13990 cmLListAdd2Tail(lst, &ueUl->alloc.schdLstLnk);
13991 ueUl->alloc.schdLstLnk.node = (PTR)ue;
13996 * @brief This function Processes the Final Allocations
13997 * made by the RB Allocator against the requested.
14001 * Function: rgSCHCmnUlAllocFnlz
14002 * Purpose: This function Processes the Final Allocations
14003 * made by the RB Allocator against the requested.
14005 * Invoked by: Common Scheduler
14007 * @param[in] RgSchCellCb *cell
14008 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
14013 PRIVATE Void rgSCHCmnUlAllocFnlz
14016 RgSchCmnUlRbAllocInfo *allocInfo
14019 PRIVATE Void rgSCHCmnUlAllocFnlz(cell, allocInfo)
14021 RgSchCmnUlRbAllocInfo *allocInfo;
14024 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Thin dispatcher: finalization is delegated to the UL scheduler-specific
 * API table (apisUl) registered for this cell. */
14026 /* call scheduler specific Finalization */
14027 cellSch->apisUl->rgSCHUlAllocFnlz(cell, allocInfo);
14033 * @brief This function Processes the Final Allocations
14034 * made by the RB Allocator against the requested.
14038 * Function: rgSCHCmnDlAllocFnlz
14039 * Purpose: This function Processes the Final Allocations
14040 * made by the RB Allocator against the requested.
14042 * Invoked by: Common Scheduler
14044 * @param[in] RgSchCellCb *cell
14049 Void rgSCHCmnDlAllocFnlz
14054 Void rgSCHCmnDlAllocFnlz(cell)
14058 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14059 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
/* Finalize common-channel allocations first: CCCH retx before new tx. */
14062 rgSCHCmnDlCcchRetxFnlz(cell, allocInfo);
14063 rgSCHCmnDlCcchTxFnlz(cell, allocInfo);
14065 /* Added below functions for handling CCCH SDU transmission received
14067 * * guard timer expiry*/
14068 rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo);
14069 rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo);
/* RA response allocations, then hand off to the DL scheduler-specific
 * finalization through the apisDl dispatch table. */
14071 rgSCHCmnDlRaRspFnlz(cell, allocInfo);
14072 /* call scheduler specific Finalization */
14073 cellSch->apisDl->rgSCHDlAllocFnlz(cell, allocInfo);
14075 /* Stack Crash problem for TRACE5 Changes. Added the return below */
14082 * @brief Update an uplink subframe.
14086 * Function : rgSCHCmnUlUpdSf
14088 * For each allocation
14089 * - if no more tx needed
14090 * - Release allocation
14092 * - Perform retransmission
14094 * @param[in] RgSchUlSf *sf
14098 PRIVATE Void rgSCHCmnUlUpdSf
14101 RgSchCmnUlRbAllocInfo *allocInfo,
14105 PRIVATE Void rgSCHCmnUlUpdSf(cell, allocInfo, sf)
14107 RgSchCmnUlRbAllocInfo *allocInfo;
/* Drain the subframe's allocation list; each iteration either retires an
 * allocation (CRC received or no retx budget left) or queues it for retx,
 * then releases it back to the subframe. */
14113 while ((lnk = sf->allocs.first))
14115 RgSchUlAlloc *alloc = (RgSchUlAlloc *)lnk->node;
/* Retire when the UL HARQ proc got its CRC or exhausted transmissions;
 * otherwise this alloc needs a retransmission. */
14118 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
14123 /* If need to handle all retx together, run another loop separately */
14124 rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc);
14126 rgSCHCmnUlRlsUlAlloc(cell, sf, alloc);
14129 /* By this time, all allocs would have been cleared and
14130 * SF is reset to be made ready for new allocations. */
14131 rgSCHCmnUlSfReset(cell, sf);
14132 /* In case there are timing problems due to msg3
14133 * allocations being done in advance, (which will
14134 * probably happen with the current FDD code that
14135 * handles 8 subframes) one solution
14136 * could be to hold the (recent) msg3 allocs in a separate
14137 * list, and then possibly add that to the actual
14138 * list later. So at this time while allocations are
14139 * traversed, the recent msg3 ones are not seen. Anytime after
14140 * this (a good time is when the usual allocations
14141 * are made), msg3 allocations could be transferred to the
14142 * normal list. Not doing this now as it is assumed
14143 * that incorporation of TDD shall take care of this.
14151 * @brief Handle uplink allocation for retransmission.
14155 * Function : rgSCHCmnUlHndlAllocRetx
14157 * Processing Steps:
14158 * - Add to queue for retx.
14159 * - Do not release here, release happends as part
14160 * of the loop that calls this function.
14162 * @param[in] RgSchCellCb *cell
14163 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
14164 * @param[in] RgSchUlSf *sf
14165 * @param[in] RgSchUlAlloc *alloc
14169 PRIVATE Void rgSCHCmnUlHndlAllocRetx
14172 RgSchCmnUlRbAllocInfo *allocInfo,
14174 RgSchUlAlloc *alloc
14177 PRIVATE Void rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc)
14179 RgSchCmnUlRbAllocInfo *allocInfo;
14181 RgSchUlAlloc *alloc;
14185 RgSchCmnUlUe *ueUl;
/* Recompute the transport-block byte count of the failed transmission from
 * the TB-size table, indexed by iTbs (derived from iMcs) and RB count. */
14187 rgTbSzTbl[0][rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs)]\
14188 [alloc->grnt.numRb-1]/8;
/* Dedicated (non-msg3) alloc: mark the HARQ proc for retx and queue the UE
 * on the retx UE list for adaptive rescheduling. */
14189 if (!alloc->forMsg3)
14191 ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue);
14192 ueUl->alloc.reqBytes = bytes;
14193 rgSCHUhmRetx(alloc->hqProc);
14194 rgSCHCmnUlAdd2RetxUeLst(allocInfo, alloc->ue);
14198 /* RACHO msg3 retx handling. Part of RACH procedure changes. */
14199 retxAlloc = rgSCHCmnUlGetUlAlloc(cell, sf, alloc->numSb);
14200 if (retxAlloc == NULLP)
/* NOTE(review): this log text names rgSCHCmnUlRbAllocForUe() but the
 * enclosing function is rgSCHCmnUlHndlAllocRetx — looks copy-pasted;
 * correct the message when a code change is safe to make. */
14202 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
14203 "rgSCHCmnUlRbAllocForUe():Could not get UlAlloc for msg3Retx RNTI:%d",
/* Non-adaptive msg3 retx: clone grant parameters into the fresh alloc;
 * iMcsCrnt comes from the RV-index table for the HARQ redundancy version. */
14207 retxAlloc->grnt.iMcs = alloc->grnt.iMcs;
14208 retxAlloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl\
14209 [alloc->hqProc->rvIdx];
14210 retxAlloc->grnt.nDmrs = 0;
14211 retxAlloc->grnt.hop = 0;
14212 retxAlloc->grnt.delayBit = 0;
14213 retxAlloc->rnti = alloc->rnti;
14214 retxAlloc->ue = NULLP;
14215 retxAlloc->pdcch = FALSE;
14216 retxAlloc->forMsg3 = TRUE;
14217 retxAlloc->raCb = alloc->raCb;
14218 retxAlloc->hqProc = alloc->hqProc;
14219 rgSCHUhmRetx(retxAlloc->hqProc);
14226 * @brief Uplink Scheduling Handler.
14230 * Function: rgSCHCmnUlAlloc
14231 * Purpose: This function Handles Uplink Scheduling.
14233 * Invoked by: Common Scheduler
14235 * @param[in] RgSchCellCb *cell
14238 /* ccpu00132653- The definition of this function made common for TDD and FDD*/
14240 PRIVATE Void rgSCHCmnUlAlloc
14245 PRIVATE Void rgSCHCmnUlAlloc(cell)
14249 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14250 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
14251 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
14252 RgSchCmnUlRbAllocInfo allocInfo;
14253 RgSchCmnUlRbAllocInfo *allocInfoRef = &allocInfo;
14260 /* Initializing RgSchCmnUlRbAllocInfo structure */
14261 rgSCHCmnInitUlRbAllocInfo(allocInfoRef);
14263 /* Get Uplink Subframe */
14264 allocInfoRef->sf = &cellUl->ulSfArr[cellUl->schdIdx];
14266 /* initializing the UL PRB count */
14267 allocInfoRef->sf->totPrb = 0;
14271 rgSCHCmnSpsUlTti(cell, allocInfoRef);
/* Only when no allocation exists yet for this UL subframe may the hole DB
 * be re-seeded for the current CFI. */
14274 if(*allocInfoRef->sf->allocCountRef == 0)
14278 if ((hole = rgSCHUtlUlHoleFirst(allocInfoRef->sf)) != NULLP)
14280 /* Sanity check of holeDb */
14281 if (allocInfoRef->sf->holeDb->count == 1 && hole->start == 0)
/* Re-size the single full-width hole to the subband count of the CFI
 * currently in force (dynamic CFI may have changed it). */
14283 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
14284 /* Re-Initialize available subbands because of CFI change*/
14285 allocInfoRef->sf->availSubbands = cell->dynCfiCb.\
14286 bwInfo[cellDl->currCfi].numSb;
14287 /*Currently initializing 5gtf ulsf specific initialization here.
14288 need to do at proper place */
14290 allocInfoRef->sf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
14291 allocInfoRef->sf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
/* Reset per-beam VRBG bookkeeping for all 5GTF beams. */
14292 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
14294 allocInfoRef->sf->sfBeamInfo[idx].totVrbgAllocated = 0;
14295 allocInfoRef->sf->sfBeamInfo[idx].totVrbgRequired = 0;
14296 allocInfoRef->sf->sfBeamInfo[idx].vrbgStart = 0;
14302 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
14303 "Error! holeDb sanity check failed");
14308 /* Fix: Adaptive re-transmissions prioritised over other transmissions */
14309 /* perform adaptive retransmissions */
14310 rgSCHCmnUlSfReTxAllocs(cell, allocInfoRef->sf);
14314 /* Fix: syed Adaptive Msg3 Retx crash. Release all
14315 Harq processes for which adap Retx failed, to avoid
14316 blocking. This step should be done before New TX
14317 scheduling to make hqProc available. Right now we
14318 dont check if proc is in adap Retx list for considering
14319 it to be available. But now with this release that
14320 functionality would be correct. */
14322 rgSCHCmnUlSfRlsRetxProcs(cell, allocInfoRef->sf);
14325 /* Specific UL scheduler to perform UE scheduling */
14326 cellSch->apisUl->rgSCHUlSched(cell, allocInfoRef);
14328 /* Call UL RB allocator module */
14329 rgSCHCmnAllocUlRb(cell, allocInfoRef);
14331 /* Do group power control for PUSCH */
14332 rgSCHCmnGrpPwrCntrlPusch(cell, allocInfoRef->sf);
14334 cell->sc.apis->rgSCHDrxStrtInActvTmrInUl(cell);
14336 rgSCHCmnUlAllocFnlz(cell, allocInfoRef);
/* 5GTF debug counters are zeroed once the TTI count hits 5000. */
14337 if(5000 == g5gtfTtiCnt)
14339 ul5gtfsidDlAlreadyMarkUl = 0;
14340 ul5gtfsidDlSchdPass = 0;
14341 ul5gtfsidUlMarkUl = 0;
14342 ul5gtfTotSchdCnt = 0;
14350 * @brief send Subframe Allocations.
14354 * Function: rgSCHCmnSndCnsldtInfo
14355 * Purpose: Send the scheduled
14356 * allocations to MAC for StaInd generation to Higher layers and
14357 * for MUXing. PST's RgInfSfAlloc to MAC instance.
14359 * Invoked by: Common Scheduler
14361 * @param[in] RgSchCellCb *cell
14365 Void rgSCHCmnSndCnsldtInfo
14370 Void rgSCHCmnSndCnsldtInfo(cell)
14374 RgInfSfAlloc *subfrmAlloc;
14376 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14379 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
14381 /* Send the allocations to MAC for MUXing */
14382 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
14383 subfrmAlloc->cellId = cell->cellId;
14384 /* Populate the List of UEs needing PDB-based Flow control */
14385 cellSch->apisDl->rgSCHDlFillFlwCtrlInfo(cell, subfrmAlloc);
/* Post to MAC only when at least one allocation category is non-empty.
 * NOTE(review): two near-identical condition lists appear below (one with
 * ulUeInfo, one without) — presumably alternate #ifdef branches whose
 * preprocessor lines were elided from this listing; verify in full source. */
14387 if((subfrmAlloc->rarInfo.numRaRntis) ||
14389 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
14390 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
14391 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
14393 (subfrmAlloc->ueInfo.numUes) ||
14394 (subfrmAlloc->cmnLcInfo.bitMask) ||
14395 (subfrmAlloc->ulUeInfo.numUes) ||
14396 (subfrmAlloc->flowCntrlInfo.numUes))
14398 if((subfrmAlloc->rarInfo.numRaRntis) ||
14400 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
14401 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
14402 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
14404 (subfrmAlloc->ueInfo.numUes) ||
14405 (subfrmAlloc->cmnLcInfo.bitMask) ||
14406 (subfrmAlloc->flowCntrlInfo.numUes))
14409 RgSchMacSfAlloc(&pst, subfrmAlloc);
/* Advance the ring index into sfAllocArr; the two modulo bases are
 * presumably alternate #ifdef (TDD/FDD) branches — confirm in full source. */
14412 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_NUM_SUB_FRAMES;
14414 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_SF_ALLOC_SIZE;
14420 * @brief Consolidate Subframe Allocations.
14424 * Function: rgSCHCmnCnsldtSfAlloc
14425 * Purpose: Consolidate Subframe Allocations.
14427 * Invoked by: Common Scheduler
14429 * @param[in] RgSchCellCb *cell
14433 Void rgSCHCmnCnsldtSfAlloc
14438 Void rgSCHCmnCnsldtSfAlloc(cell)
14442 RgInfSfAlloc *subfrmAlloc;
14443 CmLteTimingInfo frm;
14445 CmLListCp dlDrxInactvTmrLst;
14446 CmLListCp dlInActvLst;
14447 CmLListCp ulInActvLst;
14448 RgSchCmnCell *cellSch = NULLP;
/* Working lists: UEs to (re)start the DRX inactivity timer for, and UEs to
 * mark inactive in DL / UL respectively. */
14451 cmLListInit(&dlDrxInactvTmrLst);
14452 cmLListInit(&dlInActvLst);
14453 cmLListInit(&ulInActvLst);
14455 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
14457 /* Get Downlink Subframe */
14458 frm = cell->crntTime;
14459 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
14460 dlSf = rgSCHUtlSubFrmGet(cell, frm);
14462 /* Fill the allocation Info */
14463 rgSCHUtlFillRgInfRarInfo(dlSf, subfrmAlloc, cell);
14466 rgSCHUtlFillRgInfUeInfo(dlSf, cell, &dlDrxInactvTmrLst,
14467 &dlInActvLst, &ulInActvLst);
14468 #ifdef RG_PFS_STATS
14469 cell->totalPrb += dlSf->bwAssigned;
14471 /* Mark the following Ues inactive for UL*/
14472 cellSch = RG_SCH_CMN_GET_CELL(cell);
14474 /* Calling Scheduler specific function with DRX inactive UE list*/
14475 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInActvLst);
14476 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInActvLst);
14479 /*re/start DRX inactivity timer for the UEs*/
14480 (Void)rgSCHDrxStrtInActvTmr(cell,&dlDrxInactvTmrLst,RG_SCH_DRX_DL);
14486 * @brief Initialize the DL Allocation Information Structure.
14490 * Function: rgSCHCmnInitDlRbAllocInfo
14491 * Purpose: Initialize the DL Allocation Information Structure.
14493 * Invoked by: Common Scheduler
14495 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
14499 PRIVATE Void rgSCHCmnInitDlRbAllocInfo
14501 RgSchCmnDlRbAllocInfo *allocInfo
14504 PRIVATE Void rgSCHCmnInitDlRbAllocInfo(allocInfo)
14505 RgSchCmnDlRbAllocInfo *allocInfo;
/* Zero the fixed-size sub-structures, then re-initialize every list head
 * so stale links from the previous TTI cannot survive. */
14508 memset(&allocInfo->pcchAlloc, 0, sizeof(RgSchDlRbAlloc));
14509 memset(&allocInfo->bcchAlloc, 0, sizeof(RgSchDlRbAlloc));
14510 memset(allocInfo->raRspAlloc, 0, RG_SCH_CMN_MAX_CMN_PDCCH*sizeof(RgSchDlRbAlloc));
/* Msg4 allocation lists: pending tx/retx plus scheduled/non-scheduled. */
14512 allocInfo->msg4Alloc.msg4DlSf = NULLP;
14513 cmLListInit(&allocInfo->msg4Alloc.msg4TxLst);
14514 cmLListInit(&allocInfo->msg4Alloc.msg4RetxLst);
14515 cmLListInit(&allocInfo->msg4Alloc.schdMsg4TxLst);
14516 cmLListInit(&allocInfo->msg4Alloc.schdMsg4RetxLst);
14517 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4TxLst);
14518 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4RetxLst);
/* CCCH SDU allocation lists, same tx/retx split as msg4. */
14520 allocInfo->ccchSduAlloc.ccchSduDlSf = NULLP;
14521 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduTxLst);
14522 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduRetxLst);
14523 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduTxLst);
14524 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduRetxLst);
14525 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst);
14526 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst);
/* Dedicated-channel HARQ process lists (tx, retx, tx+retx, SPS). */
14529 allocInfo->dedAlloc.dedDlSf = NULLP;
14530 cmLListInit(&allocInfo->dedAlloc.txHqPLst);
14531 cmLListInit(&allocInfo->dedAlloc.retxHqPLst);
14532 cmLListInit(&allocInfo->dedAlloc.schdTxHqPLst);
14533 cmLListInit(&allocInfo->dedAlloc.schdRetxHqPLst);
14534 cmLListInit(&allocInfo->dedAlloc.nonSchdTxHqPLst);
14535 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxHqPLst);
14537 cmLListInit(&allocInfo->dedAlloc.txRetxHqPLst);
14538 cmLListInit(&allocInfo->dedAlloc.schdTxRetxHqPLst);
14539 cmLListInit(&allocInfo->dedAlloc.nonSchdTxRetxHqPLst);
14541 cmLListInit(&allocInfo->dedAlloc.txSpsHqPLst);
14542 cmLListInit(&allocInfo->dedAlloc.retxSpsHqPLst);
14543 cmLListInit(&allocInfo->dedAlloc.schdTxSpsHqPLst);
14544 cmLListInit(&allocInfo->dedAlloc.schdRetxSpsHqPLst);
14545 cmLListInit(&allocInfo->dedAlloc.nonSchdTxSpsHqPLst);
14546 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxSpsHqPLst);
/* LAA-specific list initialization, then error-indication tx lists. */
14550 rgSCHLaaCmnInitDlRbAllocInfo (allocInfo);
14553 cmLListInit(&allocInfo->dedAlloc.errIndTxHqPLst);
14554 cmLListInit(&allocInfo->dedAlloc.schdErrIndTxHqPLst);
14555 cmLListInit(&allocInfo->dedAlloc.nonSchdErrIndTxHqPLst);
14560 * @brief Initialize the UL Allocation Information Structure.
14564 * Function: rgSCHCmnInitUlRbAllocInfo
14565 * Purpose: Initialize the UL Allocation Information Structure.
14567 * Invoked by: Common Scheduler
14569 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
14573 Void rgSCHCmnInitUlRbAllocInfo
14575 RgSchCmnUlRbAllocInfo *allocInfo
14578 Void rgSCHCmnInitUlRbAllocInfo(allocInfo)
14579 RgSchCmnUlRbAllocInfo *allocInfo;
/* Reset the UL subframe reference and all UE lists (contention-resolution
 * and regular, each with scheduled/non-scheduled variants). */
14582 allocInfo->sf = NULLP;
14583 cmLListInit(&allocInfo->contResLst);
14584 cmLListInit(&allocInfo->schdContResLst);
14585 cmLListInit(&allocInfo->nonSchdContResLst);
14586 cmLListInit(&allocInfo->ueLst);
14587 cmLListInit(&allocInfo->schdUeLst);
14588 cmLListInit(&allocInfo->nonSchdUeLst);
14594 * @brief Scheduling for PUCCH group power control.
14598 * Function: rgSCHCmnGrpPwrCntrlPucch
14599 * Purpose: This function does group power control for PUCCH
14600 * corresponding to the subframe for which DL UE allocations
14603 * Invoked by: Common Scheduler
14605 * @param[in] RgSchCellCb *cell
14609 PRIVATE Void rgSCHCmnGrpPwrCntrlPucch
14615 PRIVATE Void rgSCHCmnGrpPwrCntrlPucch(cell, dlSf)
/* Thin wrapper: delegates PUCCH group power control to the power module. */
14621 rgSCHPwrGrpCntrlPucch(cell, dlSf);
14627 * @brief Scheduling for PUSCH group power control.
14631 * Function: rgSCHCmnGrpPwrCntrlPusch
14632 * Purpose: This function does group power control, for
14633 * the subframe for which UL allocation has (just) happened.
14635 * Invoked by: Common Scheduler
14637 * @param[in] RgSchCellCb *cell
14638 * @param[in] RgSchUlSf *ulSf
14642 PRIVATE Void rgSCHCmnGrpPwrCntrlPusch
14648 PRIVATE Void rgSCHCmnGrpPwrCntrlPusch(cell, ulSf)
14653 /*removed unused variable *cellSch*/
14654 CmLteTimingInfo frm;
14658 /* Got to pass DL SF corresponding to UL SF, so get that first.
14659 * There is no easy way of getting dlSf by having the RgSchUlSf*,
14660 * so use the UL delta from current time to get the DL SF. */
14661 frm = cell->crntTime;
/* eMTC cells use the eMTC-specific subframe-increment macro. */
14664 if(cell->emtcEnable == TRUE)
14666 RGSCH_INCR_SUB_FRAME_EMTC(frm, TFU_DLCNTRL_DLDELTA);
14671 RGSCH_INCR_SUB_FRAME(frm, TFU_DLCNTRL_DLDELTA);
14673 /* Del filling of dl.time */
14674 dlSf = rgSCHUtlSubFrmGet(cell, frm);
14676 rgSCHPwrGrpCntrlPusch(cell, dlSf, ulSf);
14681 /* Fix: syed align multiple UEs to refresh at same time */
14682 /***********************************************************
14684 * Func : rgSCHCmnApplyUeRefresh
14686 * Desc : Apply UE refresh in CMN and Specific
14687 * schedulers. Data rates and corresponding
14688 * scratchpad variables are updated.
14696 **********************************************************/
14698 PRIVATE S16 rgSCHCmnApplyUeRefresh
14704 PRIVATE S16 rgSCHCmnApplyUeRefresh(cell, ue)
14709 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14710 uint32_t effGbrBsr = 0;
14711 uint32_t effNonGbrBsr = 0;
14715 /* Reset the refresh cycle variableCAP */
14716 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk LCGs 1..max (LCG 0 handled separately below) and rebuild effective
 * GBR/non-GBR buffer status for the new refresh period. */
14718 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
14720 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
14722 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
14724 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* GBR LCG: restore configured GBR/MBR and cap the buffer status at
 * effGbr + effDeltaMbr for this period. */
14726 cmnLcg->effGbr = cmnLcg->cfgdGbr;
14727 cmnLcg->effDeltaMbr = cmnLcg->deltaMbr;
14728 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
14729 /* Considering GBR LCG will be prioritised by UE */
14730 effGbrBsr += cmnLcg->bs;
14731 }/* Else no remaing BS so nonLcg0 will be updated when BSR will be received */
/* Non-GBR LCG: buffer status is bounded by the UE's effective AMBR. */
14734 effNonGbrBsr += cmnLcg->reportedBs;
14735 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
14739 effNonGbrBsr = RGSCH_MIN(effNonGbrBsr,ue->ul.effAmbr);
14740 ue->ul.nonGbrLcgBs = effNonGbrBsr;
/* Aggregate: non-LCG0 BS plus LCG0's own BS gives the effective BSR. */
14742 ue->ul.nonLcg0Bs = effGbrBsr + effNonGbrBsr;
14743 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
14744 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
14747 /* call scheduler specific event handlers
14748 * for refresh timer expiry */
14749 cellSch->apisUl->rgSCHUlUeRefresh(cell, ue);
14750 cellSch->apisDl->rgSCHDlUeRefresh(cell, ue);
14755 /***********************************************************
14757 * Func : rgSCHCmnTmrExpiry
14759 * Desc : Adds an UE to refresh queue, so that the UE is
14760 * periodically triggered to refresh it's GBR and
14769 **********************************************************/
14771 PRIVATE S16 rgSCHCmnTmrExpiry
14773 PTR cb, /* Pointer to timer control block */
14774 S16 tmrEvnt /* Timer Event */
14777 PRIVATE S16 rgSCHCmnTmrExpiry(cb, tmrEvnt)
14778 PTR cb; /* Pointer to timer control block */
14779 S16 tmrEvnt; /* Timer Event */
14782 RgSchUeCb *ue = (RgSchUeCb *)cb;
14783 RgSchCellCb *cell = ue->cell;
14784 #if (ERRCLASS & ERRCLS_DEBUG)
/* Debug-only guard: the only expected event is the UE refresh timer. */
14788 #if (ERRCLASS & ERRCLS_DEBUG)
14789 if (tmrEvnt != RG_SCH_CMN_EVNT_UE_REFRESH)
14791 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnTmrExpiry(): Invalid "
14792 "timer event CRNTI:%d",ue->ueId);
/* Apply the refresh now, then re-arm by queueing the UE for the next
 * refresh period. */
14799 rgSCHCmnApplyUeRefresh(cell, ue);
14801 rgSCHCmnAddUeToRefreshQ(cell, ue, RG_SCH_CMN_REFRESH_TIME);
14806 /***********************************************************
14808 * Func : rgSCHCmnTmrProc
14810 * Desc : Timer entry point per cell. Timer
14811 * processing is triggered at every frame boundary
14820 **********************************************************/
14822 PRIVATE S16 rgSCHCmnTmrProc
14827 PRIVATE S16 rgSCHCmnTmrProc(cell)
14831 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
14832 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
14833 /* Moving the assignment of scheduler pointer
14834 to available scope for optimization */
/* Run frame-boundary work only on the first slot of a frame. */
14836 if ((cell->crntTime.slot % RGSCH_NUM_SUB_FRAMES_5G) == 0)
14838 /* Reset the counters periodically */
14839 if ((cell->crntTime.sfn % RG_SCH_CMN_CSG_REFRESH_TIME) == 0)
14841 RG_SCH_RESET_HCSG_DL_PRB_CNTR(cmnDlCell);
14842 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cmnUlCell);
14844 if ((cell->crntTime.sfn % RG_SCH_CMN_OVRLDCTRL_REFRESH_TIME) == 0)
/* Exponential moving average: 95% previous throughput, 5% new byte count. */
14847 cell->measurements.ulTpt = ((cell->measurements.ulTpt * 95) + ( cell->measurements.ulBytesCnt * 5))/100;
14848 cell->measurements.dlTpt = ((cell->measurements.dlTpt * 95) + ( cell->measurements.dlBytesCnt * 5))/100;
14850 rgSCHUtlCpuOvrLdAdjItbsCap(cell);
14851 /* reset cell level tpt measurements for next cycle */
14852 cell->measurements.ulBytesCnt = 0;
14853 cell->measurements.dlBytesCnt = 0;
14855 /* Comparing with Zero instead of % is being done for efficiency.
14856 * If Timer resolution changes then accordingly update the
14857 * macro RG_SCH_CMN_REFRESH_TIMERES */
/* Drive the per-cell timer queue; expiries land in rgSCHCmnTmrExpiry. */
14858 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
14859 cmPrcTmr(&sched->tmrTqCp, sched->tmrTq, (PFV)rgSCHCmnTmrExpiry);
14866 /***********************************************************
14868 * Func : rgSchCmnUpdCfiVal
14870 * Desc : Update the CFI value if CFI switch was done
14878 **********************************************************/
14880 PRIVATE Void rgSchCmnUpdCfiVal
14886 PRIVATE Void rgSchCmnUpdCfiVal(cell, delta)
14892 CmLteTimingInfo pdsch;
14893 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
14899 uint8_t splSfCfi = 0;
/* Locate the DL subframe 'delta' subframes ahead of current time. */
14903 pdsch = cell->crntTime;
14904 RGSCH_INCR_SUB_FRAME(pdsch, delta);
14905 dlSf = rgSCHUtlSubFrmGet(cell, pdsch);
14906 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
14907 *change happens in that SF then UL PDCCH allocation happens with old CFI
14908 *but CFI in control Req goes updated one since it was stored in the CELL
14910 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* 0xFF in pdcchSfIdx means no CFI switch is pending. */
14911 if(cell->dynCfiCb.pdcchSfIdx != 0xFF)
14914 dlIdx = rgSCHUtlGetDlSfIdx(cell, &pdsch);
14916 dlIdx = (((pdsch.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (pdsch.slot % RGSCH_NUM_SUB_FRAMES));
14917 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
14919 /* If current downlink subframe index is same as pdcch SF index,
14920 * perform the switching of CFI in this subframe */
14921 if(cell->dynCfiCb.pdcchSfIdx == dlIdx)
14923 cellCmnDl->currCfi = cellCmnDl->newCfi;
14924 cell->dynCfiCb.pdcchSfIdx = 0xFF;
14926 /* Updating the nCce value based on the new CFI */
/* Recompute nCce for every DL subframe; TDD special subframes with data
 * use a (possibly reduced) special-subframe CFI. */
14928 splSfCfi = cellCmnDl->newCfi;
14929 for(idx = 0; idx < cell->numDlSubfrms; idx++)
14931 tddSf = cell->subFrms[idx];
14933 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][tddSf->sfNum];
14935 if(tddSf->sfType == RG_SCH_SPL_SF_DATA)
14937 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
14939 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
14943 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][cellCmnDl->currCfi];
14946 /* Setting the switch over window length based on config index.
14947 * During switch over period all the UL trnsmissions are Acked
14949 cell->dynCfiCb.switchOvrWinLen =
14950 rgSchCfiSwitchOvrWinLen[cell->ulDlCfgIdx];
14952 cell->nCce = cell->dynCfiCb.cfi2NCceTbl[0][cellCmnDl->currCfi];
14953 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
14954 *change happens in that SF then UL PDCCH allocation happens with old CFI
14955 *but CFI in control Req goes updated one since it was stored in the CELL
14957 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
14958 cell->dynCfiCb.switchOvrWinLen = rgSchCfiSwitchOvrWinLen[7];
14966 /***********************************************************
14968 * Func : rgSchCmnUpdtPdcchSfIdx
14970 * Desc : Update the switch over window length
14978 **********************************************************/
14981 PRIVATE Void rgSchCmnUpdtPdcchSfIdx
14988 PRIVATE Void rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, sfNum)
/* NOTE(review): two prototypes are visible here — a 3-arg (cell, dlIdx,
 * sfNum) and a 2-arg (cell, dlIdx) variant; presumably TDD/FDD #ifdef
 * branches whose preprocessor lines were elided from this listing. */
14995 PRIVATE Void rgSchCmnUpdtPdcchSfIdx
15001 PRIVATE Void rgSchCmnUpdtPdcchSfIdx(cell, dlIdx)
15010 /* Resetting the parameters on CFI switching */
15011 cell->dynCfiCb.cceUsed = 0;
15012 cell->dynCfiCb.lowCceCnt = 0;
15014 cell->dynCfiCb.cceFailSum = 0;
15015 cell->dynCfiCb.cceFailCnt = 0;
15016 cell->dynCfiCb.prevCceFailIdx = 0;
15018 cell->dynCfiCb.switchOvrInProgress = TRUE;
/* Clear the rolling CCE-failure sample window. */
15020 for(idx = 0; idx < cell->dynCfiCb.numFailSamples; idx++)
15022 cell->dynCfiCb.cceFailSamples[idx] = 0;
15025 cell->dynCfiCb.ttiCnt = 0;
15027 cell->dynCfiCb.cfiSwitches++;
15028 cfiSwitchCnt = cell->dynCfiCb.cfiSwitches;
/* Compute the DL subframe index at which the new CFI takes effect. */
15031 cell->dynCfiCb.pdcchSfIdx = (dlIdx +
15032 rgSchTddPdcchSfIncTbl[cell->ulDlCfgIdx][sfNum]) % cell->numDlSubfrms;
15034 cell->dynCfiCb.pdcchSfIdx = (dlIdx + RG_SCH_CFI_APPLY_DELTA) % \
15035 RGSCH_NUM_DL_slotS;
15039 /***********************************************************
15041 * Func : rgSchCmnUpdCfiDb
15043 * Desc : Update the counters related to dynamic
15044 * CFI feature in cellCb.
15052 **********************************************************/
15054 Void rgSchCmnUpdCfiDb
15060 Void rgSchCmnUpdCfiDb(cell, delta)
15065 CmLteTimingInfo frm;
15071 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15072 uint8_t nCceLowerCfi = 0;
15074 uint8_t cceFailIdx;
15080 /* Get Downlink Subframe */
15081 frm = cell->crntTime;
15082 RGSCH_INCR_SUB_FRAME(frm, delta);
15085 dlIdx = rgSCHUtlGetDlSfIdx(cell, &frm);
15086 dlSf = cell->subFrms[dlIdx];
15087 isHiDci0 = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][dlSf->sfNum];
15089 /* Changing the idexing
15090 so that proper subframe is selected */
15091 dlIdx = (((frm.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (frm.slot % RGSCH_NUM_SUB_FRAMES));
15092 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
15093 dlSf = cell->subFrms[dlIdx];
15096 currCfi = cellSch->dl.currCfi;
/* Monitoring runs only while no CFI switch-over is already in progress. */
15098 if(!cell->dynCfiCb.switchOvrInProgress)
/* Static-CFI mode: converge step-by-step toward the configured CFI. */
15101 if(!cell->dynCfiCb.isDynCfiEnb)
15103 if(currCfi != cellSch->cfiCfg.cfi)
15105 if(currCfi < cellSch->cfiCfg.cfi)
15107 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
15108 cfiIncr = cell->dynCfiCb.cfiIncr;
15112 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
15113 cfiDecr = cell->dynCfiCb.cfiDecr;
15120 /* Setting ttiMod to 0 for ttiCnt > 1000 in case if this
15121 * function was not called in UL subframe*/
15122 if(cell->dynCfiCb.ttiCnt > RGSCH_CFI_TTI_MON_INTRVL)
15129 ttiMod = cell->dynCfiCb.ttiCnt % RGSCH_CFI_TTI_MON_INTRVL;
15132 dlSf->dlUlBothCmplt++;
/* Evaluate step-up/step-down only once both DL and UL processing for this
 * subframe completed (or no HI/DCI0 is carried in it). */
15134 if((dlSf->dlUlBothCmplt == 2) || (!isHiDci0))
15136 if(dlSf->dlUlBothCmplt == 2)
15139 /********************STEP UP CRITERIA********************/
15140 /* Updating the CCE failure count parameter */
15141 cell->dynCfiCb.cceFailCnt += dlSf->isCceFailure;
15142 cell->dynCfiCb.cceFailSum += dlSf->isCceFailure;
15144 /* Check if cfi step up can be performed */
15145 if(currCfi < cell->dynCfiCb.maxCfi)
15147 if(cell->dynCfiCb.cceFailSum >= cell->dynCfiCb.cfiStepUpTtiCnt)
15149 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
15150 cfiIncr = cell->dynCfiCb.cfiIncr;
15155 /********************STEP DOWN CRITERIA********************/
15157 /* Updating the no. of CCE used in this dl subframe */
15158 cell->dynCfiCb.cceUsed += dlSf->cceCnt;
15160 if(currCfi > RGSCH_MIN_CFI_VAL)
15162 /* calculating the number of CCE for next lower CFI */
15164 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][dlSf->sfNum];
15165 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[mPhich][currCfi-1];
15167 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[0][currCfi-1];
15169 if(dlSf->cceCnt < nCceLowerCfi)
15171 /* Updating the count of TTIs in which no. of CCEs
15172 * used were less than the CCEs of next lower CFI */
15173 cell->dynCfiCb.lowCceCnt++;
/* Step down only when there were no CCE failures, enough low-usage TTIs,
 * and total CCE usage stayed under the percentage threshold. */
15178 totalCce = (nCceLowerCfi * cell->dynCfiCb.cfiStepDownTtiCnt *
15179 RGSCH_CFI_CCE_PERCNTG)/100;
15181 if((!cell->dynCfiCb.cceFailSum) &&
15182 (cell->dynCfiCb.lowCceCnt >=
15183 cell->dynCfiCb.cfiStepDownTtiCnt) &&
15184 (cell->dynCfiCb.cceUsed < totalCce))
15186 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
15187 cfiDecr = cell->dynCfiCb.cfiDecr;
/* Rolling sample window: on entering a new sample period, retire its old
 * contribution from cceFailSum and save the finished period's count. */
15193 cceFailIdx = ttiMod/cell->dynCfiCb.failSamplePrd;
15195 if(cceFailIdx != cell->dynCfiCb.prevCceFailIdx)
15197 /* New sample period has started. Subtract the old count
15198 * from the new sample period */
15199 cell->dynCfiCb.cceFailSum -= cell->dynCfiCb.cceFailSamples[cceFailIdx];
15201 /* Store the previous sample period data */
15202 cell->dynCfiCb.cceFailSamples[cell->dynCfiCb.prevCceFailIdx]
15203 = cell->dynCfiCb.cceFailCnt;
15205 cell->dynCfiCb.prevCceFailIdx = cceFailIdx;
15207 /* Resetting the CCE failure count as zero for next sample period */
15208 cell->dynCfiCb.cceFailCnt = 0;
15213 /* Restting the parametrs after Monitoring Interval expired */
15214 cell->dynCfiCb.cceUsed = 0;
15215 cell->dynCfiCb.lowCceCnt = 0;
15216 cell->dynCfiCb.ttiCnt = 0;
15219 cell->dynCfiCb.ttiCnt++;
/* A pending CFI change schedules the subframe at which it is applied. */
15223 if(cellSch->dl.newCfi != cellSch->dl.currCfi)
15226 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, dlSf->sfNum);
15228 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx);
15235 * @brief Dl Scheduler for Broadcast and Common channel scheduling.
15239 * Function: rgSCHCmnDlCommonChSch
15240 * Purpose: This function schedules DL Common channels for LTE.
15241 * Invoked by TTI processing in TOM. Scheduling is done for
15242 * BCCH, PCCH, Msg4, CCCH SDU, RAR in that order
15244 * Invoked by: TOM (TTI processing)
15246 * @param[in] RgSchCellCb *cell
15250 Void rgSCHCmnDlCommonChSch
15255 Void rgSCHCmnDlCommonChSch(cell)
15259 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* PDB tracking tick, then refresh the CFI in force for the DL delta SF. */
15262 cellSch->apisDl->rgSCHDlTickForPdbTrkng(cell);
15263 rgSchCmnUpdCfiVal(cell, RG_SCH_CMN_DL_DELTA);
15265 /* handle Inactive UEs for DL */
15266 rgSCHCmnHdlDlInactUes(cell);
15268 /* Send a Tick to Refresh Timer */
15269 rgSCHCmnTmrProc(cell);
/* BCCH/PCCH scheduling is gated on DL data being allowed and SI
 * scheduling not being stopped. */
15271 if (cell->isDlDataAllwd && (cell->stopSiSch == FALSE))
15273 rgSCHCmnInitRbAlloc(cell);
15274 /* Perform DL scheduling of BCCH, PCCH */
15275 rgSCHCmnDlBcchPcchAlloc(cell);
15279 if(cell->siCb.inWindow != 0)
15281 cell->siCb.inWindow--;
15284 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
15286 rgSCHCmnDlCcchRarAlloc(cell);
15292 * @brief Scheduler invocation per TTI.
15296 * Function: rgSCHCmnUlSch
15297 * Purpose: This function implements UL scheduler alone. This is to
15298 * be able to perform scheduling with more flexibility.
15300 * Invoked by: TOM (TTI processing)
15302 * @param[in] RgSchCellCb *cell
/* Purpose: per-TTI UL scheduler entry. Performs UL allocation when a
 * valid schedule index exists, then drives the dynamic-CFI switchover
 * state machine and (under LTEMAC_SPS, presumably) the UL SPS tick.
 * NOTE(review): line-sampled extract — the leading numbers are
 * original-file line numbers; statements/braces between samples are
 * elided. */
15311 Void rgSCHCmnUlSch(cell)
15315 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* LAA SCell handling branch — body elided in this extract */
15320 if(TRUE == rgSCHLaaSCellEnabled(cell))
/* Only schedule when the UL scheduling index is valid */
15326 if(cellSch->ul.schdIdx != RGSCH_INVALID_INFO)
15328 rgSchCmnUpdCfiVal(cell, TFU_ULCNTRL_DLDELTA);
15330 /* Handle Inactive UEs for UL */
15331 rgSCHCmnHdlUlInactUes(cell);
15332 /* Perform UL Scheduling EVERY TTI */
15333 rgSCHCmnUlAlloc(cell);
15335 /* Calling function to update CFI parameters*/
15336 rgSchCmnUpdCfiDb(cell, TFU_ULCNTRL_DLDELTA);
/* Dynamic-CFI switchover window: count down; when it expires, apply
 * any pending dynamic-CFI reconfiguration and clear the in-progress
 * flag. */
15338 if(cell->dynCfiCb.switchOvrWinLen > 0)
15340 /* Decrementing the switchover window length */
15341 cell->dynCfiCb.switchOvrWinLen--;
15343 if(!cell->dynCfiCb.switchOvrWinLen)
15345 if(cell->dynCfiCb.dynCfiRecfgPend)
15347 /* Toggling the Dynamic CFI enabling */
15348 cell->dynCfiCb.isDynCfiEnb ^= 1;
15349 rgSCHDynCfiReCfg(cell, cell->dynCfiCb.isDynCfiEnb);
15350 cell->dynCfiCb.dynCfiRecfgPend = FALSE;
15352 cell->dynCfiCb.switchOvrInProgress = FALSE;
15360 rgSCHCmnSpsUlTti(cell, NULLP);
15370 * @brief This function updates the scheduler with service for an UE.
15374 * Function: rgSCHCmnDlDedBoUpd
15375 * Purpose: This function should be called whenever there is a
15376 * change BO for a service.
15378 * Invoked by: BO and Scheduler
15380 * @param[in] RgSchCellCb* cell
15381 * @param[in] RgSchUeCb* ue
15382 * @param[in] RgSchDlLcCb* svc
/* Purpose: buffer-occupancy (BO) update for a dedicated DL service.
 * Routes the update to the PDCCH-order queue (idle UE), the SPS
 * module (SPS-enabled LC), the eMTC scheduler, or the common DL
 * scheduler API — then the SCell BO updater.
 * NOTE(review): line-sampled extract; intervening lines are elided. */
15387 Void rgSCHCmnDlDedBoUpd
15394 Void rgSCHCmnDlDedBoUpd(cell, ue, svc)
15400 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15402 /* RACHO : if UEs idle time exceeded and a BO update
15403 * is received, then add UE to the pdcch Order Q */
15404 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
15406 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue, cell);
15407 /* If PDCCH order is already triggered and we are waiting for
15408 * RACH from UE then do not add to PdcchOdrQ. */
15409 if (ueDl->rachInfo.rapIdLnk.node == NULLP)
15411 rgSCHCmnDlAdd2PdcchOdrQ(cell, ue);
15417 /* If SPS service, invoke SPS module */
15418 if (svc->dlLcSpsCfg.isSpsEnabled)
15420 rgSCHCmnSpsDlDedBoUpd(cell, ue, svc);
15421 /* Note: Return from here, no update needed in other schedulers */
/* eMTC UEs are dispatched to the eMTC-specific DL scheduler API */
15426 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
15428 cellSch->apisEmtcDl->rgSCHDlDedBoUpd(cell, ue, svc);
15429 //printf("rgSCHEMTCDlDedBoUpd\n");
15434 cellSch->apisDl->rgSCHDlDedBoUpd(cell, ue, svc);
15439 rgSCHSCellDlDedBoUpd(cell, ue, svc);
15447 * @brief Removes an UE from Cell's TA List.
15451 * Function: rgSCHCmnRmvFrmTaLst
15452 * Purpose: Removes an UE from Cell's TA List.
15454 * Invoked by: Specific Scheduler
15456 * @param[in] RgSchCellCb* cell
15457 * @param[in] RgSchUeCb* ue
/* Purpose: remove a UE from the cell's Timing-Advance (TA) list.
 * eMTC UEs are delegated to rgSCHEmtcRmvFrmTaLst; otherwise the UE's
 * dlTaLnk is unlinked from cellCmnDl->taLst and its node cleared.
 * NOTE(review): line-sampled extract; braces/else elided. */
15462 Void rgSCHCmnRmvFrmTaLst
15468 Void rgSCHCmnRmvFrmTaLst(cell, ue)
15473 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
15476 if(cell->emtcEnable && ue->isEmtcUe)
15478 rgSCHEmtcRmvFrmTaLst(cellCmnDl,ue);
15483 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
/* Clearing node marks the link as "not on any list" */
15484 ue->dlTaLnk.node = (PTR)NULLP;
15489 /* Fix: syed Remove the msg4Proc from cell
15490 * msg4Retx Queue. I have used CMN scheduler function
15491 * directly. Please define a new API and call this
15492 * function through that. */
15495 * @brief This function removes MSG4 HARQ process from cell RETX Queues.
15499 * Function: rgSCHCmnDlMsg4ProcRmvFrmRetx
15500 * Purpose: This function removes MSG4 HARQ process from cell RETX Queues.
15502 * Invoked by: UE/RACB deletion.
15504 * @param[in] RgSchCellCb* cell
15505 * @param[in] RgSchDlHqProc* hqP
/* Purpose: detach a Msg4 / CCCH-SDU HARQ process from the cell's
 * retransmission queues (msg4RetxLst or ccchSduRetxLst) when the UE or
 * RA control block is deleted. The retxLnk.node non-NULL check guards
 * against a process that is not currently queued.
 * NOTE(review): line-sampled extract; braces elided. */
15510 Void rgSCHCmnDlMsg4ProcRmvFrmRetx
15513 RgSchDlHqProcCb *hqP
15516 Void rgSCHCmnDlMsg4ProcRmvFrmRetx(cell, hqP)
15518 RgSchDlHqProcCb *hqP;
15521 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Only act if the process is actually linked on a retx queue */
15523 if (hqP->tbInfo[0].ccchSchdInfo.retxLnk.node)
15525 if (hqP->hqE->msg4Proc == hqP)
15527 cmLListDelFrm(&cellSch->dl.msg4RetxLst, \
15528 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15529 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
15532 else if(hqP->hqE->ccchSduProc == hqP)
15534 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
15535 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15536 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
15545 * @brief This function adds a HARQ process for retx.
15549 * Function: rgSCHCmnDlProcAddToRetx
15550 * Purpose: This function adds a HARQ process to retransmission
15551 * queue. This may be performed when a HARQ ack is
15554 * Invoked by: HARQ feedback processing
15556 * @param[in] RgSchCellCb* cell
15557 * @param[in] RgSchDlHqProc* hqP
/* Purpose: queue a DL HARQ process for retransmission after a NACK.
 * Msg4 and CCCH-SDU processes go on the cell-level msg4RetxLst /
 * ccchSduRetxLst; SPS processes are handed to the SPS module
 * (inside an LTEMAC_SPS region, per the visible #endif); all other
 * processes go to the eMTC or common DL scheduler API.
 * NOTE(review): line-sampled extract; matching #ifdef/braces elided. */
15562 Void rgSCHCmnDlProcAddToRetx
15565 RgSchDlHqProcCb *hqP
15568 Void rgSCHCmnDlProcAddToRetx(cell, hqP)
15570 RgSchDlHqProcCb *hqP;
15573 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15575 if (hqP->hqE->msg4Proc == hqP) /* indicating msg4 transmission */
15577 cmLListAdd2Tail(&cellSch->dl.msg4RetxLst, \
15578 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
/* Back-pointer so the queue entry can recover its HARQ process */
15579 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
15582 else if(hqP->hqE->ccchSduProc == hqP)
15584 /*If CCCH SDU being transmitted without cont res CE*/
15585 cmLListAdd2Tail(&cellSch->dl.ccchSduRetxLst,
15586 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15587 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
15593 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
15595 /* Invoke SPS module for SPS HARQ proc re-transmission handling */
15596 rgSCHCmnSpsDlProcAddToRetx(cell, hqP);
15599 #endif /* LTEMAC_SPS */
15601 if((TRUE == cell->emtcEnable)
15602 && (TRUE == hqP->hqE->ue->isEmtcUe))
15604 cellSch->apisEmtcDl->rgSCHDlProcAddToRetx(cell, hqP);
15609 cellSch->apisDl->rgSCHDlProcAddToRetx(cell, hqP);
15617 * @brief This function performs RI validation and
15618 * updates it to the ueCb.
15622 * Function: rgSCHCmnDlSetUeRi
15623 * Purpose: This function performs RI validation and
15624 * updates it to the ueCb.
15626 * Invoked by: rgSCHCmnDlCqiInd
15628 * @param[in] RgSchCellCb *cell
15629 * @param[in] RgSchUeCb *ue
15630 * @param[in] uint8_t ri
15631 * @param[in] Bool isPeriodic
/* Purpose: validate a reported Rank Indicator and store it in the UE
 * control block. Clamps RI to the cell antenna-port count and the UE
 * category's max TX layers, patches the stored PMI on a 1->2 rank
 * switch for 2-antenna TM4, tracks periodic vs aperiodic RI
 * consistency via cqiCb->invalidateCqi, and sets/clears the forced
 * transmit-diversity flag.
 * NOTE(review): line-sampled extract — the #ifdef branches around the
 * tenbStats updates and the isPer if/else structure are partly
 * elided. */
15636 PRIVATE Void rgSCHCmnDlSetUeRi
15644 PRIVATE Void rgSCHCmnDlSetUeRi(cell, ue, ri, isPer)
15651 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15652 RgSchCmnUeInfo *ueSchCmn = RG_SCH_CMN_GET_CMN_UE(ue);
15655 RgSchUePCqiCb *cqiCb = RG_SCH_GET_UE_CELL_CQI_CB(ue,cell);
15660 /* FIX for RRC Reconfiguration issue */
15661 /* ccpu00140894- During Tx Mode transition RI report will not entertained for
15662 * specific during which SCH expecting UE can complete TX mode transition*/
15663 if (ue->txModeTransCmplt == FALSE)
15668 /* Restrict the Number of TX layers to cell->numTxAntPorts.
15669 * Protection from invalid RI values. */
15670 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
15672 /* Special case of converting PMI to sane value when
15673 * there is a switch in RI from 1 to 2 and PMI reported
15674 * for RI=1 is invalid for RI=2 */
15675 if ((cell->numTxAntPorts == 2) && (ue->mimoInfo.txMode == RGR_UE_TM_4))
15677 if ((ri == 2) && ( ueDl->mimoInfo.ri == 1))
15679 ueDl->mimoInfo.pmi = (ueDl->mimoInfo.pmi < 2)? 1:2;
15683 /* Restrict the Number of TX layers according to the UE Category */
15684 ueDl->mimoInfo.ri = RGSCH_MIN(ri, rgUeCatTbl[ueSchCmn->ueCat].maxTxLyrs);
/* Per-cell and per-UE RI histogram bookkeeping (riCnt is indexed by
 * ri-1; ri >= 1 is guaranteed by the clamping above) */
15686 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].riCnt[ueDl->mimoInfo.ri-1]++;
15687 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
15691 ue->tenbStats->stats.nonPersistent.sch[0].riCnt[ueDl->mimoInfo.ri-1]++;
15692 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
15698 /* If RI is from Periodic CQI report */
15699 cqiCb->perRiVal = ueDl->mimoInfo.ri;
15700 /* Reset at every Periodic RI Reception */
15701 cqiCb->invalidateCqi = FALSE;
15705 /* If RI is from Aperiodic CQI report */
15706 if (cqiCb->perRiVal != ueDl->mimoInfo.ri)
15708 /* if this aperRI is different from last reported
15709 * perRI then invalidate all CQI reports till next
15711 cqiCb->invalidateCqi = TRUE;
15715 cqiCb->invalidateCqi = FALSE;
/* RI > 1 lifts forced transmit diversity; RI == 1 in TM3 forces it */
15720 if (ueDl->mimoInfo.ri > 1)
15722 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
15724 else if (ue->mimoInfo.txMode == RGR_UE_TM_3) /* ri == 1 */
15726 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
15734 * @brief This function performs PMI validation and
15735 * updates it to the ueCb.
15739 * Function: rgSCHCmnDlSetUePmi
15740 * Purpose: This function performs PMI validation and
15741 * updates it to the ueCb.
15743 * Invoked by: rgSCHCmnDlCqiInd
15745 * @param[in] RgSchCellCb *cell
15746 * @param[in] RgSchUeCb *ue
15747 * @param[in] uint8_t pmi
/* Purpose: validate a reported PMI and store it in the UE DL MIMO info.
 * Returns S16 status (callers check != ROK — see the mode-31 caller).
 * For 2 TX antenna ports with rank 2, PMI values 2 and 3 are rejected
 * and the accepted PMI is stored as pmi+1; the 4-port path stores the
 * raw PMI. Also clears the "no PMI" forced-TD flag.
 * NOTE(review): line-sampled extract — the early-return for
 * txModeTransCmplt == FALSE and the rejection branches are elided. */
15752 PRIVATE S16 rgSCHCmnDlSetUePmi
15759 PRIVATE S16 rgSCHCmnDlSetUePmi(cell, ue, pmi)
15765 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Ignore PMI while a Tx-mode transition is still in progress */
15767 if (ue->txModeTransCmplt == FALSE)
15772 if (cell->numTxAntPorts == 2)
15778 if (ueDl->mimoInfo.ri == 2)
15780 /*ccpu00118150 - MOD - changed pmi value validation from 0 to 2*/
15781 /* PMI 2 and 3 are invalid incase of 2 TxAnt and 2 Layered SM */
15782 if (pmi == 2 || pmi == 3)
15786 ueDl->mimoInfo.pmi = pmi+1;
15790 ueDl->mimoInfo.pmi = pmi;
15793 else if (cell->numTxAntPorts == 4)
15799 ueDl->mimoInfo.pmi = pmi;
15801 /* Reset the No PMI Flag in forceTD */
15802 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
15807 * @brief This function Updates the DL CQI on PUCCH for the UE.
15811 * Function: rgSCHCmnDlProcCqiMode10
15813 * This function updates the DL CQI on PUCCH for the UE.
15815 * Invoked by: rgSCHCmnDlCqiOnPucchInd
15817 * Processing Steps:
15819 * @param[in] RgSchCellCb *cell
15820 * @param[in] RgSchUeCb *ue
15821 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH periodic CSI report in mode 1-0
 * (wideband CQI, no PMI). A CQI in (0, RG_SCH_CMN_MAX_CQI) is copied
 * to both codewords; an RI report is validated and forwarded to
 * rgSCHCmnDlSetUeRi. The RGR_CQI_REPT build adds the isCqiAvail out
 * parameter used by RRM CQI reporting.
 * NOTE(review): line-sampled extract — ANSI and K&R prototype variants
 * of both #ifdef arms are interleaved and #else/#endif lines are
 * elided. */
15826 #ifdef RGR_CQI_REPT
15828 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10
15832 TfuDlCqiPucch *pucchCqi,
15836 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail)
15839 TfuDlCqiPucch *pucchCqi;
15844 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10
15848 TfuDlCqiPucch *pucchCqi
15851 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi)
15854 TfuDlCqiPucch *pucchCqi;
15858 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15860 if (pucchCqi->u.mode10Info.type == TFU_RPT_CQI)
15862 /*ccpu00109787 - ADD - Check for non-zero CQI*/
15863 /* Checking whether the decoded CQI is a value between 1 and 15*/
15864 if((pucchCqi->u.mode10Info.u.cqi) && (pucchCqi->u.mode10Info.u.cqi
15865 < RG_SCH_CMN_MAX_CQI))
15867 ueDl->cqiFlag = TRUE;
/* Mode 1-0 carries one wideband CQI: mirror it onto both codewords */
15868 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode10Info.u.cqi;
15869 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
15870 /* ccpu00117452 - MOD - Changed macro name from
15871 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
15872 #ifdef RGR_CQI_REPT
15873 *isCqiAvail = TRUE;
15881 else if (pucchCqi->u.mode10Info.type == TFU_RPT_RI)
15883 if ( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode10Info.u.ri) )
15885 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode10Info.u.ri,
15890 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
15891 pucchCqi->u.mode10Info.u.ri,ue->ueId);
15898 * @brief This function Updates the DL CQI on PUCCH for the UE.
15902 * Function: rgSCHCmnDlProcCqiMode11
15904 * This function updates the DL CQI on PUCCH for the UE.
15906 * Invoked by: rgSCHCmnDlCqiOnPucchInd
15908 * Processing Steps:
15909 * Process CQI MODE 11
15910 * @param[in] RgSchCellCb *cell
15911 * @param[in] RgSchUeCb *ue
15912 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH periodic CSI report in mode 1-1
 * (wideband CQI + PMI). Valid CQI updates codeword 0; a present
 * wideband differential CQI derives codeword 1 via
 * RG_SCH_UPDT_CW2_CQI; the PMI is validated via rgSCHCmnDlSetUePmi;
 * RI reports go to rgSCHCmnDlSetUeRi. Also clears puschFdbkVld since
 * PUCCH feedback supersedes the stored PUSCH feedback.
 * NOTE(review): line-sampled extract — prototype variants interleaved,
 * #else/#endif and closing braces elided. */
15917 #ifdef RGR_CQI_REPT
15919 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11
15923 TfuDlCqiPucch *pucchCqi,
15925 Bool *is2ndCwCqiAvail
15928 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail, is2ndCwCqiAvail)
15931 TfuDlCqiPucch *pucchCqi;
15933 Bool *is2ndCwCqiAvail;
15937 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11
15941 TfuDlCqiPucch *pucchCqi
15944 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi)
15947 TfuDlCqiPucch *pucchCqi;
15951 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15953 if (pucchCqi->u.mode11Info.type == TFU_RPT_CQI)
15955 ue->mimoInfo.puschFdbkVld = FALSE;
15956 /*ccpu00109787 - ADD - Check for non-zero CQI*/
15957 if((pucchCqi->u.mode11Info.u.cqi.cqi) &&
15958 (pucchCqi->u.mode11Info.u.cqi.cqi < RG_SCH_CMN_MAX_CQI))
15960 ueDl->cqiFlag = TRUE;
15961 /* ccpu00117452 - MOD - Changed macro name from
15962 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
15963 #ifdef RGR_CQI_REPT
15964 *isCqiAvail = TRUE;
15966 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode11Info.u.cqi.cqi;
/* Second-codeword CQI is derived from the wideband differential CQI */
15967 if (pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.pres)
15969 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
15970 ueDl->mimoInfo.cwInfo[1].cqi, \
15971 pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.val);
15972 #ifdef RGR_CQI_REPT
15973 /* ccpu00117259 - ADD - Considering second codeword CQI info
15974 incase of MIMO for CQI Reporting */
15975 *is2ndCwCqiAvail = TRUE;
15983 rgSCHCmnDlSetUePmi(cell, ue, \
15984 pucchCqi->u.mode11Info.u.cqi.pmi);
15986 else if (pucchCqi->u.mode11Info.type == TFU_RPT_RI)
15988 if( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode11Info.u.ri))
15990 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode11Info.u.ri,
15995 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
15996 pucchCqi->u.mode11Info.u.ri,ue->ueId);
16003 * @brief This function Updates the DL CQI on PUCCH for the UE.
16007 * Function: rgSCHCmnDlProcCqiMode20
16009 * This function updates the DL CQI on PUCCH for the UE.
16011 * Invoked by: rgSCHCmnDlCqiOnPucchInd
16013 * Processing Steps:
16014 * Process CQI MODE 20
16015 * @param[in] RgSchCellCb *cell
16016 * @param[in] RgSchUeCb *ue
16017 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH periodic CSI report in mode 2-0
 * (UE-selected subband, no PMI). Only the wideband part is consumed
 * here: a valid wideband CQI is mirrored onto both codewords. RI
 * reports are validated and forwarded to rgSCHCmnDlSetUeRi. The
 * subband (isWideband == FALSE) branch is elided in this extract.
 * NOTE(review): line-sampled extract — prototype variants interleaved,
 * #else/#endif and braces elided. */
16022 #ifdef RGR_CQI_REPT
16024 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20
16028 TfuDlCqiPucch *pucchCqi,
16032 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail )
16035 TfuDlCqiPucch *pucchCqi;
16040 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20
16044 TfuDlCqiPucch *pucchCqi
16047 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi)
16050 TfuDlCqiPucch *pucchCqi;
16054 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16056 if (pucchCqi->u.mode20Info.type == TFU_RPT_CQI)
16058 if (pucchCqi->u.mode20Info.u.cqi.isWideband)
16060 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16061 if((pucchCqi->u.mode20Info.u.cqi.u.wideCqi) &&
16062 (pucchCqi->u.mode20Info.u.cqi.u.wideCqi < RG_SCH_CMN_MAX_CQI))
16064 ueDl->cqiFlag = TRUE;
16065 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode20Info.u.cqi.\
16067 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16068 /* ccpu00117452 - MOD - Changed macro name from
16069 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16070 #ifdef RGR_CQI_REPT
16071 *isCqiAvail = TRUE;
16080 else if (pucchCqi->u.mode20Info.type == TFU_RPT_RI)
16082 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode20Info.u.ri))
16084 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode20Info.u.ri,
16089 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
16090 pucchCqi->u.mode20Info.u.ri,ue->ueId);
16098 * @brief This function Updates the DL CQI on PUCCH for the UE.
16102 * Function: rgSCHCmnDlProcCqiMode21
16104 * This function updates the DL CQI on PUCCH for the UE.
16106 * Invoked by: rgSCHCmnDlCqiOnPucchInd
16108 * Processing Steps:
16109 * Process CQI MODE 21
16110 * @param[in] RgSchCellCb *cell
16111 * @param[in] RgSchUeCb *ue
16112 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH periodic CSI report in mode 2-1
 * (UE-selected subband with PMI). For the wideband part: a valid CQI
 * updates codeword 0, a present differential CQI derives codeword 1
 * via RG_SCH_UPDT_CW2_CQI, and the wideband PMI is validated via
 * rgSCHCmnDlSetUePmi. RI reports go to rgSCHCmnDlSetUeRi; PUCCH
 * feedback clears puschFdbkVld. The subband branch is elided here.
 * NOTE(review): line-sampled extract — prototype variants interleaved
 * (incl. a stray K&R decl for dlCqiRpt), #else/#endif and braces
 * elided. */
16117 #ifdef RGR_CQI_REPT
16119 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21
16123 TfuDlCqiPucch *pucchCqi,
16125 Bool *is2ndCwCqiAvail
16128 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail, is2ndCwCqiAvail)
16131 TfuDlCqiPucch *pucchCqi;
16132 TfuDlCqiRpt *dlCqiRpt;
16134 Bool *is2ndCwCqiAvail;
16138 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21
16142 TfuDlCqiPucch *pucchCqi
16145 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi)
16148 TfuDlCqiPucch *pucchCqi;
16152 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16154 if (pucchCqi->u.mode21Info.type == TFU_RPT_CQI)
16156 ue->mimoInfo.puschFdbkVld = FALSE;
16157 if (pucchCqi->u.mode21Info.u.cqi.isWideband)
16159 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16160 if((pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi) &&
16161 (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi < RG_SCH_CMN_MAX_CQI))
16163 ueDl->cqiFlag = TRUE;
16164 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode21Info.u.cqi.\
/* Codeword-1 CQI derived from the wideband differential CQI */
16166 if (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.pres)
16168 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
16169 ueDl->mimoInfo.cwInfo[1].cqi, \
16170 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.val);
16171 #ifdef RGR_CQI_REPT
16172 /* ccpu00117259 - ADD - Considering second codeword CQI info
16173 incase of MIMO for CQI Reporting */
16174 *is2ndCwCqiAvail = TRUE;
16177 /* ccpu00117452 - MOD - Changed macro name from
16178 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16179 #ifdef RGR_CQI_REPT
16180 *isCqiAvail = TRUE;
16187 rgSCHCmnDlSetUePmi(cell, ue, \
16188 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.pmi);
16191 else if (pucchCqi->u.mode21Info.type == TFU_RPT_RI)
16193 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode21Info.u.ri))
16195 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode21Info.u.ri,
16200 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
16201 pucchCqi->u.mode21Info.u.ri,ue->ueId);
16209 * @brief This function Updates the DL CQI on PUCCH for the UE.
16213 * Function: rgSCHCmnDlCqiOnPucchInd
16215 * This function updates the DL CQI on PUCCH for the UE.
16217 * Invoked by: rgSCHCmnDlCqiInd
16219 * Processing Steps:
16220 * - Depending on the reporting mode of the PUCCH, the CQI/PMI/RI values
16221 * are updated and stored for each UE
16223 * @param[in] RgSchCellCb *cell
16224 * @param[in] RgSchUeCb *ue
16225 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: dispatcher for DL CQI received on PUCCH — switches on the
 * configured PUCCH reporting mode (1-0 / 1-1 / 2-0 / 2-1) and calls
 * the matching rgSCHCmnDlProcCqiModeXY helper. Under RGR_CQI_REPT the
 * mode and availability flags are collected into ueCqiRept for RRM
 * CQI reporting; unknown modes are logged and isCqiAvail is cleared.
 * NOTE(review): line-sampled extract — prototype variants interleaved,
 * #else/#endif, break statements and braces elided. */
16230 #ifdef RGR_CQI_REPT
16232 PRIVATE Void rgSCHCmnDlCqiOnPucchInd
16236 TfuDlCqiPucch *pucchCqi,
16237 RgrUeCqiRept *ueCqiRept,
16239 Bool *is2ndCwCqiAvail
16242 PRIVATE Void rgSCHCmnDlCqiOnPucchInd(cell, ue, pucchCqi, ueCqiRept, isCqiAvail, is2ndCwCqiAvail)
16245 TfuDlCqiPucch *pucchCqi;
16246 RgrUeCqiRept *ueCqiRept;
16248 Bool *is2ndCwCqiAvail;
16252 PRIVATE Void rgSCHCmnDlCqiOnPucchInd
16256 TfuDlCqiPucch *pucchCqi
16259 PRIVATE Void rgSCHCmnDlCqiOnPucchInd(cell, ue, pucchCqi)
16262 TfuDlCqiPucch *pucchCqi;
16266 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16268 /* ccpu00117452 - MOD - Changed
16269 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16270 #ifdef RGR_CQI_REPT
16271 /* Save CQI mode information in the report */
16272 ueCqiRept->cqiMode = pucchCqi->mode;
16275 switch(pucchCqi->mode)
16277 case TFU_PUCCH_CQI_MODE10:
16278 #ifdef RGR_CQI_REPT
16279 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail);
16281 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi);
16283 ueDl->cqiFlag = TRUE;
16285 case TFU_PUCCH_CQI_MODE11:
16286 #ifdef RGR_CQI_REPT
16287 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail,
16290 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi);
16292 ueDl->cqiFlag = TRUE;
16294 case TFU_PUCCH_CQI_MODE20:
16295 #ifdef RGR_CQI_REPT
16296 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail);
16298 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi);
16300 ueDl->cqiFlag = TRUE;
16302 case TFU_PUCCH_CQI_MODE21:
16303 #ifdef RGR_CQI_REPT
16304 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail,
16307 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi);
16309 ueDl->cqiFlag = TRUE;
/* NOTE(review): format string has one conversion ("%d") but two
 * arguments (mode, ueId) are passed to RLOG_ARG2 — the ueId is never
 * printed. Verify against the full file before fixing. */
16313 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unknown CQI Mode %d",
16314 pucchCqi->mode,ue->ueId);
16315 /* ccpu00117452 - MOD - Changed macro name from
16316 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16317 #ifdef RGR_CQI_REPT
16318 *isCqiAvail = FALSE;
16325 } /* rgSCHCmnDlCqiOnPucchInd */
16329 * @brief This function Updates the DL CQI on PUSCH for the UE.
16333 * Function: rgSCHCmnDlCqiOnPuschInd
16335 * This function updates the DL CQI on PUSCH for the UE.
16337 * Invoked by: rgSCHCmnDlCqiInd
16339 * Processing Steps:
16340 * - Depending on the reporting mode of the PUSCH, the CQI/PMI/RI values
16341 * are updated and stored for each UE
16343 * @param[in] RgSchCellCb *cell
16344 * @param[in] RgSchUeCb *ue
16345 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: dispatcher for aperiodic DL CQI received on PUSCH.
 * First validates a present RI (saving the previous RI for TM4 so it
 * can be reverted if the subsequent PMI update fails), then switches
 * on the aperiodic reporting mode (2-0, 3-0, 1-2, 2-2, 3-1) to update
 * per-codeword CQI, PMI and the stored puschPmiInfo. Unknown modes
 * log an error, revert the RI for TM4, and clear the availability
 * flags.
 * NOTE(review): line-sampled extract — prototype variants interleaved,
 * #else/#endif, break statements and braces elided; subband handling
 * for several modes is not visible. */
16350 #ifdef RGR_CQI_REPT
16352 PRIVATE Void rgSCHCmnDlCqiOnPuschInd
16356 TfuDlCqiPusch *puschCqi,
16357 RgrUeCqiRept *ueCqiRept,
16359 Bool *is2ndCwCqiAvail
16362 PRIVATE Void rgSCHCmnDlCqiOnPuschInd(cell, ue, puschCqi, ueCqiRept, isCqiAvail, is2ndCwCqiAvail)
16365 TfuDlCqiPusch *puschCqi;
16366 RgrUeCqiRept *ueCqiRept;
16368 Bool *is2ndCwCqiAvail;
16372 PRIVATE Void rgSCHCmnDlCqiOnPuschInd
16376 TfuDlCqiPusch *puschCqi
16379 PRIVATE Void rgSCHCmnDlCqiOnPuschInd(cell, ue, puschCqi)
16382 TfuDlCqiPusch *puschCqi;
16386 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16387 uint32_t prevRiVal = 0;
16388 if (puschCqi->ri.pres == PRSNT_NODEF)
16390 if (RG_SCH_CMN_IS_RI_VALID(puschCqi->ri.val))
16392 /* Saving the previous ri value to revert back
16393 in case PMI update failed */
16394 if (RGR_UE_TM_4 == ue->mimoInfo.txMode ) /* Checking for TM4. TM8 check later */
16396 prevRiVal = ueDl->mimoInfo.ri;
16398 rgSCHCmnDlSetUeRi(cell, ue, puschCqi->ri.val, FALSE);
16402 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
16403 puschCqi->ri.val,ue->ueId);
16407 ue->mimoInfo.puschFdbkVld = FALSE;
16408 /* ccpu00117452 - MOD - Changed macro name from
16409 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16410 #ifdef RGR_CQI_REPT
16411 /* Save CQI mode information in the report */
16412 ueCqiRept->cqiMode = puschCqi->mode;
16413 /* ccpu00117259 - DEL - removed default setting of isCqiAvail to TRUE */
16416 switch(puschCqi->mode)
16418 case TFU_PUSCH_CQI_MODE_20:
16419 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16420 /* Checking whether the decoded CQI is a value between 1 and 15*/
16421 if((puschCqi->u.mode20Info.wideBandCqi) &&
16422 (puschCqi->u.mode20Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
16424 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode20Info.wideBandCqi;
16425 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16426 /* ccpu00117452 - MOD - Changed macro name from
16427 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16428 #ifdef RGR_CQI_REPT
16429 *isCqiAvail = TRUE;
16437 case TFU_PUSCH_CQI_MODE_30:
16438 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16439 if((puschCqi->u.mode30Info.wideBandCqi) &&
16440 (puschCqi->u.mode30Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
16442 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode30Info.wideBandCqi;
16443 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16444 /* ccpu00117452 - MOD - Changed macro name from
16445 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16446 #ifdef RGR_CQI_REPT
16447 *isCqiAvail = TRUE;
16451 extern uint32_t gACqiRcvdCount;
/* Mode 1-2: per-codeword CQI indices cqiIdx[0]/cqiIdx[1] */
16462 case TFU_PUSCH_CQI_MODE_12:
16463 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16464 if((puschCqi->u.mode12Info.cqiIdx[0]) &&
16465 (puschCqi->u.mode12Info.cqiIdx[0] < RG_SCH_CMN_MAX_CQI))
16467 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode12Info.cqiIdx[0];
16468 /* ccpu00117452 - MOD - Changed macro name from
16469 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16470 #ifdef RGR_CQI_REPT
16471 *isCqiAvail = TRUE;
16478 if((puschCqi->u.mode12Info.cqiIdx[1]) &&
16479 (puschCqi->u.mode12Info.cqiIdx[1] < RG_SCH_CMN_MAX_CQI))
16481 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode12Info.cqiIdx[1];
16482 /* ccpu00117452 - MOD - Changed macro name from
16483 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16484 #ifdef RGR_CQI_REPT
16485 /* ccpu00117259 - ADD - Considering second codeword CQI info
16486 incase of MIMO for CQI Reporting */
16487 *is2ndCwCqiAvail = TRUE;
/* Mode 1-2 carries subband PMI feedback — retain it for allocation */
16494 ue->mimoInfo.puschFdbkVld = TRUE;
16495 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_12;
16496 ue->mimoInfo.puschPmiInfo.u.mode12Info = puschCqi->u.mode12Info;
16497 /* : resetting this is time based. Make use of CQI reporting
16498 * periodicity, DELTA's in determining the exact time at which this
16499 * need to be reset. */
16501 case TFU_PUSCH_CQI_MODE_22:
16502 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16503 if((puschCqi->u.mode22Info.wideBandCqi[0]) &&
16504 (puschCqi->u.mode22Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
16506 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode22Info.wideBandCqi[0];
16507 /* ccpu00117452 - MOD - Changed macro name from
16508 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16509 #ifdef RGR_CQI_REPT
16510 *isCqiAvail = TRUE;
16517 if((puschCqi->u.mode22Info.wideBandCqi[1]) &&
16518 (puschCqi->u.mode22Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
16520 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode22Info.wideBandCqi[1];
16521 /* ccpu00117452 - MOD - Changed macro name from
16522 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16523 #ifdef RGR_CQI_REPT
16524 /* ccpu00117259 - ADD - Considering second codeword CQI info
16525 incase of MIMO for CQI Reporting */
16526 *is2ndCwCqiAvail = TRUE;
16533 rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode22Info.wideBandPmi);
16534 ue->mimoInfo.puschFdbkVld = TRUE;
16535 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_22;
16536 ue->mimoInfo.puschPmiInfo.u.mode22Info = puschCqi->u.mode22Info;
16538 case TFU_PUSCH_CQI_MODE_31:
16539 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16540 if((puschCqi->u.mode31Info.wideBandCqi[0]) &&
16541 (puschCqi->u.mode31Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
16543 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode31Info.wideBandCqi[0];
16544 /* ccpu00117452 - MOD - Changed macro name from
16545 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16546 #ifdef RGR_CQI_REPT
16547 *isCqiAvail = TRUE;
/* Second codeword only meaningful when the stored rank exceeds 1 */
16550 if (ueDl->mimoInfo.ri > 1)
16552 if((puschCqi->u.mode31Info.wideBandCqi[1]) &&
16553 (puschCqi->u.mode31Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
16555 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode31Info.wideBandCqi[1];
16556 /* ccpu00117452 - MOD - Changed macro name from
16557 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16558 #ifdef RGR_CQI_REPT
16559 /* ccpu00117259 - ADD - Considering second codeword CQI info
16560 incase of MIMO for CQI Reporting */
16561 *is2ndCwCqiAvail = TRUE;
/* PMI rejected: undo the RI update applied earlier this call (TM4) */
16565 if (rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode31Info.pmi) != ROK)
16567 /* To avoid Rank and PMI inconsistency */
16568 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
16569 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
16571 ueDl->mimoInfo.ri = prevRiVal;
16574 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_31;
16575 ue->mimoInfo.puschPmiInfo.u.mode31Info = puschCqi->u.mode31Info;
16579 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Unknown CQI Mode %d CRNTI:%d",
16580 puschCqi->mode,ue->ueId);
16581 /* CQI decoding failed revert the RI to previous value */
16582 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
16583 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
16585 ueDl->mimoInfo.ri = prevRiVal;
16587 /* ccpu00117452 - MOD - Changed macro name from
16588 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16589 #ifdef RGR_CQI_REPT
16590 *isCqiAvail = FALSE;
16591 /* ccpu00117259 - ADD - Considering second codeword CQI info
16592 incase of MIMO for CQI Reporting */
16593 *is2ndCwCqiAvail = FALSE;
16600 } /* rgSCHCmnDlCqiOnPuschInd */
16604 * @brief This function Updates the DL CQI for the UE.
16608 * Function: rgSCHCmnDlCqiInd
16609 * Purpose: Updates the DL CQI for the UE
16613 * @param[in] RgSchCellCb *cell
16614 * @param[in] RgSchUeCb *ue
16615 * @param[in] TfuDlCqiRpt *dlCqi
/* Purpose: top-level DL CQI indication handler. Routes the report to
 * the PUCCH or PUSCH decoder, optionally smooths/clamps the stored
 * CQI (CQI_CONFBITMASK_DROP), forwards a collected report to RRM
 * power-control (RGR_CQI_REPT), refreshes the UE's allocation limits,
 * and notifies the DLFS, SPS, eMTC/common schedulers and the
 * statistics counters.
 * NOTE(review): line-sampled extract — #else/#endif pairs, braces and
 * several statements are elided; the isPucchInfo if/else structure is
 * implied by the paired PUCCH/PUSCH calls. */
16620 Void rgSCHCmnDlCqiInd
16626 CmLteTimingInfo timingInfo
16629 Void rgSCHCmnDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo)
16634 CmLteTimingInfo timingInfo;
16637 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16638 /* ccpu00117452 - MOD - Changed macro name from
16639 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16640 #ifdef RGR_CQI_REPT
16641 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16642 RgrUeCqiRept ueCqiRept = {{0}};
16643 Bool isCqiAvail = FALSE;
16644 /* ccpu00117259 - ADD - Considering second codeword CQI info
16645 incase of MIMO for CQI Reporting */
16646 Bool is2ndCwCqiAvail = FALSE;
/* Dispatch to the PUCCH (periodic) or PUSCH (aperiodic) decoder;
 * the RGR_CQI_REPT build threads the report-collection out params */
16650 #ifdef RGR_CQI_REPT
16653 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
16657 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
16662 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi);
16666 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi);
/* Optional CQI confidence/drop smoothing: clamp to prevCqi on bad
 * values and limit downward steps to 3 with a floor around 6 */
16670 #ifdef CQI_CONFBITMASK_DROP
16671 if(!ue->cqiConfBitMask)
16673 if (ueDl->mimoInfo.cwInfo[0].cqi >15)
16675 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
16676 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
16678 else if ( ueDl->mimoInfo.cwInfo[0].cqi >= ue->prevCqi)
16680 ue->prevCqi = ueDl->mimoInfo.cwInfo[0].cqi;
16684 uint8_t dlCqiDeltaPrev = 0;
16685 dlCqiDeltaPrev = ue->prevCqi - ueDl->mimoInfo.cwInfo[0].cqi;
16686 if (dlCqiDeltaPrev > 3)
16687 dlCqiDeltaPrev = 3;
16688 if ((ue->prevCqi - dlCqiDeltaPrev) < 6)
16694 ue->prevCqi = ue->prevCqi - dlCqiDeltaPrev;
16696 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
16697 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
16703 /* ccpu00117452 - MOD - Changed macro name from
16704 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16705 #ifdef RGR_CQI_REPT
16706 /* ccpu00117259 - ADD - Considering second codeword CQI info
16707 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail\
16708 in 'if' condition*/
16709 if (RG_SCH_CQIR_IS_PUSHNCQI_ENBLE(ue) && (isCqiAvail || is2ndCwCqiAvail))
16711 ueCqiRept.cqi[0] = ueDl->mimoInfo.cwInfo[0].cqi;
16713 /* ccpu00117259 - ADD - Considering second codeword CQI info
16714 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail
16715 in 'if' condition*/
16716 ueCqiRept.cqi[1] = 0;
16717 if(is2ndCwCqiAvail)
16719 ueCqiRept.cqi[1] = ueDl->mimoInfo.cwInfo[1].cqi;
16721 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, &ueCqiRept);
/* Refresh link-adaptation allocation limits after the CQI change */
16726 rgSCHCmnDlSetUeAllocLmtLa(cell, ue);
16727 rgSCHCheckAndSetTxScheme(cell, ue);
16730 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), ue->isEmtcUe);
16732 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), FALSE);
16736 if (cellSch->dl.isDlFreqSel)
16738 cellSch->apisDlfs->rgSCHDlfsDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo);
16741 /* Call SPS module to update CQI indication */
16742 rgSCHCmnSpsDlCqiIndHndlr(cell, ue, timingInfo);
16744 /* Call Specific scheduler to process on dlCqiInd */
16746 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
16748 cellSch->apisEmtcDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
16753 cellSch->apisDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
16756 #ifdef RG_PFS_STATS
16757 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].avgCqi +=
16758 ueDl->mimoInfo.cwInfo[0].cqi;
16759 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].totalCqiOcc++;
16763 ueDl->avgCqi += ueDl->mimoInfo.cwInfo[0].cqi;
16764 ueDl->numCqiOccns++;
16765 if (ueDl->mimoInfo.ri == 1)
/* Per-UE and per-cell codeword CQI sums/counts for eNB statistics */
16776 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
16777 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
16778 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw0Cqi ++;
16779 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw1Cqi ++;
16780 cell->tenbStats->sch.dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
16781 cell->tenbStats->sch.dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
16782 cell->tenbStats->sch.dlNumCw0Cqi ++;
16783 cell->tenbStats->sch.dlNumCw1Cqi ++;
16790 * @brief This function calculates the wideband CQI from SNR
16791 * reported for each RB.
16795 * Function: rgSCHCmnCalcWcqiFrmSnr
16796 * Purpose: Wideband CQI calculation from SNR
16798 * Invoked by: RG SCH
16800 * @param[in] RgSchCellCb *cell
16801 * @param[in] TfuSrsRpt *srsRpt,
16802 * @return Wideband CQI
/* Purpose: derive a wideband CQI from the first SRS SNR sample via a
 * coarse step table (thresholds at 50/100/150/200/250 units of
 * srsRpt->snr[0]). The author's own comment flags this mapping as
 * primitive. Return: uint8_t wideband CQI, default-initialized to 1.
 * NOTE(review): line-sampled extract — the assignments inside each
 * SNR band (and the final return) are elided. */
16806 PRIVATE uint8_t rgSCHCmnCalcWcqiFrmSnr
16812 PRIVATE uint8_t rgSCHCmnCalcWcqiFrmSnr(cell,srsRpt)
16817 uint8_t wideCqi=1; /*Calculated value from SNR*/
16818 /*Need to map a certain SNR with a WideCQI value.
16819 * The CQI calculation is still primitive. Further, need to
16820 * use a improvized method for calculating WideCQI from SNR*/
16821 if (srsRpt->snr[0] <=50)
16825 else if (srsRpt->snr[0]>=51 && srsRpt->snr[0] <=100)
16829 else if (srsRpt->snr[0]>=101 && srsRpt->snr[0] <=150)
16833 else if (srsRpt->snr[0]>=151 && srsRpt->snr[0] <=200)
16837 else if (srsRpt->snr[0]>=201 && srsRpt->snr[0] <=250)
16846 }/*rgSCHCmnCalcWcqiFrmSnr*/
16850 * @brief This function Updates the SRS for the UE.
16854 * Function: rgSCHCmnSrsInd
16855 * Purpose: Updates the UL SRS for the UE
16859 * @param[in] RgSchCellCb *cell
16860 * @param[in] RgSchUeCb *ue
16861 * @param[in] TfuSrsRpt *srsRpt,
* @param[in] CmLteTimingInfo timingInfo  SFN/slot at which the SRS report was received
* @return Void
16866 Void rgSCHCmnSrsInd
16871 CmLteTimingInfo timingInfo
16874 Void rgSCHCmnSrsInd(cell, ue, srsRpt, timingInfo)
16878 CmLteTimingInfo timingInfo;
16881 uint8_t wideCqi; /*Calculated value from SNR*/
16882 uint32_t recReqTime; /*Received Time in TTI*/
16884 recReqTime = (timingInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) + timingInfo.slot;
/* Alternate the selected antenna (0/1) every SRS period */
16885 ue->srsCb.selectedAnt = (recReqTime/ue->srsCb.peri)%2;
/* Prefer the wideband CQI reported in the SRS report; otherwise derive
 * it from the reported SNR */
16886 if(srsRpt->wideCqiPres)
16888 wideCqi = srsRpt->wideCqi;
16892 wideCqi = rgSCHCmnCalcWcqiFrmSnr(cell, srsRpt);
16894 rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi);
16896 }/*rgSCHCmnSrsInd*/
16901 * @brief This function is a handler for TA report for an UE.
16905 * Function: rgSCHCmnDlTARpt
16906 * Purpose: Determine based on UE_IDLE_TIME threshold,
16907 * whether UE needs to be Linked to the scheduler's TA list OR
16908 * if it needs a PDCCH Order.
16913 * @param[in] RgSchCellCb *cell
16914 * @param[in] RgSchUeCb *ue
* @return Void
16919 Void rgSCHCmnDlTARpt
16925 Void rgSCHCmnDlTARpt(cell, ue)
16930 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16931 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
16932 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16933 CmLListCp poInactvLst;
16936 /* RACHO: If UE idle time is more than threshold, then
16937 * set its poInactv pdcch order inactivity */
16938 /* Fix : syed Ignore if TaTmr is not configured */
16939 if ((ue->dl.taCb.cfgTaTmr) && (rgSCHCmnUeIdleExdThrsld(cell, ue) == ROK))
/* Remember previous masks so we only notify the scheduler on the
 * 0 -> non-zero transition (i.e. first time the UE goes inactive) */
16941 uint32_t prevDlMsk = ue->dl.dlInactvMask;
16942 uint32_t prevUlMsk = ue->ul.ulInactvMask;
16943 ue->dl.dlInactvMask |= RG_PDCCHODR_INACTIVE;
16944 ue->ul.ulInactvMask |= RG_PDCCHODR_INACTIVE;
16945 /* Indicate Specific scheduler for this UEs inactivity */
16946 cmLListInit(&poInactvLst);
16947 cmLListAdd2Tail(&poInactvLst, &ueDl->rachInfo.inActUeLnk);
16948 ueDl->rachInfo.inActUeLnk.node = (PTR)ue;
16949 /* Send inactivate ind only if not already sent */
16950 if (prevDlMsk == 0)
16952 cellSch->apisDl->rgSCHDlInactvtUes(cell, &poInactvLst);
16954 if (prevUlMsk == 0)
16956 cellSch->apisUl->rgSCHUlInactvtUes(cell, &poInactvLst);
16961 /* Fix: ccpu00124009 Fix for loop in the linked list "cellDl->taLst" */
/* A NULL node means the UE is not yet on the TA list; linking it twice
 * would corrupt the list, hence the guard and the error log below */
16962 if (!ue->dlTaLnk.node)
16965 if(cell->emtcEnable)
16969 rgSCHEmtcAddToTaLst(cellDl,ue);
16976 cmLListAdd2Tail(&cellDl->taLst, &ue->dlTaLnk);
16977 ue->dlTaLnk.node = (PTR)ue;
16982 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
16983 "<TA>TA duplicate entry attempt failed: UEID:%u",
16992 * @brief Indication of UL CQI.
16996 * Function : rgSCHCmnFindUlCqiUlTxAnt
16998 * - Finds the Best Tx Antenna amongst the CQIs received
16999 * from Two Tx Antennas.
17001 * @param[in] RgSchCellCb *cell
17002 * @param[in] RgSchUeCb *ue
17003 * @param[in] uint8_t wideCqi
* @return Void
17007 PRIVATE Void rgSCHCmnFindUlCqiUlTxAnt
17014 PRIVATE Void rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi)
/* Mark the UE's transmit-antenna information as valid; the antenna
 * comparison logic itself is not visible in this view — TODO confirm */
17020 ue->validTxAnt = 1;
17022 } /* rgSCHCmnFindUlCqiUlTxAnt */
17026 * @brief Indication of UL CQI.
17030 * Function : rgSCHCmnUlCqiInd
17032 * - Updates uplink CQI information for the UE. Computes and
17033 * stores the lowest CQI of CQIs reported in all subbands.
17035 * @param[in] RgSchCellCb *cell
17036 * @param[in] RgSchUeCb *ue
17037 * @param[in] TfuUlCqiRpt *ulCqiInfo
* @return Void
17041 Void rgSCHCmnUlCqiInd
17045 TfuUlCqiRpt *ulCqiInfo
17048 Void rgSCHCmnUlCqiInd(cell, ue, ulCqiInfo)
17051 TfuUlCqiRpt *ulCqiInfo;
17054 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17055 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17060 #if (defined(SCH_STATS) || defined(TENB_STATS))
17061 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
17064 /* consider inputs from SRS handlers about SRS occassions
17065 * in determining the UL TX Antenna selection */
17066 ueUl->crntUlCqi[0] = ulCqiInfo->wideCqi;
17068 ueUl->validUlCqi = ueUl->crntUlCqi[0];
17069 ue->validTxAnt = 0;
/* Map the reported CQI to an iTBS and compare with the iTBS currently
 * derived by uplink link adaptation (cqiBasediTbs/deltaiTbs are scaled
 * by 100) */
17071 iTbsNew = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][ueUl->validUlCqi];
17072 previTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
17074 if (RG_ITBS_DIFF(iTbsNew, previTbs) > 5)
17076 /* Ignore this iTBS report and mark that last iTBS report was */
17077 /* ignored so that subsequently we reset the LA algorithm */
17078 ueUl->ulLaCb.lastiTbsIgnored = TRUE;
17082 if (ueUl->ulLaCb.lastiTbsIgnored != TRUE)
/* Exponential moving average: 20% weight to the new report, 80% to
 * the accumulated value */
17084 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
17085 (80 * ueUl->ulLaCb.cqiBasediTbs))/100;
17089 /* Reset the LA as iTbs in use caught up with the value */
17090 /* reported by UE. */
17091 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
17092 (80 * previTbs * 100))/100;
17093 ueUl->ulLaCb.deltaiTbs = 0;
17094 ueUl->ulLaCb.lastiTbsIgnored = FALSE;
17099 rgSCHPwrUlCqiInd(cell, ue);
17101 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
17103 rgSCHCmnSpsUlCqiInd(cell, ue);
17106 /* Applicable to only some schedulers */
/* Dispatch to the eMTC-specific or legacy UL scheduler as appropriate */
17108 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
17110 cellSch->apisEmtcUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
17115 cellSch->apisUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
/* Statistics bookkeeping (compiled in only under stats builds) */
17119 ueUl->numCqiOccns++;
17120 ueUl->avgCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17125 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17126 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulNumCqi ++;
17127 cell->tenbStats->sch.ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17128 cell->tenbStats->sch.ulNumCqi ++;
17133 } /* rgSCHCmnUlCqiInd */
17136 * @brief Returns HARQ proc for which data expected now.
17140 * Function: rgSCHCmnUlHqProcForUe
17141 * Purpose: This function returns the harq process for
17142 * which data is expected in the current subframe.
17143 * It does not validate that the HARQ process
17144 * has an allocation.
17148 * @param[in] RgSchCellCb *cell
17149 * @param[in] CmLteTimingInfo frm
17150 * @param[in] RgSchUeCb *ue
17151 * @param[out] RgSchUlHqProcCb **procRef
* @return Void  (result is returned through procRef)
17155 Void rgSCHCmnUlHqProcForUe
17158 CmLteTimingInfo frm,
17160 RgSchUlHqProcCb **procRef
17163 Void rgSCHCmnUlHqProcForUe(cell, frm, ue, procRef)
17165 CmLteTimingInfo frm;
17167 RgSchUlHqProcCb **procRef;
/* Two lookup strategies appear below (by derived procId and by time);
 * which one is compiled in is decided by preprocessor conditionals not
 * visible in this view — TODO confirm */
17171 uint8_t procId = rgSCHCmnGetUlHqProcIdx(&frm, cell);
17174 *procRef = rgSCHUhmGetUlHqProc(cell, ue, procId);
17176 *procRef = rgSCHUhmGetUlProcByTime(cell, ue, frm);
17183 * @brief Update harq process for allocation.
17187 * Function : rgSCHCmnUpdUlHqProc
17189 * This function is invoked when harq process
17190 * control block is now in a new memory location
17191 * thus requiring a pointer/reference update.
17193 * @param[in] RgSchCellCb *cell
17194 * @param[in] RgSchUlHqProcCb *curProc
17195 * @param[in] RgSchUlHqProcCb *oldProc
* @return S16  ROK on success
17201 S16 rgSCHCmnUpdUlHqProc
17204 RgSchUlHqProcCb *curProc,
17205 RgSchUlHqProcCb *oldProc
17208 S16 rgSCHCmnUpdUlHqProc(cell, curProc, oldProc)
17210 RgSchUlHqProcCb *curProc;
17211 RgSchUlHqProcCb *oldProc;
/* Debug-only sanity check: the relocated process must still own an
 * allocation before we re-point it */
17217 #if (ERRCLASS & ERRCLS_DEBUG)
17218 if (curProc->alloc == NULLP)
/* Re-link the allocation back to the process at its new address */
17223 curProc->alloc->hqProc = curProc;
17225 } /* rgSCHCmnUpdUlHqProc */
17228 /*MS_WORKAROUND for CR FIXME */
17230 * @brief Handles BSR timer expiry
17234 * Function : rgSCHCmnBsrTmrExpry
17236 * This function is invoked when periodic BSR timer expires for a UE.
17238 * @param[in] RgSchUeCb *ue
* @return S16  ROK on success
17244 S16 rgSCHCmnBsrTmrExpry
17249 S16 rgSCHCmnBsrTmrExpry(ueCb)
17253 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ueCb->cell);
/* Treat the expiry like an SR: grant the UE resources so it can send
 * a fresh BSR */
17256 ueCb->isSrGrant = TRUE;
17259 emtcStatsUlBsrTmrTxp++;
17263 if(ueCb->cell->emtcEnable)
17267 cellSch->apisEmtcUl->rgSCHSrRcvd(ueCb->cell, ueCb);
17274 cellSch->apisUl->rgSCHSrRcvd(ueCb->cell, ueCb);
17281 * @brief Short BSR update.
17285 * Function : rgSCHCmnUpdBsrShort
17287 * This functions does requisite updates to handle short BSR reporting.
17289 * @param[in] RgSchCellCb *cell
17290 * @param[in] RgSchUeCb *ue
17291 * @param[in] RgSchLcgCb *ulLcg
17292 * @param[in] uint8_t bsr
17293 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success; failure when the LCG is not configured
17299 S16 rgSCHCmnUpdBsrShort
17308 S16 rgSCHCmnUpdBsrShort(cell, ue, ulLcg, bsr, err)
17318 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17320 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17321 RgSchCmnLcg *cmnLcg = NULLP;
17327 if (!RGSCH_LCG_ISCFGD(ulLcg))
17329 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
/* A short BSR carries buffer status for exactly one LCG; all other
 * LCGs are therefore zeroed out below */
17332 for (lcgCnt=0; lcgCnt<4; lcgCnt++)
17335 /* Set BS of all other LCGs to Zero.
17336 If Zero BSR is reported in Short BSR include this LCG too */
17337 if ((lcgCnt != ulLcg->lcgId) ||
17338 (!bsr && !ueUl->hqEnt.numBusyHqProcs))
17340 /* If old BO is zero do nothing */
17341 if(((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs != 0)
/* Decrement per-QCI active-UE counts and clear the active-LC bit
 * for every channel in an LCG whose buffer goes to zero */
17343 for(idx = 0; idx < ue->ul.lcgArr[lcgCnt].numLch; idx++)
17345 if((ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount) &&
17346 (ue->ulActiveLCs & (1 <<
17347 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1))))
17350 ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount--;
17351 ue->ulActiveLCs &= ~(1 <<
17352 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1));
17358 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgCnt]))
17360 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs = 0;
17361 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->reportedBs = 0;
/* Reported LCG transitions from empty to non-empty: mark its logical
 * channels active and bump per-QCI UE counts */
17366 if(ulLcg->lcgId && bsr && (((RgSchCmnLcg *)(ulLcg->sch))->bs == 0))
17368 for(idx = 0; idx < ulLcg->numLch; idx++)
17371 if (!(ue->ulActiveLCs & (1 << (ulLcg->lcArray[idx]->qciCb->qci -1))))
17373 ulLcg->lcArray[idx]->qciCb->ulUeCount++;
17374 ue->ulActiveLCs |= (1 << (ulLcg->lcArray[idx]->qciCb->qci -1));
17379 /* Resetting the nonGbrLcgBs info here */
17380 ue->ul.nonGbrLcgBs = 0;
17381 ue->ul.nonLcg0Bs = 0;
17383 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Translate the BSR index to bytes using the extended or legacy table */
17385 if (TRUE == ue->ul.useExtBSRSizes)
17387 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
17391 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
17393 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17395 /* TBD check for effGbr != 0 */
17396 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17398 else if (0 == ulLcg->lcgId)
17400 /* This is added for handling LCG0 */
17401 cmnLcg->bs = cmnLcg->reportedBs;
17405 /* Update non GBR LCG's BS*/
17406 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
17407 cmnLcg->bs = ue->ul.nonGbrLcgBs;
17409 ue->ul.totalBsr = cmnLcg->bs;
/* Zero BSR: stop the periodic BSR timer, nothing left to request */
17412 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (bsr == 0))
17414 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
17418 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
17420 rgSCHCmnSpsBsrRpt(cell, ue, ulLcg);
17423 rgSCHCmnUpdUlCompEffBsr(ue);
17426 if(cell->emtcEnable)
17430 cellSch->apisEmtcUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
17437 cellSch->apisUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
/* With UL carrier aggregation, propagate the BSR to each active SCell */
17441 if (ue->ul.isUlCaEnabled && ue->numSCells)
17443 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
17445 #ifndef PAL_ENABLE_UL_CA
17446 if((ue->cellInfo[sCellIdx] != NULLP) &&
17447 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
17449 if(ue->cellInfo[sCellIdx] != NULLP)
17452 cellSch->apisUl->rgSCHUpdBsrShort(ue->cellInfo[sCellIdx]->cell,
17463 * @brief Truncated BSR update.
17467 * Function : rgSCHCmnUpdBsrTrunc
17469 * This functions does required updates to handle truncated BSR report.
17472 * @param[in] RgSchCellCb *cell
17473 * @param[in] RgSchUeCb *ue
17474 * @param[in] RgSchLcgCb *ulLcg
17475 * @param[in] uint8_t bsr
17476 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success; failure when the LCG is not configured
17482 S16 rgSCHCmnUpdBsrTrunc
17491 S16 rgSCHCmnUpdBsrTrunc(cell, ue, ulLcg, bsr, err)
17499 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17500 RgSchCmnLcg *cmnLcg = NULLP;
17507 if (!RGSCH_LCG_ISCFGD(ulLcg))
17509 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
17512 /* set all higher prio lcgs bs to 0 and update this lcgs bs and
17513 total bsr= sumofall lcgs bs */
/* A truncated BSR reports the highest-priority LCG with data; LCGs of
 * higher priority (lower index) are implicitly empty, so clear them */
17516 for (cnt = ulLcg->lcgId-1; cnt >= 0; cnt--)
17519 /* If Existing BO is zero the don't do anything */
17520 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs != 0)
17522 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
17525 if((ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount) &&
17526 (ue->ulActiveLCs & (1 <<
17527 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
17529 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount--;
17530 ue->ulActiveLCs &= ~(1 <<
17531 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
17536 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs = 0;
17537 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->reportedBs = 0;
/* The reported LCG and any lower-priority LCGs may still hold data:
 * mark their logical channels active */
17542 for (cnt = ulLcg->lcgId; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
17544 if (ulLcg->lcgId == 0)
17548 /* If Existing BO is zero the don't do anything */
17549 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs == 0)
17551 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
17554 if (!(ue->ulActiveLCs & (1 <<
17555 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
17557 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount++;
17558 ue->ulActiveLCs |= (1 <<
17559 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
17565 ue->ul.nonGbrLcgBs = 0;
17566 ue->ul.nonLcg0Bs = 0;
17567 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Translate the BSR index to bytes using the extended or legacy table */
17568 if (TRUE == ue->ul.useExtBSRSizes)
17570 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
17574 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
17576 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17578 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17580 else if(ulLcg->lcgId == 0)
17582 /* This is for handeling LCG0 */
17583 cmnLcg->bs = cmnLcg->reportedBs;
17587 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
17588 cmnLcg->bs = ue->ul.nonGbrLcgBs;
17590 ue->ul.totalBsr = cmnLcg->bs;
17592 for (cnt = ulLcg->lcgId+1; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
17594 /* TODO: The bs for the other LCGs may be stale because some or all of
17595 * the part of bs may have been already scheduled/data received. Please
17596 * consider this when truncated BSR is tested/implemented */
17597 ue->ul.totalBsr += ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs;
17600 rgSCHCmnUpdUlCompEffBsr(ue);
17603 if(cell->emtcEnable)
17607 cellSch->apisEmtcUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
17614 cellSch->apisUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
/* With UL carrier aggregation, propagate the BSR to each active SCell */
17618 if (ue->ul.isUlCaEnabled && ue->numSCells)
17620 for(uint8_t sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
17622 #ifndef PAL_ENABLE_UL_CA
17623 if((ue->cellInfo[sCellIdx] != NULLP) &&
17624 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
17626 if(ue->cellInfo[sCellIdx] != NULLP)
17629 cellSch->apisUl->rgSCHUpdBsrTrunc(ue->cellInfo[sCellIdx]->cell, ue, ulLcg, bsr);
17639 * @brief Long BSR update.
17643 * Function : rgSCHCmnUpdBsrLong
17645 * - Update BSRs for all configured LCGs.
17646 * - Update priority of LCGs if needed.
17647 * - Update UE's position within/across uplink scheduling queues.
17650 * @param[in] RgSchCellCb *cell
17651 * @param[in] RgSchUeCb *ue
17652 * @param[in] uint8_t bsArr[]  per-LCG BSR indices (one entry per LCG)
17653 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success
17659 S16 rgSCHCmnUpdBsrLong
17667 S16 rgSCHCmnUpdBsrLong(cell, ue, bsArr, err)
17674 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17675 uint32_t tmpBsArr[4] = {0, 0, 0, 0};
17676 uint32_t nonGbrBs = 0;
/* For every non-LCG0 group whose buffer was previously empty, mark its
 * logical channels active and bump per-QCI UE counts */
17685 for(idx1 = 1; idx1 < RGSCH_MAX_LCG_PER_UE; idx1++)
17687 /* If Old BO is non zero then do nothing */
17688 if ((((RgSchCmnLcg *)(ue->ul.lcgArr[idx1].sch))->bs == 0)
17691 for(idx2 = 0; idx2 < ue->ul.lcgArr[idx1].numLch; idx2++)
17694 if (!(ue->ulActiveLCs & (1 <<
17695 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1))))
17697 ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->ulUeCount++;
17698 ue->ulActiveLCs |= (1 <<
17699 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1));
17705 ue->ul.nonGbrLcgBs = 0;
17706 ue->ul.nonLcg0Bs = 0;
/* LCG0 is taken at face value (no AMBR/GBR capping) */
17708 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[0]))
17710 if (TRUE == ue->ul.useExtBSRSizes)
17712 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnExtBsrTbl[bsArr[0]];
17713 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnExtBsrTbl[bsArr[0]];
17714 tmpBsArr[0] = rgSchCmnExtBsrTbl[bsArr[0]];
17718 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnBsrTbl[bsArr[0]];
17719 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnBsrTbl[bsArr[0]];
17720 tmpBsArr[0] = rgSchCmnBsrTbl[bsArr[0]];
/* Remaining LCGs: cap GBR bearers at effGbr+effDeltaMbr and non-GBR
 * bearers at the UE's effective AMBR */
17723 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
17725 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
17727 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
17729 if (TRUE == ue->ul.useExtBSRSizes)
17731 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsArr[lcgId]];
17735 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsArr[lcgId]];
17737 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17739 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17740 tmpBsArr[lcgId] = cmnLcg->bs;
17744 nonGbrBs += cmnLcg->reportedBs;
17745 tmpBsArr[lcgId] = cmnLcg->reportedBs;
17746 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
17750 ue->ul.nonGbrLcgBs = RGSCH_MIN(nonGbrBs,ue->ul.effAmbr);
17752 ue->ul.totalBsr = tmpBsArr[0] + tmpBsArr[1] + tmpBsArr[2] + tmpBsArr[3];
/* All buffers empty: stop the periodic BSR timer */
17754 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (ue->ul.totalBsr == 0))
17756 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
17761 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE) /* SPS_FIX */
17763 if(ue->ul.totalBsr - tmpBsArr[1] == 0)
17764 {/* Updaing the BSR to SPS only if LCG1 BS is present in sps active state */
17765 rgSCHCmnSpsBsrRpt(cell, ue, &ue->ul.lcgArr[1]);
17769 rgSCHCmnUpdUlCompEffBsr(ue);
17772 if(cell->emtcEnable)
17776 cellSch->apisEmtcUl->rgSCHUpdBsrLong(cell, ue, bsArr);
17783 cellSch->apisUl->rgSCHUpdBsrLong(cell, ue, bsArr);
/* With UL carrier aggregation, propagate the BSR to each active SCell */
17787 if (ue->ul.isUlCaEnabled && ue->numSCells)
17789 for(uint8_t idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
17791 #ifndef PAL_ENABLE_UL_CA
17792 if((ue->cellInfo[idx] != NULLP) &&
17793 (ue->cellInfo[idx]->sCellState == RG_SCH_SCELL_ACTIVE))
17795 if(ue->cellInfo[idx] != NULLP)
17798 cellSch->apisUl->rgSCHUpdBsrLong(ue->cellInfo[idx]->cell, ue, bsArr);
17808 * @brief PHR update.
17812 * Function : rgSCHCmnUpdExtPhr
17814 * Updates extended power headroom information for an UE.
17816 * @param[in] RgSchCellCb *cell
17817 * @param[in] RgSchUeCb *ue
* @param[in] RgInfExtPhrCEInfo *extPhr  extended PHR control element contents
17819 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success
17825 S16 rgSCHCmnUpdExtPhr
17829 RgInfExtPhrCEInfo *extPhr,
17833 S16 rgSCHCmnUpdExtPhr(cell, ue, extPhr, err)
17836 RgInfExtPhrCEInfo *extPhr;
17840 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17841 RgSchCmnAllocRecord *allRcd;
17842 CmLList *node = ueUl->ulAllocLst.last;
17845 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Walk the allocation records (newest first) to find the allocation that
 * matches the time at which the PHR MAC CE was reported, so power update
 * uses the grant the UE actually transmitted with */
17852 allRcd = (RgSchCmnAllocRecord *)node->node;
17854 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
17856 rgSCHPwrUpdExtPhr(cell, ue, extPhr, allRcd);
17861 if(ulSpsUe->isUlSpsActv)
17863 rgSCHCmnSpsPhrInd(cell,ue);
17868 } /* rgSCHCmnUpdExtPhr */
17874 * @brief PHR update.
17878 * Function : rgSCHCmnUpdPhr
17880 * Updates power headroom information for an UE.
17882 * @param[in] RgSchCellCb *cell
17883 * @param[in] RgSchUeCb *ue
17884 * @param[in] uint8_t phr
17885 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success
17899 S16 rgSCHCmnUpdPhr(cell, ue, phr, err)
17906 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17907 RgSchCmnAllocRecord *allRcd;
17908 CmLList *node = ueUl->ulAllocLst.last;
17911 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* Match the PHR report time against recorded allocations (newest first)
 * so the power-control update is applied against the grant the UE used */
17918 allRcd = (RgSchCmnAllocRecord *)node->node;
17920 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
17922 rgSCHPwrUpdPhr(cell, ue, phr, allRcd, RG_SCH_CMN_PWR_USE_CFG_MAX_PWR);
17927 if(ulSpsUe->isUlSpsActv)
17929 rgSCHCmnSpsPhrInd(cell,ue);
17934 } /* rgSCHCmnUpdPhr */
17937 * @brief UL grant for contention resolution.
17941 * Function : rgSCHCmnContResUlGrant
17943 * Add UE to another queue specifically for CRNTI based contention
* resolution; dispatches to the eMTC or legacy UL scheduler.
17947 * @param[in] RgSchUeCb *ue
17948 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success
17954 S16 rgSCHCmnContResUlGrant
17961 S16 rgSCHCmnContResUlGrant(cell, ue, err)
17967 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17970 if(cell->emtcEnable)
17974 cellSch->apisEmtcUl->rgSCHContResUlGrant(cell, ue);
17981 cellSch->apisUl->rgSCHContResUlGrant(cell, ue);
17987 * @brief SR reception handling.
17991 * Function : rgSCHCmnSrRcvd
17993 * - Update UE's position within/across uplink scheduling queues
17994 * - Update priority of LCGs if needed.
17996 * @param[in] RgSchCellCb *cell
17997 * @param[in] RgSchUeCb *ue
17998 * @param[in] CmLteTimingInfo frm  time at which the SR was received
17999 * @param[out] RgSchErrInfo *err
* @return S16  ROK on success
18009 CmLteTimingInfo frm,
18013 S16 rgSCHCmnSrRcvd(cell, ue, frm, err)
18016 CmLteTimingInfo frm;
18020 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18021 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18022 CmLList *node = ueUl->ulAllocLst.last;
18026 emtcStatsUlTomSrInd++;
18029 RGSCH_INCR_SUB_FRAME(frm, 1); /* 1 TTI after the time SR was sent */
/* If an allocation already exists for that time the SR is redundant;
 * the matching logic around this record is partially elided here */
18032 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)node->node;
18033 if (RGSCH_TIMEINFO_SAME(frm, allRcd->allocTime))
18039 //TODO_SID Need to check when it is getting triggered
18040 ue->isSrGrant = TRUE;
18042 if(cell->emtcEnable)
18046 cellSch->apisEmtcUl->rgSCHSrRcvd(cell, ue);
18053 cellSch->apisUl->rgSCHSrRcvd(cell, ue);
18059 * @brief Returns first uplink allocation to send reception
18064 * Function: rgSCHCmnFirstRcptnReq(cell)
18065 * Purpose: This function returns the first uplink allocation
18066 * (or NULLP if there is none) in the subframe
18067 * in which is expected to prepare and send reception
18072 * @param[in] RgSchCellCb *cell
18073 * @return RgSchUlAlloc*
18076 RgSchUlAlloc *rgSCHCmnFirstRcptnReq
18081 RgSchUlAlloc *rgSCHCmnFirstRcptnReq(cell)
18085 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18087 RgSchUlAlloc* alloc = NULLP;
/* RGSCH_INVALID_INFO marks "no reception-request subframe pending" */
18090 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
18092 RgSchUlSf* sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18093 alloc = rgSCHUtlUlAllocFirst(sf);
/* Skip allocations without a HARQ process attached */
18095 if (alloc && alloc->hqProc == NULLP)
18097 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18105 * @brief Returns first uplink allocation to send reception
18110 * Function: rgSCHCmnNextRcptnReq(cell)
18111 * Purpose: This function returns the next uplink allocation
18112 * (or NULLP if there is none) in the subframe
18113 * in which is expected to prepare and send reception
18118 * @param[in] RgSchCellCb *cell
* @param[in] RgSchUlAlloc *alloc  current allocation to advance from
18119 * @return RgSchUlAlloc*
18122 RgSchUlAlloc *rgSCHCmnNextRcptnReq
18125 RgSchUlAlloc *alloc
18128 RgSchUlAlloc *rgSCHCmnNextRcptnReq(cell, alloc)
18130 RgSchUlAlloc *alloc;
18133 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18135 //RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18138 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
18140 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
/* Advance past allocations without a HARQ process attached */
18142 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18143 if (alloc && alloc->hqProc == NULLP)
18145 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18156 * @brief Collates DRX enabled UE's scheduled in this SF
18160 * Function: rgSCHCmnDrxStrtInActvTmrInUl(cell)
18161 * Purpose: This function collates the link
18162 * of UE's scheduled in this SF who
18163 * have drx enabled. It then calls
18164 * DRX specific function to start/restart
18165 * inactivity timer in Ul
18169 * @param[in] RgSchCellCb *cell
* @return Void
18173 Void rgSCHCmnDrxStrtInActvTmrInUl
18178 Void rgSCHCmnDrxStrtInActvTmrInUl(cell)
18182 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18183 RgSchUlSf *sf = &(cellUl->ulSfArr[cellUl->schdIdx]);
18184 RgSchUlAlloc *alloc = rgSCHUtlUlAllocFirst(sf);
18189 cmLListInit(&ulUeLst);
/* Only fresh (non-retx), non-SR-grant, non-SPS-occasion allocations of
 * DRX-enabled UEs restart the UL DRX inactivity timer */
18197 if (!(alloc->grnt.isRtx) && ueCb->isDrxEnabled && !(ueCb->isSrGrant)
18199 /* ccpu00139513- DRX inactivity timer should not be started for
18200 * UL SPS occasions */
18201 && (alloc->hqProc->isSpsOccnHqP == FALSE)
18205 cmLListAdd2Tail(&ulUeLst,&(ueCb->ulDrxInactvTmrLnk));
18206 ueCb->ulDrxInactvTmrLnk.node = (PTR)ueCb;
18210 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18213 (Void)rgSCHDrxStrtInActvTmr(cell,&ulUeLst,RG_SCH_DRX_UL);
18220 * @brief Returns first uplink allocation to send HARQ feedback
18225 * Function: rgSCHCmnFirstHqFdbkAlloc
18226 * Purpose: This function returns the first uplink allocation
18227 * (or NULLP if there is none) in the subframe
18228 * for which it is expected to prepare and send HARQ
18233 * @param[in] RgSchCellCb *cell
18234 * @param[in] uint8_t idx
18235 * @return RgSchUlAlloc*
18238 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc
18244 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc(cell, idx)
18249 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18251 RgSchUlAlloc *alloc = NULLP;
/* RGSCH_INVALID_INFO marks "no HARQ-feedback subframe pending" */
18254 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
18256 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
18257 alloc = rgSCHUtlUlAllocFirst(sf);
/* Skip allocations without a HARQ process attached */
18259 while (alloc && (alloc->hqProc == NULLP))
18261 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18269 * @brief Returns next allocation to send HARQ feedback for.
18273 * Function: rgSCHCmnNextHqFdbkAlloc(cell)
18274 * Purpose: This function returns the next uplink allocation
18275 * (or NULLP if there is none) in the subframe
18276 * for which HARQ feedback needs to be sent.
18280 * @param[in] RgSchCellCb *cell
* @param[in] RgSchUlAlloc *alloc  current allocation to advance from
* @param[in] uint8_t idx  HARQ feedback subframe index
18281 * @return RgSchUlAlloc*
18284 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc
18287 RgSchUlAlloc *alloc,
18291 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc(cell, alloc, idx)
18293 RgSchUlAlloc *alloc;
18297 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18299 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
18301 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
/* Advance past allocations without a HARQ process attached */
18303 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18304 while (alloc && (alloc->hqProc == NULLP))
18306 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18316 /***********************************************************
18318 * Func : rgSCHCmnUlGetITbsFrmIMcs
18320 * Desc : Returns the Itbs that is mapped to an Imcs
18321 * for the case of uplink.
* Ret  : uint8_t  iTbs corresponding to the given iMcs
* Notes: Pure table lookup; caller must pass a valid iMcs index.
18329 **********************************************************/
18331 uint8_t rgSCHCmnUlGetITbsFrmIMcs
18336 uint8_t rgSCHCmnUlGetITbsFrmIMcs(iMcs)
18341 return (rgUlIMcsTbl[iMcs].iTbs);
18344 /***********************************************************
18346 * Func : rgSCHCmnUlGetIMcsFrmITbs
18348 * Desc : Returns the Imcs that is mapped to an Itbs
18349 * for the case of uplink.
18353 * Notes: For iTbs 19, iMcs is dependant on modulation order.
18354 * Refer to 36.213, Table 8.6.1-1 and 36.306 Table 4.1-2
18355 * for UE capability information
* Ret  : uint8_t  iMcs corresponding to the given iTbs
18359 **********************************************************/
18361 uint8_t rgSCHCmnUlGetIMcsFrmITbs
18364 CmLteUeCategory ueCtg
18367 uint8_t rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg)
18369 CmLteUeCategory ueCtg;
18378 /*a higher layer can force a 64QAM UE to transmit at 16QAM.
18379 * We currently do not support this. Once the support for such
18380 * is added, ueCtg should be replaced by current transmit
18381 * modulation configuration.Refer to 36.213 -8.6.1
/* Branch bodies (the actual iMcs values returned per range) are not
 * visible in this view — confirm against the full source */
18383 else if ( iTbs < 19 )
18387 else if ((iTbs == 19) && (ueCtg != CM_LTE_UE_CAT_5))
18397 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
18398 was seen when IMCS exceeds 20 on T2k TDD*/
18408 /***********************************************************
18410 * Func : rgSCHCmnUlMinTbBitsForITbs
18412 * Desc : Returns the minimum number of bits that can
18413 * be given as grant for a specific CQI.
* Ret  : uint32_t  TB size in bits for one subband at the given iTbs
* Notes: Looks up the single-layer TB size table at the cell's
*        configured subband size.
18421 **********************************************************/
18423 uint32_t rgSCHCmnUlMinTbBitsForITbs
18425 RgSchCmnUlCell *cellUl,
18429 uint32_t rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs)
18430 RgSchCmnUlCell *cellUl;
18435 RGSCH_ARRAY_BOUND_CHECK(0, rgTbSzTbl[0], iTbs);
18437 return (rgTbSzTbl[0][iTbs][cellUl->sbSize-1]);
18440 /***********************************************************
18442 * Func : rgSCHCmnUlSbAlloc
18444 * Desc : Given a required 'number of subbands' and a hole,
18445 * returns a suitable alloc such that the subband
18446 * allocation size is valid
18450 * Notes: Does not assume either passed numSb or hole size
18451 * to be valid for allocation, and hence arrives at
18452 * an acceptable value.
* Ret  : RgSchUlAlloc*  allocation carved from the hole
18455 **********************************************************/
18457 RgSchUlAlloc *rgSCHCmnUlSbAlloc
18464 RgSchUlAlloc *rgSCHCmnUlSbAlloc(sf, numSb, hole)
18470 uint8_t holeSz; /* valid hole size */
18471 RgSchUlAlloc *alloc;
/* LTE UL allocations must span a number of subbands expressible as
 * 2^a * 3^b * 5^c; rgSchCmnMult235Tbl rounds to such values (match =
 * round up, prvMatch = round down). If the hole itself is already a
 * valid size, try to consume it completely; otherwise carve a partial
 * allocation of the largest valid size that fits. */
18473 if ((holeSz = rgSchCmnMult235Tbl[hole->num].prvMatch) == hole->num)
18475 numSb = rgSchCmnMult235Tbl[numSb].match;
18476 if (numSb >= holeSz)
18478 alloc = rgSCHUtlUlAllocGetCompHole(sf, hole);
18482 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
18487 if (numSb < holeSz)
18489 numSb = rgSchCmnMult235Tbl[numSb].match;
18493 numSb = rgSchCmnMult235Tbl[numSb].prvMatch;
18496 if ( numSb >= holeSz )
18500 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
18506 * @brief To fill the RgSchCmnUeUlAlloc structure of UeCb.
18510 * Function: rgSCHCmnUlUeFillAllocInfo
18511 * Purpose: Specific scheduler to call this API to fill the alloc
18514 * Invoked by: Scheduler
18516 * @param[in] RgSchCellCb *cell
18517 * @param[out] RgSchUeCb *ue
* @return Void
18521 Void rgSCHCmnUlUeFillAllocInfo
18527 Void rgSCHCmnUlUeFillAllocInfo(cell, ue)
18532 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18533 RgSchCmnUeUlAlloc *ulAllocInfo;
18534 RgSchCmnUlUe *ueUl;
18537 ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18538 ulAllocInfo = &ueUl->alloc;
18540 /* Fill alloc structure */
18541 rgSCHCmnUlAllocFillTpc(cell, ue, ulAllocInfo->alloc);
18542 rgSCHCmnUlAllocFillNdmrs(cellUl, ulAllocInfo->alloc);
18543 rgSCHCmnUlAllocLnkHqProc(ue, ulAllocInfo->alloc, ulAllocInfo->alloc->hqProc,
18544 ulAllocInfo->alloc->hqProc->isRetx);
18546 rgSCHCmnUlFillPdcchWithAlloc(ulAllocInfo->alloc->pdcch,
18547 ulAllocInfo->alloc, ue);
18548 /* Recording information about this allocation */
18549 rgSCHCmnUlRecordUeAlloc(cell, ue);
18551 /* Update the UE's outstanding allocation */
/* Only fresh transmissions add to outstanding data; retransmissions
 * do not consume new buffer-status bytes */
18552 if (!ulAllocInfo->alloc->hqProc->isRetx)
18554 rgSCHCmnUlUpdOutStndAlloc(cell, ue, ulAllocInfo->allocdBytes);
18561 * @brief Update the UEs outstanding alloc based on the BSR report's timing.
18566 * Function: rgSCHCmnUpdUlCompEffBsr
18567 * Purpose: Clear off all the allocations from outstanding allocation that
18568 * are later than or equal to BSR timing information (stored in UEs datIndTime).
18570 * Invoked by: Scheduler
18572 * @param[in] RgSchUeCb *ue
* @return Void
18576 PRIVATE Void rgSCHCmnUpdUlCompEffBsr
18581 PRIVATE Void rgSCHCmnUpdUlCompEffBsr(ue)
18585 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,ue->cell);
18586 CmLList *node = ueUl->ulAllocLst.last;
18587 RgSchCmnAllocRecord *allRcd;
18588 uint32_t outStndAlloc=0;
18589 uint32_t nonLcg0OutStndAllocBs=0;
18590 uint32_t nonLcg0Bsr=0;
18592 RgSchCmnLcg *cmnLcg = NULLP;
/* Locate the allocation record whose time matches the BSR report time;
 * records at/after that time are still "outstanding" (the BSR did not
 * account for them) */
18596 allRcd = (RgSchCmnAllocRecord *)node->node;
18597 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
/* Sum up the bytes of all outstanding allocations */
18606 allRcd = (RgSchCmnAllocRecord *)node->node;
18608 outStndAlloc += allRcd->alloc;
18611 cmnLcg = (RgSchCmnLcg *)(ue->ul.lcgArr[0].sch);
18612 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
18613 if (cmnLcg->bs > outStndAlloc)
18615 cmnLcg->bs -= outStndAlloc;
18616 ue->ul.minReqBytes = cmnLcg->bs;
/* LCG0 fully covered; the remainder of the outstanding bytes counts
 * against the non-LCG0 groups */
18621 nonLcg0OutStndAllocBs = outStndAlloc - cmnLcg->bs;
18625 for(lcgId = 1;lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
18627 if(RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
18629 cmnLcg = ((RgSchCmnLcg *) (ue->ul.lcgArr[lcgId].sch));
18630 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
18632 nonLcg0Bsr += cmnLcg->bs;
18636 nonLcg0Bsr += ue->ul.nonGbrLcgBs;
18637 if (nonLcg0OutStndAllocBs > nonLcg0Bsr)
18643 nonLcg0Bsr -= nonLcg0OutStndAllocBs;
18645 ue->ul.nonLcg0Bs = nonLcg0Bsr;
18646 /* Cap effBsr with nonLcg0Bsr and append lcg0 bs.
18647 * nonLcg0Bsr limit applies only to lcg1,2,3 */
18648 /* better be handled in individual scheduler */
18649 ue->ul.effBsr = nonLcg0Bsr +\
18650 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
18655 * @brief Records information about the current allocation.
18659 * Function: rgSCHCmnUlRecordUeAlloc
18660 * Purpose: Records information about the current allocation.
18661 * This includes the allocated bytes, as well
18662 * as some power information.
18664 * Invoked by: Scheduler
18666 * @param[in] RgSchCellCb *cell
18667 * @param[in] RgSchUeCb *ue
/* NOTE(review): truncated extract (braces/#else branches missing from view);
 * comments only added. Records the current UL allocation for the UE: recycles
 * the oldest record (head of ulAllocLst) to the tail, stamps it with the time
 * the UE can first report a BSR reflecting this grant, and captures the
 * allocated bytes, RB count, CQI and TPC for later effBsr/power accounting. */
18671 Void rgSCHCmnUlRecordUeAlloc
18677 Void rgSCHCmnUlRecordUeAlloc(cell, ue)
18683 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18685 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18686 CmLListCp *lst = &ueUl->ulAllocLst;
18687 CmLList *node = ueUl->ulAllocLst.first;
18688 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
18689 RgSchCmnUeUlAlloc *ulAllocInfo = &ueUl->alloc;
18690 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* Reuse the oldest record: unlink it, restamp, re-append at the tail so the
 * list stays ordered oldest-to-newest. */
18692 cmLListDelFrm(lst, &allRcd->lnk);
18694 /* To the crntTime, add the MIN time at which UE will
18695 * actually send the BSR i.e DELTA+4 */
18696 allRcd->allocTime = cell->crntTime;
18697 /*ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
/* eMTC UEs use a different subframe-increment macro than legacy LTE UEs. */
18699 if(ue->isEmtcUe == TRUE)
18701 RGSCH_INCR_SUB_FRAME_EMTC(allRcd->allocTime,
18702 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
18707 RGSCH_INCR_SUB_FRAME(allRcd->allocTime,
18708 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
18711 allRcd->allocTime = cellUl->schdTime;
18713 cmLListAdd2Tail(lst, &allRcd->lnk);
18715 /* Filling in the parameters to be recorded */
18716 allRcd->alloc = ulAllocInfo->allocdBytes;
18717 //allRcd->numRb = ulAllocInfo->alloc->grnt.numRb;
/* numRb derived from VRB groups (5GTF path), not from grnt.numRb. */
18718 allRcd->numRb = (ulAllocInfo->alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
18719 /*Recording the UL CQI derived from the maxUlCqi */
18720 allRcd->cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
18721 allRcd->tpc = ulAllocInfo->alloc->grnt.tpc;
/* Feed the RB count into UL power-control tracking and cell UL byte stats. */
18723 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
18725 cell->measurements.ulBytesCnt += ulAllocInfo->allocdBytes;
18730 /** PHR handling for MSG3
18731 * @brief Records allocation information of msg3 in the UE.
18735 * Function: rgSCHCmnUlRecMsg3Alloc
18736 * Purpose: Records information about msg3 allocation.
18737 * This includes the allocated bytes, as well
18738 * as some power information.
18740 * Invoked by: Scheduler
18742 * @param[in] RgSchCellCb *cell
18743 * @param[in] RgSchUeCb *ue
18744 * @param[in] RgSchRaCb *raCb
/* NOTE(review): truncated extract; comments only added. Records the Msg3
 * grant for the UE: recycles the oldest record of ulAllocLst, stamps it with
 * the RA control block's msg3 allocation time, and captures the msg3 grant's
 * size, RBs, CCCH CQI and TPC; also feeds the RB count into power tracking. */
18748 Void rgSCHCmnUlRecMsg3Alloc
18755 Void rgSCHCmnUlRecMsg3Alloc(cell, ue, raCb)
18761 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18762 CmLListCp *lst = &ueUl->ulAllocLst;
18763 CmLList *node = ueUl->ulAllocLst.first;
18764 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
18766 /* Stack Crash problem for TRACE5 changes */
/* Move the recycled record to the tail so the list stays time-ordered. */
18768 cmLListDelFrm(lst, node);
18769 allRcd->allocTime = raCb->msg3AllocTime;
18770 cmLListAdd2Tail(lst, node);
18772 /* Filling in the parameters to be recorded */
18773 allRcd->alloc = raCb->msg3Grnt.datSz;
18774 allRcd->numRb = raCb->msg3Grnt.numRb;
/* CQI for Msg3 comes from the CCCH CQI reported during random access. */
18775 allRcd->cqi = raCb->ccchCqi;
18776 allRcd->tpc = raCb->msg3Grnt.tpc;
18778 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
18783 * @brief Keeps track of the most recent RG_SCH_CMN_MAX_ALLOC_TRACK
18784 * allocations to track. Adds this allocation to the ueUl's ulAllocLst.
18789 * Function: rgSCHCmnUlUpdOutStndAlloc
18790 * Purpose: Recent Allocation shall be at First Pos'n.
18791 * Remove the last node, update the fields
18792 * with the new allocation and add at front.
18794 * Invoked by: Scheduler
18796 * @param[in] RgSchCellCb *cell
18797 * @param[in] RgSchUeCb *ue
18798 * @param[in] uint32_t alloc
/* NOTE(review): truncated extract; comments only added. Deducts a fresh UL
 * allocation of `alloc` bytes from the UE's pending buffer status: first from
 * LCG0's bs, the overflow from nonLcg0Bs, then recomputes effBsr. When effBsr
 * drops to zero the BSR timer is stopped and (for non-SR grants) the periodic
 * BSR timer is restarted if configured; minReqBytes is reset at the end. */
18802 Void rgSCHCmnUlUpdOutStndAlloc
18809 Void rgSCHCmnUlUpdOutStndAlloc(cell, ue, alloc)
18815 uint32_t nonLcg0Alloc=0;
18817 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
18818 if (((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs > alloc)
18820 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs -= alloc;
/* Allocation exceeds LCG0's bs: drain LCG0 fully, deduct the rest from the
 * non-LCG0 (LCG 1..3) buffer status. */
18824 nonLcg0Alloc = alloc - ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
18825 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = 0;
18828 if (nonLcg0Alloc >= ue->ul.nonLcg0Bs)
18830 ue->ul.nonLcg0Bs = 0;
18834 ue->ul.nonLcg0Bs -= nonLcg0Alloc;
18836 /* Cap effBsr with effAmbr and append lcg0 bs.
18837 * effAmbr limit applies only to lcg1,2,3 non GBR LCG's*/
18838 /* better be handled in individual scheduler */
18839 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
18840 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
/* All reported data has been served: stop the running BSR timer and, unless
 * this grant was SR-triggered, rearm the periodic BSR timer if configured. */
18842 if (ue->ul.effBsr == 0)
18844 if (ue->bsrTmr.tmrEvnt != TMR_NONE)
18846 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
18849 if (FALSE == ue->isSrGrant)
18851 if (ue->ul.bsrTmrCfg.isPrdBsrTmrPres)
18854 rgSCHTmrStartTmr(cell, ue, RG_SCH_TMR_BSR,
18855 ue->ul.bsrTmrCfg.prdBsrTmr);
18861 /* Resetting UEs lower Cap */
18862 ue->ul.minReqBytes = 0;
18869 * @brief Returns the "Itbs" for a given UE.
18873 * Function: rgSCHCmnUlGetITbs
18874 * Purpose: This function returns the "Itbs" for a given UE.
18876 * Invoked by: Scheduler
18878 * @param[in] RgSchUeCb *ue
/* NOTE(review): truncated extract (conditional compilation branches missing
 * from view); comments only added. Returns the UL iTbs for the UE. One path
 * derives iTbs from link adaptation (cqiBasediTbs + deltaiTbs, scaled by 100)
 * clamped to the table maximum and the cell's maxUlItbs; the fallback path is
 * a direct CQI -> iTbs table lookup. For non-CAT5 UEs the CQI used is capped
 * at ueUl->maxUlCqi. */
18882 uint8_t rgSCHCmnUlGetITbs
18889 uint8_t rgSCHCmnUlGetITbs(cell, ue, isEcp)
18895 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18896 /* CQI will be capped to maxUlCqi for 16qam UEs */
18897 CmLteUeCategory ueCtgy = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* maxiTbs: upper bound implied by the UE's configured max UL CQI; isEcp
 * selects the extended-CP row of the table. */
18901 uint8_t maxiTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][ueUl->maxUlCqi];
18905 /* #ifdef RG_SCH_CMN_EXT_CP_SUP For ECP pick index 1 */
18907 if ( (ueCtgy != CM_LTE_UE_CAT_5) &&
18908 (ueUl->validUlCqi > ueUl->maxUlCqi)
18911 cqi = ueUl->maxUlCqi;
18915 cqi = ueUl->validUlCqi;
/* Link-adaptation iTbs: cqiBasediTbs/deltaiTbs are maintained scaled by 100. */
18919 iTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
18921 RG_SCH_CHK_ITBS_RANGE(iTbs, maxiTbs);
18923 iTbs = RGSCH_MIN(iTbs, ue->cell->thresholds.maxUlItbs);
18926 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
18927 was seen when IMCS exceeds 20 on T2k TDD */
/* Fallback path: direct lookup from the current UL CQI, capped for non-CAT5. */
18936 if ( (ueCtgy != CM_LTE_UE_CAT_5) && (ueUl->crntUlCqi[0] > ueUl->maxUlCqi ))
18938 cqi = ueUl->maxUlCqi;
18942 cqi = ueUl->crntUlCqi[0];
18945 return (rgSchCmnUlCqiToTbsTbl[(uint8_t)isEcp][cqi]);
18949 * @brief This function adds the UE to DLRbAllocInfo TX lst.
18953 * Function: rgSCHCmnDlRbInfoAddUeTx
18954 * Purpose: This function adds the UE to DLRbAllocInfo TX lst.
18956 * Invoked by: Common Scheduler
18958 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
18959 * @param[in] RgSchUeCb *ue
18960 * @param[in] RgSchDlHqProcCb *hqP
/* NOTE(review): truncated extract; comments only added. Adds a HARQ process
 * to the DL RB-allocation TX list, but only if it is not already queued
 * (reqLnk.node == NULLP). With frequency-selective DL scheduling enabled the
 * insertion is delegated to the DLFS hook; otherwise the proc is appended to
 * dedAlloc.txHqPLst directly. */
18965 PRIVATE Void rgSCHCmnDlRbInfoAddUeTx
18968 RgSchCmnDlRbAllocInfo *allocInfo,
18970 RgSchDlHqProcCb *hqP
18973 PRIVATE Void rgSCHCmnDlRbInfoAddUeTx(cell, allocInfo, ue, hqP)
18975 RgSchCmnDlRbAllocInfo *allocInfo;
18977 RgSchDlHqProcCb *hqP;
18980 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* reqLnk.node == NULLP means the proc is not yet on any request list. */
18983 if (hqP->reqLnk.node == NULLP)
18985 if (cellSch->dl.isDlFreqSel)
18987 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
18988 &allocInfo->dedAlloc.txHqPLst, hqP);
18993 cmLListAdd2Tail(&allocInfo->dedAlloc.txHqPLst, &hqP->reqLnk);
/* Mark the proc as enqueued by pointing the link back at itself. */
18995 hqP->reqLnk.node = (PTR)hqP;
19002 * @brief This function adds the UE to DLRbAllocInfo RETX lst.
19006 * Function: rgSCHCmnDlRbInfoAddUeRetx
19007 * Purpose: This function adds the UE to DLRbAllocInfo RETX lst.
19009 * Invoked by: Common Scheduler
19011 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19012 * @param[in] RgSchUeCb *ue
19013 * @param[in] RgSchDlHqProcCb *hqP
/* NOTE(review): truncated extract; comments only added. Adds a HARQ process
 * to the DL RB-allocation RETX list: via the DLFS hook when frequency-
 * selective scheduling is enabled, otherwise by direct tail append to
 * dedAlloc.retxHqPLst. Unlike the TX variant, no already-queued check is
 * performed (see the in-code comment below). */
19018 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetx
19021 RgSchCmnDlRbAllocInfo *allocInfo,
19023 RgSchDlHqProcCb *hqP
19026 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetx(cell, allocInfo, ue, hqP)
19028 RgSchCmnDlRbAllocInfo *allocInfo;
19030 RgSchDlHqProcCb *hqP;
19033 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
19036 if (cellSch->dl.isDlFreqSel)
19038 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
19039 &allocInfo->dedAlloc.retxHqPLst, hqP);
19043 /* checking UE's presence in this lst is unnecessary */
19044 cmLListAdd2Tail(&allocInfo->dedAlloc.retxHqPLst, &hqP->reqLnk);
19045 hqP->reqLnk.node = (PTR)hqP;
19051 * @brief This function adds the UE to DLRbAllocInfo TX-RETX lst.
19055 * Function: rgSCHCmnDlRbInfoAddUeRetxTx
19056 * Purpose: This adds the UE to DLRbAllocInfo TX-RETX lst.
19058 * Invoked by: Common Scheduler
19060 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19061 * @param[in] RgSchUeCb *ue
19062 * @param[in] RgSchDlHqProcCb *hqP
/* NOTE(review): truncated extract; comments only added. Adds a HARQ process
 * to the DL RB-allocation TX-RETX list (dedAlloc.txRetxHqPLst), via the DLFS
 * hook or by direct tail append, mirroring the TX/RETX variants above.
 * NOTE(review): the DLFS call below passes `cell` but the visible K&R
 * parameter list (allocInfo, ue, hqP) does not include it — either a line is
 * missing from this extract or this is a latent bug; verify against the full
 * source. */
19067 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetxTx
19070 RgSchCmnDlRbAllocInfo *allocInfo,
19072 RgSchDlHqProcCb *hqP
19075 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetxTx(allocInfo, ue, hqP)
19077 RgSchCmnDlRbAllocInfo *allocInfo;
19079 RgSchDlHqProcCb *hqP;
19082 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
19085 if (cellSch->dl.isDlFreqSel)
19087 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
19088 &allocInfo->dedAlloc.txRetxHqPLst, hqP);
19092 cmLListAdd2Tail(&allocInfo->dedAlloc.txRetxHqPLst, &hqP->reqLnk);
19093 hqP->reqLnk.node = (PTR)hqP;
19099 * @brief This function adds the UE to DLRbAllocInfo NonSchdRetxLst.
19103 * Function: rgSCHCmnDlAdd2NonSchdRetxLst
19104 * Purpose: During RB estimation for RETX, if allocation fails
19105 * then appending it to NonSchdRetxLst, the further
19106 * action is taken as part of Finalization in
19107 * respective schedulers.
19109 * Invoked by: Common Scheduler
19111 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19112 * @param[in] RgSchUeCb *ue
19113 * @param[in] RgSchDlHqProcCb *hqP
/* NOTE(review): truncated extract; comments only added. Appends a HARQ
 * process whose RETX RB estimation failed to dedAlloc.nonSchdRetxHqPLst, so
 * the specific scheduler can handle it during finalization. SPS HARQ
 * processes are excluded by the guard below. */
19118 PRIVATE Void rgSCHCmnDlAdd2NonSchdRetxLst
19120 RgSchCmnDlRbAllocInfo *allocInfo,
19122 RgSchDlHqProcCb *hqP
19125 PRIVATE Void rgSCHCmnDlAdd2NonSchdRetxLst(allocInfo, ue, hqP)
19126 RgSchCmnDlRbAllocInfo *allocInfo;
19128 RgSchDlHqProcCb *hqP;
19131 CmLList *schdLnkNode;
/* SPS-occupied HARQ procs are managed elsewhere; do not queue them here. */
19135 if ( (hqP->sch != (RgSchCmnDlHqProc *)NULLP) &&
19136 (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP)))
19142 schdLnkNode = &hqP->schdLstLnk;
19143 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
19144 cmLListAdd2Tail(&allocInfo->dedAlloc.nonSchdRetxHqPLst, schdLnkNode);
19152 * @brief This function adds the UE to DLRbAllocInfo NonSchdTxRetxLst.
19156 * Function: rgSCHCmnDlAdd2NonSchdTxRetxLst
19157 * Purpose: During RB estimation for TXRETX, if allocation fails
19158 * then appending it to NonSchdTxRetxLst, the further
19159 * action is taken as part of Finalization in
19160 * respective schedulers.
19162 * Invoked by: Common Scheduler
19164 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19165 * @param[in] RgSchUeCb *ue
19166 * @param[in] RgSchDlHqProcCb *hqP
19172 * @brief This function handles the initialisation of DL HARQ/ACK feedback
19173 * timing information for each DL subframe.
19177 * Function: rgSCHCmnDlANFdbkInit
19178 * Purpose: Each DL subframe stores the sfn and subframe
19179 * information of UL subframe in which it expects
19180 * HARQ ACK/NACK feedback for this subframe.It
19181 * generates the information based on Downlink
19182 * Association Set Index table.
19184 * Invoked by: Scheduler
19186 * @param[in] RgSchCellCb* cell
/* NOTE(review): truncated extract (declarations, braces and return paths are
 * missing from view); comments only added. For each UL subframe of the TDD
 * configuration, uses the DL Association Set Index (K) table to compute which
 * DL subframe expects its HARQ ACK/NACK in that UL subframe, and stores the
 * (subframe, sfnOffset, m) triple in that DL subframe's dlFdbkInfo. DL
 * subframes of subsequent radio frames are then cloned from the first frame. */
19191 PRIVATE S16 rgSCHCmnDlANFdbkInit
19196 PRIVATE S16 rgSCHCmnDlANFdbkInit(cell)
19201 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19202 uint8_t maxDlSubfrms = cell->numDlSubfrms;
19206 uint8_t calcSfnOffset;
19208 uint8_t ulSfCnt =0;
19209 RgSchTddSubfrmInfo ulSubfrmInfo;
19210 uint8_t maxUlSubfrms;
19213 ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19214 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19216 /* Generate HARQ ACK/NACK feedback information for each DL sf in a radio frame
19217 * Calculate this information based on DL Association set Index table */
19218 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance sfNum to the next UL subframe of the configuration. */
19220 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19221 RG_SCH_TDD_UL_SUBFRAME)
19223 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* One UL subframe may carry feedback for several DL subframes (index m). */
19227 for(idx=0; idx < rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
19228 numFdbkSubfrms; idx++)
19230 calcSfNum = sfNum - rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
19234 calcSfnOffset = RGSCH_CEIL(-calcSfNum, RGSCH_NUM_SUB_FRAMES);
19241 calcSfNum = ((RGSCH_NUM_SUB_FRAMES * calcSfnOffset) + calcSfNum)\
19242 % RGSCH_NUM_SUB_FRAMES;
/* Map the DL subframe number onto the compact DL subframe array index,
 * accounting for switch-point layout of the TDD configuration. */
19244 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19248 else if((ulSubfrmInfo.switchPoints == 2) && (calcSfNum <= \
19249 RG_SCH_CMN_SPL_SUBFRM_6))
19251 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19255 dlIdx = calcSfNum - maxUlSubfrms;
19258 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = sfNum;
19259 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = calcSfnOffset;
19260 cell->subFrms[dlIdx]->dlFdbkInfo.m = idx;
19262 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19265 /* DL subframes in the subsequent radio frames are initialized
19266 * with the previous radio frames */
19267 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;\
19270 sfNum = dlIdx - rgSchTddNumDlSubfrmTbl[ulDlCfgIdx]\
19271 [RGSCH_NUM_SUB_FRAMES-1];
19272 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = \
19273 cell->subFrms[sfNum]->dlFdbkInfo.subframe;
19274 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = \
19275 cell->subFrms[sfNum]->dlFdbkInfo.sfnOffset;
19276 cell->subFrms[dlIdx]->dlFdbkInfo.m = cell->subFrms[sfNum]->dlFdbkInfo.m;
19282 * @brief This function handles the initialization of uplink association
19283 * set information for each DL subframe.
19288 * Function: rgSCHCmnDlKdashUlAscInit
19289 * Purpose: Each DL sf stores the sfn and sf information of UL sf
19290 * in which it expects HQ ACK/NACK trans. It generates the information
19291 * based on k` in UL association set index table.
19293 * Invoked by: Scheduler
19295 * @param[in] RgSchCellCb* cell
/* NOTE(review): truncated extract; comments only added. For each UL subframe
 * of the TDD configuration, uses the k' column of the UL Association Set
 * Index table to compute the DL subframe that schedules transmission in it,
 * and stores (subframe, sfnOffset) in that DL subframe's ulAscInfo. Entries
 * never written (tracked via the dlPres bitmap) are marked invalid; later
 * radio frames are cloned from the first.
 * NOTE(review): the table is indexed with [ulDlCfgIdx-1] — presumably the k'
 * table has no row for TDD config 0; confirm callers never reach here with
 * ulDlCfgIdx == 0. */
19300 PRIVATE S16 rgSCHCmnDlKdashUlAscInit
19305 PRIVATE S16 rgSCHCmnDlKdashUlAscInit(cell)
19310 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19311 uint8_t maxDlSubfrms = cell->numDlSubfrms;
19316 uint8_t ulSfCnt =0;
19317 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19318 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19319 [RGSCH_NUM_SUB_FRAMES-1];
19320 uint8_t dlPres = 0;
19323 /* Generate ACK/NACK offset information for each DL subframe in a radio frame
19324 * Calculate this information based on K` in UL Association Set table */
19325 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
19327 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19328 RG_SCH_TDD_UL_SUBFRAME)
19330 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* DL subframe = UL subframe minus k', wrapped to a radio frame; a negative
 * intermediate implies the DL subframe lies in an earlier radio frame. */
19334 calcSfNum = (sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum] + \
19335 RGSCH_NUM_SUB_FRAMES) % RGSCH_NUM_SUB_FRAMES;
19336 calcSfnOffset = sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum];
19337 if(calcSfnOffset < 0)
19339 calcSfnOffset = RGSCH_CEIL(-calcSfnOffset, RGSCH_NUM_SUB_FRAMES);
/* Map DL subframe number to the compact DL subframe array index. */
19346 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19350 else if((ulSubfrmInfo.switchPoints == 2) &&
19351 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
19353 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19357 dlIdx = calcSfNum - maxUlSubfrms;
19360 cell->subFrms[dlIdx]->ulAscInfo.subframe = sfNum;
19361 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset = calcSfnOffset;
19363 /* set dlIdx for which ulAscInfo is updated */
19364 dlPres = dlPres | (1 << dlIdx);
19365 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19368 /* Set Invalid information for which ulAscInfo is not present */
19370 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19373 /* If dlPres is 0, ulAscInfo is not present in that DL index */
19374 if(! ((dlPres >> sfCount)&0x01))
19376 cell->subFrms[sfCount]->ulAscInfo.sfnOffset =
19377 RGSCH_INVALID_INFO;
19378 cell->subFrms[sfCount]->ulAscInfo.subframe =
19379 RGSCH_INVALID_INFO;
19383 /* DL subframes in the subsequent radio frames are initialized
19384 * with the previous radio frames */
19385 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;
19389 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19390 cell->subFrms[dlIdx]->ulAscInfo.subframe =
19391 cell->subFrms[sfNum]->ulAscInfo.subframe;
19392 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset =
19393 cell->subFrms[sfNum]->ulAscInfo.sfnOffset;
19400 * @brief This function initialises the 'Np' value for 'p'
19404 * Function: rgSCHCmnDlNpValInit
19405 * Purpose: To initialise the 'Np' value for each 'p'. It is used
19406 * to find the mapping between nCCE and 'p' and used in
19407 * HARQ ACK/NACK reception.
19409 * Invoked by: Scheduler
19411 * @param[in] RgSchCellCb* cell
/* NOTE(review): truncated extract; comments only added. Precomputes the 'Np'
 * table used to map nCCE to 'p' for TDD HARQ ACK/NACK reception:
 * Np(0) = 0 and Np(idx) = dlTotalBw * (idx * NUM_SUBCAR - 4) / 36. */
19416 PRIVATE S16 rgSCHCmnDlNpValInit
19421 PRIVATE S16 rgSCHCmnDlNpValInit(cell)
19428 /* Always Np is 0 for p=0 */
19429 cell->rgSchTddNpValTbl[0] = 0;
19431 for(idx=1; idx < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; idx++)
19433 np = cell->bwCfg.dlTotalBw * (idx * RG_SCH_CMN_NUM_SUBCAR - 4);
19434 cell->rgSchTddNpValTbl[idx] = (uint8_t) (np/36);
19441 * @brief This function handles the creation of RACH preamble
19442 * list to queue the preambles and process at the scheduled
19447 * Function: rgSCHCmnDlCreateRachPrmLst
19448 * Purpose: To create RACH preamble list based on RA window size.
19449 * It is used to queue the preambles and process it at the
19452 * Invoked by: Scheduler
19454 * @param[in] RgSchCellCb* cell
/* NOTE(review): truncated extract (error handling around the allocation is
 * missing from view); comments only added. Allocates the RACH preamble
 * request list sized to the RA-response window: raArrSz frames x max RA-RNTIs
 * per subframe x subframes per frame, each entry a CmLListCp. */
19459 PRIVATE S16 rgSCHCmnDlCreateRachPrmLst
19464 PRIVATE S16 rgSCHCmnDlCreateRachPrmLst(cell)
/* raArrSz is derived from the cell's RA-response window configuration. */
19473 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
19475 lstSize = raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES;
19477 cell->raInfo.maxRaSize = raArrSz;
19478 ret = rgSCHUtlAllocSBuf(cell->instIdx,
19479 (Data **)(&cell->raInfo.raReqLst), (Size)(lstSize * sizeof(CmLListCp)));
19485 cell->raInfo.lstSize = lstSize;
19492 * @brief This function handles the initialization of RACH Response
19493 * information at each DL subframe.
19497 * Function: rgSCHCmnDlRachInfoInit
19498 * Purpose: Each DL subframe stores the sfn and subframe information of
19499 * possible RACH response allowed for UL subframes. It generates
19500 * the information based on PRACH configuration.
19502 * Invoked by: Scheduler
19504 * @param[in] RgSchCellCb* cell
/* NOTE(review): truncated extract (many statements/braces missing from view);
 * comments only added. For every UL/special subframe that can carry a PRACH,
 * computes the RA-response window [startWin, endWin], records every DL
 * subframe inside the window as a possible RAR transmission occasion in the
 * local rachRspLst[sfn][subframe] table, records where stale preambles must
 * be deleted (delInfo), and finally copies the table into the cell via
 * rgSCHCmnDlCpyRachInfo(). */
19509 PRIVATE S16 rgSCHCmnDlRachInfoInit
19514 PRIVATE S16 rgSCHCmnDlRachInfoInit(cell)
19519 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19521 uint8_t ulSfCnt =0;
19522 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19523 [RGSCH_NUM_SUB_FRAMES-1];
19525 RgSchTddRachRspLst rachRspLst[3][RGSCH_NUM_SUB_FRAMES];
19530 uint8_t endSubfrmIdx;
19531 uint8_t startSubfrmIdx;
19533 RgSchTddRachDelInfo *delInfo;
19535 uint8_t numSubfrms;
19538 memset(rachRspLst, 0, sizeof(rachRspLst));
19540 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
19542 /* Include Special subframes */
19543 maxUlSubfrms = maxUlSubfrms + \
19544 rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx].switchPoints;
19545 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Skip pure DL subframes; PRACH occurs only in UL/special subframes. */
19547 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] ==
19548 RG_SCH_TDD_DL_SUBFRAME)
19550 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* RA-response window: opens RARSP_WAIT_PRD + numRaSubFrms after the PRACH
 * subframe and spans raWinSize subframes. */
19554 startWin = (sfNum + RG_SCH_CMN_RARSP_WAIT_PRD + \
19555 ((RgSchCmnCell *)cell->sc.sch)->dl.numRaSubFrms);
19556 endWin = (startWin + cell->rachCfg.raWinSize - 1);
19558 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][startWin%RGSCH_NUM_SUB_FRAMES];
19559 /* Find the next DL subframe starting from Subframe 0 */
19560 if((startSubfrmIdx % RGSCH_NUM_SUB_FRAMES) == 0)
19562 startWin = RGSCH_CEIL(startWin, RGSCH_NUM_SUB_FRAMES);
19563 startWin = startWin * RGSCH_NUM_SUB_FRAMES;
/* Snap the window end back to the last DL subframe at or before endWin. */
19567 rgSchTddLowDlSubfrmIdxTbl[ulDlCfgIdx][endWin%RGSCH_NUM_SUB_FRAMES];
19568 endWin = (endWin/RGSCH_NUM_SUB_FRAMES) * RGSCH_NUM_SUB_FRAMES \
19570 if(startWin > endWin)
19574 /* Find all the possible RACH Response transmission
19575 * time within the RA window size */
19576 startSubfrmIdx = startWin%RGSCH_NUM_SUB_FRAMES;
19577 for(sfnIdx = startWin/RGSCH_NUM_SUB_FRAMES;
19578 sfnIdx <= endWin/RGSCH_NUM_SUB_FRAMES; sfnIdx++)
19580 if(sfnIdx == endWin/RGSCH_NUM_SUB_FRAMES)
19582 endSubfrmIdx = endWin%RGSCH_NUM_SUB_FRAMES;
19586 endSubfrmIdx = RGSCH_NUM_SUB_FRAMES-1;
19589 /* Find all the possible RACH Response transmission
19590 * time within radio frame */
19591 for(subfrmIdx = startSubfrmIdx;
19592 subfrmIdx <= endSubfrmIdx; subfrmIdx++)
19594 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][subfrmIdx] ==
19595 RG_SCH_TDD_UL_SUBFRAME)
19599 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
19600 /* Find the next DL subframe starting from Subframe 0 */
19601 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
19605 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx], subfrmIdx);
19607 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
19608 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset = sfnIdx;
19609 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].subframe[numSubfrms]
19611 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms++;
19613 startSubfrmIdx = RG_SCH_CMN_SUBFRM_0;
19615 /* Update the subframes to be deleted at this subframe */
19616 /* Get the subframe after the end of RA window size */
19619 sfnOffset = endWin/RGSCH_NUM_SUB_FRAMES;
19622 sfnOffset += raArrSz;
19624 sfnIdx = (endWin/RGSCH_NUM_SUB_FRAMES) % raArrSz;
/* NOTE(review): the bound check below validates index endSubfrmIdx-1 while
 * the branch below reads index endSubfrmIdx — looks off-by-one; confirm
 * against the full source and RGSCH_ARRAY_BOUND_CHECK semantics. */
19626 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx],endSubfrmIdx-1);
19627 if((endSubfrmIdx == RGSCH_NUM_SUB_FRAMES) ||
19628 (rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx] ==
19629 RGSCH_NUM_SUB_FRAMES))
19632 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][RG_SCH_CMN_SUBFRM_0];
19636 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx];
19639 delInfo = &rachRspLst[sfnIdx][subfrmIdx].delInfo;
19640 delInfo->sfnOffset = sfnOffset;
19641 delInfo->subframe[delInfo->numSubfrms] = sfNum;
19642 delInfo->numSubfrms++;
19644 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Persist the computed table into the cell control block. */
19647 ret = rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz);
19657 * @brief This function handles the initialization of PHICH information
19658 * for each DL subframe based on PHICH table.
19662 * Function: rgSCHCmnDlPhichOffsetInit
19663 * Purpose: Each DL subf stores the sfn and subf information of UL subframe
19664 * for which it transmits PHICH in this subframe. It generates the information
19665 * based on PHICH table.
19667 * Invoked by: Scheduler
19669 * @param[in] RgSchCellCb* cell
/* NOTE(review): truncated extract; comments only added. For each UL subframe
 * of the TDD configuration, uses the K-PHICH table to compute the DL subframe
 * (and SFN offset) in which the eNB transmits PHICH for that UL subframe, and
 * stores it in phichOffInfo. Entries never written (tracked via the dlPres
 * bitmap) are marked invalid; later radio frames are cloned from the first. */
19674 PRIVATE S16 rgSCHCmnDlPhichOffsetInit
19679 PRIVATE S16 rgSCHCmnDlPhichOffsetInit(cell)
19684 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19685 uint8_t maxDlSubfrms = cell->numDlSubfrms;
19688 uint8_t dlPres = 0;
19689 uint8_t calcSfnOffset;
19691 uint8_t ulSfCnt =0;
19692 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19693 uint8_t maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19694 [RGSCH_NUM_SUB_FRAMES-1];
19697 /* Generate PHICH offset information for each DL subframe in a radio frame
19698 * Calculate this information based on K in PHICH table */
19699 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
19701 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19702 RG_SCH_TDD_UL_SUBFRAME)
19704 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* PHICH subframe = UL subframe + K, split into in-frame number and SFN
 * offset. */
19708 calcSfNum = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) % \
19709 RGSCH_NUM_SUB_FRAMES;
19710 calcSfnOffset = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) / \
19711 RGSCH_NUM_SUB_FRAMES;
/* Map DL subframe number to the compact DL subframe array index. */
19713 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19717 else if((ulSubfrmInfo.switchPoints == 2) &&
19718 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
19720 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19724 dlIdx = calcSfNum - maxUlSubfrms;
19727 cell->subFrms[dlIdx]->phichOffInfo.subframe = sfNum;
19728 cell->subFrms[dlIdx]->phichOffInfo.numSubfrms = 1;
19730 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset = calcSfnOffset;
19732 /* set dlIdx for which phich offset is updated */
19733 dlPres = dlPres | (1 << dlIdx);
19734 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19737 /* Set Invalid information for which phich offset is not present */
19739 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19742 /* If dlPres is 0, phich offset is not present in that DL index */
19743 if(! ((dlPres >> sfCount)&0x01))
19745 cell->subFrms[sfCount]->phichOffInfo.sfnOffset =
19746 RGSCH_INVALID_INFO;
19747 cell->subFrms[sfCount]->phichOffInfo.subframe =
19748 RGSCH_INVALID_INFO;
19749 cell->subFrms[sfCount]->phichOffInfo.numSubfrms = 0;
19753 /* DL subframes in the subsequent radio frames are
19754 * initialized with the previous radio frames */
19755 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms;
19756 dlIdx < maxDlSubfrms; dlIdx++)
19759 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19761 cell->subFrms[dlIdx]->phichOffInfo.subframe =
19762 cell->subFrms[sfNum]->phichOffInfo.subframe;
19764 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset =
19765 cell->subFrms[sfNum]->phichOffInfo.sfnOffset;
19772 * @brief Updation of Sch vars per TTI.
19776 * Function: rgSCHCmnUpdVars
19777 * Purpose: Updation of Sch vars per TTI.
19779 * @param[in] RgSchCellCb *cell
/* NOTE(review): truncated extract (declarations, braces and some branches are
 * missing from view); comments only added. Per-TTI (TDD) recomputation of the
 * common scheduler's working indices: the UL subframe/HARQ-process to
 * schedule (schdIdx/schdHqProcIdx/schdTime), PHICH feedback and adaptive-retx
 * indices (hqFdbkIdx/reTxIdx), the reception-request index (rcpReqIdx), Msg3
 * scheduling indices, SPS UL reservation indices, and RACH parameters. */
19784 Void rgSCHCmnUpdVars
19789 Void rgSCHCmnUpdVars(cell)
19793 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19794 CmLteTimingInfo timeInfo;
19796 uint8_t ulSubframe;
19797 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
19798 uint8_t msg3Subfrm;
19801 /* ccpu00132654-ADD- Initializing all the indices in every subframe*/
19802 rgSCHCmnInitVars(cell);
19804 idx = (cell->crntTime.slot + TFU_ULCNTRL_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
19805 /* Calculate the UL scheduling subframe idx based on the
/* A non-zero entry in the PUSCH K table means this DL subframe can carry a
 * DCI-0 that schedules an UL transmission K subframes later. */
19807 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][idx] != 0)
19809 /* PUSCH transmission is based on offset from DL
19810 * PDCCH scheduling */
19811 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
19812 ulSubframe = rgSchTddPuschTxKTbl[ulDlCfgIdx][timeInfo.subframe];
19813 /* Add the DCI-0 to PUSCH time to get the time of UL subframe */
19814 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, ulSubframe);
19816 cellUl->schdTti = timeInfo.sfn * 10 + timeInfo.subframe;
19818 /* Fetch the corresponding UL subframe Idx in UL sf array */
19819 cellUl->schdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19820 /* Fetch the corresponding UL Harq Proc ID */
19821 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
19822 cellUl->schdTime = timeInfo;
/* M-value > 0 means PHICH is carried in this DL subframe. */
19824 Mval = rgSchTddPhichMValTbl[ulDlCfgIdx][idx];
19827 /* Fetch the tx time for DL HIDCI-0 */
19828 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
19829 /* Fetch the corresponding n-k tx time of PUSCH */
19830 cellUl->hqFdbkIdx[0] = rgSCHCmnGetPhichUlSfIdx(&timeInfo, cell);
19831 /* Retx will happen according to the Pusch k table */
19832 cellUl->reTxIdx[0] = cellUl->schdIdx;
/* TDD configuration 0 needs special retransmission-index handling. */
19834 if(ulDlCfgIdx == 0)
19836 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[0] */
19837 cellUl->reTxIdx[0] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
19838 cellUl->hqFdbkIdx[0]);
19841 /* At Idx 1 store the UL SF adjacent(left) to the UL SF
19843 cellUl->hqFdbkIdx[1] = (cellUl->hqFdbkIdx[0]-1 +
19844 cellUl->numUlSubfrms) % cellUl->numUlSubfrms;
19845 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[1] */
19846 cellUl->reTxIdx[1] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
19847 cellUl->hqFdbkIdx[1]);
/* Reception-request index: only valid when the delta lands on an UL sf. */
19852 idx = (cell->crntTime.slot + TFU_RECPREQ_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
19853 if (rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] == RG_SCH_TDD_UL_SUBFRAME)
19855 RGSCHCMNADDTOCRNTTIME(cell->crntTime, timeInfo, TFU_RECPREQ_DLDELTA)
19856 cellUl->rcpReqIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19858 idx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) % RGSCH_NUM_SUB_FRAMES;
19860 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
19861 special subframe */
19862 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] != RG_SCH_TDD_UL_SUBFRAME)
19864 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
19865 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
19866 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, msg3Subfrm);
19867 cellUl->msg3SchdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19868 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* SPS UL reservation: zero table entry means no reservation this subframe. */
19871 if(!rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][idx])
19873 cellUl->spsUlRsrvIdx = RGSCH_INVALID_INFO;
19877 /* introduce some reuse with above code? */
19879 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
19880 //offst = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
19881 offst = rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][timeInfo.subframe];
19882 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, offst);
19883 cellUl->spsUlRsrvIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19884 /* The harq proc continues to be accessed and used the same delta before
19885 * actual data occurance, and hence use the same idx */
19886 cellUl->spsUlRsrvHqProcIdx = cellUl->schdHqProcIdx;
19890 /* RACHO: update cmn sched specific RACH variables,
19891 * mainly the prachMaskIndex */
19892 rgSCHCmnUpdRachParam(cell);
19898 * @brief To get 'p' value from nCCE.
19902 * Function: rgSCHCmnGetPValFrmCCE
19903 * Purpose: Gets 'p' value for HARQ ACK/NACK reception from CCE.
19905 * @param[in] RgSchCellCb *cell
19906 * @param[in] uint8_t cce
/* NOTE(review): truncated extract (the return statements are missing from
 * view); comments only added. Maps a CCE index to its 'p' value by scanning
 * the precomputed Np table (see rgSCHCmnDlNpValInit): the first entry whose
 * Np exceeds `cce` determines p. */
19911 uint8_t rgSCHCmnGetPValFrmCCE
19917 uint8_t rgSCHCmnGetPValFrmCCE(cell, cce)
19924 for(i=1; i < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; i++)
19926 if(cce < cell->rgSchTddNpValTbl[i])
19935 /***********************************************************
19937 * Func : rgSCHCmnUlAdapRetx
19939 * Desc : Adaptive retransmission for an allocation.
19947 **********************************************************/
/* NOTE(review): truncated extract; comments only added. Prepares an adaptive
 * UL retransmission: advances the HARQ process via rgSCHUhmRetx(), then sets
 * the grant's current MCS — from the RV-to-IMCS table when a non-zero
 * redundancy version applies, otherwise keeping the original iMcs. */
19949 PRIVATE Void rgSCHCmnUlAdapRetx
19951 RgSchUlAlloc *alloc,
19952 RgSchUlHqProcCb *proc
19955 PRIVATE Void rgSCHCmnUlAdapRetx(alloc, proc)
19956 RgSchUlAlloc *alloc;
19957 RgSchUlHqProcCb *proc;
19961 rgSCHUhmRetx(proc, alloc);
/* RV > 0 is signalled through reserved IMCS values (29-31 range table). */
19963 if (proc->rvIdx != 0)
19965 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[proc->rvIdx];
19970 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
19976 * @brief Scheduler invocation per TTI.
19980 * Function: rgSCHCmnHdlUlInactUes
19983 * Invoked by: Common Scheduler
19985 * @param[in] RgSchCellCb *cell
/* NOTE(review): truncated extract; comments only added. Per-TTI hook:
 * collects UEs made UL-inactive by measurement gaps / ACK-NACK repetition
 * into a local list and hands them to the specific UL scheduler's
 * inactivation callback. */
19989 PRIVATE Void rgSCHCmnHdlUlInactUes
19994 PRIVATE Void rgSCHCmnHdlUlInactUes(cell)
19998 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
19999 CmLListCp ulInactvLst;
20000 /* Get a List of Inactv UEs for UL*/
20001 cmLListInit(&ulInactvLst);
20003 /* Trigger Spfc Schedulers with Inactive UEs */
20004 rgSCHMeasGapANRepGetUlInactvUe (cell, &ulInactvLst);
20005 /* take care of this in UL retransmission */
20006 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInactvLst);
20012 * @brief Scheduler invocation per TTI.
20016 * Function: rgSCHCmnHdlDlInactUes
20019 * Invoked by: Common Scheduler
20021 * @param[in] RgSchCellCb *cell
/* NOTE(review): truncated extract; comments only added. DL counterpart of
 * rgSCHCmnHdlUlInactUes: collects UEs made DL-inactive by measurement gaps /
 * ACK-NACK repetition and hands them to the specific DL scheduler's
 * inactivation callback. */
20025 PRIVATE Void rgSCHCmnHdlDlInactUes
20030 PRIVATE Void rgSCHCmnHdlDlInactUes(cell)
20034 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20035 CmLListCp dlInactvLst;
20036 /* Get a List of Inactv UEs for DL */
20037 cmLListInit(&dlInactvLst);
20039 /* Trigger Spfc Schedulers with Inactive UEs */
20040 rgSCHMeasGapANRepGetDlInactvUe (cell, &dlInactvLst);
20042 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInactvLst);
20046 /* RACHO: Rach handover functions start here */
20047 /***********************************************************
20049 * Func : rgSCHCmnUeIdleExdThrsld
20051 * Desc : Return ROK if the UE has been idle for more than the idle threshold
20060 **********************************************************/
/* NOTE(review): truncated extract (return statements missing from view);
 * comments only added. Compares the subframe distance between the current
 * time and the UE's last UL transmission against the UE idle threshold. */
20062 PRIVATE S16 rgSCHCmnUeIdleExdThrsld
20068 PRIVATE S16 rgSCHCmnUeIdleExdThrsld(cell, ue)
20073 /* Time difference in subframes */
20074 uint32_t sfDiff = RGSCH_CALC_SF_DIFF(cell->crntTime, ue->ul.ulTransTime);
20077 if (sfDiff > (uint32_t)RG_SCH_CMN_UE_IDLE_THRSLD(ue))
20089 * @brief Scheduler processing for Ded Preambles on cell configuration.
20093 * Function : rgSCHCmnCfgRachDedPrm
20095 * This function does requisite initialisation
20096 * for RACH Ded Preambles.
20099 * @param[in] RgSchCellCb *cell
/* Cell-configuration time initialisation of dedicated RACH preambles:
 * copies the configured preamble set (start/size), initialises the
 * handover and PDCCH-order UE lists and the rapId->UE map, then seeds
 * "applFrm" (the next applicable PRACH occasion) so that it is always
 * >= crntTime + RG_SCH_CMN_MIN_PRACH_OPPR_GAP, as required by the
 * RGSCH_CALC_SF_DIFF logic used elsewhere.
 * No-op when no dedicated preamble set is configured (pres == NOTPRSNT). */
20103 PRIVATE Void rgSCHCmnCfgRachDedPrm
20108 PRIVATE Void rgSCHCmnCfgRachDedPrm(cell)
20112 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20113 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
20117 if (cell->macPreambleSet.pres == NOTPRSNT)
20121 cellSch->rachCfg.numDedPrm = cell->macPreambleSet.size;
20122 cellSch->rachCfg.dedPrmStart = cell->macPreambleSet.start;
20123 /* Initialize handover List */
20124 cmLListInit(&cellSch->rachCfg.hoUeLst);
20125 /* Initialize pdcch Order List */
20126 cmLListInit(&cellSch->rachCfg.pdcchOdrLst);
20128 /* Initialize the rapId to UE mapping structure */
20129 for (cnt = 0; cnt<cellSch->rachCfg.numDedPrm; cnt++)
20131 cellSch->rachCfg.rapIdMap[cnt].rapId = cellSch->rachCfg.dedPrmStart + \
20133 cmLListInit(&cellSch->rachCfg.rapIdMap[cnt].assgndUes);
20135 /* Perform Prach Mask Idx, remDedPrm, applFrm initializations */
20136 /* Set remDedPrm as numDedPrm */
20137 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
20138 /* Initialize applFrm */
20139 cellSch->rachCfg.prachMskIndx = 0;
/* SFN selection depends on the configured PRACH occasion parity:
 * EVEN -> round current SFN up to the next even frame,
 * ODD  -> round up to the next odd frame,
 * ANY  -> start from the current frame. */
20140 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_EVEN)
20142 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + \
20143 (cell->crntTime.sfn % 2)) % RGSCH_MAX_SFN;
20146 else if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ODD)
20148 if((cell->crntTime.sfn%2) == 0)
20150 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + 1)\
20157 cellSch->rachCfg.applFrm.sfn = cell->crntTime.sfn;
20159 /* Initialize cellSch->rachCfg.applFrm as >= crntTime.
20160 * This is because of RGSCH_CALC_SF_DIFF logic */
20161 if (cellSch->rachCfg.applFrm.sfn == cell->crntTime.sfn)
/* Find the first configured PRACH subframe in this frame that is still
 * ahead of the current slot; if none, roll over to the next valid SFN
 * (+1 for SFN_ANY, +2 to preserve even/odd parity otherwise). */
20163 while (cellSch->rachCfg.prachMskIndx < cell->rachCfg.raOccasion.size)
20165 if (cell->crntTime.slot <\
20166 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx])
20170 cellSch->rachCfg.prachMskIndx++;
20172 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size)
20174 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
20176 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) %\
20181 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) %\
20184 cellSch->rachCfg.prachMskIndx = 0;
20186 cellSch->rachCfg.applFrm.slot = \
20187 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20191 cellSch->rachCfg.applFrm.slot = \
20192 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20195 /* Note first param to this macro should always be the latest in time */
20196 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
/* Keep advancing the PRACH mask index until the chosen occasion is at
 * least "gap" subframes in the future. */
20197 while (sfDiff <= gap)
20199 rgSCHCmnUpdNxtPrchMskIdx(cell);
20200 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
20207 * @brief Updates the PRACH MASK INDEX.
20211 * Function: rgSCHCmnUpdNxtPrchMskIdx
20212 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
20213 * CFG is always >= "n"+"DELTA", where "n" is the crntTime
20214 * of the cell. If not, applFrm is updated to the next avl
20215 * PRACH opportunity as per the PRACH Cfg Index configuration.
20218 * Invoked by: Common Scheduler
20220 * @param[in] RgSchCellCb *cell
/* Advances "applFrm" to the next PRACH opportunity as per the PRACH
 * configuration: either the next subframe entry within the current SFN,
 * or (when all entries in this SFN are exhausted) the next valid SFN
 * (+1 for SFN_ANY, +2 to keep even/odd parity) starting at entry 0. */
20224 PRIVATE Void rgSCHCmnUpdNxtPrchMskIdx
20229 PRIVATE Void rgSCHCmnUpdNxtPrchMskIdx(cell)
20233 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20235 /* Determine the next prach mask Index */
20236 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size - 1)
20238 /* PRACH within applFrm.sfn are done, go to next AVL sfn */
20239 cellSch->rachCfg.prachMskIndx = 0;
20240 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
20242 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) % \
20245 else/* RGR_SFN_EVEN or RGR_SFN_ODD */
20247 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) % \
20250 cellSch->rachCfg.applFrm.slot = cell->rachCfg.raOccasion.\
20253 else /* applFrm.sfn is still valid */
20255 cellSch->rachCfg.prachMskIndx += 1;
/* Bound check before indexing subFrameNum[] with the new mask index */
20256 if ( cellSch->rachCfg.prachMskIndx < RGR_MAX_SUBFRAME_NUM )
20258 cellSch->rachCfg.applFrm.slot = \
20259 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20266 * @brief Updates the Ded preamble RACH parameters
20271 * Function: rgSCHCmnUpdRachParam
20272 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
20273 * CFG is always >= "n"+"6"+"DELTA", where "n" is the crntTime
20274 * of the cell. If not, applFrm is updated to the next avl
20275 * PRACH opportunity as per the PRACH Cfg Index configuration,
20276 * accordingly the "remDedPrm" is reset to "numDedPrm" and
20277 * "prachMskIdx" field is updated as per "applFrm".
20280 * Invoked by: Common Scheduler
20282 * @param[in] RgSchCellCb *cell
/* Per-TTI refresh of dedicated-preamble RACH state: when the currently
 * chosen PRACH occasion ("applFrm") is no longer far enough in the future
 * (<= MIN_PRACH_OPPR_GAP subframes away), move to the next occasion and
 * replenish the dedicated preamble pool (remDedPrm = numDedPrm).
 * No-op when dedicated preambles are not configured. */
20286 PRIVATE Void rgSCHCmnUpdRachParam
20291 PRIVATE Void rgSCHCmnUpdRachParam(cell)
20296 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20297 uint32_t gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
20300 if (cell->macPreambleSet.pres == NOTPRSNT)
20304 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, \
20308 /* applFrm is still a valid next Prach Opportunity */
20311 rgSCHCmnUpdNxtPrchMskIdx(cell);
20312 /* Reset remDedPrm as numDedPrm */
20313 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
20319 * @brief Dedicated Preamble allocation function.
20323 * Function: rgSCHCmnAllocPOParam
20324 * Purpose: Allocate pdcch, rapId and PrachMskIdx.
20325 * Set mapping of UE with the allocated rapId.
20327 * Invoked by: Common Scheduler
20329 * @param[in] RgSchCellCb *cell
20330 * @param[in] RgSchDlSf *dlSf
20331 * @param[in] RgSchUeCb *ue
20332 * @param[out] RgSchPdcch **pdcch
20333 * @param[out] uint8_t *rapId
20334 * @param[out] uint8_t *prachMskIdx
/* Allocates the resources needed to send a PDCCH order to one UE:
 * a DCI format 1A PDCCH, and (when dedicated preambles are configured)
 * a dedicated rapId plus the PRACH mask index for the chosen occasion.
 * Also records the UE against the allocated rapId so the later RA request
 * can be matched back to it. Returns ROK on success; fails when no
 * dedicated preamble remains or no PDCCH could be allocated. */
20338 PRIVATE S16 rgSCHCmnAllocPOParam
20343 RgSchPdcch **pdcch,
20345 uint8_t *prachMskIdx
20348 PRIVATE S16 rgSCHCmnAllocPOParam(cell, dlSf, ue, pdcch, rapId, prachMskIdx)
20352 RgSchPdcch **pdcch;
20354 uint8_t *prachMskIdx;
20358 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20359 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20362 if (cell->macPreambleSet.pres == PRSNT_NODEF)
20364 if (cellSch->rachCfg.remDedPrm == 0)
20368 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
20369 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
20373 /* The stored prachMskIdx is the index of PRACH Opportunities in
20374 * raOccasions.subframes[].
20375 * Converting the same to the actual PRACHMskIdx to be transmitted. */
20376 *prachMskIdx = cellSch->rachCfg.prachMskIndx + 1;
20377 /* Distribution starts from dedPrmStart till dedPrmStart + numDedPrm */
20378 *rapId = cellSch->rachCfg.dedPrmStart +
20379 cellSch->rachCfg.numDedPrm - cellSch->rachCfg.remDedPrm;
20380 cellSch->rachCfg.remDedPrm--;
20381 /* Map UE with the allocated RapId */
20382 ueDl->rachInfo.asgnOppr = cellSch->rachCfg.applFrm;
20383 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, cellSch->rachCfg.rapIdMap, (*rapId - cellSch->rachCfg.dedPrmStart));
20384 cmLListAdd2Tail(&cellSch->rachCfg.rapIdMap[*rapId - cellSch->rachCfg.dedPrmStart].assgndUes,
20385 &ueDl->rachInfo.rapIdLnk);
20386 ueDl->rachInfo.rapIdLnk.node = (PTR)ue;
20387 ueDl->rachInfo.poRapId = *rapId;
20389 else /* if dedicated preambles not configured */
20391 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
20392 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
20404 * @brief Downlink Scheduling Handler.
20408 * Function: rgSCHCmnGenPdcchOrder
20409 * Purpose: For each UE in PO Q, grab a PDCCH,
20410 * get an available ded RapId and fill PDCCH
20411 * with PO information.
20413 * Invoked by: Common Scheduler
20415 * @param[in] RgSchCellCb *cell
20416 * @param[in] RgSchDlSf *dlSf
/* Walks the PDCCH-order queue: for each eligible UE (not measuring, not
 * UL-inactive due to a measurement gap, not DRX-inactive in DL) it grabs
 * a PDCCH plus rapId/prachMskIdx, fills the PDCCH with the order, removes
 * the UE from the queue and resets the UE's power-control state. */
20420 PRIVATE Void rgSCHCmnGenPdcchOrder
20426 PRIVATE Void rgSCHCmnGenPdcchOrder(cell, dlSf)
20431 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20432 CmLList *node = cellSch->rachCfg.pdcchOdrLst.first;
20435 uint8_t prachMskIdx;
20436 RgSchPdcch *pdcch = NULLP;
20441 ue = (RgSchUeCb *)node->node;
20443 /* Skip sending for this subframe if UE is Measuring, or inactive in UL
20444 * due to MeasGap, or inactive due to DRX
20446 if ((ue->measGapCb.isMeasuring == TRUE) ||
20447 (ue->ul.ulInactvMask & RG_MEASGAP_INACTIVE) ||
20448 (ue->isDrxEnabled &&
20449 ue->dl.dlInactvMask & RG_DRX_INACTIVE)
20454 if (rgSCHCmnAllocPOParam(cell, dlSf, ue, &pdcch, &rapId,\
20455 &prachMskIdx) != ROK)
20457 /* No More rapIds left for the valid next avl Opportunity.
20458 * Unsatisfied UEs here would be given a chance, when the
20459 * prach Mask Index changes as per rachUpd every TTI */
20461 /* PDCCH can also be ordered with rapId=0, prachMskIdx=0
20462 * so that UE triggers a RACH procedure with non-dedicated preamble.
20463 * But the implementation here does not do this. Instead, the "break"
20464 * here implies, that PDCCH Odr always given with valid rapId!=0,
20465 * prachMskIdx!=0 if dedicated preambles are configured.
20466 * If not configured, then trigger a PO with rapId=0,prchMskIdx=0*/
20469 /* Fill pdcch with pdcch odr information */
20470 rgSCHCmnFillPdcchOdr2Sf(cell, ue, pdcch, rapId, prachMskIdx);
20471 /* Remove this UE from the PDCCH ORDER QUEUE */
20472 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
20473 /* Reset UE's power state */
20474 rgSCHPwrUeReset(cell, ue);
20481 * @brief This function add UE to PdcchOdr Q if not already present.
20485 * Function: rgSCHCmnDlAdd2PdcchOdrQ
20488 * Invoked by: CMN Scheduler
20490 * @param[in] RgSchCellCb* cell
20491 * @param[in] RgSchUeCb* ue
/* Enqueues the UE on the cell's PDCCH-order list, guarding against a
 * double add by checking that the UE's poLnk node is still NULLP. */
20496 PRIVATE Void rgSCHCmnDlAdd2PdcchOdrQ
20502 PRIVATE Void rgSCHCmnDlAdd2PdcchOdrQ(cell, ue)
20507 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20508 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20511 if (ueDl->rachInfo.poLnk.node == NULLP)
20513 cmLListAdd2Tail(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
20514 ueDl->rachInfo.poLnk.node = (PTR)ue;
20521 * @brief This function rmvs UE to PdcchOdr Q if not already present.
20525 * Function: rgSCHCmnDlRmvFrmPdcchOdrQ
20528 * Invoked by: CMN Scheduler
20530 * @param[in] RgSchCellCb* cell
20531 * @param[in] RgSchUeCb* ue
/* Removes the UE from the cell's PDCCH-order list and clears its poLnk
 * node pointer so rgSCHCmnDlAdd2PdcchOdrQ can re-add it later.
 * NOTE(review): no presence check visible here — caller appears expected
 * to ensure the UE is queued; confirm against full source. */
20536 PRIVATE Void rgSCHCmnDlRmvFrmPdcchOdrQ
20542 PRIVATE Void rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue)
20547 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20548 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20551 cmLListDelFrm(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
20552 ueDl->rachInfo.poLnk.node = NULLP;
20557 * @brief Fill pdcch with PDCCH order information.
20561 * Function: rgSCHCmnFillPdcchOdr2Sf
20562 * Purpose: Fill PDCCH with PDCCH order information,
20564 * Invoked by: Common Scheduler
20566 * @param[in] RgSchUeCb *ue
20567 * @param[in] RgSchPdcch *pdcch
20568 * @param[in] uint8_t rapId
20569 * @param[in] uint8_t prachMskIdx
/* Fills an already-allocated PDCCH with PDCCH-order content: DCI format
 * 1A, isPdcchOrder = TRUE, preamble index and PRACH mask index. If the
 * UE has aperiodic CQI configured, also flags an APer CQI request for
 * the serving CC right after the order (CR ccpu00144525).
 * NOTE(review): the body references "cell" (RG_SCH_CMN_GET_ACQICB) though
 * it is not in the visible parameter fragments — this dump elides lines;
 * confirm the full parameter list against the complete source. */
20573 PRIVATE Void rgSCHCmnFillPdcchOdr2Sf
20579 uint8_t prachMskIdx
20582 PRIVATE Void rgSCHCmnFillPdcchOdr2Sf(ue, pdcch, rapId, prachMskIdx)
20587 uint8_t prachMskIdx;
20590 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
20593 pdcch->rnti = ue->ueId;
20594 pdcch->dci.dciFormat = TFU_DCI_FORMAT_1A;
20595 pdcch->dci.u.format1aInfo.isPdcchOrder = TRUE;
20596 pdcch->dci.u.format1aInfo.t.pdcchOrder.preambleIdx = rapId;
20597 pdcch->dci.u.format1aInfo.t.pdcchOrder.prachMaskIdx = prachMskIdx;
20599 /* Request for APer CQI immediately after PDCCH Order */
20600 /* CR ccpu00144525 */
20602 if(ue->dl.ueDlCqiCfg.aprdCqiCfg.pres)
20604 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
20605 acqiCb->aCqiTrigWt = 0;
20614 * @brief UE deletion for scheduler.
20618 * Function : rgSCHCmnDelRachInfo
20620 * This functions deletes all scheduler information
20621 * pertaining to an UE.
20623 * @param[in] RgSchCellCb *cell
20624 * @param[in] RgSchUeCb *ue
/* UE-deletion cleanup of RACH scheduler state: unlinks the UE from the
 * PDCCH-order queue, the handover UE list, and the rapId->UE assignment
 * list (indexed via poRapId - dedPrmStart), clearing each link node. */
20628 PRIVATE Void rgSCHCmnDelRachInfo
20634 PRIVATE Void rgSCHCmnDelRachInfo(cell, ue)
20639 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20640 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20644 if (ueDl->rachInfo.poLnk.node)
20646 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
20648 if (ueDl->rachInfo.hoLnk.node)
20650 cmLListDelFrm(&cellSch->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
20651 ueDl->rachInfo.hoLnk.node = NULLP;
20653 if (ueDl->rachInfo.rapIdLnk.node)
20655 rapIdIdx = ueDl->rachInfo.poRapId - cellSch->rachCfg.dedPrmStart;
20656 cmLListDelFrm(&cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes,
20657 &ueDl->rachInfo.rapIdLnk);
20658 ueDl->rachInfo.rapIdLnk.node = NULLP;
20664 * @brief This function retrieves the ue which has sent this raReq
20665 * and it allocates grant for UEs undergoing (for which RAR
20666 * is being generated) HandOver/PdcchOrder.
20671 * Function: rgSCHCmnHdlHoPo
20672 * Purpose: This function retrieves the ue which has sent this raReq
20673 * and it allocates grant for UEs undergoing (for which RAR
20674 * is being generated) HandOver/PdcchOrder.
20676 * Invoked by: Common Scheduler
20678 * @param[in] RgSchCellCb *cell
20679 * @param[out] CmLListCp *raRspLst
20680 * @param[in] RgSchRaReqInfo *raReq
/* Entry point for RA requests from UEs in handover or PDCCH-order:
 * notifies DRX of the dedicated RA (if DRX is enabled for the UE) and
 * then allocates the Msg3 grant via rgSCHCmnAllocPoHoGrnt. */
20685 PRIVATE Void rgSCHCmnHdlHoPo
20688 CmLListCp *raRspLst,
20689 RgSchRaReqInfo *raReq
20692 PRIVATE Void rgSCHCmnHdlHoPo(cell, raRspLst, raReq)
20694 CmLListCp *raRspLst;
20695 RgSchRaReqInfo *raReq;
20698 RgSchUeCb *ue = raReq->ue;
20700 if ( ue->isDrxEnabled )
20702 rgSCHDrxDedRa(cell,ue);
20704 rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq);
20709 * @brief This function retrieves the UE which has sent this raReq
20710 * for handover case.
20715 * Function: rgSCHCmnGetHoUe
20716 * Purpose: This function retrieves the UE which has sent this raReq
20717 * for handover case.
20719 * Invoked by: Common Scheduler
20721 * @param[in] RgSchCellCb *cell
20722 * @param[in] RgSchRaReqInfo *raReq
20723 * @return RgSchUeCb*
/* Linear scan of the handover UE list for the UE whose assigned handover
 * rapId matches the one received in the RA request; the matching UE is
 * the return value (list iteration continues via node links elided in
 * this dump). */
20727 RgSchUeCb* rgSCHCmnGetHoUe
20733 RgSchUeCb* rgSCHCmnGetHoUe(cell, rapId)
20738 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20742 RgSchCmnDlUe *ueDl;
20744 ueLst = &cellSch->rachCfg.hoUeLst;
20745 node = ueLst->first;
20748 ue = (RgSchUeCb *)node->node;
20750 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20751 if (ueDl->rachInfo.hoRapId == rapId)
/* Removes the dedicated-preamble association for the given preambleId:
 * finds the handover UE assigned that rapId and unlinks it from the
 * handover UE list.
 * NOTE(review): the K&R-style definition line below lacks a return type
 * in this listing ("PRIVATE rgSCHCmnDelDedPreamble") while the prototype
 * says Void — likely a dump artifact; verify against the full source. */
20760 PRIVATE Void rgSCHCmnDelDedPreamble
20766 PRIVATE rgSCHCmnDelDedPreamble(cell, preambleId)
20768 uint8_t preambleId;
20771 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20775 RgSchCmnDlUe *ueDl;
20777 ueLst = &cellSch->rachCfg.hoUeLst;
20778 node = ueLst->first;
20781 ue = (RgSchUeCb *)node->node;
20783 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20784 if (ueDl->rachInfo.hoRapId == preambleId)
20786 cmLListDelFrm(ueLst, &ueDl->rachInfo.hoLnk);
20787 ueDl->rachInfo.hoLnk.node = (PTR)NULLP;
20793 * @brief This function retrieves the UE which has sent this raReq
20794 * for PDCCH Order case.
20799 * Function: rgSCHCmnGetPoUe
20800 * Purpose: This function retrieves the UE which has sent this raReq
20801 * for PDCCH Order case.
20803 * Invoked by: Common Scheduler
20805 * @param[in] RgSchCellCb *cell
20806 * @param[in] RgSchRaReqInfo *raReq
20807 * @return RgSchUeCb*
/* Finds the UE that answered a PDCCH order: walks the assignment list for
 * the received rapId, unlinking every UE it visits (stale associations
 * from earlier occasions are dropped unconditionally), and matches on the
 * PRACH occasion timing (asgnOppr == timingInfo) to identify the sender. */
20811 RgSchUeCb* rgSCHCmnGetPoUe
20815 CmLteTimingInfo timingInfo
20818 RgSchUeCb* rgSCHCmnGetPoUe(cell, rapId, timingInfo)
20821 CmLteTimingInfo timingInfo;
20824 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20828 RgSchCmnDlUe *ueDl;
20831 rapIdIdx = rapId -cellSch->rachCfg.dedPrmStart;
20832 ueLst = &cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes;
20833 node = ueLst->first;
20836 ue = (RgSchUeCb *)node->node;
20838 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20839 /* Remove UEs irrespective.
20840 * Old UE associations are removed.*/
20841 cmLListDelFrm(ueLst, &ueDl->rachInfo.rapIdLnk);
20842 ueDl->rachInfo.rapIdLnk.node = (PTR)NULLP;
20843 if (RGSCH_TIMEINFO_SAME(ueDl->rachInfo.asgnOppr, timingInfo))
20854 * @brief This function returns the valid UL cqi for a given UE.
20858 * Function: rgSCHCmnUlGetCqi
20859 * Purpose: This function returns the "valid UL cqi" for a given UE
20860 * based on UE category
20862 * Invoked by: Scheduler
20864 * @param[in] RgSchUeCb *ue
20865 * @param[in] uint8_t ueCtgy
/* Returns the UL CQI to use for the UE. Starts from maxUlCqi and takes
 * the reported CQI (validUlCqi / crntUlCqi[0] depending on the elided
 * #ifdef branches) only when it does not exceed maxUlCqi for non-CAT-5
 * UEs — i.e. CAT-5 UEs are allowed the reported CQI unclamped. */
20869 uint8_t rgSCHCmnUlGetCqi
20873 CmLteUeCategory ueCtgy
20876 uint8_t rgSCHCmnUlGetCqi(cell, ue, ueCtgy)
20879 CmLteUeCategory ueCtgy;
20882 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
20886 cqi = ueUl->maxUlCqi;
20888 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
20889 (ueUl->validUlCqi > ueUl->maxUlCqi)))
20891 cqi = ueUl->validUlCqi;
20894 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
20895 (ueUl->crntUlCqi[0] > ueUl->maxUlCqi )))
20897 cqi = ueUl->crntUlCqi[0];
20901 }/* End of rgSCHCmnUlGetCqi */
20903 /***********************************************************
20905 * Func : rgSCHCmnUlRbAllocForPoHoUe
20907 * Desc : Do uplink RB allocation for a HO/PO UE.
20911 * Notes: Note that as of now, for retx, maxRb
20912 * is not considered. Alternatives, such
20913 * as dropping retx if it crosses maxRb
20914 * could be considered.
20918 **********************************************************/
/* Uplink RB allocation for a HO/PDCCH-order UE's Msg3 grant:
 * derives iTbs/iMcs from the UE's UL CQI (clamped to the max Msg3 MCS),
 * sizes the allocation in subbands from the requested bytes and spectral
 * efficiency, reserves subbands in the Msg3 subframe, then fills the
 * grant (modulation order, MCS, hopping off, datSz from rgTbSzTbl) and
 * starts a new HARQ transmission on the Msg3 HARQ process.
 * Returns ROK on success; fails when no hole or no UlAlloc is available.
 * Note: maxRb is not applied to retx (see header comment in source). */
20920 PRIVATE S16 rgSCHCmnUlRbAllocForPoHoUe
20928 PRIVATE S16 rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, maxRb)
20935 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
20936 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
20937 uint8_t sbSize = cellUl->sbSize;
20938 uint32_t maxBits = ue->ul.maxBytesPerUePerTti*8;
20940 RgSchUlAlloc *alloc;
20950 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->msg3SchdHqProcIdx];
20951 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* No free hole in this UL subframe -> nothing to allocate */
20953 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
20957 /*MS_WORKAROUND for HO ccpu00121116*/
20958 cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
20959 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend], cqi);
20960 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
20961 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Step CQI down (elided in dump) until MCS fits the Msg3 limit */
20962 while(iMcs > RG_SCH_CMN_MAX_MSG3_IMCS)
20965 iTbs = rgSchCmnUlCqiToTbsTbl[(uint8_t)cell->isCpUlExtend][cqi];
20966 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg);
20968 /* Filling the modorder in the grant structure*/
20969 RG_SCH_UL_MCS_TO_MODODR(iMcs,modOdr);
/* Spectral-efficiency table depends on normal vs extended UL CP */
20970 if (!cell->isCpUlExtend)
20972 eff = rgSchCmnNorUlEff[0][iTbs];
20976 eff = rgSchCmnExtUlEff[0][iTbs];
20979 bits = ueUl->alloc.reqBytes * 8;
20981 #if (ERRCLASS & ERRCLS_DEBUG)
/* Clamp the request between the minimum TB for this iTbs and the
 * per-UE-per-TTI byte cap, converting bits -> PRBs -> subbands */
20988 if (bits < rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs))
20991 nPrb = numSb * sbSize;
20995 if (bits > maxBits)
20998 nPrb = bits * 1024 / eff / RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
21003 numSb = nPrb / sbSize;
21007 /*ccpu00128775:MOD-Change to get upper threshold nPrb*/
21008 nPrb = RGSCH_CEIL((RGSCH_CEIL(bits * 1024, eff)),
21009 RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl));
21014 numSb = RGSCH_DIV_ROUND(nPrb, sbSize);
21019 alloc = rgSCHCmnUlSbAlloc(sf, (uint8_t)RGSCH_MIN(numSb, cellUl->maxSbPerUe),\
21021 if (alloc == NULLP)
21023 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
21024 "rgSCHCmnUlRbAllocForPoHoUe(): Could not get UlAlloc");
21027 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
21029 /* Filling the modorder in the grant structure start*/
21030 alloc->grnt.modOdr = (TfuModScheme) modOdr;
21031 alloc->grnt.iMcs = iMcs;
21032 alloc->grnt.iMcsCrnt = iMcsCrnt;
21033 alloc->grnt.hop = 0;
21034 /* Fix for ccpu00123915*/
21035 alloc->forMsg3 = TRUE;
21036 alloc->hqProc = proc;
21037 alloc->hqProc->ulSfIdx = cellUl->msg3SchdIdx;
21039 alloc->rnti = ue->ueId;
21040 /* updating initNumRbs in case of HO */
21042 ue->initNumRbs = alloc->grnt.numRb;
21044 ueUl->alloc.alloc = alloc;
21045 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
21046 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
21047 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
21048 /* MS_WORKAROUND for HO ccpu00121124*/
21049 /*[Adi temp change] Need to fill modOdr */
21050 RG_SCH_UL_MCS_TO_MODODR(alloc->grnt.iMcsCrnt,alloc->grnt.modOdr);
21051 rgSCHUhmNewTx(proc, ueUl->hqEnt.maxHqRetx, alloc);
21052 /* No grant attr recorded now */
21057 * @brief This function allocates grant for UEs undergoing (for which RAR
21058 * is being generated) HandOver/PdcchOrder.
21063 * Function: rgSCHCmnAllocPoHoGrnt
21064 * Purpose: This function allocates grant for UEs undergoing (for which RAR
21065 * is being generated) HandOver/PdcchOrder.
21067 * Invoked by: Common Scheduler
21069 * @param[in] RgSchCellCb *cell
21070 * @param[out] CmLListCp *raRspLst,
21071 * @param[in] RgSchUeCb *ue
21072 * @param[in] RgSchRaReqInfo *raReq
/* Builds the RAR (Msg3) grant for a HO/PDCCH-order UE:
 * clears any previous UL allocations, checks the per-subframe alloc
 * limit, requests RG_SCH_MIN_GRNT_HOPO bytes via
 * rgSCHCmnUlRbAllocForPoHoUe, copies the resulting grant fields into
 * ue->ul.rarGrnt (including TA from the RA request), resolves the
 * aperiodic-CQI request bit (budgeted per UL subframe, and dependent on
 * whether secondary cells are configured), and finally queues the UE on
 * the caller's RAR response list. */
21077 PRIVATE Void rgSCHCmnAllocPoHoGrnt
21080 CmLListCp *raRspLst,
21082 RgSchRaReqInfo *raReq
21085 PRIVATE Void rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq)
21087 CmLListCp *raRspLst;
21089 RgSchRaReqInfo *raReq;
21092 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21093 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
21095 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
21098 /* Clearing previous allocs if any*/
21099 rgSCHCmnUlUeDelAllocs(cell, ue);
21100 /* Fix : syed allocs are limited */
21101 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
21105 ueUl->alloc.reqBytes = RG_SCH_MIN_GRNT_HOPO;
21106 if (rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, RGSCH_MAX_UL_RB) != ROK)
21111 /* Fill grant information */
21112 grnt = &ueUl->alloc.alloc->grnt;
21117 RLOG_ARG1(L_ERROR,DBG_INSTID,cell->instIdx, "Failed to get"
21118 "the grant for HO/PDCCH Order. CRNTI:%d",ue->ueId);
21121 ue->ul.rarGrnt.rapId = raReq->raReq.rapId;
21122 ue->ul.rarGrnt.hop = grnt->hop;
21123 ue->ul.rarGrnt.rbStart = grnt->rbStart;
21124 ue->ul.rarGrnt.numRb = grnt->numRb;
21125 ue->ul.rarGrnt.tpc = grnt->tpc;
21126 ue->ul.rarGrnt.iMcsCrnt = grnt->iMcsCrnt;
21127 ue->ul.rarGrnt.ta.pres = TRUE;
21128 ue->ul.rarGrnt.ta.val = raReq->raReq.ta;
21129 ue->ul.rarGrnt.datSz = grnt->datSz;
21130 if((sf->numACqiCount < RG_SCH_MAX_ACQI_PER_ULSF) && (RG_SCH_APCQI_NO != ue->dl.reqForCqi))
21134 /* Send two bits cqireq field if more than one cells are configured else one*/
21135 for (idx = 1;idx < CM_LTE_MAX_CELLS;idx++)
21137 if (ue->cellInfo[idx] != NULLP)
21139 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
21143 if (idx == CM_LTE_MAX_CELLS)
21146 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
21148 ue->dl.reqForCqi = RG_SCH_APCQI_NO;
21149 sf->numACqiCount++;
21153 ue->ul.rarGrnt.cqiReqBit = 0;
21155 /* Attach Ho/Po allocation to RAR Rsp cont free Lst */
21156 cmLListAdd2Tail(raRspLst, &ue->ul.rarGrnt.raRspLnk);
21157 ue->ul.rarGrnt.raRspLnk.node = (PTR)ue;
21163 * @brief This is a utility function to set the fields in
21164 * an UL harq proc which is identified for non-adaptive retx
21168 * Function: rgSCHCmnUlNonadapRetx
21169 * Purpose: Sets the fields in UL Harq proc for non-adaptive retx
21171 * @param[in] RgSchCmnUlCell *cellUl
21172 * @param[out] RgSchUlAlloc *alloc
21173 * @param[in] uint8_t idx
/* Marks an UL HARQ process/alloc for NON-adaptive retransmission:
 * bumps the HARQ retx state, records the retx subframe index, selects
 * iMcsCrnt from the RV-index table (RV 0 keeps the original iMcs), sets
 * the retx flag and detaches the PDCCH (no new DCI for non-adaptive retx). */
21179 PRIVATE Void rgSCHCmnUlNonadapRetx
21181 RgSchCmnUlCell *cellUl,
21182 RgSchUlAlloc *alloc,
21186 PRIVATE Void rgSCHCmnUlNonadapRetx(cellUl, alloc, idx)
21187 RgSchCmnUlCell *cellUl;
21188 RgSchUlAlloc *alloc;
21192 rgSCHUhmRetx(alloc->hqProc, alloc);
21194 /* Update alloc to retx */
21195 alloc->hqProc->isRetx = TRUE;
21196 alloc->hqProc->ulSfIdx = cellUl->reTxIdx[idx];
21198 if (alloc->hqProc->rvIdx != 0)
21200 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[alloc->hqProc->rvIdx];
21204 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
21206 alloc->grnt.isRtx = TRUE;
21207 alloc->pdcch = NULLP;
21211 * @brief Check if 2 allocs overlap
21215 * Function : rgSCHCmnUlAllocsOvrLap
21217 * - Return TRUE if alloc1 and alloc2 overlap.
21219 * @param[in] RgSchUlAlloc *alloc1
21220 * @param[in] RgSchUlAlloc *alloc2
/* TRUE if the two UL allocations share any subband: checks whether the
 * start of either alloc falls inside [sbStart, sbStart+numSb-1] of the
 * other (symmetric interval-overlap test). */
21224 PRIVATE Bool rgSCHCmnUlAllocsOvrLap
21226 RgSchUlAlloc *alloc1,
21227 RgSchUlAlloc *alloc2
21230 PRIVATE Bool rgSCHCmnUlAllocsOvrLap(alloc1, alloc2)
21231 RgSchUlAlloc *alloc1;
21232 RgSchUlAlloc *alloc2;
21237 if (((alloc1->sbStart >= alloc2->sbStart) &&
21238 (alloc1->sbStart <= alloc2->sbStart + alloc2->numSb-1)) ||
21239 ((alloc2->sbStart >= alloc1->sbStart) &&
21240 (alloc2->sbStart <= alloc1->sbStart + alloc1->numSb-1)))
21247 * @brief Copy allocation Info from src to dst.
21251 * Function : rgSCHCmnUlCpyAllocInfo
21253 * - Copy allocation Info from src to dst.
21255 * @param[in] RgSchUlAlloc *srcAlloc
21256 * @param[in] RgSchUlAlloc *dstAlloc
/* Copies one UL allocation's state into another (grant, HARQ process,
 * ue/rnti/forMsg3/raCb/pdcch) and re-points every back-reference at the
 * destination: hqProc->alloc (guarded — SPS occasions may have no HARQ
 * process linked yet), the UE's current alloc, and the UL-SPS current
 * alloc when it referenced the source. */
21260 PRIVATE Void rgSCHCmnUlCpyAllocInfo
21263 RgSchUlAlloc *srcAlloc,
21264 RgSchUlAlloc *dstAlloc
21267 PRIVATE Void rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc)
21269 RgSchUlAlloc *srcAlloc;
21270 RgSchUlAlloc *dstAlloc;
21273 RgSchCmnUlUe *ueUl;
21275 dstAlloc->grnt = srcAlloc->grnt;
21276 dstAlloc->hqProc = srcAlloc->hqProc;
21277 /* Fix : syed During UE context release, hqProc->alloc
21278 * was pointing to srcAlloc instead of dstAlloc and
21279 * freeing from incorrect sf->allocDb was
21280 * corrupting the list. */
21281 /* In case of SPS Occasion Allocation is done in advance and
21282 at a later time Hq Proc is linked. Hence HqProc
21283 pointer in alloc shall be NULL */
21285 if (dstAlloc->hqProc)
21288 dstAlloc->hqProc->alloc = dstAlloc;
21290 dstAlloc->ue = srcAlloc->ue;
21291 dstAlloc->rnti = srcAlloc->rnti;
21292 dstAlloc->forMsg3 = srcAlloc->forMsg3;
21293 dstAlloc->raCb = srcAlloc->raCb;
21294 dstAlloc->pdcch = srcAlloc->pdcch;
21295 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
21298 ueUl = RG_SCH_CMN_GET_UL_UE(dstAlloc->ue,cell);
21299 ueUl->alloc.alloc = dstAlloc;
21301 if (dstAlloc->ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
21303 if((dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc != NULLP)
21304 && (dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc == srcAlloc))
21306 dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc = dstAlloc;
21315 * @brief Update TX and RETX subframe's allocation
21320 * Function : rgSCHCmnUlInsAllocFrmNewSf2OldSf
21322 * - Release all preassigned allocations of newSf and merge
21324 * - If alloc of newSf collide with one or more allocs of oldSf
21325 * - mark all such allocs of oldSf for Adaptive Retx.
21326 * - Swap the alloc and hole DB references of oldSf and newSf.
21328 * @param[in] RgSchCellCb *cell
21329 * @param[in] RgSchUlSf *newSf
21330 * @param[in] RgSchUlSf *oldSf
21331 * @param[in] RgSchUlAlloc *srcAlloc
/* Inserts one alloc (srcAlloc, from the new/Msg3 subframe) into the old
 * (retx) subframe's alloc DB:
 *  1) releases every old alloc that overlaps srcAlloc's subbands, after
 *     marking it for ADAPTIVE retx;
 *  2) if the old subframe is already at its alloc limit, frees one more
 *     retx alloc (new Msg3 TXs take priority over retx);
 *  3) carves the exact subband span out of oldSf, copies srcAlloc's state
 *     into it and flags it mrgdNewTxAlloc so the later non-adaptive-retx
 *     pass skips it. */
21335 PRIVATE Void rgSCHCmnUlInsAllocFrmNewSf2OldSf
21340 RgSchUlAlloc *srcAlloc
21343 PRIVATE Void rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, srcAlloc)
21347 RgSchUlAlloc *srcAlloc;
21350 RgSchUlAlloc *alloc, *dstAlloc, *nxtAlloc;
21352 /* MS_WORKAROUND ccpu00120827 */
21353 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
21356 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
21360 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
21361 /* If there is an overlap between alloc and srcAlloc
21362 * then alloc is marked for Adaptive retx and it is released
21364 if (rgSCHCmnUlAllocsOvrLap(alloc, srcAlloc) == TRUE)
21366 rgSCHCmnUlUpdAllocRetx(cell, alloc);
21367 rgSCHUtlUlAllocRls(oldSf, alloc);
21369 /* No further allocs spanning the srcAlloc subbands */
21370 if (srcAlloc->sbStart + srcAlloc->numSb - 1 <= alloc->sbStart)
21374 } while ((alloc = nxtAlloc) != NULLP);
21377 /* After freeing all the colliding allocs, request for an allocation
21378 * specifying the start and numSb with in txSf. This function should
21379 * always return positively with a nonNULL dstAlloc */
21380 /* MS_WORKAROUND ccpu00120827 */
21381 remAllocs = schCmnCell->ul.maxAllocPerUlSf - *oldSf->allocCountRef;
21384 /* Fix : If oldSf already has max Allocs then release the
21385 * old RETX alloc to make space for new alloc of newSf.
21386 * newSf allocs(i.e new Msg3s) are given higher priority
21387 * over retx allocs. */
21388 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
21392 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
21393 if (!alloc->mrgdNewTxAlloc)
21395 /* If alloc is for RETX */
21396 /* TODO: Incase of this ad also in case of choosing
21397 * and alloc for ADAP RETX, we need to send ACK for
21398 * the corresponding alloc in PHICH */
21399 #ifndef EMTC_ENABLE
21400 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc);
21402 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc,FALSE);
21406 }while((alloc = nxtAlloc) != NULLP);
21409 dstAlloc = rgSCHUtlUlGetSpfcAlloc(oldSf, srcAlloc->sbStart, srcAlloc->numSb);
21411 /* This should never happen */
21412 if (dstAlloc == NULLP)
21414 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"CRNTI:%d "
21415 "rgSCHUtlUlGetSpfcAlloc failed in rgSCHCmnUlInsAllocFrmNewSf2OldSf",
21420 /* Copy the srcAlloc's state information in to dstAlloc */
21421 rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc);
21422 /* Set new Tx merged Alloc Flag to TRUE, indicating that this
21423 * alloc shall not be processed for non-adaptive retransmission */
21424 dstAlloc->mrgdNewTxAlloc = TRUE;
21428 * @brief Merge all allocations of newSf to oldSf.
21432 * Function : rgSCHCmnUlMergeSfAllocs
21434 * - Merge all allocations of newSf to oldSf.
21435 * - If newSf's alloc collides with oldSf's alloc
21436 * then oldSf's alloc is marked for adaptive Retx
21437 * and is released from oldSf to create space for
21440 * @param[in] RgSchCellCb *cell
21441 * @param[in] RgSchUlSf *oldSf
21442 * @param[in] RgSchUlSf *newSf
/* Moves every allocation of newSf into oldSf: each alloc is merged via
 * rgSCHCmnUlInsAllocFrmNewSf2OldSf (which resolves collisions in favour
 * of the new TX) and then released from newSf. */
21446 PRIVATE Void rgSCHCmnUlMergeSfAllocs
21453 PRIVATE Void rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf)
21459 RgSchUlAlloc *alloc, *nxtAlloc;
21462 /* Merge each alloc of newSf in to oldSf
21463 * and release it from newSf */
21464 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
21468 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
21469 rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, alloc);
21470 rgSCHUtlUlAllocRls(newSf, alloc);
21471 } while((alloc = nxtAlloc) != NULLP);
21476 * @brief Swap Hole/Alloc DB context of newSf and oldSf.
21480 * Function : rgSCHCmnUlSwapSfAllocs
21482 * - Swap Hole/Alloc DB context of newSf and oldSf.
21484 * @param[in] RgSchCellCb *cell
21485 * @param[in] RgSchUlSf *oldSf
21486 * @param[in] RgSchUlSf *newSf
/* Swaps the alloc DB, hole DB and available-subband count between the
 * two UL subframes, then re-points each subframe's allocCountRef at its
 * (newly swapped) allocDb count (fix ccpu00120610). */
21490 PRIVATE Void rgSCHCmnUlSwapSfAllocs
21497 PRIVATE Void rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf)
21503 RgSchUlAllocDb *tempAllocDb = newSf->allocDb;
21504 RgSchUlHoleDb *tempHoleDb = newSf->holeDb;
21505 uint8_t tempAvailSbs = newSf->availSubbands;
21509 newSf->allocDb = oldSf->allocDb;
21510 newSf->holeDb = oldSf->holeDb;
21511 newSf->availSubbands = oldSf->availSubbands;
21513 oldSf->allocDb = tempAllocDb;
21514 oldSf->holeDb = tempHoleDb;
21515 oldSf->availSubbands = tempAvailSbs;
21517 /* Fix ccpu00120610*/
21518 newSf->allocCountRef = &newSf->allocDb->count;
21519 oldSf->allocCountRef = &oldSf->allocDb->count;
21523 * @brief Perform non-adaptive RETX for non-colliding allocs.
21527 * Function : rgSCHCmnUlPrcNonAdptRetx
21529 * - Perform non-adaptive RETX for non-colliding allocs.
21531 * @param[in] RgSchCellCb *cell
21532 * @param[in] RgSchUlSf *newSf
21533 * @param[in] uint8_t idx
/* Post-merge pass over newSf's allocations: merged new-TX allocs just
 * get their mrgdNewTxAlloc flag cleared; every other alloc is set up for
 * non-adaptive retransmission via rgSCHCmnUlNonadapRetx. */
21537 PRIVATE Void rgSCHCmnUlPrcNonAdptRetx
21544 PRIVATE Void rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx)
21550 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21551 RgSchUlAlloc *alloc, *nxtAlloc;
21553 /* perform non-adaptive retx allocation(adjustment) */
21554 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
21558 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
21559 /* A merged new TX alloc, reset the state and skip */
21560 if (alloc->mrgdNewTxAlloc)
21562 alloc->mrgdNewTxAlloc = FALSE;
21567 rgSCHCmnUlNonadapRetx(cellUl, alloc, idx);
21569 } while((alloc = nxtAlloc) != NULLP);
21575 * @brief Update TX and RETX subframe's allocation
21580 * Function : rgSCHCmnUlPrfmSfMerge
21582 * - Release all preassigned allocations of newSf and merge
21584 * - If alloc of newSf collide with one or more allocs of oldSf
21585 * - mark all such allocs of oldSf for Adaptive Retx.
21586 * - Swap the alloc and hole DB references of oldSf and newSf.
21587 * - The allocs which did not collide with pre-assigned msg3
21588 * allocs are marked for non-adaptive RETX.
21590 * @param[in] RgSchCellCb *cell
21591 * @param[in] RgSchUlSf *oldSf
21592 * @param[in] RgSchUlSf *newSf
21593 * @param[in] uint8_t idx
/* Orchestrates the TX/RETX subframe merge: merge newSf's pre-assigned
 * (Msg3) allocs into oldSf, swap the two subframes' alloc/hole DBs so
 * newSf holds the merged picture, then run the non-adaptive-retx pass
 * over the non-colliding allocs. */
21597 PRIVATE Void rgSCHCmnUlPrfmSfMerge
21605 PRIVATE Void rgSCHCmnUlPrfmSfMerge(cell, oldSf, newSf, idx)
21612 /* Preassigned resources for msg3 in newSf.
21613 * Hence do adaptive retx for all NACKED TXs */
21614 rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf);
21615 /* swap alloc and hole DBs of oldSf and newSf. */
21616 rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf);
21617 /* Here newSf has the resultant merged allocs context */
21618 /* Perform non-adaptive RETX for non-colliding allocs */
21619 rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx);
21625 * @brief Update TX and RETX subframe's allocation
21630 * Function : rgSCHCmnUlRmvCmpltdAllocs
21632 * - Free all Transmission which are ACKED
21633 * OR for which MAX retransmission have
21637 * @param[in] RgSchCellCb *cell,
21638 * @param[in] RgSchUlSf *sf
/* Free every UL allocation in 'sf' whose HARQ transmission is complete
 * (CRC received, i.e. ACKed, or max retransmissions exhausted); for the
 * remaining UE (non-msg3) allocations, queue an adaptive retransmission
 * via rgSCHCmnUlUpdAllocRetx and release the alloc from the subframe. */
21642 PRIVATE Void rgSCHCmnUlRmvCmpltdAllocs
21648 PRIVATE Void rgSCHCmnUlRmvCmpltdAllocs(cell, sf)
21653 RgSchUlAlloc *alloc, *nxtAlloc;
21655 if ((alloc = rgSCHUtlUlAllocFirst(sf)) == NULLP)
21661 nxtAlloc = rgSCHUtlUlAllocNxt(sf, alloc);
21663 printf("rgSCHCmnUlRmvCmpltdAllocs:time(%d %d) alloc->hqProc->remTx %d hqProcId(%d) \n",cell->crntTime.sfn,cell->crntTime.slot,alloc->hqProc->remTx, alloc->grnt.hqProcId);
/* NOTE(review): rcvdCrcInd is forced TRUE immediately before it is
 * tested below, which makes the first half of the condition always
 * pass. This may be debug code or guarded by a preprocessor block not
 * visible in this line-sampled extract — confirm against full source. */
21665 alloc->hqProc->rcvdCrcInd = TRUE;
21666 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
21669 /* SR_RACH_STATS : MSG 3 MAX RETX FAIL*/
21670 if ((alloc->forMsg3 == TRUE) && (alloc->hqProc->remTx == 0))
21672 rgNumMsg3FailMaxRetx++;
21674 cell->tenbStats->sch.msg3Fail++;
21678 #ifdef MAC_SCH_STATS
21679 if(alloc->ue != NULLP)
21681 /* access from ulHarqProc*/
21682 RgSchUeCb *ueCb = alloc->ue;
21683 RgSchCmnUe *cmnUe = (RgSchCmnUe*)ueCb->sch;
21684 RgSchCmnUlUe *ulUe = &(cmnUe->ul);
21685 uint8_t cqi = ulUe->crntUlCqi[0];
21686 uint16_t numUlRetx = ueCb->ul.hqEnt.maxHqRetx - alloc->hqProc->remTx;
/* Per-CQI HARQ retransmission statistics; totalTx weights each bucket
 * by its retransmission count (1..4). */
21688 hqRetxStats.ulCqiStat[(cqi - 1)].mcs = alloc->grnt.iMcs;
21693 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1++;
21696 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2++;
21699 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3++;
21702 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4++;
21705 hqRetxStats.ulCqiStat[(cqi - 1)].totalTx = \
21706 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1 + \
21707 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2 * 2) + \
21708 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3 * 3) + \
21709 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4 * 4);
21712 #endif /*MAC_SCH_STATS*/
21713 rgSCHCmnUlFreeAllocation(cell, sf, alloc);
21715 /*ccpu00106104 MOD added check for AckNackRep */
21716 /*added check for acknack so that adaptive retx considers ue
21717 inactivity due to ack nack repetition*/
21718 else if((alloc->ue != NULLP) && (TRUE != alloc->forMsg3))
21720 rgSCHCmnUlUpdAllocRetx(cell, alloc);
21721 rgSCHUtlUlAllocRls(sf, alloc);
21723 } while ((alloc = nxtAlloc) != NULLP);
21729 * @brief Update an uplink subframe.
21733 * Function : rgSCHCmnRlsUlSf
21735 * For each allocation
21736 * - if no more tx needed
21737 * - Release allocation
21739 * - Perform retransmission
21741 * @param[in] RgSchUlSf *sf
21742 * @param[in] uint8_t idx
/* Release the UL subframe associated with the HARQ feedback index for
 * 'idx': if that subframe still has allocations, free all completed
 * transmissions (ACKed or max-retx) and reset its aperiodic CQI count. */
21746 Void rgSCHCmnRlsUlSf
21752 Void rgSCHCmnRlsUlSf(cell, idx)
21758 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* RGSCH_INVALID_INFO marks an idx with no pending HARQ feedback. */
21760 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
21762 RgSchUlSf *oldSf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
21764 /* Initialize the reTxLst of UL HqProcs for RETX subframe */
21765 if (rgSCHUtlUlAllocFirst(oldSf) == NULLP)
21769 /* Release all completed TX allocs from sf */
21770 rgSCHCmnUlRmvCmpltdAllocs(cell, oldSf);
21772 oldSf->numACqiCount = 0;
21778 * @brief Handle uplink allocation for retransmission.
21782 * Function : rgSCHCmnUlUpdAllocRetx
21784 * - Perform adaptive retransmission
21786 * @param[in] RgSchUlSf *sf
21787 * @param[in] RgSchUlAlloc *alloc
/* Snapshot the failed allocation's grant parameters into the HARQ
 * process's reTxAlloc context, mark the process as pending adaptive
 * retransmission, and append it to the cell's reTxLst for scheduling
 * in a later opportunity (consumed by rgSCHCmnUlSfReTxAllocs). */
21791 PRIVATE Void rgSCHCmnUlUpdAllocRetx
21794 RgSchUlAlloc *alloc
21797 PRIVATE Void rgSCHCmnUlUpdAllocRetx(cell, alloc)
21799 RgSchUlAlloc *alloc;
21802 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
/* Copy every grant field needed to rebuild the allocation later. */
21805 alloc->hqProc->reTxAlloc.rnti = alloc->rnti;
21806 alloc->hqProc->reTxAlloc.numSb = alloc->numSb;
21807 alloc->hqProc->reTxAlloc.iMcs = alloc->grnt.iMcs;
21809 alloc->hqProc->reTxAlloc.dciFrmt = alloc->grnt.dciFrmt;
21810 alloc->hqProc->reTxAlloc.numLyr = alloc->grnt.numLyr;
21811 alloc->hqProc->reTxAlloc.vrbgStart = alloc->grnt.vrbgStart;
21812 alloc->hqProc->reTxAlloc.numVrbg = alloc->grnt.numVrbg;
21813 alloc->hqProc->reTxAlloc.modOdr = alloc->grnt.modOdr;
21815 //iTbs = rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs);
21816 //iTbs = alloc->grnt.iMcs;
21817 //RGSCH_ARRAY_BOUND_CHECK( 0, rgTbSzTbl[0], iTbs);
21818 alloc->hqProc->reTxAlloc.tbSz = alloc->grnt.datSz;
21819 //rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1]/8;
21820 alloc->hqProc->reTxAlloc.ue = alloc->ue;
21821 alloc->hqProc->reTxAlloc.forMsg3 = alloc->forMsg3;
21822 alloc->hqProc->reTxAlloc.raCb = alloc->raCb;
21824 /* Set as retransmission is pending */
21825 alloc->hqProc->isRetx = TRUE;
21826 alloc->hqProc->alloc = NULLP;
21827 alloc->hqProc->ulSfIdx = RGSCH_INVALID_INFO;
21829 printf("Adding Harq Proc Id in the retx list hqProcId %d \n",alloc->grnt.hqProcId);
21831 cmLListAdd2Tail(&cmnUlCell->reTxLst, &alloc->hqProc->reTxLnk);
21832 alloc->hqProc->reTxLnk.node = (PTR)alloc->hqProc;
21837 * @brief Attempts allocation for msg3s for which ADAP retransmissions
21842 * Function : rgSCHCmnUlAdapRetxAlloc
21844 * Attempts allocation for msg3s for which ADAP retransmissions
21847 * @param[in] RgSchCellCb *cell
21848 * @param[in] RgSchUlSf *sf
21849 * @param[in] RgSchUlHqProcCb *proc;
21850 * @param[in] RgSchUlHole *hole;
/* Attempt an adaptive retransmission allocation for one UL HARQ process:
 * acquire a PDCCH, carve the required subbands out of 'hole', rebuild
 * the UL grant from the saved reTxAlloc context, and fill the DCI
 * format 0 fields. Returns FALSE (presumably; tail not fully visible in
 * this extract) when PDCCH or subband resources are unavailable, so the
 * caller can stop iterating. On failure the PDCCH is returned to the
 * subframe's pool. */
21854 PRIVATE Bool rgSCHCmnUlAdapRetxAlloc
21858 RgSchUlHqProcCb *proc,
21862 PRIVATE Bool rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole)
21865 RgSchUlHqProcCb *proc;
21869 uint8_t numSb = proc->reTxAlloc.numSb;
21870 uint8_t iMcs = proc->reTxAlloc.iMcs;
21871 CmLteTimingInfo frm = cell->crntTime;
21872 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21875 RgSchUlAlloc *alloc;
21877 /* Fetch PDCCH for msg3 */
21878 /* ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
21879 /* Introduced timing delta for UL control */
21880 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
21881 dlSf = rgSCHUtlSubFrmGet(cell, frm);
21882 pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
21883 if (pdcch == NULLP)
21888 /* Fetch UL Alloc for msg3 */
/* Enough contiguous subbands in this hole for the saved grant size? */
21889 if (numSb <= hole->num)
21891 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
/* Allocation failed: give the PDCCH back before bailing out. */
21896 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
21897 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
21898 "UL Alloc fail for msg3 retx for rnti: %d\n",
21899 proc->reTxAlloc.rnti);
21903 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
21904 alloc->grnt.iMcs = iMcs;
21905 alloc->grnt.datSz = proc->reTxAlloc.tbSz;
21908 //RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
21910 /* Fill UL Alloc for msg3 */
21911 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
21912 alloc->grnt.nDmrs = 0;
21913 alloc->grnt.hop = 0;
21914 alloc->grnt.delayBit = 0;
21915 alloc->grnt.isRtx = TRUE;
21916 proc->ulSfIdx = cellUl->schdIdx;
21918 proc->schdTime = cellUl->schdTime;
/* Restore the grant parameters saved by rgSCHCmnUlUpdAllocRetx. */
21919 alloc->grnt.hqProcId = proc->procId;
21920 alloc->grnt.dciFrmt = proc->reTxAlloc.dciFrmt;
21921 alloc->grnt.numLyr = proc->reTxAlloc.numLyr;
21922 alloc->grnt.vrbgStart = proc->reTxAlloc.vrbgStart;
21923 alloc->grnt.numVrbg = proc->reTxAlloc.numVrbg;
21924 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
21925 alloc->grnt.modOdr = proc->reTxAlloc.modOdr;
21927 /* TODO : Hardcoding these as of now */
21928 alloc->grnt.hop = 0;
21929 alloc->grnt.SCID = 0;
21930 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
21931 alloc->grnt.PMI = 0;
21932 alloc->grnt.uciOnxPUSCH = 0;
21934 alloc->rnti = proc->reTxAlloc.rnti;
21935 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
21936 alloc->ue = proc->reTxAlloc.ue;
21937 alloc->pdcch = pdcch;
21938 alloc->forMsg3 = proc->reTxAlloc.forMsg3;
21939 alloc->raCb = proc->reTxAlloc.raCb;
21940 alloc->hqProc = proc;
21941 alloc->isAdaptive = TRUE;
21943 sf->totPrb += alloc->grnt.numRb;
21945 /* FIX : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* msg3 path: record the grant and the time at which the UE will
 * actually transmit MSG3 in the RA control block. */
21948 alloc->raCb->msg3Grnt= alloc->grnt;
21950 /* To the crntTime, add the time at which UE will
21951 * actually send MSG3 */
21952 alloc->raCb->msg3AllocTime = cell->crntTime;
21953 RGSCH_INCR_SUB_FRAME(alloc->raCb->msg3AllocTime, RG_SCH_CMN_MIN_RETXMSG3_RECP_INTRVL);
21955 alloc->raCb->msg3AllocTime = cellUl->schdTime;
21957 rgSCHCmnUlAdapRetx(alloc, proc);
21958 /* Fill PDCCH with alloc info */
21959 pdcch->rnti = alloc->rnti;
21960 pdcch->dci.dciFormat = TFU_DCI_FORMAT_0;
21961 pdcch->dci.u.format0Info.hoppingEnbld = alloc->grnt.hop;
21962 pdcch->dci.u.format0Info.rbStart = alloc->grnt.rbStart;
21963 pdcch->dci.u.format0Info.numRb = alloc->grnt.numRb;
21964 pdcch->dci.u.format0Info.mcs = alloc->grnt.iMcsCrnt;
21965 pdcch->dci.u.format0Info.ndi = alloc->hqProc->ndi;
21966 pdcch->dci.u.format0Info.nDmrs = alloc->grnt.nDmrs;
21967 pdcch->dci.u.format0Info.tpcCmd = alloc->grnt.tpc;
21971 /* ulIdx setting for cfg 0 shall be appropriately fixed thru ccpu00109015 */
21972 pdcch->dci.u.format0Info.ulIdx = RG_SCH_ULIDX_MSB;
21973 pdcch->dci.u.format0Info.dai = RG_SCH_MAX_DAI_IDX;
21976 pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_0];
/* UE (non-msg3) path: publish the allocation into the UE's UL
 * scheduling context so it is picked up at TX time. */
21980 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue,cell);
21982 alloc->ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
21985 ue->ul.nPrb = alloc->grnt.numRb;
21987 ueUl->alloc.alloc = alloc;
21988 /* FIx: Removed the call to rgSCHCmnUlAdapRetx */
21989 rgSCHCmnUlUeFillAllocInfo(cell, alloc->ue);
21990 /* Setting csireq as false for Adaptive Retx*/
21991 ueUl->alloc.alloc->pdcch->dci.u.format0Info.cqiReq = RG_SCH_APCQI_NO;
21992 pdcch->dciNumOfBits = alloc->ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
21994 /* Reset as retransmission is done */
21995 proc->isRetx = FALSE;
21997 else /* Intg fix */
21999 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
22000 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
22001 "Num SB not suffiecient for adap retx for rnti: %d",
22002 proc->reTxAlloc.rnti);
22008 /* Fix: syed Adaptive Msg3 Retx crash. */
22010 * @brief Releases all Adaptive Retx HqProcs which failed for
22011 * allocations in this scheduling occasion.
22015 * Function : rgSCHCmnUlSfRlsRetxProcs
22018 * @param[in] RgSchCellCb *cell
22019 * @param[in] RgSchUlSf *sf
/* Drain the cell's adaptive-retx list: detach every queued HARQ process
 * from reTxLst and clear its link, releasing processes whose adaptive
 * retransmission could not be allocated this scheduling occasion. */
22024 PRIVATE Void rgSCHCmnUlSfRlsRetxProcs
22030 PRIVATE Void rgSCHCmnUlSfRlsRetxProcs(cell, sf)
22037 RgSchUlHqProcCb *proc;
22038 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
22041 cp = &(cellUl->reTxLst);
22045 proc = (RgSchUlHqProcCb *)node->node;
22047 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
22048 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
/* Null the link so a later delete cannot corrupt the list. */
22049 proc->reTxLnk.node = (PTR)NULLP;
22056 * @brief Attempts allocation for UEs for which retransmissions
22061 * Function : rgSCHCmnUlSfReTxAllocs
22063 * Attempts allocation for UEs for which retransmissions
22066 * @param[in] RgSchCellCb *cell
22067 * @param[in] RgSchUlSf *sf
/* Iterate the cell's pending adaptive-retx HARQ processes and try to
 * allocate each one in subframe 'sf' via rgSCHCmnUlAdapRetxAlloc.
 * Skips UEs that are measuring or in ACK/NACK repetition; stops when
 * no hole remains or the per-subframe allocation cap is reached. */
22071 PRIVATE Void rgSCHCmnUlSfReTxAllocs
22077 PRIVATE Void rgSCHCmnUlSfReTxAllocs(cell, sf)
22084 RgSchUlHqProcCb *proc;
22087 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
22088 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
22090 cp = &(cellUl->reTxLst);
22094 proc = (RgSchUlHqProcCb *)node->node;
22095 ue = proc->reTxAlloc.ue;
22097 /*ccpu00106104 MOD added check for AckNackRep */
22098 /*added check for acknack so that adaptive retx considers ue
22099 inactivity due to ack nack repetition*/
22100 if((ue != NULLP) &&
22101 ((ue->measGapCb.isMeasuring == TRUE)||
22102 (ue->ackNakRepCb.isAckNakRep == TRUE)))
22106 /* Fix for ccpu00123917: Check if maximum allocs per UL sf have been exhausted */
22107 if (((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
22108 || (sf->allocDb->count == schCmnCell->ul.maxAllocPerUlSf))
22110 /* No more UL BW then return */
22113 /* perform adaptive retx for UE's */
22114 if (rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole) == FALSE)
/* Allocation succeeded: remove the process from the pending list. */
22118 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
22119 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
22120 /* Fix: syed Adaptive Msg3 Retx crash. */
22121 proc->reTxLnk.node = (PTR)NULLP;
22127 * @brief Handles RB allocation for downlink.
22131 * Function : rgSCHCmnDlRbAlloc
22133 * Invoking Module Processing:
22134 * - This function is invoked for DL RB allocation
22136 * Processing Steps:
22137 * - If cell is frequency selecive,
22138 * - Call rgSCHDlfsAllocRb().
22140 * - Call rgSCHCmnNonDlfsRbAlloc().
22142 * @param[in] RgSchCellCb *cell
22143 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* Dispatch DL RB allocation: frequency-selective cells go through the
 * DLFS scheduler API hook; all others use the common non-DLFS path. */
22148 PRIVATE Void rgSCHCmnDlRbAlloc
22151 RgSchCmnDlRbAllocInfo *allocInfo
22154 PRIVATE Void rgSCHCmnDlRbAlloc(cell, allocInfo)
22156 RgSchCmnDlRbAllocInfo *allocInfo;
22159 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
22161 if (cellSch->dl.isDlFreqSel)
22163 printf("5GTF_ERROR DLFS SCH Enabled\n");
22164 cellSch->apisDlfs->rgSCHDlfsAllocRb(cell, allocInfo);
22168 rgSCHCmnNonDlfsRbAlloc(cell, allocInfo);
22176 * @brief Determines number of RBGs and RBG subset sizes for the given DL
22177 * bandwidth and rbgSize
22180 * Function : rgSCHCmnDlGetRbgInfo
22183 * Processing Steps:
22184 * - Fill-up rbgInfo data structure for given DL bandwidth and rbgSize
22186 * @param[in] uint8_t dlTotalBw
22187 * @param[in] uint8_t dlSubsetBw
22188 * @param[in] uint8_t maxRaType1SubsetBw
22189 * @param[in] uint8_t rbgSize
22190 * @param[out] RgSchBwRbgInfo *rbgInfo
/* Populate 'rbgInfo' (RBG count, last-RBG size, usable RB count, RBG
 * size) for the given DL bandwidth and rbgSize; when RGSCH_SPS_UNUSED
 * is enabled it also distributes RA type 1 subset sizes. */
22194 Void rgSCHCmnDlGetRbgInfo
22197 uint8_t dlSubsetBw,
22198 uint8_t maxRaType1SubsetBw,
22200 RgSchBwRbgInfo *rbgInfo
22203 Void rgSCHCmnDlGetRbgInfo(dlTotalBw, dlSubsetBw, maxRaType1SubsetBw,
22206 uint8_t dlSubsetBw;
22207 uint8_t maxRaType1SubsetBw;
22209 RgSchBwRbgInfo *rbgInfo;
22212 #ifdef RGSCH_SPS_UNUSED
/* lastRbgSize accounts for a final RBG that is smaller than rbgSize
 * when the bandwidth is not an exact multiple of rbgSize. */
22214 uint8_t lastRbgIdx = ((dlTotalBw + rbgSize - 1)/rbgSize) - 1;
22215 uint8_t currRbgSize = rbgSize;
22216 uint8_t subsetSizeIdx = 0;
22217 uint8_t subsetSize[RG_SCH_NUM_RATYPE1_SUBSETS] = {0};
22218 uint8_t lastRbgSize = rbgSize - (dlTotalBw - ((dlTotalBw/rbgSize) * rbgSize));
22219 uint8_t numRaType1Rbgs = (maxRaType1SubsetBw + rbgSize - 1)/rbgSize;
22222 /* Compute maximum number of SPS RBGs for the cell */
22223 rbgInfo->numRbgs = ((dlSubsetBw + rbgSize - 1)/rbgSize);
22225 #ifdef RGSCH_SPS_UNUSED
22226 /* Distribute RBGs across subsets except last RBG */
22227 for (;idx < numRaType1Rbgs - 1; ++idx)
22229 subsetSize[subsetSizeIdx] += currRbgSize;
22230 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
22233 /* Computation for last RBG */
22234 if (idx == lastRbgIdx)
22236 currRbgSize = lastRbgSize;
22238 subsetSize[subsetSizeIdx] += currRbgSize;
22239 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
22242 /* Update the computed sizes */
22243 #ifdef RGSCH_SPS_UNUSED
22244 rbgInfo->lastRbgSize = currRbgSize;
22246 rbgInfo->lastRbgSize = rbgSize -
22247 (dlSubsetBw - ((dlSubsetBw/rbgSize) * rbgSize));
22248 #ifdef RGSCH_SPS_UNUSED
22249 memcpy(rbgInfo->rbgSubsetSize, subsetSize, 4 * sizeof(uint8_t));
/* numRbs is clamped so numRbgs * rbgSize never exceeds dlTotalBw. */
22251 rbgInfo->numRbs = (rbgInfo->numRbgs * rbgSize > dlTotalBw) ?
22252 dlTotalBw:(rbgInfo->numRbgs * rbgSize);
22253 rbgInfo->rbgSize = rbgSize;
22257 * @brief Handles RB allocation for Resource allocation type 0
22261 * Function : rgSCHCmnDlRaType0Alloc
22263 * Invoking Module Processing:
22264 * - This function is invoked for DL RB allocation for resource allocation
22267 * Processing Steps:
22268 * - Determine the available positions in the rbgMask.
22269 * - Allocate RBGs in the available positions.
22270 * - Update RA Type 0, RA Type 1 and RA type 2 masks.
22272 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22273 * @param[in] uint8_t rbsReq
22274 * @param[in] RgSchBwRbgInfo *rbgInfo
22275 * @param[out] uint8_t *numAllocRbs
22276 * @param[out] RgSchDlSfAllocInfo *resAllocInfo
22277 * @param[in] Bool isPartialAlloc
/* DL resource allocation type 0: allocate whole RBGs from the free
 * positions of the RA-type-0 bitmask until rbsReq (or, with
 * isPartialAlloc, as many as available) RBs are granted. Updates the
 * RA type 0 mask, the RA type 2 mask per allocated RB, and (under
 * RGSCH_SPS_UNUSED) the RA type 1 masks. Returns the number of RBGs
 * allocated; *numAllocRbs receives the RB count. The last RBG is
 * handled separately because it may be smaller than rbgSize. */
22283 uint8_t rgSCHCmnDlRaType0Alloc
22285 RgSchDlSfAllocInfo *allocedInfo,
22287 RgSchBwRbgInfo *rbgInfo,
22288 uint8_t *numAllocRbs,
22289 RgSchDlSfAllocInfo *resAllocInfo,
22290 Bool isPartialAlloc
22293 uint8_t rgSCHCmnDlRaType0Alloc(allocedInfo, rbsReq, rbgInfo,
22294 numAllocRbs, resAllocInfo, isPartialAlloc)
22295 RgSchDlSfAllocInfo *allocedInfo;
22297 RgSchBwRbgInfo *rbgInfo;
22298 uint8_t *numAllocRbs;
22299 RgSchDlSfAllocInfo *resAllocInfo;
22300 Bool isPartialAlloc;
22303 /* Note: This function atttempts allocation only full allocation */
22304 uint32_t remNumRbs, rbgPosInRbgMask, ueRaType2Mask;
22305 uint8_t type2MaskIdx, cnt, rbIdx;
22306 uint8_t maskSize, rbg;
22307 uint8_t bestNumAvailRbs = 0;
22308 uint8_t usedRbs = 0;
22309 uint8_t numAllocRbgs = 0;
22310 uint8_t rbgSize = rbgInfo->rbgSize;
22311 uint32_t *rbgMask = &(resAllocInfo->raType0Mask);
22312 #ifdef RGSCH_SPS_UNUSED
22314 uint32_t ueRaType1Mask;
22315 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
22316 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22318 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
22320 uint32_t allocedMask = allocedInfo->raType0Mask;
22322 maskSize = rbgInfo->numRbgs;
/* Count already-used RBGs to derive how many RBs remain free. */
22325 RG_SCH_CMN_DL_COUNT_ONES(allocedMask, maskSize, &usedRbs);
22326 if (maskSize == usedRbs)
22328 /* All RBGs are allocated, including the last one */
/* Free RBs in all but the (possibly short) last RBG. */
22333 remNumRbs = (maskSize - usedRbs - 1) * rbgSize; /* vamsee: removed minus 1 */
22335 /* If last RBG is available, add last RBG size */
22336 if (!(allocedMask & (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(maskSize - 1))))
22338 remNumRbs += rbgInfo->lastRbgSize;
22342 /* If complete allocation is needed, check if total requested RBs are available else
22343 * check the best available RBs */
22344 if (!isPartialAlloc)
22346 if (remNumRbs >= rbsReq)
22348 bestNumAvailRbs = rbsReq;
22353 bestNumAvailRbs = remNumRbs > rbsReq ? rbsReq : remNumRbs;
22356 /* Allocate for bestNumAvailRbs */
22357 if (bestNumAvailRbs)
/* Scan full-size RBGs (all but the last) for free positions. */
22359 for (rbg = 0; rbg < maskSize - 1; ++rbg)
22361 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22362 if (!(allocedMask & rbgPosInRbgMask))
22364 /* Update RBG mask */
22365 *rbgMask |= rbgPosInRbgMask;
22367 /* Compute RB index of the first RB of the RBG allocated */
22368 rbIdx = rbg * rbgSize;
22370 for (cnt = 0; cnt < rbgSize; ++cnt)
22372 #ifdef RGSCH_SPS_UNUSED
22373 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22375 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22376 #ifdef RGSCH_SPS_UNUSED
22377 /* Update RBG mask for RA type 1 */
22378 raType1Mask[rbgSubset] |= ueRaType1Mask;
22379 raType1UsedRbs[rbgSubset]++;
22381 /* Update RA type 2 mask */
22382 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22385 *numAllocRbs += rbgSize;
22386 remNumRbs -= rbgSize;
22388 if (*numAllocRbs >= bestNumAvailRbs)
22394 /* If last RBG available and allocation is not completed, allocate
22396 if (*numAllocRbs < bestNumAvailRbs)
22398 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22399 *rbgMask |= rbgPosInRbgMask;
22400 *numAllocRbs += rbgInfo->lastRbgSize;
22402 /* Compute RB index of the first RB of the last RBG */
22403 rbIdx = ((rbgInfo->numRbgs - 1 ) * rbgSize ); /* removed minus 1 vamsee */
22405 for (cnt = 0; cnt < rbgInfo->lastRbgSize; ++cnt)
22407 #ifdef RGSCH_SPS_UNUSED
22408 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22410 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22411 #ifdef RGSCH_SPS_UNUSED
22412 /* Update RBG mask for RA type 1 */
22413 raType1Mask[rbgSubset] |= ueRaType1Mask;
22414 raType1UsedRbs[rbgSubset]++;
22416 /* Update RA type 2 mask */
22417 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22420 remNumRbs -= rbgInfo->lastRbgSize;
22423 /* Note: this should complete allocation, not checking for the
22427 return (numAllocRbgs);
22430 #ifdef RGSCH_SPS_UNUSED
22432 * @brief Handles RB allocation for Resource allocation type 1
22436 * Function : rgSCHCmnDlRaType1Alloc
22438 * Invoking Module Processing:
22439 * - This function is invoked for DL RB allocation for resource allocation
22442 * Processing Steps:
22443 * - Determine the available positions in the subsets.
22444 * - Allocate RB in the available subset.
22445 * - Update RA Type1, RA type 0 and RA type 2 masks.
22447 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22448 * @param[in] uint8_t rbsReq
22449 * @param[in] RgSchBwRbgInfo *rbgInfo
22450 * @param[in] uint8_t startRbgSubset
22451 * @param[in] uint8_t *allocRbgSubset
22452 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
22453 * @param[in] Bool isPartialAlloc
22456 * Number of allocated RBs
/* DL resource allocation type 1: starting at startRbgSubset, find a
 * subset with enough free RBs (exactly rbsReq, or the best available
 * when isPartialAlloc), then set the chosen RB positions in the RA
 * type 1 mask while mirroring each RB into the RA type 0 and type 2
 * masks. Returns the number of RBs allocated; *allocRbgSubset receives
 * the subset used. Compiled only under RGSCH_SPS_UNUSED. */
22460 uint8_t rgSCHCmnDlRaType1Alloc
22462 RgSchDlSfAllocInfo *allocedInfo,
22464 RgSchBwRbgInfo *rbgInfo,
22465 uint8_t startRbgSubset,
22466 uint8_t *allocRbgSubset,
22467 RgSchDlSfAllocInfo *resAllocInfo,
22468 Bool isPartialAlloc
22471 uint8_t rgSCHCmnDlRaType1Alloc(allocedInfo, rbsReq,rbgInfo,startRbgSubset,
22472 allocRbgSubset, resAllocInfo, isPartialAlloc)
22473 RgSchDlSfAllocInfo *allocedInfo;
22475 RgSchBwRbgInfo *rbgInfo;
22476 uint8_t startRbgSubset;
22477 uint8_t *allocRbgSubset;
22478 RgSchDlSfAllocInfo *resAllocInfo;
22479 Bool isPartialAlloc;
22482 /* Note: This function atttempts only full allocation */
22483 uint8_t *rbgSubsetSzArr;
22484 uint8_t type2MaskIdx, subsetIdx, rbIdx, rbInSubset, rbgInSubset;
22485 uint8_t offset, rbg, maskSize, bestSubsetIdx;
22486 uint8_t startPos = 0;
22487 uint8_t bestNumAvailRbs = 0;
22488 uint8_t numAllocRbs = 0;
22489 uint32_t ueRaType2Mask, ueRaType0Mask, rbPosInSubset;
22490 uint32_t remNumRbs, allocedMask;
22491 uint8_t usedRbs = 0;
22492 uint8_t rbgSize = rbgInfo->rbgSize;
22493 uint8_t rbgSubset = startRbgSubset;
22494 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
22495 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
22496 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
22497 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22498 uint32_t *allocMask = allocedInfo->raType1Mask;
22500 /* Initialize the subset size Array */
22501 rbgSubsetSzArr = rbgInfo->rbgSubsetSize;
22503 /* Perform allocation for RA type 1 */
/* Round-robin over all rbgSize subsets beginning at startRbgSubset,
 * looking for the first subset that fully satisfies rbsReq (or the
 * largest partial fit when isPartialAlloc). */
22504 for (subsetIdx = 0;subsetIdx < rbgSize; ++subsetIdx)
22506 allocedMask = allocMask[rbgSubset];
22507 maskSize = rbgSubsetSzArr[rbgSubset];
22509 /* Determine number of available RBs in the subset */
22510 usedRbs = allocedInfo->raType1UsedRbs[subsetIdx];
22511 remNumRbs = maskSize - usedRbs;
22513 if (remNumRbs >= rbsReq)
22515 bestNumAvailRbs = rbsReq;
22516 bestSubsetIdx = rbgSubset;
22519 else if (isPartialAlloc && (remNumRbs > bestNumAvailRbs))
22521 bestNumAvailRbs = remNumRbs;
22522 bestSubsetIdx = rbgSubset;
22525 rbgSubset = (rbgSubset + 1) % rbgSize;
22526 } /* End of for (each rbgsubset) */
22528 if (bestNumAvailRbs)
22530 /* Initialize alloced mask and subsetSize depending on the RBG
22531 * subset of allocation */
22532 uint8_t startIdx = 0;
22533 maskSize = rbgSubsetSzArr[bestSubsetIdx];
22534 allocedMask = allocMask[bestSubsetIdx];
22535 RG_SCH_CMN_DL_GET_START_POS(allocedMask, maskSize,
22537 for (; startIdx < rbgSize; ++startIdx, ++startPos)
22539 for (rbInSubset = startPos; rbInSubset < maskSize;
22540 rbInSubset = rbInSubset + rbgSize)
22542 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
22543 if (!(allocedMask & rbPosInSubset))
22545 raType1Mask[bestSubsetIdx] |= rbPosInSubset;
22546 raType1UsedRbs[bestSubsetIdx]++;
22548 /* Compute RB index value for the RB being allocated */
22549 rbgInSubset = rbInSubset /rbgSize;
22550 offset = rbInSubset % rbgSize;
22551 rbg = (rbgInSubset * rbgSize) + bestSubsetIdx;
22552 rbIdx = (rbg * rbgSize) + offset;
22554 /* Update RBG mask for RA type 0 allocation */
22555 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
22556 *rbgMask |= ueRaType0Mask;
22558 /* Update RA type 2 mask */
22559 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22560 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22562 /* Update the counters */
22565 if (numAllocRbs == bestNumAvailRbs)
22570 } /* End of for (each position in the subset mask) */
22571 if (numAllocRbs == bestNumAvailRbs)
22575 } /* End of for startIdx = 0 to rbgSize */
22577 *allocRbgSubset = bestSubsetIdx;
22578 } /* End of if (bestNumAvailRbs) */
22580 return (numAllocRbs);
22584 * @brief Handles RB allocation for Resource allocation type 2
22588 * Function : rgSCHCmnDlRaType2Alloc
22590 * Invoking Module Processing:
22591 * - This function is invoked for DL RB allocation for resource allocation
22594 * Processing Steps:
22595 * - Determine the available positions in the mask
22596 * - Allocate best-fit consecutive RBs.
22597 * - Update RA Type2, RA type 1 and RA type 0 masks.
22599 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22600 * @param[in] uint8_t rbsReq
22601 * @param[in] RgSchBwRbgInfo *rbgInfo
22602 * @param[out] uint8_t *rbStart
22603 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
22604 * @param[in] Bool isPartialAlloc
22607 * Number of allocated RBs
/* DL resource allocation type 2 (contiguous RBs): delegate the search
 * for a best-fit hole to rgSCHCmnDlGetBestFitHole, which fills the RA
 * type 2 mask and reports rbStart/numAllocRbs; then mirror each
 * allocated RB into the RA type 0 (and, under RGSCH_SPS_UNUSED, RA
 * type 1) masks. Returns the number of RBs allocated. */
22611 uint8_t rgSCHCmnDlRaType2Alloc
22613 RgSchDlSfAllocInfo *allocedInfo,
22615 RgSchBwRbgInfo *rbgInfo,
22617 RgSchDlSfAllocInfo *resAllocInfo,
22618 Bool isPartialAlloc
22621 uint8_t rgSCHCmnDlRaType2Alloc(allocedInfo, rbsReq, rbgInfo, rbStart,
22622 resAllocInfo, isPartialAlloc)
22623 RgSchDlSfAllocInfo *allocedInfo;
22625 RgSchBwRbgInfo *rbgInfo;
22627 RgSchDlSfAllocInfo *resAllocInfo;
22628 Bool isPartialAlloc;
22631 uint8_t numAllocRbs = 0;
22633 uint8_t rbgSize = rbgInfo->rbgSize;
22634 uint32_t *rbgMask = &resAllocInfo->raType0Mask;
22635 #ifdef RGSCH_SPS_UNUSED
22636 uint32_t *raType1Mask = resAllocInfo->raType1Mask;
22638 uint32_t *raType2Mask = resAllocInfo->raType2Mask;
22639 #ifdef RGSCH_SPS_UNUSED
22640 uint32_t *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22642 uint32_t *allocedMask = allocedInfo->raType2Mask;
22644 /* Note: This function atttempts only full allocation */
22645 rgSCHCmnDlGetBestFitHole(allocedMask, rbgInfo->numRbs,
22646 raType2Mask, rbsReq, rbStart, &numAllocRbs, isPartialAlloc);
22649 /* Update the allocation in RA type 0 and RA type 1 masks */
22650 uint8_t rbCnt = numAllocRbs;
22651 #ifdef RGSCH_SPS_UNUSED
22653 uint32_t ueRaType1Mask;
22655 uint32_t ueRaType0Mask;
22660 /* Update RBG mask for RA type 0 allocation */
22661 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
22662 *rbgMask |= ueRaType0Mask;
22664 #ifdef RGSCH_SPS_UNUSED
22665 /* Update RBG mask for RA type 1 */
22666 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22667 raType1Mask[rbgSubset] |= ueRaType1Mask;
22668 raType1UsedRbs[rbgSubset]++;
22670 /* Update the counters */
22676 return (numAllocRbs);
22680 * @brief Determines RA type 0 mask from given RB index.
22684 * Function : rgSCHCmnGetRaType0Mask
22687 * Processing Steps:
22688 * - Determine RA Type 0 mask for given rbIdex and rbg size.
22690 * @param[in] uint8_t rbIdx
22691 * @param[in] uint8_t rbgSize
22692 * @return uint32_t RA type 0 mask
/* Map an RB index to its RA type 0 bitmask: the single bit for the RBG
 * (rbIdx / rbgSize) that contains the RB. */
22695 PRIVATE uint32_t rgSCHCmnGetRaType0Mask
22701 PRIVATE uint32_t rgSCHCmnGetRaType0Mask(rbIdx, rbgSize)
22707 uint32_t rbgPosInRbgMask = 0;
22709 rbg = rbIdx/rbgSize;
22710 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22712 return (rbgPosInRbgMask);
22715 #ifdef RGSCH_SPS_UNUSED
22717 * @brief Determines RA type 1 mask from given RB index.
22721 * Function : rgSCHCmnGetRaType1Mask
22724 * Processing Steps:
22725 * - Determine RA Type 1 mask for given rbIdex and rbg size.
22727 * @param[in] uint8_t rbIdx
22728 * @param[in] uint8_t rbgSize
22729 * @param[out] uint8_t *type1Subset
22730 * @return uint32_t RA type 1 mask
/* Map an RB index to its RA type 1 position: returns the single-bit
 * mask for the RB's position within its subset and writes the subset
 * number (rbg % rbgSize) through *type1Subset. */
22733 PRIVATE uint32_t rgSCHCmnGetRaType1Mask
22737 uint8_t *type1Subset
22740 PRIVATE uint32_t rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, type1Subset)
22743 uint8_t *type1Subset;
22746 uint8_t rbg, rbgSubset, rbgInSubset, offset, rbInSubset;
22747 uint32_t rbPosInSubset;
/* Decompose rbIdx into (RBG, subset, position-in-subset). */
22749 rbg = rbIdx/rbgSize;
22750 rbgSubset = rbg % rbgSize;
22751 rbgInSubset = rbg/rbgSize;
22752 offset = rbIdx % rbgSize;
22753 rbInSubset = rbgInSubset * rbgSize + offset;
22754 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
22756 *type1Subset = rbgSubset;
22757 return (rbPosInSubset);
22759 #endif /* RGSCH_SPS_UNUSED */
22761 * @brief Determines RA type 2 mask from given RB index.
22765 * Function : rgSCHCmnGetRaType2Mask
22768 * Processing Steps:
22769 * - Determine RA Type 2 mask for given rbIdx and rbg size.
22771 * @param[in] uint8_t rbIdx
22772 * @param[out] uint8_t *maskIdx
22773 * @return uint32_t RA type 2 mask
/* Map an RB index to its RA type 2 mask: writes the 32-bit word index
 * (rbIdx / 32) through *maskIdx and returns the bit for rbIdx % 32. */
22776 PRIVATE uint32_t rgSCHCmnGetRaType2Mask
22782 PRIVATE uint32_t rgSCHCmnGetRaType2Mask(rbIdx, maskIdx)
22787 uint32_t rbPosInType2;
22789 *maskIdx = rbIdx / 32;
22790 rbPosInType2 = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbIdx % 32);
22792 return (rbPosInType2);
22796 * @brief Performs resource allocation for a non-SPS UE in SPS bandwidth
22800 * Function : rgSCHCmnAllocUeInSpsBw
22803 * Processing Steps:
22804 * - Determine allocation for the UE.
22805 * - Use resource allocation type 0, 1 and 2 for allocation
22806 * within maximum SPS bandwidth.
22808 * @param[in] RgSchDlSf *dlSf
22809 * @param[in] RgSchCellCb *cell
22810 * @param[in] RgSchUeCb *ue
22811 * @param[in] RgSchDlRbAlloc *rbAllocInfo
22812 * @param[in] Bool isPartialAlloc
/* Allocate DL resources for a non-SPS UE inside the cell's reserved SPS
 * bandwidth, using RA type 0/1/2 depending on rbAllocInfo->raType.
 * Bails out when neither TB is scheduled, when SPS BW is exhausted, or
 * (for full allocation) when the request does not fit. On success it
 * acquires a PDCCH, computes per-TB byte allocations from rgTbSzTbl,
 * records the allocation details in rbAllocInfo, and merges the RA
 * masks into the subframe's allocation info. Return value semantics
 * (TRUE/FALSE) are only partially visible in this line-sampled extract. */
22818 Bool rgSCHCmnAllocUeInSpsBw
22823 RgSchDlRbAlloc *rbAllocInfo,
22824 Bool isPartialAlloc
22827 Bool rgSCHCmnAllocUeInSpsBw(dlSf, cell, ue, rbAllocInfo, isPartialAlloc)
22831 RgSchDlRbAlloc *rbAllocInfo;
22832 Bool isPartialAlloc;
22835 uint8_t rbgSize = cell->rbgSize;
22836 uint8_t numAllocRbs = 0;
22837 uint8_t numAllocRbgs = 0;
22838 uint8_t rbStart = 0;
22839 uint8_t idx, noLyr, iTbs;
22840 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22841 RgSchDlSfAllocInfo *dlSfAlloc = &rbAllocInfo->dlSf->dlSfAllocInfo;
22842 RgSchBwRbgInfo *spsRbgInfo = &cell->spsBwRbgInfo;
22844 /* SPS_FIX : Check if this Hq proc is scheduled */
22845 if ((0 == rbAllocInfo->tbInfo[0].schdlngForTb) &&
22846 (0 == rbAllocInfo->tbInfo[1].schdlngForTb))
22851 /* Check if the requirement can be accomodated in SPS BW */
22852 if (dlSf->spsAllocdBw == spsRbgInfo->numRbs)
22854 /* SPS Bandwidth has been exhausted: no further allocations possible */
22857 if (!isPartialAlloc)
22859 if((dlSf->spsAllocdBw + rbAllocInfo->rbsReq) > spsRbgInfo->numRbs)
22865 /* Perform allocation for RA type 0 if rbsReq is multiple of RBG size (also
22866 * if RBG size = 1) */
22867 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Round the request up to a whole number of RBGs for type 0. */
22869 rbAllocInfo->rbsReq += (rbgSize - rbAllocInfo->rbsReq % rbgSize);
22870 numAllocRbgs = rgSCHCmnDlRaType0Alloc(dlSfAlloc,
22871 rbAllocInfo->rbsReq, spsRbgInfo, &numAllocRbs,
22872 &rbAllocInfo->resAllocInfo, isPartialAlloc);
22874 #ifdef RGSCH_SPS_UNUSED
22875 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
22877 /* If no RBS could be allocated, attempt RA TYPE 1 */
22879 numAllocRbs = rgSCHCmnDlRaType1Alloc(dlSfAlloc,
22880 rbAllocInfo->rbsReq, spsRbgInfo, (uint8_t)dlSfAlloc->nxtRbgSubset,
22881 &rbAllocInfo->allocInfo.raType1.rbgSubset,
22882 &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* Advance the round-robin starting subset for the next type 1 alloc. */
22886 dlSfAlloc->nxtRbgSubset =
22887 (rbAllocInfo->allocInfo.raType1.rbgSubset + 1 ) % rbgSize;
22891 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22893 numAllocRbs = rgSCHCmnDlRaType2Alloc(dlSfAlloc,
22894 rbAllocInfo->rbsReq, spsRbgInfo,
22895 &rbStart, &rbAllocInfo->resAllocInfo, isPartialAlloc);
22902 if (!(rbAllocInfo->pdcch =
22903 rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi,\
22904 rbAllocInfo->dciFormat, FALSE)))
22906 /* Note: Returning TRUE since PDCCH might be available for another UE */
22910 /* Update Tb info for each scheduled TB */
/* rgTbSzTbl yields bits for (layers, iTbs, numRbs); divide by 8 for bytes. */
22911 iTbs = rbAllocInfo->tbInfo[0].iTbs;
22912 noLyr = rbAllocInfo->tbInfo[0].noLyr;
22913 rbAllocInfo->tbInfo[0].bytesAlloc =
22914 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
22916 if (rbAllocInfo->tbInfo[1].schdlngForTb)
22918 iTbs = rbAllocInfo->tbInfo[1].iTbs;
22919 noLyr = rbAllocInfo->tbInfo[1].noLyr;
22920 rbAllocInfo->tbInfo[1].bytesAlloc =
22921 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
22924 /* Update rbAllocInfo with the allocation information */
22925 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
22927 rbAllocInfo->allocInfo.raType0.dlAllocBitMask =
22928 rbAllocInfo->resAllocInfo.raType0Mask;
22929 rbAllocInfo->allocInfo.raType0.numDlAlloc = numAllocRbgs;
22931 #ifdef RGSCH_SPS_UNUSED
22932 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
22934 rbAllocInfo->allocInfo.raType1.dlAllocBitMask =
22935 rbAllocInfo->resAllocInfo.raType1Mask[rbAllocInfo->allocInfo.raType1.rbgSubset];
22936 rbAllocInfo->allocInfo.raType1.numDlAlloc = numAllocRbs;
22937 rbAllocInfo->allocInfo.raType1.shift = 0;
22940 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22942 rbAllocInfo->allocInfo.raType2.isLocal = TRUE;
22943 rbAllocInfo->allocInfo.raType2.rbStart = rbStart;
22944 rbAllocInfo->allocInfo.raType2.numRb = numAllocRbs;
22947 rbAllocInfo->rbsAlloc = numAllocRbs;
22948 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
22950 /* Update allocation masks for RA types 0, 1 and 2 in DL SF */
22952 /* Update type 0 allocation mask */
22953 dlSfAlloc->raType0Mask |= rbAllocInfo->resAllocInfo.raType0Mask;
22954 #ifdef RGSCH_SPS_UNUSED
22955 /* Update type 1 allocation masks */
22956 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
22958 dlSfAlloc->raType1Mask[idx] |= rbAllocInfo->resAllocInfo.raType1Mask[idx];
22959 dlSfAlloc->raType1UsedRbs[idx] +=
22960 rbAllocInfo->resAllocInfo.raType1UsedRbs[idx];
22963 /* Update type 2 allocation masks */
22964 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
22966 dlSfAlloc->raType2Mask[idx] |= rbAllocInfo->resAllocInfo.raType2Mask[idx];
22969 dlSf->spsAllocdBw += numAllocRbs;
22973 /***********************************************************
22975 * Func : rgSCHCmnDlGetBestFitHole
22978 * Desc : Converts the best fit hole into allocation and returns the
22979 * allocation information.
22989 **********************************************************/
/* Scans allocMask (one bit per RB, MSB-first inside each 32-bit word) for a
 * contiguous run ("hole") of free RBs of exactly rbsReq RBs.  On an exact fit
 * the hole is written to crntAllocMask and *allocStart/*allocNumRbs are set.
 * Otherwise, if isPartialAlloc is TRUE, the largest hole found during the
 * scan (bestMask/bestStartPos) is returned as a partial allocation. */
22991 PRIVATE Void rgSCHCmnDlGetBestFitHole
22993 uint32_t *allocMask,
22994 uint8_t numMaskRbs,
22995 uint32_t *crntAllocMask,
22997 uint8_t *allocStart,
22998 uint8_t *allocNumRbs,
22999 Bool isPartialAlloc
/* K&R-style definition of the same function (pre-ANSI build variant) */
23002 PRIVATE Void rgSCHCmnDlGetBestFitHole (allocMask, numMaskRbs,
23003 crntAllocMask, rbsReq, allocStart, allocNumRbs, isPartialAlloc)
23004 uint32_t *allocMask;
23005 uint8_t numMaskRbs;
23006 uint32_t *crntAllocMask;
23008 uint8_t *allocStart;
23009 uint8_t *allocNumRbs;
23010 Bool isPartialAlloc;
/* maskSz: number of 32-bit words needed to cover numMaskRbs bits */
23013 uint8_t maskSz = (numMaskRbs + 31)/32;
/* maxMaskPos: count of valid bit positions in the word being scanned;
 * re-adjusted below for the last (possibly partial) word */
23014 uint8_t maxMaskPos = (numMaskRbs % 32);
23015 uint8_t maskIdx, maskPos;
23016 uint8_t numAvailRbs = 0;
23017 uint8_t bestAvailNumRbs = 0;
23018 S8 bestStartPos = -1;
23020 uint32_t tmpMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
23021 uint32_t bestMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
/* Default outcome: no RBs allocated until a fit is found */
23023 *allocNumRbs = numAvailRbs;
23026 for (maskIdx = 0; maskIdx < maskSz; ++maskIdx)
23029 if (maskIdx == (maskSz - 1))
23031 if (numMaskRbs % 32)
23033 maxMaskPos = numMaskRbs % 32;
/* Walk every RB bit of this word; a clear bit means the RB is free */
23036 for (maskPos = 0; maskPos < maxMaskPos; ++maskPos)
23038 if (!(allocMask[maskIdx] & (1 << (31 - maskPos))))
23040 tmpMask[maskIdx] |= (1 << (31 - maskPos));
23041 if (startPos == -1)
/* Remember where the current hole begins (absolute RB index) */
23043 startPos = maskIdx * 32 + maskPos;
23046 if (numAvailRbs == rbsReq)
/* Exact fit: publish it and stop growing the hole */
23048 *allocStart = (uint8_t)startPos;
23049 *allocNumRbs = rbsReq;
/* Hole ended short of rbsReq: keep it if it is the biggest so far */
23055 if (numAvailRbs > bestAvailNumRbs)
23057 bestAvailNumRbs = numAvailRbs;
23058 bestStartPos = startPos;
/* NOTE(review): copies a fixed 4 words — assumes
 * RG_SCH_NUM_RATYPE2_32BIT_MASK == 4; confirm against the macro */
23059 memcpy(bestMask, tmpMask, 4 * sizeof(uint32_t));
23063 memset(tmpMask, 0, 4 * sizeof(uint32_t));
23066 if (*allocNumRbs == rbsReq)
23072 if (*allocNumRbs == rbsReq)
23074 /* Convert the hole into allocation */
23075 memcpy(crntAllocMask, tmpMask, 4 * sizeof(uint32_t));
23080 if (bestAvailNumRbs && isPartialAlloc)
23082 /* Partial allocation could have been done */
23083 *allocStart = (uint8_t)bestStartPos;
23084 *allocNumRbs = bestAvailNumRbs;
23085 /* Convert the hole into allocation */
23086 memcpy(crntAllocMask, bestMask, 4 * sizeof(uint32_t));
23092 #endif /* LTEMAC_SPS */
23094 /***************************************************************************
23096 * NON-DLFS Allocation functions
23098 * *************************************************************************/
23102 * @brief Function to find out code rate
23106 * Function : rgSCHCmnFindCodeRate
23108 * Processing Steps:
23110 * @param[in] RgSchCellCb *cell
23111 * @param[in] RgSchDlSf *dlSf
23112 * @param[in,out] RgSchDlRbAlloc *allocInfo
/* Re-derives the effective code rate / IMCS for TB `idx` of allocInfo after
 * the RB allocation has been adjusted.  Body largely elided in this view;
 * the trailing comment below describes the intended algorithm. */
23117 PRIVATE Void rgSCHCmnFindCodeRate
23121 RgSchDlRbAlloc *allocInfo,
/* K&R-style definition of the same function (pre-ANSI build variant) */
23125 PRIVATE Void rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,idx)
23128 RgSchDlRbAlloc *allocInfo;
23137 /* Adjust the Imcs and bytes allocated also with respect to the adjusted
23138 RBs - Here we will find out the Imcs by identifying first Highest
23139 number of bits compared to the original bytes allocated. */
23141 * @brief Adjust IMCS according to tbSize and ITBS
23145 * Function : rgSCHCmnNonDlfsPbchTbImcsAdj
23147 * Processing Steps:
23148 * - Adjust Imcs according to tbSize and ITBS.
23150 * @param[in,out] RgSchDlRbAlloc *allocInfo
23151 * @param[in] uint8_t *idx
/* After extra RBs were added to compensate for PBCH/PSS/SSS overlap, walk the
 * iTBS down until the TB size over the adjusted RB count no longer exceeds
 * the bytes originally required over rbsReq RBs, then write back the matching
 * bytesReq/iTbs/imcs for TB `idx`. */
23155 PRIVATE Void rgSCHCmnNonDlfsPbchTbImcsAdj
23158 RgSchDlRbAlloc *allocInfo,
/* K&R-style definition of the same function (pre-ANSI build variant) */
23163 PRIVATE Void rgSCHCmnNonDlfsPbchTbImcsAdj(cell,allocInfo, idx, rbsReq)
23165 RgSchDlRbAlloc *allocInfo;
23170 uint8_t noLyrs = 0;
23172 uint32_t origBytesReq;
23173 uint8_t noRbgs = 0;
23175 RgSchDlSf *dlSf = allocInfo->dlSf;
/* Map the current IMCS of this TB to its iTBS index */
23177 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
23178 noLyrs = allocInfo->tbInfo[idx].noLyr;
23180 if((allocInfo->raType == RG_SCH_CMN_RA_TYPE0))
/* Type 0: round the request up to whole RBGs, minus the last-RBG deficit */
23182 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + dlSf->lstRbgDfct), cell->rbgSize);
23183 noRbs = (noRbgs * cell->rbgSize) - dlSf->lstRbgDfct;
23187 noRbs = allocInfo->rbsReq;
23190 /* This line will help in case if tbs is zero and reduction in MCS is not possible */
23191 if (allocInfo->rbsReq == 0 )
/* Bytes the TB would have carried over the caller-supplied rbsReq RBs */
23195 origBytesReq = rgTbSzTbl[noLyrs - 1][tbs][rbsReq - 1]/8;
23197 /* Find out the ITbs & Imcs by identifying first Highest
23198 number of bits compared to the original bytes allocated.*/
23201 if(((rgTbSzTbl[noLyrs - 1][0][noRbs - 1])/8) < origBytesReq)
23203 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyrs - 1], tbs);
/* Step iTBS down until the TB size fits within the original byte budget */
23204 while(((rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1])/8) > origBytesReq)
23213 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1]/8;
23214 allocInfo->tbInfo[idx].iTbs = tbs;
23215 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
23220 /* Added funcion to adjust TBSize*/
23222 * @brief Function to adjust the tbsize in case of subframe 0 & 5 when
23223 * we were not able to do RB alloc adjustment by adding extra required Rbs
23227 * Function : rgSCHCmnNonDlfsPbchTbSizeAdj
23229 * Processing Steps:
23231 * @param[in,out] RgSchDlRbAlloc *allocInfo
23232 * @param[in] uint8_t numOvrlapgPbchRb
23233 * @param[in] uint8_t idx
23234 * @param[in] uint8_t pbchSsRsSym
/* When no extra RBs could be added, shrink the TB instead: subtract the
 * capacity lost to PBCH/PSS/SSS REs in the overlapping RBs from bytesReq,
 * then lower iTBS until the TB size fits that reduced budget. */
23238 PRIVATE Void rgSCHCmnNonDlfsPbchTbSizeAdj
23240 RgSchDlRbAlloc *allocInfo,
23241 uint8_t numOvrlapgPbchRb,
23242 uint8_t pbchSsRsSym,
/* K&R-style definition of the same function (pre-ANSI build variant) */
23247 PRIVATE Void rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,idx,bytesReq)
23248 RgSchDlRbAlloc *allocInfo;
23249 uint8_t numOvrlapgPbchRb;
23250 uint8_t pbchSsRsSym;
23255 uint32_t reducedTbs = 0;
23256 uint8_t noLyrs = 0;
23259 noLyrs = allocInfo->tbInfo[idx].noLyr;
23261 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
/* Bytes lost to reserved symbols: overlapRBs * reservedREs * 6 bits / 8.
 * NOTE(review): the factor 6 presumably reflects the modulation assumption
 * used here — confirm against the REs-per-RB accounting in this file. */
23263 reducedTbs = bytesReq - (((uint32_t)numOvrlapgPbchRb * (uint32_t)pbchSsRsSym * 6)/8);
23265 /* find out the ITbs & Imcs by identifying first Highest
23266 number of bits compared with reduced bits considering the bits that are
23267 reserved for PBCH/PSS/SSS */
23268 if(((rgTbSzTbl[noLyrs - 1][0][allocInfo->rbsReq - 1])/8) < reducedTbs)
/* Step iTBS down until the TB size fits the reduced byte budget */
23270 while(((rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1])/8) > reducedTbs)
23279 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1]/8;
23280 allocInfo->tbInfo[idx].iTbs = tbs;
23281 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
23286 /* Added this function to find num of ovrlapping PBCH rb*/
23288 * @brief Function to find out how many additional rbs are available
23289 * in the entire bw which can be allocated to a UE
23292 * Function : rgSCHCmnFindNumAddtlRbsAvl
23294 * Processing Steps:
23295 * - Calculates number of additinal rbs available
23297 * @param[in] RgSchCellCb *cell
23298 * @param[in] RgSchDlSf *dlSf
23299 * @param[in,out] RgSchDlRbAlloc *allocInfo
23300 * @param[out] uint8_t addtlRbsAvl
/* Returns how many RBs remain in the subframe beyond allocInfo->rbsReq,
 * computed per RA type (type 0 counts whole RBGs minus the last-RBG deficit,
 * type 2 uses plain bw - bwAlloced). */
23304 PRIVATE uint8_t rgSCHCmnFindNumAddtlRbsAvl
23308 RgSchDlRbAlloc *allocInfo
/* K&R-style definition of the same function (pre-ANSI build variant) */
23311 PRIVATE uint8_t rgSCHCmnFindNumAddtlRbsAvl(cell,dlSf,allocInfo)
23314 RgSchDlRbAlloc *allocInfo;
23317 uint8_t addtlRbsAvl = 0;
23320 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
23322 addtlRbsAvl = (((dlSf->type0End - dlSf->type2End + 1)*\
23323 cell->rbgSize) - dlSf->lstRbgDfct) - allocInfo->rbsReq;
23325 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
23327 addtlRbsAvl = (dlSf->bw - dlSf->bwAlloced) - allocInfo->rbsReq;
23330 return (addtlRbsAvl);
23333 /* Added this function to find num of ovrlapping PBCH rb*/
23335 * @brief Function to find out how many of the requested RBs are
23336 * falling in the center 6 RBs of the downlink bandwidth.
23339 * Function : rgSCHCmnFindNumPbchOvrlapRbs
23341 * Processing Steps:
23342 * - Calculates number of overlapping rbs
23344 * @param[in] RgSchCellCb *cell
23345 * @param[in] RgSchDlSf *dlSf
23346 * @param[in,out] RgSchDlRbAlloc *allocInfo
23347 * @param[out] uint8_t* numOvrlapgPbchRb
/* Computes how many of the rbsReq RBs (placed contiguously after bwAlloced)
 * fall inside [cell->pbchRbStart, cell->pbchRbEnd], the centre-6-RB PBCH
 * region; result (0..6) is written to *numOvrlapgPbchRb. */
23351 PRIVATE Void rgSCHCmnFindNumPbchOvrlapRbs
23355 RgSchDlRbAlloc *allocInfo,
23356 uint8_t *numOvrlapgPbchRb
/* K&R-style definition of the same function (pre-ANSI build variant) */
23359 PRIVATE Void rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,numOvrlapgPbchRb)
23362 RgSchDlRbAlloc *allocInfo;
23363 uint8_t *numOvrlapgPbchRb;
23366 *numOvrlapgPbchRb = 0;
23367 /*Find if we have already crossed the start boundary for PBCH 6 RBs,
23368 * if yes then lets find the number of RBs which are getting overlapped
23369 * with this allocation.*/
23370 if(dlSf->bwAlloced <= (cell->pbchRbStart))
23372 /*We have not crossed the start boundary of PBCH RBs. Now we need
23373 * to know that if take this allocation then how much PBCH RBs
23374 * are overlapping with this allocation.*/
23375 /* Find out the overlapping RBs in the centre 6 RBs */
23376 if((dlSf->bwAlloced + allocInfo->rbsReq) > cell->pbchRbStart)
23378 *numOvrlapgPbchRb = (dlSf->bwAlloced + allocInfo->rbsReq) - (cell->pbchRbStart);
/* Overlap is capped at the 6 PBCH RBs */
23379 if(*numOvrlapgPbchRb > 6)
23380 *numOvrlapgPbchRb = 6;
23383 else if ((dlSf->bwAlloced > (cell->pbchRbStart)) &&
23384 (dlSf->bwAlloced < (cell->pbchRbEnd)))
23386 /*We have already crossed the start boundary of PBCH RBs.We need to
23387 * find that if we take this allocation then how much of the RBs for
23388 * this allocation will overlap with PBCH RBs.*/
23389 /* Find out the overlapping RBs in the centre 6 RBs */
23390 if(dlSf->bwAlloced + allocInfo->rbsReq < (cell->pbchRbEnd))
23392 /*If we take this allocation then also we are not crossing the
23393 * end boundary of PBCH 6 RBs.*/
23394 *numOvrlapgPbchRb = allocInfo->rbsReq;
23398 /*If we take this allocation then we are crossing the
23399 * end boundary of PBCH 6 RBs.*/
23400 *numOvrlapgPbchRb = (cell->pbchRbEnd) - dlSf->bwAlloced;
23407 * @brief Performs RB allocation adjustment if the requested RBs are
23408 * falling in the center 6 RBs of the downlink bandwidth.
23411 * Function : rgSCHCmnNonDlfsPbchRbAllocAdj
23413 * Processing Steps:
23414 * - Allocate consecutively available RBs.
23416 * @param[in] RgSchCellCb *cell
23417 * @param[in,out] RgSchDlRbAlloc *allocInfo
23418 * @param[in] uint8_t pbchSsRsSym
/* Compensates an allocation overlapping the centre PBCH/PSS/SSS RBs:
 * first tries to ADD extra RBs (addtlRbsAdd, then moreAddtlRbsAdd if the
 * extras themselves overlap); where extras cannot be added it falls back to
 * shrinking the TB size (rgSCHCmnNonDlfsPbchTbSizeAdj) and/or re-deriving
 * IMCS from the original request (rgSCHCmnNonDlfsPbchTbImcsAdj). */
23422 PRIVATE Void rgSCHCmnNonDlfsPbchRbAllocAdj
23425 RgSchDlRbAlloc *allocInfo,
23426 uint8_t pbchSsRsSym,
/* K&R-style definition of the same function (pre-ANSI build variant) */
23430 PRIVATE Void rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo,pbchSsRsSym)
23432 RgSchDlRbAlloc *allocInfo;
23433 uint8_t pbchSsRsSym;
23437 RgSchDlSf *dlSf = allocInfo->dlSf;
23438 uint8_t numOvrlapgPbchRb = 0;
23439 uint8_t numOvrlapgAdtlPbchRb = 0;
23441 uint8_t addtlRbsReq = 0;
23442 uint8_t moreAddtlRbsReq = 0;
23443 uint8_t addtlRbsAdd = 0;
23444 uint8_t moreAddtlRbsAdd = 0;
23446 uint8_t origRbsReq = 0;
/* Keep the caller's request so IMCS can later be re-derived from it */
23454 origRbsReq = allocInfo->rbsReq;
23455 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23457 totSym = (cell->isCpDlExtend) ? RGSCH_TOT_NUM_SYM_EXTCP : RGSCH_TOT_NUM_SYM_NORCP;
23459 /* Additional RBs are allocated by considering the loss due to
23460 the reserved symbols for CFICH, PBCH, PSS, SSS and cell specific RS */
/* Extra RBs = ceil(overlapRBs * reservedSyms / totalSyms) */
23462 divResult = (numOvrlapgPbchRb * pbchSsRsSym)/totSym;
23463 if((numOvrlapgPbchRb * pbchSsRsSym) % totSym)
23467 addtlRbsReq = divResult;
23469 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, addtlRbsReq, addtlRbsAdd)
23471 /*Now RBs requires is original requested RBs + these additional RBs to make
23472 * up for PSS/SSS/BCCH.*/
23473 allocInfo->rbsReq = allocInfo->rbsReq + addtlRbsAdd;
23475 /*Check if with these additional RBs we have taken up, these are also falling
23476 * under PBCH RBs range, if yes then we would need to account for
23477 * PSS/BSS/BCCH for these additional RBs too.*/
23478 if(addtlRbsAdd && ((dlSf->bwAlloced + allocInfo->rbsReq - addtlRbsAdd) < (cell->pbchRbEnd)))
23480 if((dlSf->bwAlloced + allocInfo->rbsReq) <= (cell->pbchRbEnd))
23482 /*With additional RBs taken into account, we are not crossing the
23483 * PBCH RB end boundary.Thus here we need to account just for
23484 * overlapping PBCH RBs for these additonal RBs.*/
23485 divResult = (addtlRbsAdd * pbchSsRsSym)/totSym;
23486 if((addtlRbsAdd * pbchSsRsSym) % totSym)
23491 moreAddtlRbsReq = divResult;
23493 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
23495 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
23500 /*Here we have crossed the PBCH RB end boundary, thus we need to take
23501 * into account the overlapping RBs for additional RBs which will be
23502 * subset of addtlRbs.*/
23503 numOvrlapgAdtlPbchRb = (cell->pbchRbEnd) - ((dlSf->bwAlloced + allocInfo->rbsReq) - addtlRbsAdd);
23505 divResult = (numOvrlapgAdtlPbchRb * pbchSsRsSym)/totSym;
23506 if((numOvrlapgAdtlPbchRb * pbchSsRsSym) % totSym)
23511 moreAddtlRbsReq = divResult;
23513 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
23515 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
/* BCCH/PCCH traffic keeps its fixed MCS/NPrb; undo the RB additions */
23518 if (isBcchPcch == TRUE)
23523 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
23526 /* This case might be for Imcs value 6 and NPrb = 1 case - Not
23527 Adjusting either RBs or Imcs or Bytes Allocated */
23528 allocInfo->rbsReq = allocInfo->rbsReq - addtlRbsAdd - moreAddtlRbsAdd;
23530 else if(tbs && ((0 == addtlRbsAdd) && (moreAddtlRbsAdd == 0)))
23532 /*In case of a situation where we the entire bandwidth is already occupied
23533 * and we dont have room to add additional Rbs then in order to decrease the
23534 * code rate we reduce the tbsize such that we reduce the present calculated
23535 * tbsize by number of bytes that would be occupied by PBCH/PSS/SSS in overlapping
23536 * rbs and find the nearest tbsize which would be less than this deduced value*/
23538 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23540 noLyr = allocInfo->tbInfo[0].noLyr;
23541 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyr - 1], tbs);
23542 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
23544 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,bytesReq);
/* Repeat the TB-size shrink for the second TB when spatially multiplexed */
23546 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23548 noLyr = allocInfo->tbInfo[1].noLyr;
23549 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
23550 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,bytesReq);
23554 else if(tbs && ((addtlRbsAdd != addtlRbsReq) ||
23555 (addtlRbsAdd && (moreAddtlRbsReq != moreAddtlRbsAdd))))
23557 /*In case of a situation where we were not able to add required number of
23558 * additional RBs then we adjust the Imcs based on original RBs requested.
23559 * Doing this would comensate for the few extra Rbs we have added but inorder
23560 * to comensate for number of RBS we couldnt add we again do the TBSize adjustment*/
23562 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
23564 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23566 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
/* Only the still-uncompensated overlap feeds the TB-size adjustment */
23569 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23570 numOvrlapgPbchRb = numOvrlapgPbchRb - (addtlRbsAdd + moreAddtlRbsAdd);
23572 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,allocInfo->tbInfo[0].bytesReq);
23574 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23576 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,allocInfo->tbInfo[1].bytesReq);
23582 /*We hit this code when we were able to add the required additional RBS
23583 * hence we should adjust the IMcs based on orignals RBs requested*/
23585 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
23587 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23589 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
23594 } /* end of rgSCHCmnNonDlfsPbchRbAllocAdj */
23598 * @brief Performs RB allocation for frequency non-selective cell.
23602 * Function : rgSCHCmnNonDlfsCmnRbAlloc
23604 * Processing Steps:
23605 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
23607 * @param[in] RgSchCellCb *cell
23608 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* Localized (RA type 2) allocation for common channels on a non-DLFS cell:
 * allocates a contiguous run starting at dlSf->type2Start, grabs a common
 * PDCCH (DCI 1A), applies PBCH/PSS/SSS compensation on subframes 0 and 5,
 * and updates the subframe allocation masks.  Under LTEMAC_SPS it may also
 * carve RBs out of the SPS bandwidth via RA type 2. */
23614 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc
23617 RgSchDlRbAlloc *allocInfo
/* K&R-style definition of the same function (pre-ANSI build variant) */
23620 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc(cell, allocInfo)
23622 RgSchDlRbAlloc *allocInfo;
23628 uint8_t pbchSsRsSym = 0;
23629 uint8_t pbchFrame = 0;
23631 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
23633 RgSchDlSf *dlSf = allocInfo->dlSf;
23635 uint8_t rbStart = 0;
23636 uint8_t spsRbsAlloc = 0;
23637 RgSchDlSfAllocInfo *dlSfAlloc = &allocInfo->dlSf->dlSfAllocInfo;
/* Common channels are always sent on a single layer */
23640 allocInfo->tbInfo[0].noLyr = 1;
23643 /* Note: Initialize the masks to 0, this might not be needed since alloInfo
23644 * is initialized to 0 at the beginning of allcoation */
23645 allocInfo->resAllocInfo.raType0Mask = 0;
23646 memset(allocInfo->resAllocInfo.raType1Mask, 0,
23647 RG_SCH_NUM_RATYPE1_32BIT_MASK * sizeof (uint32_t));
23648 memset(allocInfo->resAllocInfo.raType2Mask, 0,
23649 RG_SCH_NUM_RATYPE2_32BIT_MASK * sizeof (uint32_t));
/* Bail out when both normal and SPS bandwidth are exhausted */
23651 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
23652 (dlSf->bwAlloced == dlSf->bw))
23654 if(dlSf->bwAlloced == dlSf->bw)
23660 if (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced))
/* IMCS < 29 (non-reserved MCS): trim the request to what is left */
23663 if ((allocInfo->tbInfo[0].imcs < 29) && (dlSf->bwAlloced < dlSf->bw))
23665 if(allocInfo->tbInfo[0].imcs < 29)
23668 /* set the remaining RBs for the requested UE */
23669 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
23670 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
23671 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[0][tbs][allocInfo->rbsReq - 1]/8;
23676 /* Attempt RA Type 2 allocation in SPS Bandwidth */
23677 if (dlSf->spsAllocdBw < cell->spsBwRbgInfo.numRbs)
23680 rgSCHCmnDlRaType2Alloc(dlSfAlloc,
23681 allocInfo->rbsReq, &cell->spsBwRbgInfo, &rbStart,
23682 &allocInfo->resAllocInfo, FALSE);
23683 /* rbsAlloc assignment moved from line 16671 to here to avoid
23684 * compilation error. Recheck */
23685 dlSf->spsAllocdBw += spsRbsAlloc;
23688 #endif /* LTEMAC_SPS */
23696 /* Update allocation information */
23697 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
23698 if (allocInfo->pdcch == NULLP)
/* Common channels always use DCI format 1A with localized RA type 2 */
23702 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23703 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
23704 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23705 allocInfo->allocInfo.raType2.isLocal = TRUE;
23709 allocInfo->allocInfo.raType2.rbStart = rbStart;
23710 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23711 allocInfo->rbsAlloc = allocInfo->rbsReq;
/* Subframes other than 0 and 5 carry no PBCH/PSS/SSS — no compensation */
23722 if(!(dlSf->sfNum == 5))
23724 /* case for subframes 1 to 9 except 5 */
23726 allocInfo->allocInfo.raType2.rbStart = rbStart;
23728 /*Fix for ccpu00123918*/
23729 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23734 pbchFrame = 1; /* case for subframe 5 */
23735 /* In subframe 5, symbols are reserved for PSS and SSS and CFICH
23736 and Cell Specific Reference Signals */
23737 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PSS_SSS_SYM) *
23738 RGSCH_NUM_SC_IN_RB + cell->numCellRSPerSf);
23744 /* In subframe 0, symbols are reserved for PSS, SSS, PBCH, CFICH and
23745 and Cell Specific Reference signals */
23746 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PBCH_SYM +
23747 RGSCH_NUM_PSS_SSS_SYM) * RGSCH_NUM_SC_IN_RB +
23748 cell->numCellRSPerSf);
23749 } /* end of outer else */
/* Allocation overlaps the centre PBCH RBs: adjust RBs/TB size/IMCS */
23752 (((dlSf->bwAlloced + allocInfo->rbsReq) - cell->pbchRbStart) > 0)&&
23753 (dlSf->bwAlloced < cell->pbchRbEnd))
23755 if(allocInfo->tbInfo[0].imcs < 29)
23757 rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo, pbchSsRsSym, TRUE);
23769 /*Fix for ccpu00123918*/
23770 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23771 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23772 allocInfo->rbsAlloc = allocInfo->rbsReq;
23774 /* LTE_ADV_FLAG_REMOVED_START */
/* SFR enabled: route the type-2 bookkeeping through the SFR variant */
23776 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
23778 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
23779 allocInfo->allocInfo.raType2.rbStart, \
23780 allocInfo->allocInfo.raType2.numRb);
23785 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
23786 allocInfo->allocInfo.raType2.rbStart, \
23787 allocInfo->allocInfo.raType2.numRb);
23793 /* LTE_ADV_FLAG_REMOVED_END */
23794 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23801 /* Update type 0, 1 and 2 masks */
23802 dlSfAlloc->raType0Mask |= allocInfo->resAllocInfo.raType0Mask;
23803 #ifdef RGSCH_SPS_UNUSED
23804 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
23806 dlSfAlloc->raType1Mask[idx] |=
23807 allocInfo->resAllocInfo.raType1Mask[idx];
23808 dlSfAlloc->raType1UsedRbs[idx] +=
23809 allocInfo->resAllocInfo.raType1UsedRbs[idx];
23812 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
23814 dlSfAlloc->raType2Mask[idx] |=
23815 allocInfo->resAllocInfo.raType2Mask[idx];
23825 * @brief Performs RB allocation for frequency non-selective cell.
23829 * Function : rgSCHCmnNonDlfsCmnRbAllocRar
23831 * Processing Steps:
23832 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
23834 * @param[in] RgSchCellCb *cell
23835 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* RAR variant of the common-channel allocator: legacy path allocates a
 * localized type-2 run plus a DCI-1A common PDCCH; the other (5GTF) path
 * allocates VRBGs from the beam info and builds a DCI-B1 grant. */
23841 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAllocRar
23844 RgSchDlRbAlloc *allocInfo
/* NOTE(review): this K&R definition names the function
 * rgSCHCmnNonDlfsCmnRbAlloc, not rgSCHCmnNonDlfsCmnRbAllocRar as the ANSI
 * prototype above does — in a pre-ANSI build this duplicates the other
 * allocator's symbol and leaves Rar undefined. Needs fixing. */
23847 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc(cell, allocInfo)
23849 RgSchDlRbAlloc *allocInfo;
23852 RgSchDlSf *dlSf = allocInfo->dlSf;
/* No room left in this subframe */
23855 if(dlSf->bwAlloced == dlSf->bw)
23860 allocInfo->tbInfo[0].noLyr = 1;
23862 /* Update allocation information */
23863 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
23864 if (allocInfo->pdcch == NULLP)
23868 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23869 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
23870 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23871 allocInfo->allocInfo.raType2.isLocal = TRUE;
23873 /*Fix for ccpu00123918*/
23874 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
23875 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23876 allocInfo->rbsAlloc = allocInfo->rbsReq;
23878 /* LTE_ADV_FLAG_REMOVED_END */
23879 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* 5GTF path: fixed CQI 13, DCI format B1 */
23882 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, NULLP, dlSf, 13, TFU_DCI_FORMAT_B1, FALSE);
23883 if (allocInfo->pdcch == NULLP)
23887 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
23888 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
23890 printf("5GTF_ERROR vrbg allocated > 25\n");
/* Grant the requested VRBGs from the beam's running cursor */
23894 allocInfo->tbInfo[0].cmnGrnt.vrbgStart = beamInfo->vrbgStart;
23895 allocInfo->tbInfo[0].cmnGrnt.numVrbg = allocInfo->vrbgReq;
23897 /* Update allocation information */
23898 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23900 allocInfo->tbInfo[0].cmnGrnt.xPDSCHRange = 1;
23901 allocInfo->tbInfo[0].cmnGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
23902 allocInfo->tbInfo[0].cmnGrnt.vrbgStart, allocInfo->tbInfo[0].cmnGrnt.numVrbg);
23904 allocInfo->tbInfo[0].cmnGrnt.rbStrt = (allocInfo->tbInfo[0].cmnGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
23905 allocInfo->tbInfo[0].cmnGrnt.numRb = (allocInfo->tbInfo[0].cmnGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam cursor past this grant */
23907 beamInfo->vrbgStart += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
23908 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
23909 allocInfo->tbInfo[0].cmnGrnt.rv = 0;
23910 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23913 printf("\n[%s],allocInfo->tbInfo[0].bytesAlloc:%u,vrbgReq:%u\n",
23914 __func__,allocInfo->tbInfo[0].bytesAlloc,allocInfo->vrbgReq);
23920 /* LTE_ADV_FLAG_REMOVED_START */
23923 * @brief To check if DL BW available for non-DLFS allocation.
23927 * Function : rgSCHCmnNonDlfsBwAvlbl
23929 * Processing Steps:
23930 * - Determine availability based on RA Type.
23932 * @param[in] RgSchCellCb *cell
23933 * @param[in] RgSchDlSf *dlSf
23934 * @param[in] RgSchDlRbAlloc *allocInfo
/* SFR-aware bandwidth-availability check: selects an SFR pool (cell-centre
 * pool first for CC UEs, falling back to the cell-edge pool; CE UEs use only
 * the CE pool) that can satisfy allocInfo->rbsReq for the given RA type.
 * On success *sfrpoolInfo points at the chosen pool.  For new transmissions
 * that cannot be fully served, rbsReq/bytesReq are trimmed to the pool with
 * the most free bandwidth.  RETX requests must keep their exact RB count;
 * several branches below grow/shrink pool boundaries to make that possible. */
23942 PRIVATE Bool rgSCHCmnNonDlfsSFRBwAvlbl
23945 RgSchSFRPoolInfo **sfrpoolInfo,
23947 RgSchDlRbAlloc *allocInfo,
/* K&R-style definition of the same function (pre-ANSI build variant) */
23951 PRIVATE Bool rgSCHCmnNonDlfsSFRBwAvlbl(cell, sfrpoolInfo, dlSf, allocInfo, isUeCellEdge)
23953 RgSchSFRPoolInfo **sfrpoolInfo;
23955 RgSchDlRbAlloc *allocInfo;
23963 RgSchSFRPoolInfo *sfrPool;
23964 RgSchSFRPoolInfo *sfrCEPool;
23968 RgSchSFRPoolInfo *poolWithMaxAvlblBw = NULLP;
23969 uint32_t bwAvlbl = 0;
23970 uint32_t addtnlPRBs = 0;
/* Fast rejections: whole subframe full, CC pool full, or CE pool full
 * for a cell-edge UE */
23972 if (dlSf->bw <= dlSf->bwAlloced)
23974 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
23975 "BW is fully allocated for subframe (%d) CRNTI:%d", dlSf->sfNum,allocInfo->rnti);
23979 if (dlSf->sfrTotalPoolInfo.ccBwFull == TRUE)
23981 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
23982 "BW is fully allocated for CC Pool CRNTI:%d",allocInfo->rnti);
23986 if ((dlSf->sfrTotalPoolInfo.ceBwFull == TRUE) && (isUeCellEdge))
23988 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
23989 "BW is fully allocated for CE Pool CRNTI:%d",allocInfo->rnti);
23993 /* We first check if the ue scheduled is a cell edge or cell centre and accordingly check the avaialble
23994 memory in their pool. If the cell centre UE doesnt have Bw available in its pool, then it will check
23995 Bw availability in cell edge pool but the other way around is NOT possible. */
23998 l = &dlSf->sfrTotalPoolInfo.cePool;
24002 l = &dlSf->sfrTotalPoolInfo.ccPool;
24005 n = cmLListFirst(l);
24009 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
24011 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24013 /* MS_FIX for ccpu00123919 : Number of RBs in case of RETX should be same as that of initial transmission. */
24014 if(allocInfo->tbInfo[0].tbCb->txCntr)
24016 /* If RB assignment is being done for RETX. Then if reqRbs are a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
24017 * not a multiple of rbgSize then check if lsgRbgDfct exists */
24018 if (allocInfo->rbsReq % cell->rbgSize == 0)
24020 if ((sfrPool->type2End == dlSf->type2End) && dlSf->lstRbgDfct)
24022 /* In this scenario we are wasting the last RBG for this dlSf */
24023 sfrPool->type0End--;
24024 sfrPool->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24026 dlSf->lstRbgDfct = 0;
24028 /*ABHINAV To check if these variables need to be taken care of*/
24030 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24035 if (dlSf->lstRbgDfct)
24037 /* Check if type0 allocation can cater to this RETX requirement */
24038 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
24044 if (sfrPool->type2End != dlSf->type2End) /*Search again for some pool which has the END RBG of the BandWidth*/
24052 /* cannot allocate same number of required RBs */
24058 /*rg002.301 ccpu00120391 MOD condition is modified approprialtely to find if rbsReq is less than available RBS*/
24059 if(allocInfo->rbsReq <= (((sfrPool->type0End - sfrPool->type2End + 1)*\
24060 cell->rbgSize) - dlSf->lstRbgDfct))
24062 *sfrpoolInfo = sfrPool;
/* Pool too small even for one more RBG: move on to the next pool */
24067 if (sfrPool->bw <= sfrPool->bwAlloced + cell->rbgSize)
24069 n = cmLListNext(l);
24070 /* If the ue is cell centre then it will simply check the memory available in next pool.
24071 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
24073 if((!isUeCellEdge) && (!n->node))
24075 l = &dlSf->sfrTotalPoolInfo.cePool;
24076 n = cmLListFirst(l);
24082 /* MS_FIX: Number of RBs in case of RETX should be same as that of initial transmission */
24083 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24085 /*rg002.301 ccpu00120391 MOD setting the remaining RBs for the requested UE*/
24086 allocInfo->rbsReq = (((sfrPool->type0End - sfrPool->type2End + 1)*\
24087 cell->rbgSize) - dlSf->lstRbgDfct);
24088 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24089 noLyrs = allocInfo->tbInfo[0].noLyr;
24090 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24091 *sfrpoolInfo = sfrPool;
24096 n = cmLListNext(l);
24098 /* If the ue is cell centre then it will simply check the memory available in next pool.
24099 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
24100 if((!isUeCellEdge) && (!n->node))
24102 l = &dlSf->sfrTotalPoolInfo.cePool;
24103 n = cmLListFirst(l);
24112 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
24114 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24115 /* This is a Case where a UE was CC and had more RBs allocated than present in CE pool.
24116 In case this UE whn become CE with retx going on, then BW is not sufficient for Retx */
24117 if ((isUeCellEdge) &&
24118 (allocInfo->tbInfo[0].tbCb->txCntr != 0))
24120 if(allocInfo->rbsReq > (sfrPool->bw - sfrPool->bwAlloced))
24122 /* Adjust CE BW such that Retx alloc is successful */
24123 /* Check if merging CE with adjacent CC pool will be sufficient to process Retx */
24125 /* If no Type 0 allocations are made from this pool */
24126 if (sfrPool->type0End == (((sfrPool->poolendRB + 1) / cell->rbgSize) - 1))
24128 if (sfrPool->adjCCPool &&
24129 (sfrPool->adjCCPool->type2Start == sfrPool->poolendRB + 1) &&
24130 (allocInfo->rbsReq <= ((sfrPool->bw - sfrPool->bwAlloced) +
24131 ((sfrPool->adjCCPool->bw - sfrPool->adjCCPool->bwAlloced)))))
/* Steal just enough PRBs from the adjacent CC pool for this RETX */
24133 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
24135 /* Adjusting CE Pool Info */
24136 sfrPool->bw += addtnlPRBs;
24137 sfrPool->type0End = ((sfrPool->poolendRB + addtnlPRBs + 1) /
24138 cell->rbgSize) - 1;
24140 /* Adjusting CC Pool Info */
24141 sfrPool->adjCCPool->type2Start += addtnlPRBs;
24142 sfrPool->adjCCPool->type2End = RGSCH_CEIL(sfrPool->adjCCPool->type2Start,
24144 sfrPool->adjCCPool->bw -= addtnlPRBs;
24145 *sfrpoolInfo = sfrPool;
24152 /* Check if CC pool is one of the following:
24153 * 1. |CE| + |CC "CCPool2Exists" = TRUE|
24154 * 2. |CC "CCPool2Exists" = FALSE| + |CE| + |CC "CCPool2Exists" = TRUE|
24156 if(TRUE == sfrPool->CCPool2Exists)
24158 l1 = &dlSf->sfrTotalPoolInfo.cePool;
24159 n1 = cmLListFirst(l1);
24160 sfrCEPool = (RgSchSFRPoolInfo*)(n1->node);
24161 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced))
24163 *sfrpoolInfo = sfrCEPool;
24166 else if(allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
24168 *sfrpoolInfo = sfrPool;
24171 /* Check if CE and CC boundary has unallocated prbs */
24172 else if ((sfrPool->poolstartRB == sfrPool->type2Start) &&
24173 (sfrCEPool->type0End == ((sfrCEPool->poolendRB + 1) / cell->rbgSize) - 1))
24175 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced) +
24176 (sfrPool->bw - sfrPool->bwAlloced))
24178 /* Checking if BW can be allocated partly from CE pool and partly
24181 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
24182 /* Updating CE and CC type2 parametrs based on the RBs allocated
24183 * from these pools*/
24184 sfrPool->type2Start -= addtnlPRBs;
24185 sfrPool->type2End = RGSCH_CEIL(sfrPool->type2Start, cell->rbgSize);
24186 sfrPool->bw += addtnlPRBs;
24187 if (addtnlPRBs == (sfrCEPool->bw - sfrCEPool->bwAlloced))
/* CE pool fully consumed by the boundary move */
24189 sfrCEPool->bwAlloced = sfrCEPool->bw;
24190 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24194 sfrCEPool->bw -= addtnlPRBs;
24195 sfrCEPool->type0End = ((sfrCEPool->poolendRB + 1 - addtnlPRBs) / cell->rbgSize) - 1;
24197 *sfrpoolInfo = sfrPool;
24200 else if ( bwAvlbl <
24201 ((sfrCEPool->bw - sfrCEPool->bwAlloced) +
24202 (sfrPool->bw - sfrPool->bwAlloced)))
24204 /* All the Prbs from CE BW shall be allocated */
24205 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24207 sfrPool->type2Start = sfrCEPool->type2Start;
24208 sfrPool->bw += sfrCEPool->bw - sfrCEPool->bwAlloced;
24209 sfrCEPool->type2Start = sfrCEPool->poolendRB + 1;
24210 sfrCEPool->bwAlloced = sfrCEPool->bw;
24211 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24213 /* set the remaining RBs for the requested UE */
24214 allocInfo->rbsReq = (sfrPool->bw - sfrPool->bwAlloced);
24215 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24216 noLyrs = allocInfo->tbInfo[0].noLyr;
24217 allocInfo->tbInfo[0].bytesReq =
24218 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24219 *sfrpoolInfo = sfrPool;
24230 /* Checking if no. of RBs required can be allocated from
24232 * 1. If available return the SFR pool.
24233 * 2. Else update the RBs required parameter based on the
24234 * BW available in the pool
24235 * 3. Return FALSE if no B/W is available.
24237 if (allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
24239 *sfrpoolInfo = sfrPool;
24244 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
/* Track the pool with the most free bandwidth for a trimmed new TX */
24246 if (bwAvlbl < sfrPool->bw - sfrPool->bwAlloced)
24250 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24252 bwAvlbl = sfrPool->bw - sfrPool->bwAlloced;
24253 poolWithMaxAvlblBw = sfrPool;
24255 n = cmLListNext(l);
24257 if ((isUeCellEdge == FALSE) && (n == NULLP))
24259 if(l != &dlSf->sfrTotalPoolInfo.cePool)
/* CC pools exhausted: let the CC UE try the CE pool */
24261 l = &dlSf->sfrTotalPoolInfo.cePool;
24262 n = cmLListFirst(l);
24272 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24276 dlSf->sfrTotalPoolInfo.ccBwFull = TRUE;
24282 /* set the remaining RBs for the requested UE */
24283 allocInfo->rbsReq = poolWithMaxAvlblBw->bw -
24284 poolWithMaxAvlblBw->bwAlloced;
24285 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24286 noLyrs = allocInfo->tbInfo[0].noLyr;
24287 allocInfo->tbInfo[0].bytesReq =
24288 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24289 *sfrpoolInfo = poolWithMaxAvlblBw;
24296 n = cmLListNext(l);
24298 if ((isUeCellEdge == FALSE) && (n == NULLP))
24300 if(l != &dlSf->sfrTotalPoolInfo.cePool)
24302 l = &dlSf->sfrTotalPoolInfo.cePool;
24303 n = cmLListFirst(l);
24320 /* LTE_ADV_FLAG_REMOVED_END */
24323 * @brief To check if DL BW available for non-DLFS allocation.
24327 * Function : rgSCHCmnNonDlfsBwAvlbl
24329 * Processing Steps:
24330 * - Determine availability based on RA Type.
24332 * @param[in] RgSchCellCb *cell
24333 * @param[in] RgSchDlSf *dlSf
24334 * @param[in] RgSchDlRbAlloc *allocInfo
24342 PRIVATE Bool rgSCHCmnNonDlfsBwAvlbl
24346 RgSchDlRbAlloc *allocInfo
24349 PRIVATE Bool rgSCHCmnNonDlfsBwAvlbl(cell, dlSf, allocInfo)
24352 RgSchDlRbAlloc *allocInfo;
24357 uint8_t ignoredDfctRbg = FALSE;
24359 if (dlSf->bw <= dlSf->bwAlloced)
24361 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId, "(%d:%d)FAILED CRNTI:%d",
24362 dlSf->bw, dlSf->bwAlloced,allocInfo->rnti);
24365 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
24367 /* Fix for ccpu00123919 : Number of RBs in case of RETX should be same as
24368 * that of initial transmission. */
24369 if(allocInfo->tbInfo[0].tbCb->txCntr)
24371 /* If RB assignment is being done for RETX. Then if reqRbs are
24372 * a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
24373 * not a multiple of rbgSize then check if lsgRbgDfct exists */
24374 if (allocInfo->rbsReq % cell->rbgSize == 0)
24376 if (dlSf->lstRbgDfct)
24378 /* In this scenario we are wasting the last RBG for this dlSf */
24381 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24382 /* Fix: MUE_PERTTI_DL */
24383 dlSf->lstRbgDfct = 0;
24384 ignoredDfctRbg = TRUE;
24390 if (dlSf->lstRbgDfct)
24392 /* Check if type0 allocation can cater to this RETX requirement */
24393 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
24400 /* cannot allocate same number of required RBs */
24406 /* Condition is modified approprialtely to find
24407 * if rbsReq is less than available RBS*/
24408 if(allocInfo->rbsReq <= (((dlSf->type0End - dlSf->type2End + 1)*\
24409 cell->rbgSize) - dlSf->lstRbgDfct))
24413 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
24414 * allocation in TDD when requested RBs are more than available RBs*/
24417 /* MS_WORKAROUND for ccpu00122022 */
24418 if (dlSf->bw < dlSf->bwAlloced + cell->rbgSize)
24420 /* ccpu00132358- Re-assigning the values which were updated above
24421 * if it is RETX and Last RBG available*/
24422 if(ignoredDfctRbg == TRUE)
24425 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
24426 dlSf->lstRbgDfct = 1;
24432 /* Fix: Number of RBs in case of RETX should be same as
24433 * that of initial transmission. */
24434 if(allocInfo->tbInfo[0].tbCb->txCntr == 0
24436 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
24440 /* Setting the remaining RBs for the requested UE*/
24441 allocInfo->rbsReq = (((dlSf->type0End - dlSf->type2End + 1)*\
24442 cell->rbgSize) - dlSf->lstRbgDfct);
24443 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24444 noLyrs = allocInfo->tbInfo[0].noLyr;
24445 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24446 /* DwPts Scheduling Changes Start */
24448 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24450 allocInfo->tbInfo[0].bytesReq =
24451 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
24454 /* DwPts Scheduling Changes End */
24458 /* ccpu00132358- Re-assigning the values which were updated above
24459 * if it is RETX and Last RBG available*/
24460 if(ignoredDfctRbg == TRUE)
24463 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
24464 dlSf->lstRbgDfct = 1;
24467 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "FAILED for CRNTI:%d",
24469 printf ("RB Alloc failed for LAA TB type 0\n");
24475 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
24477 if (allocInfo->rbsReq <= (dlSf->bw - dlSf->bwAlloced))
24481 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
24482 * allocation in TDD when requested RBs are more than available RBs*/
24485 /* Fix: Number of RBs in case of RETX should be same as
24486 * that of initial transmission. */
24487 if((allocInfo->tbInfo[0].tbCb->txCntr == 0)
24489 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
24493 /* set the remaining RBs for the requested UE */
24494 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
24495 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24496 noLyrs = allocInfo->tbInfo[0].noLyr;
24497 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24498 /* DwPts Scheduling Changes Start */
24500 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24502 allocInfo->tbInfo[0].bytesReq =
24503 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
24506 /* DwPts Scheduling Changes End */
24510 printf ("RB Alloc failed for LAA TB type 2\n");
24511 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
24514 /* Fix: Number of RBs in case of RETX should be same as
24515 * that of initial transmission. */
24519 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
24523 /* LTE_ADV_FLAG_REMOVED_START */
24526 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24530 * Function : rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
24532 * Processing Steps:
24534 * @param[in] RgSchCellCb *cell
24535 * @param[in] RgSchDlSf *dlSf
24536 * @param[in] uint8_t rbStrt
24537 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc: after a TYPE2 allocation of
 * 'numRb' PRBs starting at 'rbStrt' for common channels, advances the
 * subframe-level type2Start/type2End/bwAlloced pivots, then walks the SFR
 * cell-centre (CC) pool list — and, per the visible tail, presumably the
 * cell-edge (CE) pool list as well — to keep each pool's own pivots in
 * sync with the subframe-wide allocation. NOTE(review): some lines of
 * this function are elided in this view.
 */
24542 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
24550 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, rbStrt, numRb)
24559 RgSchSFRPoolInfo *sfrPool;
24561 l = &dlSf->sfrTotalPoolInfo.ccPool;
/* Advance the subframe-wide TYPE2 pivots past the new allocation */
24563 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24564 dlSf->bwAlloced += numRb;
24565 dlSf->type2Start += numRb;
24566 n = cmLListFirst(l);
24570 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24571 n = cmLListNext(l);
24573 /* If the pool contains some RBs allocated in this allocation, e.g: Pool is [30.50]. Pool->type2Start is 40 , dlSf->type2Start is 45. then update the variables in pool */
24574 if((sfrPool->poolendRB >= dlSf->type2Start) && (sfrPool->type2Start < dlSf->type2Start))
24576 sfrPool->type2End = dlSf->type2End;
24577 sfrPool->bwAlloced = dlSf->type2Start - sfrPool->poolstartRB;
24578 sfrPool->type2Start = dlSf->type2Start;
24582 /* If the pool contains all RBs allocated in this allocation*/
24583 if(dlSf->type2Start > sfrPool->poolendRB)
24585 sfrPool->type2End = sfrPool->type0End + 1;
24586 sfrPool->bwAlloced = sfrPool->bw;
24587 sfrPool->type2Start = sfrPool->poolendRB + 1;
/* After exhausting the CC pool list, repeat the walk over the CE pool */
24592 if (l != &dlSf->sfrTotalPoolInfo.cePool)
24594 l = &dlSf->sfrTotalPoolInfo.cePool;
24595 n = cmLListFirst(l);
24605 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24609 * Function : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
24611 * Processing Steps:
24613 * @param[in] RgSchCellCb *cell
24614 * @param[in] RgSchDlSf *dlSf
24615 * @param[in] uint8_t rbStrt
24616 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsUpdDSFRTyp2Alloc: DSFR variant of the TYPE2 post-allocation
 * update. Looks up the (up to two) cell-centre SFR pools; if the allocated
 * range [type2Start, type2Start+numRb) falls inside a pool's high-power CC
 * range, marks the UE as high-power (isCCUePHigh) — otherwise builds the
 * RNTP bitmap for the range via rgSCHCmnBuildRntpInfo(). Finally advances
 * the subframe TYPE2 pivots. NOTE(review): several lines (braces, returns,
 * the else-paths) are elided in this view.
 */
24622 PRIVATE S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
24631 PRIVATE S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc(cell, ue, dlSf, rbStrt, numRb)
24641 RgSchSFRPoolInfo *sfrCCPool1 = NULL;
24642 RgSchSFRPoolInfo *sfrCCPool2 = NULL;
24645 /* Move the type2End pivot forward */
/* Fetch the first CC pool from the subframe's SFR pool list */
24648 l = &dlSf->sfrTotalPoolInfo.ccPool;
24649 n = cmLListFirst(l);
24652 sfrCCPool1 = (RgSchSFRPoolInfo*)(n->node);
24654 if (sfrCCPool1 == NULLP)
24656 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24657 "sfrCCPool1 is NULL for CRNTI:%d",ue->ueId);
/* A second CC pool may or may not exist */
24660 n = cmLListNext(l);
24663 sfrCCPool2 = (RgSchSFRPoolInfo*)(n->node);
24664 n = cmLListNext(l);
/* Case: two CC pools present — check the high-power range of either */
24666 if((sfrCCPool1) && (sfrCCPool2))
24668 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
24669 if(((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
24670 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb)) ||
24671 ((dlSf->type2Start >= sfrCCPool2->pwrHiCCRange.startRb) &&
24672 (dlSf->type2Start + numRb < sfrCCPool2->pwrHiCCRange.endRb)))
24674 ue->lteAdvUeCb.isCCUePHigh = TRUE;
24676 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
24677 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
24680 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24681 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Case: single CC pool — same check against pool 1 only */
24688 if((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
24689 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb))
24691 ue->lteAdvUeCb.isCCUePHigh = TRUE;
24693 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
24694 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
24697 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24698 "rgSCHCmnBuildRntpInfo() function returned RFAILED CRNTI:%d",ue->ueId);
/* Advance the subframe-wide TYPE2 pivots past the new allocation */
24704 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24706 dlSf->bwAlloced += numRb;
24707 /*MS_FIX for ccpu00123918*/
24708 dlSf->type2Start += numRb;
24714 #endif /* end of ifndef LTE_TDD*/
24715 /* LTE_ADV_FLAG_REMOVED_END */
24717 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24721 * Function : rgSCHCmnNonDlfsUpdTyp2Alloc
24723 * Processing Steps:
24725 * @param[in] RgSchCellCb *cell
24726 * @param[in] RgSchDlSf *dlSf
24727 * @param[in] uint8_t rbStrt
24728 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsUpdTyp2Alloc: plain (non-SFR) TYPE2 post-allocation update.
 * Advances the subframe's type2End (in RBG units), bwAlloced and type2Start
 * past the 'numRb' PRBs just allocated at 'rbStrt'.
 */
24733 PRIVATE Void rgSCHCmnNonDlfsUpdTyp2Alloc
24741 PRIVATE Void rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, rbStrt, numRb)
24748 /* Move the type2End pivot forward */
24749 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24750 //#ifndef LTEMAC_SPS
24751 dlSf->bwAlloced += numRb;
24752 /*Fix for ccpu00123918*/
24753 dlSf->type2Start += numRb;
24759 * @brief To do DL allocation using TYPE0 RA.
24763 * Function : rgSCHCmnNonDlfsType0Alloc
24765 * Processing Steps:
24766 * - Perform TYPE0 allocation using the RBGs between
24767 * type0End and type2End.
24768 * - Build the allocation mask as per RBG positioning.
24769 * - Update the allocation parameters.
24771 * @param[in] RgSchCellCb *cell
24772 * @param[in] RgSchDlSf *dlSf
24773 * @param[in] RgSchDlRbAlloc *allocInfo
/*
 * rgSCHCmnNonDlfsType0Alloc: performs a TYPE0 (RBG-granular) DL allocation
 * from the RBGs between type0End and type2End. Rounds the requested RBs up
 * to whole RBGs (absorbing the last-RBG deficit 'lstRbgDfct'), clips the
 * result against subframe BW and UE-category limits (max TB size / max RBs),
 * builds the RBG allocation bitmask, and fills the per-TB bytesAlloc from
 * the TB-size table. NOTE(review): several lines (braces, else-arms,
 * decrement of noRbgs on the clip paths) are elided in this view.
 */
24779 PRIVATE Void rgSCHCmnNonDlfsType0Alloc
24783 RgSchDlRbAlloc *allocInfo,
24787 PRIVATE Void rgSCHCmnNonDlfsType0Alloc(cell, dlSf, allocInfo, dlUe)
24790 RgSchDlRbAlloc *allocInfo;
24794 uint32_t dlAllocMsk = 0;
24795 uint8_t rbgFiller = dlSf->lstRbgDfct;
24796 uint8_t noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
24797 //uint8_t noRbgs = (allocInfo->rbsReq + rbgFiller)/ cell->rbgSize;
24801 uint32_t tb1BytesAlloc = 0;
24802 uint32_t tb2BytesAlloc = 0;
24803 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
24805 //if(noRbgs == 0) noRbgs = 1; /* Not required as ceilling is used above*/
24807 /* Fix for ccpu00123919*/
/* Usable PRBs = whole RBGs minus the short last-RBG deficit */
24808 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Clip if the rounded-up allocation would exceed the subframe BW */
24809 if (dlSf->bwAlloced + noRbs > dlSf->bw)
24815 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
24818 /* Fix for ccpu00138701: Ceiling is used to derive num of RBGs, Therefore,
24819 * after this operation,checking Max TB size and Max RBs are not crossed
24820 * if it is crossed then decrement num of RBGs. */
24821 //if((noRbs + rbgFiller) % cell->rbgSize)
24822 if((noRbs > allocInfo->rbsReq) &&
24823 (allocInfo->rbsReq + rbgFiller) % cell->rbgSize)
24824 {/* considering ue category limitation
24825 * due to ceiling */
24828 if (rgSCHLaaIsLaaTB(allocInfo)== FALSE)
/* Compute tentative TB sizes for new transmissions only (txCntr == 0) */
24831 if ((allocInfo->tbInfo[0].schdlngForTb) && (!allocInfo->tbInfo[0].tbCb->txCntr))
24833 iTbs = allocInfo->tbInfo[0].iTbs;
24834 noLyr = allocInfo->tbInfo[0].noLyr;
24835 tb1BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24838 if ((allocInfo->tbInfo[1].schdlngForTb) && (!allocInfo->tbInfo[1].tbCb->txCntr))
24840 iTbs = allocInfo->tbInfo[1].iTbs;
24841 noLyr = allocInfo->tbInfo[1].noLyr;
24842 tb2BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24846 /* Only Check for New Tx No need for Retx */
24847 if (tb1BytesAlloc || tb2BytesAlloc)
/* UE-category caps: aggregate TB bits, per-TB size, and max RB count */
24849 if (( ue->dl.aggTbBits >= dlUe->maxTbBits) ||
24850 (tb1BytesAlloc >= dlUe->maxTbSz/8) ||
24851 (tb2BytesAlloc >= dlUe->maxTbSz/8) ||
24852 (noRbs >= dlUe->maxRb))
24858 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
24862 /* type0End would have been initially (during subfrm Init) at the bit position
24863 * (cell->noOfRbgs - 1), 0 being the most significant.
24864 * Getting DlAllocMsk for noRbgs and at the appropriate position */
24865 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - dlSf->type0End));
24866 /* Move backwards the type0End pivot */
24867 dlSf->type0End -= noRbgs;
24868 /*Fix for ccpu00123919*/
24869 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
24870 /* Update the bwAlloced field accordingly */
24871 //#ifndef LTEMAC_SPS /* ccpu00129474*/
24872 dlSf->bwAlloced += noRbs;
24874 /* Update Type0 Alloc Info */
24875 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
24876 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
24877 allocInfo->rbsAlloc = noRbs;
24879 /* Update Tb info for each scheduled TB */
24880 iTbs = allocInfo->tbInfo[0].iTbs;
24881 noLyr = allocInfo->tbInfo[0].noLyr;
24882 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
24883 * RETX TB Size is same as Init TX TB Size */
24884 if (allocInfo->tbInfo[0].tbCb->txCntr)
24886 allocInfo->tbInfo[0].bytesAlloc =
24887 allocInfo->tbInfo[0].bytesReq;
24891 allocInfo->tbInfo[0].bytesAlloc =
24892 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24893 /* DwPts Scheduling Changes Start */
24895 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24897 allocInfo->tbInfo[0].bytesAlloc =
24898 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
24901 /* DwPts Scheduling Changes End */
24904 if (allocInfo->tbInfo[1].schdlngForTb)
24906 iTbs = allocInfo->tbInfo[1].iTbs;
24907 noLyr = allocInfo->tbInfo[1].noLyr;
24908 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
24909 * RETX TB Size is same as Init TX TB Size */
24910 if (allocInfo->tbInfo[1].tbCb->txCntr)
24912 allocInfo->tbInfo[1].bytesAlloc =
24913 allocInfo->tbInfo[1].bytesReq;
24917 allocInfo->tbInfo[1].bytesAlloc =
24918 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24919 /* DwPts Scheduling Changes Start */
24921 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24923 allocInfo->tbInfo[1].bytesAlloc =
24924 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
24927 /* DwPts Scheduling Changes End */
24931 /* The last RBG which can be smaller than the RBG size is considered
24932 * only for the first time allocation of TYPE0 UE */
24933 dlSf->lstRbgDfct = 0;
24940 * @brief To prepare RNTP value from the PRB allocation (P-High -> 1 and P-Low -> 0)
24944 * Function : rgSCHCmnBuildRntpInfo
24946 * Processing Steps:
24948 * @param[in] uint8_t *rntpPtr
24949 * @param[in] uint8_t startRb
24950 * @param[in] uint8_t numRb
/*
 * rgSCHCmnBuildRntpInfo: sets the bits [startRb, startRb+nmbRb) in the
 * RNTP (Relative Narrowband Tx Power) bitmap 'rntpPtr' (one bit per PRB,
 * packed 8 PRBs per byte). Walks byte by byte from the byte containing
 * startRb to the byte containing the last PRB, handling the same-byte
 * and byte-spanning cases separately. NOTE(review): the loop-closing
 * lines and return statements are elided in this view.
 */
24956 PRIVATE S16 rgSCHCmnBuildRntpInfo
24965 PRIVATE S16 rgSCHCmnBuildRntpInfo(cell, rntpPtr, startRb, nmbRb, bw)
24973 uint16_t rbPtrStartIdx; /* Start Index of Octet Buffer to be filled */
24974 uint16_t rbPtrEndIdx; /* End Index of Octet Buffer to be filled */
24975 uint16_t rbBitLoc; /* Bit Location to be set as 1 in the current Byte */
24976 uint16_t nmbRbPerByte; /* PRB's to be set in the current Byte (in case of multiple Bytes) */
24979 rbPtrStartIdx = (startRb)/8;
24980 rbPtrEndIdx = (startRb + nmbRb)/8;
24982 if (rntpPtr == NULLP)
24984 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
24985 "rgSCHCmnBuildRntpInfo():"
24986 "rntpPtr can't be NULLP (Memory Allocation Failed)");
24990 while(rbPtrStartIdx <= rbPtrEndIdx)
24992 rbBitLoc = (startRb)%8;
24994 /* case 1: startRb and endRb lies in same Byte */
24995 if (rbPtrStartIdx == rbPtrEndIdx)
24997 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
24998 | (((1<<nmbRb)-1)<<rbBitLoc);
25001 /* case 2: startRb and endRb lies in different Byte */
25002 if (rbPtrStartIdx != rbPtrEndIdx)
25004 nmbRbPerByte = 8 - rbBitLoc;
25005 nmbRb = nmbRb - nmbRbPerByte;
25006 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
25007 | (((1<<nmbRbPerByte)-1)<<rbBitLoc);
25008 startRb = startRb + nmbRbPerByte;
25014 /* dsfr_pal_fixes ** 21-March-2013 ** SKS ** Adding Debug logs */
25016 /* dsfr_pal_fixes ** 25-March-2013 ** SKS ** Adding Debug logs to print RNTP */
25022 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
25026 * Function : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
25028 * Processing Steps:
25030 * @param[in] RgSchCellCb *cell
25031 * @param[in] RgSchDlSf *dlSf
25032 * @param[in] uint8_t rbStrt
25033 * @param[in] uint8_t numRb
/*
 * rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc: SFR-pool variant of the TYPE2
 * post-allocation update. Advances both the subframe-level and the
 * selected pool's type2Start/type2End/bwAlloced pivots past 'numRb' PRBs
 * allocated at 'rbStrt'. When DSFR is enabled, additionally either flags
 * a cell-centre UE as high-power (if the allocation lies in the pool's
 * pwrHiCCRange) or records the allocation in the RNTP bitmap via
 * rgSCHCmnBuildRntpInfo(). NOTE(review): some lines (braces, returns,
 * the else-branch structure) are elided in this view.
 */
25038 PRIVATE S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
25043 RgSchSFRPoolInfo *sfrPool,
25048 PRIVATE S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrPool, rbStrt, numRb)
25052 RgSchSFRPoolInfo *sfrPool;
/* Advance subframe-level pivots first */
25061 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
25062 sfrPool->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
25065 dlSf->type2Start += numRb;
25066 dlSf->bwAlloced += numRb;
/* DSFR-specific handling: power class vs. RNTP bookkeeping */
25068 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
25070 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
25071 if(FALSE == ue->lteAdvUeCb.rgrLteAdvUeCfg.isUeCellEdge)
25073 if((sfrPool->type2Start >= sfrPool->pwrHiCCRange.startRb) &&
25074 (sfrPool->type2Start + numRb < sfrPool->pwrHiCCRange.endRb))
25076 ue->lteAdvUeCb.isCCUePHigh = TRUE;
25078 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
25079 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
25082 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
25083 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
25090 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
25091 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
25094 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
25095 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Finally advance the pool-level pivots */
25100 sfrPool->type2Start += numRb;
25101 sfrPool->bwAlloced += numRb;
25108 * @brief To do DL allocation using TYPE0 RA.
25112 * Function : rgSCHCmnNonDlfsSFRPoolType0Alloc
25114 * Processing Steps:
25115 * - Perform TYPE0 allocation using the RBGs between type0End and type2End.
25116 * - Build the allocation mask as per RBG positioning.
25117 * - Update the allocation parameters.
25119 * @param[in] RgSchCellCb *cell
25120 * @param[in] RgSchDlSf *dlSf
25121 * @param[in] RgSchDlRbAlloc *allocInfo
/*
 * rgSCHCmnNonDlfsSFRPoolType0Alloc: TYPE0 (RBG-granular) DL allocation out
 * of a specific SFR pool. Mirrors rgSCHCmnNonDlfsType0Alloc but books the
 * RBGs against the pool's type0End/bwAlloced in addition to the subframe's.
 * The last-RBG deficit is absorbed only when this pool is the one that
 * touches the end of the system bandwidth. NOTE(review): several lines
 * (braces, else-arms) are elided in this view.
 */
25126 PRIVATE Void rgSCHCmnNonDlfsSFRPoolType0Alloc
25130 RgSchSFRPoolInfo *poolInfo,
25131 RgSchDlRbAlloc *allocInfo
25134 PRIVATE Void rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, poolInfo, allocInfo)
25137 RgSchSFRPoolInfo *poolInfo;
25138 RgSchDlRbAlloc *allocInfo;
25141 uint32_t dlAllocMsk = 0;
25142 uint8_t rbgFiller = 0;
25143 uint8_t noRbgs = 0;
/* Only the pool that ends at the system BW edge owns the short last RBG */
25149 if (poolInfo->poolstartRB + poolInfo->bw == dlSf->bw)
25151 if (poolInfo->type0End == dlSf->bw/4)
25153 rbgFiller = dlSf->lstRbgDfct;
25154 /* The last RBG which can be smaller than the RBG size is considered
25155 * only for the first time allocation of TYPE0 UE */
25156 dlSf->lstRbgDfct = 0;
25160 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
25162 /* Abhinav to-do start */
25163 /* MS_FIX for ccpu00123919*/
25164 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Clip if the rounded-up allocation would exceed the subframe BW */
25165 if (dlSf->bwAlloced + noRbs > dlSf->bw)
25171 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
25173 /* Abhinav to-do end */
25177 /* type0End would have been initially (during subfrm Init) at the bit position
25178 * (cell->noOfRbgs - 1), 0 being the most significant.
25179 * Getting DlAllocMsk for noRbgs and at the appropriate position */
25180 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - poolInfo->type0End));
25181 /* Move backwards the type0End pivot */
25182 poolInfo->type0End -= noRbgs;
25183 /*MS_FIX for ccpu00123919*/
25184 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
25185 /* Update the bwAlloced field accordingly */
25186 poolInfo->bwAlloced += noRbs + dlSf->lstRbgDfct;
25187 dlSf->bwAlloced += noRbs + dlSf->lstRbgDfct;
25189 /* Update Type0 Alloc Info */
25190 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
25191 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
25192 allocInfo->rbsAlloc = noRbs;
25194 /* Update Tb info for each scheduled TB */
25195 iTbs = allocInfo->tbInfo[0].iTbs;
25196 noLyr = allocInfo->tbInfo[0].noLyr;
25197 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
25198 * RETX TB Size is same as Init TX TB Size */
25199 if (allocInfo->tbInfo[0].tbCb->txCntr)
25201 allocInfo->tbInfo[0].bytesAlloc =
25202 allocInfo->tbInfo[0].bytesReq;
25206 allocInfo->tbInfo[0].bytesAlloc =
25207 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
25210 if (allocInfo->tbInfo[1].schdlngForTb)
25212 iTbs = allocInfo->tbInfo[1].iTbs;
25213 noLyr = allocInfo->tbInfo[1].noLyr;
25214 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
25215 * RETX TB Size is same as Init TX TB Size */
25216 if (allocInfo->tbInfo[1].tbCb->txCntr)
25218 allocInfo->tbInfo[1].bytesAlloc =
25219 allocInfo->tbInfo[1].bytesReq;
25223 allocInfo->tbInfo[1].bytesAlloc =
25224 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
25228 /* The last RBG which can be smaller than the RBG size is considered
25229 * only for the first time allocation of TYPE0 UE */
25230 dlSf->lstRbgDfct = 0;
25235 * @brief Computes RNTP Info for a subframe.
25239 * Function : rgSCHCmnNonDlfsDsfrRntpComp
25241 * Processing Steps:
25242 * - Computes RNTP info from individual pools.
25244 * @param[in] RgSchDlSf *dlSf
/*
 * rgSCHCmnNonDlfsDsfrRntpComp: aggregates per-TTI RNTP bitmaps. Each call
 * ORs the subframe's RNTP bitmap into the cell-level accumulator; after
 * RG_SCH_MAX_RNTP_SAMPLES calls (per the comment below, ~1000 ms) the
 * accumulated bitmap is packaged as an RGR_SFR load-information indication
 * and sent up via rgSCHUtlRgrLoadInfInd(), then the accumulator is reset.
 * NOTE(review): 'samples' is declared PRIVATE (static), so the sample count
 * persists across calls. Some lines are elided in this view.
 */
25250 PRIVATE void rgSCHCmnNonDlfsDsfrRntpComp
25256 PRIVATE void rgSCHCmnNonDlfsDsfrRntpComp(cell, dlSf)
25261 PRIVATE uint16_t samples = 0;
25263 uint16_t bwBytes = (dlSf->bw-1)/8;
25264 RgrLoadInfIndInfo *rgrLoadInf;
25266 uint16_t ret = ROK;
/* Number of bytes needed to hold one bit per PRB */
25269 len = (dlSf->bw % 8 == 0) ? dlSf->bw/8 : dlSf->bw/8 + 1;
25271 /* RNTP info is ORed every TTI and the sample is stored in cell control block */
25272 for(i = 0; i <= bwBytes; i++)
25274 cell->rntpAggrInfo.val[i] |= dlSf->rntpInfo.val[i];
25276 samples = samples + 1;
25277 /* After every 1000 ms, the RNTP info will be sent to application to be further sent to all neighbouring eNB
25278 informing them about the load indication for cell edge users */
25279 if(RG_SCH_MAX_RNTP_SAMPLES == samples)
25282 ret = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&rgrLoadInf,
25283 sizeof(RgrLoadInfIndInfo));
25286 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
25287 "allocate memory for sending LoadInfo");
/* Fill the load-information indication from the aggregated bitmap */
25291 rgrLoadInf->u.rntpInfo.pres = cell->rntpAggrInfo.pres;
25292 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
25293 rgrLoadInf->u.rntpInfo.len = len;
25295 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
25296 rgrLoadInf->u.rntpInfo.val = cell->rntpAggrInfo.val;
25297 rgrLoadInf->cellId = cell->cellId;
25299 /* dsfr_pal_fixes ** 22-March-2013 ** SKS */
25300 rgrLoadInf->bw = dlSf->bw;
25301 rgrLoadInf->type = RGR_SFR;
25303 ret = rgSCHUtlRgrLoadInfInd(cell, rgrLoadInf);
25306 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsDsfrRntpComp():"
25307 "rgSCHUtlRgrLoadInfInd() returned RFAILED");
/* Start a fresh aggregation window */
25310 memset(cell->rntpAggrInfo.val,0,len);
25314 /* LTE_ADV_FLAG_REMOVED_END */
25316 /* LTE_ADV_FLAG_REMOVED_START */
25318 * @brief Performs RB allocation per UE from a pool.
25322 * Function : rgSCHCmnSFRNonDlfsUeRbAlloc
25324 * Processing Steps:
25325 * - Allocate consecutively available RBs.
25327 * @param[in] RgSchCellCb *cell
25328 * @param[in] RgSchUeCb *ue
25329 * @param[in] RgSchDlSf *dlSf
25330 * @param[out] uint8_t *isDlBwAvail
/*
 * rgSCHCmnSFRNonDlfsUeRbAlloc: per-UE non-DLFS RB allocation with SFR.
 * Picks the SFR pool (CE or CC) that can serve this UE via
 * rgSCHCmnNonDlfsSFRBwAvlbl(), allocates a PDCCH, then performs the RB
 * allocation from the chosen pool (TYPE2 contiguous or TYPE0 RBG-based)
 * and updates the pool/subframe bookkeeping. *isDlBwAvail tells the caller
 * whether further DL BW remains. NOTE(review): several lines (braces,
 * returns, the CE/CC failure split) are elided in this view.
 */
25338 PRIVATE S16 rgSCHCmnSFRNonDlfsUeRbAlloc
25343 uint8_t *isDlBwAvail
25346 PRIVATE S16 rgSCHCmnSFRNonDlfsUeRbAlloc(cell, ue, dlSf, isDlBwAvail)
25350 uint8_t *isDlBwAvail;
25353 RgSchDlRbAlloc *allocInfo;
25354 RgSchCmnDlUe *dlUe;
25356 RgSchSFRPoolInfo *sfrpoolInfo = NULLP;
25359 isUECellEdge = RG_SCH_CMN_IS_UE_CELL_EDGE(ue);
25361 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
25362 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25363 *isDlBwAvail = TRUE;
25365 /*Find which pool is available for this UE*/
25366 if (rgSCHCmnNonDlfsSFRBwAvlbl(cell, &sfrpoolInfo, dlSf, allocInfo, isUECellEdge) != TRUE)
25368 /* SFR_FIX - If this is CE UE there may be BW available in CC Pool
25369 So CC UEs will be scheduled */
25372 *isDlBwAvail = TRUE;
25376 *isDlBwAvail = FALSE;
/* PDCCH allocation; DTX feedback on either TB forces the TRUE variant */
25381 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX || dlUe->proc->tbInfo[1].isAckNackDtx)
25383 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
25387 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
25390 if (!(allocInfo->pdcch))
25392 /* Returning ROK since PDCCH might be available for another UE and further allocations could be done */
25397 allocInfo->rnti = ue->ueId;
/* TYPE2: take a contiguous run starting at the pool's type2Start */
25400 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
25402 allocInfo->allocInfo.raType2.isLocal = TRUE;
25403 /* rg004.201 patch - ccpu00109921 fix end */
25404 /* MS_FIX for ccpu00123918*/
25405 allocInfo->allocInfo.raType2.rbStart = (uint8_t)sfrpoolInfo->type2Start;
25406 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25407 /* rg007.201 - Changes for MIMO feature addition */
25408 /* rg008.201 - Removed dependency on MIMO compile-time flag */
25409 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrpoolInfo, \
25410 allocInfo->allocInfo.raType2.rbStart, \
25411 allocInfo->allocInfo.raType2.numRb);
25412 allocInfo->rbsAlloc = allocInfo->rbsReq;
25413 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* TYPE0: delegate to the pool-aware RBG allocator */
25415 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
25417 rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, sfrpoolInfo, allocInfo);
25421 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,0);
25422 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
25424 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,1);
25429 #if defined(LTEMAC_SPS)
25430 /* Update the sub-frame with new allocation */
25431 dlSf->bwAlloced += allocInfo->rbsReq;
25437 /* LTE_ADV_FLAG_REMOVED_END */
25438 #endif /* LTE_TDD */
25441 * @brief Performs RB allocation per UE for frequency non-selective cell.
25445 * Function : rgSCHCmnNonDlfsUeRbAlloc
25447 * Processing Steps:
25448 * - Allocate consecutively available RBs.
25450 * @param[in] RgSchCellCb *cell
25451 * @param[in] RgSchUeCb *ue
25452 * @param[in] RgSchDlSf *dlSf
25453 * @param[out] uint8_t *isDlBwAvail
/*
 * rgSCHCmnNonDlfsUeRbAlloc: per-UE RB allocation for a frequency
 * non-selective cell (5GTF/beam variant in this build). Validates the
 * beam's VRBG budget, allocates a PDCCH (DTX feedback on either TB forces
 * the TRUE variant), then fills the DL grant: VRBG start/count, RIV via
 * rgSCHCmnCalcRiv(), and the PRB equivalents, finally charging the VRBGs
 * to the beam. *isDlBwAvail reports remaining DL bandwidth to the caller.
 * NOTE(review): several lines (braces, returns) are elided in this view.
 */
25460 PRIVATE S16 rgSCHCmnNonDlfsUeRbAlloc
25465 uint8_t *isDlBwAvail
25468 PRIVATE S16 rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, isDlBwAvail)
25472 uint8_t *isDlBwAvail;
25475 RgSchDlRbAlloc *allocInfo;
25476 RgSchCmnDlUe *dlUe;
25478 uint32_t dbgRbsReq = 0;
25482 RgSch5gtfUeCb *ue5gtfCb = &(ue->ue5gtfCb);
25483 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[ue5gtfCb->BeamId]);
25485 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
25486 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25487 *isDlBwAvail = TRUE;
/* Sanity check: the beam must not already exceed its VRBG budget */
25489 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
25491 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25492 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
25494 printf("5GTF_ERROR vrbg allocated > 25\n");
25498 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX
25499 || dlUe->proc->tbInfo[1].isAckNackDtx)
25501 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
25505 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
25507 if (!(allocInfo->pdcch))
25509 /* Returning ROK since PDCCH might be available for another UE and
25510 * further allocations could be done */
25511 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25512 "5GTF_ERROR : PDCCH allocation failed :ue (%u)",
25514 printf("5GTF_ERROR PDCCH allocation failed\n");
25518 //maxPrb = RGSCH_MIN((allocInfo->vrbgReq * MAX_5GTF_VRBG_SIZE), ue5gtfCb->maxPrb);
25519 //maxPrb = RGSCH_MIN(maxPrb,
25520 //((beamInfo->totVrbgAvail - beamInfo->vrbgStart)* MAX_5GTF_VRBG_SIZE)));
25521 //TODO_SID Need to check for vrbg available after scheduling for same beam.
/* Populate the DL grant with the VRBG span granted to this UE */
25522 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
25523 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
25524 //TODO_SID: Setting for max TP
25525 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
25526 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
25527 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
25528 allocInfo->tbInfo[0].tbCb->dlGrnt.SCID = 0;
25529 allocInfo->tbInfo[0].tbCb->dlGrnt.dciFormat = allocInfo->dciFormat;
25530 //Filling temporarily
25531 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
25532 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Charge the granted VRBGs to the beam's running totals */
25534 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25535 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25536 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25544 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
25548 * Function : rgSCHCmnNonDlfsCcchSduAlloc
25550 * Processing Steps:
25551 * - For each element in the list, Call rgSCHCmnNonDlfsCcchSduRbAlloc().
25552 * - If allocation is successful, add the ueCb to scheduled list of CCCH
25554 * - else, add UeCb to non-scheduled list.
25556 * @param[in] RgSchCellCb *cell
25557 * @param[in, out] RgSchCmnCcchSduRbAlloc *allocInfo
25558 * @param[in] uint8_t isRetx
/*
 * rgSCHCmnNonDlfsCcchSduAlloc: RB allocation for queued CCCH SDUs in a
 * frequency non-selective cell. Selects the RETX or new-TX list triple
 * based on 'isRetx', then iterates the pending HARQ processes, calling
 * rgSCHCmnNonDlfsCcchSduRbAlloc() for each. On success the process is
 * moved to the scheduled list; on the first failure the current and all
 * remaining entries are appended to the non-scheduled list (presumably
 * followed by a return — the closing lines are elided in this view).
 */
25563 PRIVATE Void rgSCHCmnNonDlfsCcchSduAlloc
25566 RgSchCmnCcchSduRbAlloc *allocInfo,
25570 PRIVATE Void rgSCHCmnNonDlfsCcchSduAlloc(cell, allocInfo, isRetx)
25572 RgSchCmnCcchSduRbAlloc *allocInfo;
25577 CmLListCp *ccchSduLst = NULLP;
25578 CmLListCp *schdCcchSduLst = NULLP;
25579 CmLListCp *nonSchdCcchSduLst = NULLP;
25580 CmLList *schdLnkNode = NULLP;
25581 CmLList *toBeSchdLnk = NULLP;
25582 RgSchDlSf *dlSf = allocInfo->ccchSduDlSf;
25583 RgSchUeCb *ueCb = NULLP;
25584 RgSchDlHqProcCb *hqP = NULLP;
25588 /* Initialize re-transmitting lists */
25589 ccchSduLst = &(allocInfo->ccchSduRetxLst);
25590 schdCcchSduLst = &(allocInfo->schdCcchSduRetxLst);
25591 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduRetxLst);
25595 /* Initialize transmitting lists */
25596 ccchSduLst = &(allocInfo->ccchSduTxLst);
25597 schdCcchSduLst = &(allocInfo->schdCcchSduTxLst);
25598 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduTxLst);
25601 /* Perform allocations for the list */
25602 toBeSchdLnk = cmLListFirst(ccchSduLst);
25603 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
25605 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25606 ueCb = hqP->hqE->ue;
25607 schdLnkNode = &hqP->schdLstLnk;
25608 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25609 ret = rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf);
25612 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
25613 * list and return */
25616 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25617 ueCb = hqP->hqE->ue;
25618 schdLnkNode = &hqP->schdLstLnk;
25619 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25620 cmLListAdd2Tail(nonSchdCcchSduLst, schdLnkNode);
25621 toBeSchdLnk = toBeSchdLnk->next;
25622 } while(toBeSchdLnk);
25626 /* Allocation successful: Add UE to the scheduled list */
25627 cmLListAdd2Tail(schdCcchSduLst, schdLnkNode);
25635 * @brief Performs RB allocation for CcchSdu for frequency non-selective cell.
25639 * Function : rgSCHCmnNonDlfsCcchSduRbAlloc
25641 * Processing Steps:
25643 * - Allocate consecutively available RBs
25645 * @param[in] RgSchCellCb *cell
25646 * @param[in] RgSchUeCb *ueCb
25647 * @param[in] RgSchDlSf *dlSf
25653 PRIVATE S16 rgSCHCmnNonDlfsCcchSduRbAlloc
25660 PRIVATE S16 rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf)
/* Performs a Type-2 (contiguous) RB allocation for one CCCH SDU in dlSf:
 * checks remaining bandwidth, reserves a PDCCH (DCI format 1A), then fills
 * the UE's RgSchDlRbAlloc and updates the subframe allocation state.
 * Returns RFAILED when bandwidth or PDCCH is unavailable. */
25666 RgSchDlRbAlloc *allocInfo;
25667 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
25671 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb,cell);
25673 /* [ccpu00138802]-MOD-If Bw is less than required, return fail
25674 It will be allocated in next TTI */
25676 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
25677 (dlSf->bwAlloced == dlSf->bw))
25679 if((dlSf->bwAlloced == dlSf->bw) ||
25680 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
25685 /* Retrieve PDCCH */
25686 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
25687 if (ueDl->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
25689 /* allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, dlSf, y, ueDl->cqi,
25690 * TFU_DCI_FORMAT_1A, TRUE);*/
25691 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, TRUE);
25695 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE);
25697 if (!(allocInfo->pdcch))
25699 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
25703 /* Update allocation information */
25704 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25705 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25706 allocInfo->allocInfo.raType2.isLocal = TRUE;
25708 /*Fix for ccpu00123918*/
25709 /* Push this harq process back to the free queue */
25710 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
25711 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25712 allocInfo->rbsAlloc = allocInfo->rbsReq;
25713 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25714 /* Update the sub-frame with new allocation */
25716 /* LTE_ADV_FLAG_REMOVED_START */
25718 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
25720 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf,
25721 allocInfo->allocInfo.raType2.rbStart,
25722 allocInfo->allocInfo.raType2.numRb);
25725 #endif /* end of ifndef LTE_TDD*/
25727 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf,
25728 allocInfo->allocInfo.raType2.rbStart,
25729 allocInfo->allocInfo.raType2.numRb);
25732 /* LTE_ADV_FLAG_REMOVED_END */
25733 /* ccpu00131941 - bwAlloced is updated from SPS bandwidth */
25741 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
25745 * Function : rgSCHCmnNonDlfsMsg4RbAlloc
25747 * Processing Steps:
25749 * - Allocate consecutively available RBs
25751 * @param[in] RgSchCellCb *cell
25752 * @param[in] RgSchRaCb *raCb
25753 * @param[in] RgSchDlSf *dlSf
25759 PRIVATE S16 rgSCHCmnNonDlfsMsg4RbAlloc
25766 PRIVATE S16 rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf)
/* Allocates RBs for one MSG4 (contention-resolution) transmission in dlSf.
 * 5GTF beam-0 path: sanity-checks the VRBG budget, reserves a PDCCH with
 * DCI format B1 and fills the DL grant (RIV, rbStrt/numRb derived from
 * VRBGs). Legacy path fills a Type-2 allocation with DCI format 1A.
 * Returns RFAILED when bandwidth or PDCCH is unavailable. */
25772 RgSchDlRbAlloc *allocInfo;
25775 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_RACB(raCb);
25778 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
25779 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
25781 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25782 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
25784 printf("5GTF_ERROR vrbg allocated > 25\n");
25789 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
25790 (dlSf->bwAlloced == dlSf->bw))
25792 if((dlSf->bwAlloced == dlSf->bw) ||
25793 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
25800 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
25801 if (raCb->dlHqE->msg4Proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
25803 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, TRUE);
25807 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, FALSE);
25809 if (!(allocInfo->pdcch))
25811 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
25816 /* SR_RACH_STATS : MSG4 TX Failed */
25817 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
25819 /* Update allocation information */
25820 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25821 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25822 allocInfo->allocInfo.raType2.isLocal = TRUE;
25825 /*Fix for ccpu00123918*/
25826 allocInfo->allocInfo.raType2.rbStart = (uint8_t)dlSf->type2Start;
25827 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25828 /* LTE_ADV_FLAG_REMOVED_START */
25830 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
25832 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
25833 allocInfo->allocInfo.raType2.rbStart, \
25834 allocInfo->allocInfo.raType2.numRb);
25837 #endif /* end of ifndef LTE_TDD */
25839 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
25840 allocInfo->allocInfo.raType2.rbStart, \
25841 allocInfo->allocInfo.raType2.numRb);
25843 /* LTE_ADV_FLAG_REMOVED_END */
25845 allocInfo->rbsAlloc = allocInfo->rbsReq;
25846 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25850 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
/* 5GTF grant: VRBG window starts where the beam's running cursor left off */
25852 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
25853 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
25855 /* Update allocation information */
25856 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
25858 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
25859 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
25860 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
/* Derive PRB start/count from the VRBG grant for lower layers */
25862 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
25863 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Advance the beam's VRBG bookkeeping past this grant */
25866 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25867 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25868 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25876 * @brief Performs RB allocation for Msg4 lists of frequency non-selective cell.
25880 * Function : rgSCHCmnNonDlfsMsg4Alloc
25882 * Processing Steps:
25883 * - For each element in the list, Call rgSCHCmnNonDlfsMsg4RbAlloc().
25884 * - If allocation is successful, add the raCb to scheduled list of MSG4.
25885 * - else, add RaCb to non-scheduled list.
25887 * @param[in] RgSchCellCb *cell
25888 * @param[in, out] RgSchCmnMsg4RbAlloc *allocInfo
25889 * @param[in] uint8_t isRetx
25894 PRIVATE Void rgSCHCmnNonDlfsMsg4Alloc
25897 RgSchCmnMsg4RbAlloc *allocInfo,
25901 PRIVATE Void rgSCHCmnNonDlfsMsg4Alloc(cell, allocInfo, isRetx)
25903 RgSchCmnMsg4RbAlloc *allocInfo;
/* Walks the MSG4 HARQ process list selected by isRetx (retransmission list
 * when TRUE, fresh-transmission list otherwise) and attempts a non-DLFS RB
 * allocation for each entry via rgSCHCmnNonDlfsMsg4RbAlloc(). Successful
 * entries go onto the scheduled list; on the first failure the remaining
 * entries are moved to the non-scheduled list. */
25908 CmLListCp *msg4Lst = NULLP;        /* list being processed */
25909 CmLListCp *schdMsg4Lst = NULLP;    /* destination on success */
25910 CmLListCp *nonSchdMsg4Lst = NULLP; /* destination on failure */
25911 CmLList *schdLnkNode = NULLP;
25912 CmLList *toBeSchdLnk = NULLP;
25913 RgSchDlSf *dlSf = allocInfo->msg4DlSf;
25914 RgSchRaCb *raCb = NULLP;
25915 RgSchDlHqProcCb *hqP = NULLP;
25919 /* Initialize re-transmitting lists */
25920 msg4Lst = &(allocInfo->msg4RetxLst);
25921 schdMsg4Lst = &(allocInfo->schdMsg4RetxLst);
25922 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4RetxLst);
25926 /* Initialize transmitting lists */
25927 msg4Lst = &(allocInfo->msg4TxLst);
25928 schdMsg4Lst = &(allocInfo->schdMsg4TxLst);
25929 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4TxLst);
25932 /* Perform allocations for the list */
25933 toBeSchdLnk = cmLListFirst(msg4Lst);
25934 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
25936 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25937 raCb = hqP->hqE->raCb;
25938 schdLnkNode = &hqP->schdLstLnk;
25939 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25940 ret = rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf);
25943 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
25944 * list and return */
25947 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25948 raCb = hqP->hqE->raCb;
25949 schdLnkNode = &hqP->schdLstLnk;
25950 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25951 cmLListAdd2Tail(nonSchdMsg4Lst, schdLnkNode);
25952 toBeSchdLnk = toBeSchdLnk->next;
25953 } while(toBeSchdLnk);
25957 /* Allocation successful: Add UE to the scheduled list */
25958 cmLListAdd2Tail(schdMsg4Lst, schdLnkNode);
25969 * @brief Performs RB allocation for the list of UEs of a frequency
25970 * non-selective cell.
25974 * Function : rgSCHCmnNonDlfsDedRbAlloc
25976 * Processing Steps:
25977 * - For each element in the list, Call rgSCHCmnNonDlfsUeRbAlloc().
25978 * - If allocation is successful, add the ueCb to scheduled list of UEs.
25979 * - else, add ueCb to non-scheduled list of UEs.
25981 * @param[in] RgSchCellCb *cell
25982 * @param[in, out] RgSchCmnUeRbAlloc *allocInfo
25983 * @param[in] CmLListCp *ueLst,
25984 * @param[in, out] CmLListCp *schdHqPLst,
25985 * @param[in, out] CmLListCp *nonSchdHqPLst
25990 Void rgSCHCmnNonDlfsDedRbAlloc
25993 RgSchCmnUeRbAlloc *allocInfo,
25995 CmLListCp *schdHqPLst,
25996 CmLListCp *nonSchdHqPLst
25999 Void rgSCHCmnNonDlfsDedRbAlloc(cell, allocInfo, ueLst,
26000 schdHqPLst, nonSchdHqPLst)
26002 RgSchCmnUeRbAlloc *allocInfo;
26004 CmLListCp *schdHqPLst;
26005 CmLListCp *nonSchdHqPLst;
/* Walks the dedicated-channel HARQ process list ueLst and attempts a
 * non-DLFS RB allocation per UE via rgSCHCmnNonDlfsUeRbAlloc(); successful
 * processes move to schdHqPLst, failed ones to nonSchdHqPLst. When
 * isDlBwAvail reports no DL bandwidth left, all remaining entries are
 * moved to the non-scheduled list. */
26009 CmLList *schdLnkNode = NULLP;
26010 CmLList *toBeSchdLnk = NULLP;
26011 RgSchDlSf *dlSf = allocInfo->dedDlSf;
26012 RgSchUeCb *ue = NULLP;
26013 RgSchDlHqProcCb *hqP = NULLP;
26014 uint8_t isDlBwAvail; /* set by rgSCHCmnNonDlfsUeRbAlloc() */
26017 /* Perform allocations for the list */
26018 toBeSchdLnk = cmLListFirst(ueLst);
26019 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
26021 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
26023 schdLnkNode = &hqP->schdLstLnk;
26024 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
26026 ret = rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, &isDlBwAvail);
26029 /* Allocation failed: Add remaining UEs to non-scheduled
26030 * list and return */
26033 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
26035 schdLnkNode = &hqP->schdLstLnk;
26036 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
26037 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
26038 toBeSchdLnk = toBeSchdLnk->next;
26039 } while(toBeSchdLnk);
26045 #if defined (TENB_STATS) && defined (RG_5GTF)
26046 cell->tenbStats->sch.dl5gtfRbAllocPass++;
26048 /* Allocation successful: Add UE to the scheduled list */
26049 cmLListAdd2Tail(schdHqPLst, schdLnkNode);
26053 #if defined (TENB_STATS) && defined (RG_5GTF)
26054 cell->tenbStats->sch.dl5gtfRbAllocFail++;
26056 /* Allocation failed : Add UE to the non-scheduled list */
26057 printf("5GTF_ERROR Dl rb alloc failed adding nonSchdHqPLst\n");
26058 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
26066 * @brief Handles RB allocation for frequency non-selective cell.
26070 * Function : rgSCHCmnNonDlfsRbAlloc
26072 * Invoking Module Processing:
26073 * - SCH shall invoke this if downlink frequency selective is disabled for
26074 * the cell for RB allocation.
26075 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
26076 * estimate and subframe for each allocation to be made to SCH.
26078 * Processing Steps:
26079 * - Allocate sequentially for common channels.
26080 * - For transmitting and re-transmitting UE list.
26082 * - Perform wide-band allocations for UE in increasing order of
26084 * - Determine Imcs for the allocation.
26085 * - Determine RA type.
26086 * - Determine DCI format.
26088 * @param[in] RgSchCellCb *cell
26089 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
26094 Void rgSCHCmnNonDlfsRbAlloc
26097 RgSchCmnDlRbAllocInfo *allocInfo
26100 Void rgSCHCmnNonDlfsRbAlloc(cell, allocInfo)
26102 RgSchCmnDlRbAllocInfo *allocInfo;
/* Top-level non-DLFS DL allocator for one TTI. Ordering is by priority:
 * MSG4 retx, MSG4 tx, CCCH-SDU retx, CCCH-SDU tx, RA responses, then
 * dedicated traffic (tx+retx, retx-only, tx-only). Finally sanity-checks
 * the scheduled-UE count against maxUePerDlSf and runs DSFR RNTP
 * compression when enabled. */
26105 uint8_t raRspCnt = 0;
26106 RgSchDlRbAlloc *reqAllocInfo;
26108 /* Allocate for MSG4 retransmissions */
26109 if (allocInfo->msg4Alloc.msg4RetxLst.count)
26111 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc RetxLst\n");
26112 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), TRUE);
26115 /* Allocate for MSG4 transmissions */
26116 /* Assuming all the nodes in the list need allocations: rbsReq is valid */
26117 if (allocInfo->msg4Alloc.msg4TxLst.count)
26119 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc txLst\n");
26120 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), FALSE);
26123 /* Allocate for CCCH SDU (received after guard timer expiry)
26124 * retransmissions */
26125 if (allocInfo->ccchSduAlloc.ccchSduRetxLst.count)
26127 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
26128 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), TRUE);
26131 /* Allocate for CCCH SDU transmissions */
26132 /* Allocate for CCCH SDU (received after guard timer expiry) transmissions */
26133 if (allocInfo->ccchSduAlloc.ccchSduTxLst.count)
26135 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
26136 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), FALSE);
26140 /* Allocate for Random access response */
26141 for (raRspCnt = 0; raRspCnt < RG_SCH_CMN_MAX_CMN_PDCCH; ++raRspCnt)
26143 /* Assuming that the requests will be filled in sequentially */
26144 reqAllocInfo = &(allocInfo->raRspAlloc[raRspCnt]);
26145 if (!reqAllocInfo->rbsReq)
26149 printf("5GTF_ERROR calling RAR rgSCHCmnNonDlfsCmnRbAlloc\n");
26150 // if ((rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo)) != ROK)
26151 if ((rgSCHCmnNonDlfsCmnRbAllocRar(cell, reqAllocInfo)) != ROK)
26157 /* Allocate for RETX+TX UEs */
26158 if(allocInfo->dedAlloc.txRetxHqPLst.count)
26160 printf("5GTF_ERROR TX RETX rgSCHCmnNonDlfsDedRbAlloc\n");
26161 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26162 &(allocInfo->dedAlloc.txRetxHqPLst),
26163 &(allocInfo->dedAlloc.schdTxRetxHqPLst),
26164 &(allocInfo->dedAlloc.nonSchdTxRetxHqPLst));
26167 if((allocInfo->dedAlloc.retxHqPLst.count))
26169 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26170 &(allocInfo->dedAlloc.retxHqPLst),
26171 &(allocInfo->dedAlloc.schdRetxHqPLst),
26172 &(allocInfo->dedAlloc.nonSchdRetxHqPLst));
26175 /* Allocate for transmitting UEs */
26176 if((allocInfo->dedAlloc.txHqPLst.count))
26178 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26179 &(allocInfo->dedAlloc.txHqPLst),
26180 &(allocInfo->dedAlloc.schdTxHqPLst),
26181 &(allocInfo->dedAlloc.nonSchdTxHqPLst));
26184 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cell);
26185 if ((allocInfo->dedAlloc.txRetxHqPLst.count +
26186 allocInfo->dedAlloc.retxHqPLst.count +
26187 allocInfo->dedAlloc.txHqPLst.count) >
26188 cmnCell->dl.maxUePerDlSf)
/* Two log variants because CmLList counts print as %ld on 32-bit builds */
26190 #ifndef ALIGN_64BIT
26191 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
26192 " scheduler exceed maximumUePerDlSf(%u)tx-retx %ld retx %ld tx %ld\n",
26193 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
26194 allocInfo->dedAlloc.retxHqPLst.count,
26195 allocInfo->dedAlloc.txHqPLst.count));
26197 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
26198 " scheduler exceed maximumUePerDlSf(%u)tx-retx %d retx %d tx %d\n",
26199 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
26200 allocInfo->dedAlloc.retxHqPLst.count,
26201 allocInfo->dedAlloc.txHqPLst.count));
26206 /* LTE_ADV_FLAG_REMOVED_START */
26207 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
26209 printf("5GTF_ERROR RETX rgSCHCmnNonDlfsDsfrRntpComp\n");
26210 rgSCHCmnNonDlfsDsfrRntpComp(cell, allocInfo->dedAlloc.dedDlSf);
26212 /* LTE_ADV_FLAG_REMOVED_END */
26213 #endif /* LTE_TDD */
26217 /***********************************************************
26219 * Func : rgSCHCmnCalcRiv
26221 * Desc : This function calculates RIV.
26227 * File : rg_sch_cmn.c
26229 **********************************************************/
/***********************************************************
 *
 *     Func : rgSCHCmnCalcRiv
 *
 *     Desc : Computes the Resource Indication Value (RIV) encoding a
 *            contiguous (Type-2) resource allocation per 3GPP TS 36.213:
 *              if (numRb-1) <= floor(bw/2):
 *                  RIV = bw*(numRb-1) + rbStart
 *              else:
 *                  RIV = bw*(bw-numRb+1) + (bw-1-rbStart)
 *
 *     Ret  : The computed RIV.
 *
 *     Notes: bw is the bandwidth in RBs (N_RB), rbStart the first RB of
 *            the allocation, numRb (>= 1) the number of contiguous RBs.
 *            Callers pass VRBG-domain values in the 5GTF path.
 *
 *     File : rg_sch_cmn.c
 *
 **********************************************************/
uint32_t rgSCHCmnCalcRiv
(
uint8_t bw,
uint8_t rbStart,
uint8_t numRb
)
{
   uint8_t  numRbMinus1 = numRb - 1; /* L_CRBs - 1 in spec terms */
   uint32_t riv;

   if (numRbMinus1 <= bw/2)
   {
      riv = bw * numRbMinus1 + rbStart;
   }
   else
   {
      /* Mirrored encoding for allocations longer than half the bandwidth */
      riv = bw * (bw - numRbMinus1) + (bw - rbStart - 1);
   }
   return (riv);
} /* rgSCHCmnCalcRiv */
26277 * @brief This function allocates and copies the RACH response scheduling
26278 * related information into cell control block.
26282 * Function: rgSCHCmnDlCpyRachInfo
26283 * Purpose: This function allocates and copies the RACH response
26284 * scheduling related information into cell control block
26285 * for each DL subframe.
26288 * Invoked by: Scheduler
26290 * @param[in] RgSchCellCb* cell
26291 * @param[in] RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES]
26292 * @param[in] uint8_t raArrSz
26297 PRIVATE S16 rgSCHCmnDlCpyRachInfo
26300 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
26304 PRIVATE S16 rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz)
26306 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES];
/* TDD only: allocates cell->rachRspLst (one entry per DL subframe in a
 * radio frame for the configured UL/DL config) and copies both the RACH
 * response occasions and their deletion info from rachRspLst into it. */
26310 uint8_t ulDlCfgIdx = cell->ulDlCfgIdx;
26313 uint16_t subfrmIdx;
26315 uint8_t numSubfrms;
26320 /* Allocate RACH response information for each DL
26321 * subframe in a radio frame */
26322 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cell->rachRspLst,
26323 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1] *
26324 sizeof(RgSchTddRachRspLst));
26330 for(sfnIdx=raArrSz-1; sfnIdx>=0; sfnIdx--)
26332 for(subfrmIdx=0; subfrmIdx < RGSCH_NUM_SUB_FRAMES; subfrmIdx++)
/* NOTE(review): the loop variable is overwritten by the table lookup
 * below; elided lines presumably restore/advance it — verify against
 * the full source before touching this loop. */
26334 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
26335 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
26340 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx],subfrmIdx);
26342 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
26344 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddNumDlSubfrmTbl[ulDlCfgIdx],subfrmIdx);
26345 sfNum = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][subfrmIdx]-1;
26346 numRfs = cell->rachRspLst[sfNum].numRadiofrms;
26347 /* For each DL subframe in which RACH response can
26348 * be sent is updated */
26351 cell->rachRspLst[sfNum].rachRsp[numRfs].sfnOffset =
26352 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset;
26353 for(sfcount=0; sfcount < numSubfrms; sfcount++)
26355 cell->rachRspLst[sfNum].rachRsp[numRfs].\
26356 subframe[sfcount] =
26357 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].\
26360 cell->rachRspLst[sfNum].rachRsp[numRfs].numSubfrms =
26361 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
26362 cell->rachRspLst[sfNum].numRadiofrms++;
26365 /* Copy the subframes to be deleted at this subframe */
26367 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
26370 cell->rachRspLst[sfNum].delInfo.sfnOffset =
26371 rachRspLst[sfnIdx][subfrmIdx].delInfo.sfnOffset;
26372 for(sfcount=0; sfcount < numSubfrms; sfcount++)
26374 cell->rachRspLst[sfNum].delInfo.subframe[sfcount] =
26375 rachRspLst[sfnIdx][subfrmIdx].delInfo.subframe[sfcount];
26377 cell->rachRspLst[sfNum].delInfo.numSubfrms =
26378 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
26386 * @brief This function determines the iTbs based on the new CFI,
26387 * CQI and BLER based delta iTbs
26391 * Function: rgSchCmnFetchItbs
26392 * Purpose: Fetch the new iTbs when CFI changes.
26394 * @param[in] RgSchCellCb *cell
26395 * @param[in] RgSchCmnDlUe *ueDl
26396 * @param[in] uint8_t cqi
26403 PRIVATE S32 rgSchCmnFetchItbs
26406 RgSchCmnDlUe *ueDl,
26414 PRIVATE S32 rgSchCmnFetchItbs (cell, ueDl, subFrm, cqi, cfi, cwIdx, noLyr)
26416 RgSchCmnDlUe *ueDl;
26425 PRIVATE S32 rgSchCmnFetchItbs
26428 RgSchCmnDlUe *ueDl,
26435 PRIVATE S32 rgSchCmnFetchItbs (cell, ueDl, cqi, cfi, cwIdx, noLyr)
26437 RgSchCmnDlUe *ueDl;
/* Returns the iTbs for the given codeword/layer: CQI-to-TBS table lookup
 * adjusted by the BLER-driven deltaiTbs, clamped to the cell's max DL
 * iTbs. Special subframes capped as if CFI were 2; on a CFI change the
 * cached per-layer iTbs is refreshed and deltaiTbs reset. */
26446 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26451 /* Special Handling for Spl Sf when CFI is 3 as
26452 * CFI in Spl Sf will be max 2 */
26453 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26455 if((cellDl->currCfi == 3) ||
26456 ((cell->bwCfg.dlTotalBw <= 10) && (cellDl->currCfi == 1)))
26458 /* Use CFI 2 in this case */
/* deltaiTbs is in hundredths of an iTbs step, hence the *100/100 scaling */
26459 iTbs = (ueDl->laCb[cwIdx].deltaiTbs +
26460 ((*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][2]))[cqi])* 100)/100;
26462 RG_SCH_CHK_ITBS_RANGE(iTbs, RGSCH_NUM_ITBS - 1);
26466 iTbs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1];
26468 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
26470 else /* CFI Changed. Update with new iTbs Reset the BLER*/
26473 S32 tmpiTbs = (*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][cfi]))[cqi];
26475 iTbs = (ueDl->laCb[cwIdx].deltaiTbs + tmpiTbs*100)/100;
26477 RG_SCH_CHK_ITBS_RANGE(iTbs, tmpiTbs);
26479 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
/* Cache the refreshed value and clear link-adaptation state for this CFI */
26481 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1] = iTbs;
26483 ueDl->lastCfi = cfi;
26484 ueDl->laCb[cwIdx].deltaiTbs = 0;
26491 * @brief This function determines the RBs and Bytes required for BO
26492 * transmission for UEs configured with TM 1/2/6/7.
26496 * Function: rgSCHCmnDlAllocTxRb1Tb1Cw
26497 * Purpose: Allocate TB1 on CW1.
26499 * Reference Parameter effBo is filled with alloced bytes.
26500 * Returns RFAILED if BO not satisfied at all.
26502 * Invoked by: rgSCHCmnDlAllocTxRbTM1/2/6/7
26504 * @param[in] RgSchCellCb *cell
26505 * @param[in] RgSchDlSf *subFrm
26506 * @param[in] RgSchUeCb *ue
26507 * @param[in] uint32_t bo
26508 * @param[out] uint32_t *effBo
26509 * @param[in] RgSchDlHqProcCb *proc
26510 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26515 PRIVATE Void rgSCHCmnDlAllocTxRb1Tb1Cw
26522 RgSchDlHqProcCb *proc,
26523 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26526 PRIVATE Void rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26532 RgSchDlHqProcCb *proc;
26533 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* Estimates the RB requirement for a fresh single-TB/single-CW
 * transmission: selects the DCI format (5GTF: B2 for rank 2, else B1),
 * computes the RB count for the BO via rgSCHCmnDlAlloc1CwTxRb(), then
 * records the UE in the cell-wide TX allocation list and fills its
 * per-UE alloc info. effBo receives the alloced bytes; 0/RFAILED path
 * leaves the UE unscheduled. */
26536 RgSchDlRbAlloc *allocInfo;
26541 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26543 if (ue->ue5gtfCb.rank == 2)
26545 allocInfo->dciFormat = TFU_DCI_FORMAT_B2;
26549 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
26552 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
26553 allocInfo->raType);
26555 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
26556 bo, &numRb, effBo);
26557 if (ret == RFAILED)
26559 /* If allocation couldn't be made then return */
26562 /* Adding UE to RbAllocInfo TX Lst */
26563 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
26564 /* Fill UE alloc Info */
26565 allocInfo->rbsReq = numRb;
26566 allocInfo->dlSf = subFrm;
26568 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
26576 * @brief This function determines the RBs and Bytes required for BO
26577 * retransmission for UEs configured with TM 1/2/6/7.
26581 * Function: rgSCHCmnDlAllocRetxRb1Tb1Cw
26582 * Purpose: Allocate TB1 on CW1.
26584 * Reference Parameter effBo is filled with alloced bytes.
26585 * Returns RFAILED if BO not satisfied at all.
26587 * Invoked by: rgSCHCmnDlAllocRetxRbTM1/2/6/7
26589 * @param[in] RgSchCellCb *cell
26590 * @param[in] RgSchDlSf *subFrm
26591 * @param[in] RgSchUeCb *ue
26592 * @param[in] uint32_t bo
26593 * @param[out] uint32_t *effBo
26594 * @param[in] RgSchDlHqProcCb *proc
26595 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26600 PRIVATE Void rgSCHCmnDlAllocRetxRb1Tb1Cw
26607 RgSchDlHqProcCb *proc,
26608 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26611 PRIVATE Void rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26617 RgSchDlHqProcCb *proc;
26618 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* Estimates the RB requirement for a single-TB/single-CW HARQ
 * retransmission. On failure the (ue, proc) pair is parked on the
 * non-scheduled retx list for the specific scheduler; on success the UE
 * is added to the cell-wide retx list and its alloc info filled. effBo
 * receives the alloced bytes. */
26621 RgSchDlRbAlloc *allocInfo;
26626 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26629 /* 5GTF: RETX DCI format same as TX */
26630 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
26631 &allocInfo->raType);
26634 /* Get the Allocation in terms of RBs that are required for
26635 * this retx of TB1 */
26636 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
26638 if (ret == RFAILED)
26640 /* Allocation couldn't be made for Retx */
26641 /* Fix : syed If TxRetx allocation failed then add the UE along with the proc
26642 * to the nonSchdTxRetxUeLst and let spfc scheduler take care of it during
26644 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
26647 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
26648 /* Fill UE alloc Info */
26649 allocInfo->rbsReq = numRb;
26650 allocInfo->dlSf = subFrm;
26652 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
26660 * @brief This function determines the RBs and Bytes required for BO
26661 * transmission for UEs configured with TM 1.
26665 * Function: rgSCHCmnDlAllocTxRbTM1
26668 * Reference Parameter effBo is filled with alloced bytes.
26669 * Returns RFAILED if BO not satisfied at all.
26671 * Invoked by: rgSCHCmnDlAllocTxRb
26673 * @param[in] RgSchCellCb *cell
26674 * @param[in] RgSchDlSf *subFrm
26675 * @param[in] RgSchUeCb *ue
26676 * @param[in] uint32_t bo
26677 * @param[out] uint32_t *effBo
26678 * @param[in] RgSchDlHqProcCb *proc
26679 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26684 PRIVATE Void rgSCHCmnDlAllocTxRbTM1
26691 RgSchDlHqProcCb *proc,
26692 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26695 PRIVATE Void rgSCHCmnDlAllocTxRbTM1(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26701 RgSchDlHqProcCb *proc;
26702 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM1 fresh transmission: single TB on one CW; delegate to the common
 * 1TB/1CW allocator. */
26705 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26711 * @brief This function determines the RBs and Bytes required for BO
26712 * retransmission for UEs configured with TM 1.
26716 * Function: rgSCHCmnDlAllocRetxRbTM1
26719 * Reference Parameter effBo is filled with alloced bytes.
26720 * Returns RFAILED if BO not satisfied at all.
26722 * Invoked by: rgSCHCmnDlAllocRetxRb
26724 * @param[in] RgSchCellCb *cell
26725 * @param[in] RgSchDlSf *subFrm
26726 * @param[in] RgSchUeCb *ue
26727 * @param[in] uint32_t bo
26728 * @param[out] uint32_t *effBo
26729 * @param[in] RgSchDlHqProcCb *proc
26730 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26735 PRIVATE Void rgSCHCmnDlAllocRetxRbTM1
26742 RgSchDlHqProcCb *proc,
26743 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26746 PRIVATE Void rgSCHCmnDlAllocRetxRbTM1(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26752 RgSchDlHqProcCb *proc;
26753 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM1 retransmission: single TB on one CW; delegate to the common
 * 1TB/1CW retx allocator. */
26756 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26762 * @brief This function determines the RBs and Bytes required for BO
26763 * transmission for UEs configured with TM 2.
26767 * Function: rgSCHCmnDlAllocTxRbTM2
26770 * Reference Parameter effBo is filled with alloced bytes.
26771 * Returns RFAILED if BO not satisfied at all.
26773 * Invoked by: rgSCHCmnDlAllocTxRb
26775 * @param[in] RgSchCellCb *cell
26776 * @param[in] RgSchDlSf *subFrm
26777 * @param[in] RgSchUeCb *ue
26778 * @param[in] uint32_t bo
26779 * @param[out] uint32_t *effBo
26780 * @param[in] RgSchDlHqProcCb *proc
26781 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26786 PRIVATE Void rgSCHCmnDlAllocTxRbTM2
26793 RgSchDlHqProcCb *proc,
26794 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26797 PRIVATE Void rgSCHCmnDlAllocTxRbTM2(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26803 RgSchDlHqProcCb *proc;
26804 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM2 fresh transmission: single TB on one CW; delegate to the common
 * 1TB/1CW allocator. */
26807 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26813 * @brief This function determines the RBs and Bytes required for BO
26814 * retransmission for UEs configured with TM 2.
26818 * Function: rgSCHCmnDlAllocRetxRbTM2
26821 * Reference Parameter effBo is filled with alloced bytes.
26822 * Returns RFAILED if BO not satisfied at all.
26824 * Invoked by: rgSCHCmnDlAllocRetxRb
26826 * @param[in] RgSchCellCb *cell
26827 * @param[in] RgSchDlSf *subFrm
26828 * @param[in] RgSchUeCb *ue
26829 * @param[in] uint32_t bo
26830 * @param[out] uint32_t *effBo
26831 * @param[in] RgSchDlHqProcCb *proc
26832 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26837 PRIVATE Void rgSCHCmnDlAllocRetxRbTM2
26844 RgSchDlHqProcCb *proc,
26845 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26848 PRIVATE Void rgSCHCmnDlAllocRetxRbTM2(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26854 RgSchDlHqProcCb *proc;
26855 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM2 retransmission: single TB on one CW; delegate to the common
 * 1TB/1CW retx allocator. */
26858 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26864 * @brief This function determines the RBs and Bytes required for BO
26865 * transmission for UEs configured with TM 3.
26869 * Function: rgSCHCmnDlAllocTxRbTM3
26872 * Reference Parameter effBo is filled with alloced bytes.
26873 * Returns RFAILED if BO not satisfied at all.
26875 * Invoked by: rgSCHCmnDlAllocTxRb
26877 * @param[in] RgSchCellCb *cell
26878 * @param[in] RgSchDlSf *subFrm
26879 * @param[in] RgSchUeCb *ue
26880 * @param[in] uint32_t bo
26881 * @param[out] uint32_t *effBo
26882 * @param[in] RgSchDlHqProcCb *proc
26883 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26888 PRIVATE Void rgSCHCmnDlAllocTxRbTM3
26895 RgSchDlHqProcCb *proc,
26896 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26899 PRIVATE Void rgSCHCmnDlAllocTxRbTM3(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26905 RgSchDlHqProcCb *proc;
26906 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM3 fresh transmission: both TBs are free, so delegate to the
 * dual-TB TX-TX allocator. */
26911 /* Both TBs free for TX allocation */
26912 rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo,\
26913 proc, cellWdAllocInfo);
26920 * @brief This function determines the RBs and Bytes required for BO
26921 * retransmission for UEs configured with TM 3.
26925 * Function: rgSCHCmnDlAllocRetxRbTM3
26928 * Reference Parameter effBo is filled with alloced bytes.
26929 * Returns RFAILED if BO not satisfied at all.
26931 * Invoked by: rgSCHCmnDlAllocRetxRb
26933 * @param[in] RgSchCellCb *cell
26934 * @param[in] RgSchDlSf *subFrm
26935 * @param[in] RgSchUeCb *ue
26936 * @param[in] uint32_t bo
26937 * @param[out] uint32_t *effBo
26938 * @param[in] RgSchDlHqProcCb *proc
26939 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26944 PRIVATE Void rgSCHCmnDlAllocRetxRbTM3
26951 RgSchDlHqProcCb *proc,
26952 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26955 PRIVATE Void rgSCHCmnDlAllocRetxRbTM3(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26961 RgSchDlHqProcCb *proc;
26962 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM3 retransmission dispatch: RETX-RETX when both TBs were NACKed,
 * otherwise TX-RETX (one TB needs retx, the other may carry new data). */
26967 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
26968 (proc->tbInfo[1].state == HQ_TB_NACKED))
26971 printf ("RETX RB TM3 nack for both hqp %d cell %d \n", proc->procId, proc->hqE->cell->cellId);
26973 /* Both TBs require RETX allocation */
26974 rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo,\
26975 proc, cellWdAllocInfo);
26979 /* One of the TBs need RETX allocation. Other TB may/maynot
26980 * be available for new TX allocation. */
26981 rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo,\
26982 proc, cellWdAllocInfo);
26990 * @brief This function performs the DCI format selection in case of
26991 * Transmit Diversity scheme where there can be more
26992 * than 1 option for DCI format selection.
26996 * Function: rgSCHCmnSlctPdcchFrmt
26997 * Purpose: 1. If DLFS is enabled, then choose TM specific
26998 * DCI format for Transmit diversity. All the
26999 * TM Specific DCI Formats support Type0 and/or
27000 * Type1 resource allocation scheme. DLFS
27001 * supports only Type-0&1 Resource allocation.
27002 * 2. If DLFS is not enabled, select a DCI format
27003 * which is of smaller size. Since Non-DLFS
27004 * scheduler supports all Resource allocation
27005 * schemes, selection is based on efficiency.
27007 * Invoked by: DL UE Allocation by Common Scheduler.
27009 * @param[in] RgSchCellCb *cell
27010 * @param[in] RgSchUeCb *ue
27011 * @param[out] uint8_t *raType
27012 * @return TfuDciFormat
27016 TfuDciFormat rgSCHCmnSlctPdcchFrmt
27023 TfuDciFormat rgSCHCmnSlctPdcchFrmt(cell, ue, raType)
27029 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27032 /* ccpu00140894- Selective DCI Format and RA type should be selected only
27033 * after TX Mode transition is completed*/
/* DLFS supports only Type-0/1 resource allocation, so with DLFS enabled
 * (and TX-mode transition complete) pick the TM-specific DCI format. */
27034 if ((cellSch->dl.isDlFreqSel) && (ue->txModeTransCmplt))
27036 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciRAType;
27037 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciFrmt);
/* Non-DLFS: return the preferred (smaller, more efficient) DCI format
 * for this transmission mode. raType is an out parameter in both paths. */
27041 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciRAType;
27042 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciFrmt);
27048 * @brief This function handles Retx allocation in case of TM3 UEs
27049 * where both the TBs were NACKED previously.
27053 * Function: rgSCHCmnDlTM3RetxRetx
27054 * Purpose: If forceTD flag enabled
27055 * TD for TB1 on CW1.
27057 * DCI Frmt 2A and RA Type 0
27058 * RI layered SM of both TBs on 2 CWs
27059 * Add UE to cell Alloc Info.
27060 * Fill UE alloc Info.
27063 * Successful allocation is indicated by non-zero effBo value.
27065 * Invoked by: rgSCHCmnDlAllocRbTM3
27067 * @param[in] RgSchCellCb *cell
27068 * @param[in] RgSchDlSf *subFrm
27069 * @param[in] RgSchUeCb *ue
27070 * @param[in] uint32_t bo
27071 * @param[out] uint32_t *effBo
27072 * @param[in] RgSchDlHqProcCb *proc
27073 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27078 PRIVATE Void rgSCHCmnDlTM3RetxRetx
27085 RgSchDlHqProcCb *proc,
27086 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27089 PRIVATE Void rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
27095 RgSchDlHqProcCb *proc;
27096 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
27100 RgSchDlRbAlloc *allocInfo;
27105 uint8_t precInfoAntIdx;
27109 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
27111 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
/* TM3 two-codeword retransmission always signals DCI format 2A, RA type 0. */
27113 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
27114 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
27116 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
27118 if (ret == RFAILED)
27120 /* Allocation couldn't be made for Retx */
27121 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
27124 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
/* Total TX layers = sum of the layers each TB used in its previous transmission. */
27125 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
27126 #ifdef FOUR_TX_ANTENNA
27127 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1 should
27128 * have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
27129 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
27132 proc->cwSwpEnabled = TRUE;
/* Row 0 of getPrecInfoFunc = TM3 handlers; column indexed by antenna-port count. */
27135 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27136 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ processes are not added to the cell-wide RETX list. */
27140 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
27143 /* Adding UE to allocInfo RETX Lst */
27144 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
27146 /* Fill UE alloc Info scratch pad */
27147 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
27148 precInfo, noTxLyrs, subFrm);
27155 * @brief This function handles Retx allocation in case of TM4 UEs
27156 * where both the TBs were NACKED previously.
27160 * Function: rgSCHCmnDlTM4RetxRetx
27161 * Purpose: If forceTD flag enabled
27162 * TD for TB1 on CW1.
27164 * DCI Frmt 2 and RA Type 0
27166 * 1 layer SM of TB1 on CW1.
27168 * RI layered SM of both TBs on 2 CWs
27169 * Add UE to cell Alloc Info.
27170 * Fill UE alloc Info.
27173 * Successful allocation is indicated by non-zero effBo value.
27175 * Invoked by: rgSCHCmnDlAllocRbTM4
27177 * @param[in] RgSchCellCb *cell
27178 * @param[in] RgSchDlSf *subFrm
27179 * @param[in] RgSchUeCb *ue
27180 * @param[in] uint32_t bo
27181 * @param[out] uint32_t *effBo
27182 * @param[in] RgSchDlHqProcCb *proc
27183 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27188 PRIVATE Void rgSCHCmnDlTM4RetxRetx
27195 RgSchDlHqProcCb *proc,
27196 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27199 PRIVATE Void rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
27205 RgSchDlHqProcCb *proc;
27206 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
27210 RgSchDlRbAlloc *allocInfo;
27212 Bool swpFlg = FALSE;
27214 #ifdef FOUR_TX_ANTENNA
27215 uint8_t precInfoAntIdx;
27221 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
27223 /* Irrespective of RI Schedule both CWs */
/* TM4 two-codeword retransmission signals DCI format 2 (closed-loop SM), RA type 0. */
27224 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
27225 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
27227 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
27229 if (ret == RFAILED)
27231 /* Allocation couldn't be made for Retx */
27232 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* Total TX layers = sum of the layers each TB used previously. */
27235 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
27237 #ifdef FOUR_TX_ANTENNA
27238 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1
27239 * should have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
27240 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
27243 proc->cwSwpEnabled = TRUE;
/* Row 1 of getPrecInfoFunc = TM4 handlers; column indexed by antenna-port count. */
27245 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27246 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ processes are not added to the cell-wide RETX list. */
27250 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
27253 /* Adding UE to allocInfo RETX Lst */
27254 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc)
27256 /* Fill UE alloc Info scratch pad */
27257 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
27258 precInfo, noTxLyrs, subFrm);
27266 * @brief This function determines Transmission attributes
27267 * incase of Spatial multiplexing for TX and RETX TBs.
27271 * Function: rgSCHCmnDlSMGetAttrForTxRetx
27272 * Purpose: 1. Reached here for a TM3/4 UE's HqP whose one of the TBs is
27273 * NACKED and the other TB is either NACKED or WAITING.
27274 * 2. Select the NACKED TB for RETX allocation.
27275 * 3. Allocation preference for RETX TB by mapping it to a better
27276 * CW (better in terms of efficiency).
27277 * 4. Determine the state of the other TB.
27278 * Determine if swapFlag were to be set.
27279 * Swap flag would be set if Retx TB is cross
27281 * 5. If UE has new data available for TX and if the other TB's state
27282 * is ACKED then set furtherScope as TRUE.
27284 * Invoked by: rgSCHCmnDlTM3[4]TxRetx
27286 * @param[in] RgSchUeCb *ue
27287 * @param[in] RgSchDlHqProcCb *proc
27288 * @param[out] RgSchDlHqTbCb **retxTb
27289 * @param[out] RgSchDlHqTbCb **txTb
27290 * @param[out] Bool *frthrScp
27291 * @param[out] Bool *swpFlg
27296 PRIVATE Void rgSCHCmnDlSMGetAttrForTxRetx
27299 RgSchDlHqProcCb *proc,
27300 RgSchDlHqTbCb **retxTb,
27301 RgSchDlHqTbCb **txTb,
27306 PRIVATE Void rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, frthrScp,\
27309 RgSchDlHqProcCb *proc;
27310 RgSchDlHqTbCb **retxTb;
27311 RgSchDlHqTbCb **txTb;
27316 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,proc->hqE->cell);
27317 RgSchDlRbAlloc *allocInfo;
/* TB0 NACKed: TB0 becomes the RETX TB and TB1 the new-TX candidate. */
27320 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27322 *retxTb = &proc->tbInfo[0];
27323 *txTb = &proc->tbInfo[1];
27324 /* TENB_BRDCM_TM4- Currently disabling swapflag for TM3/TM4, since
27325 * HqFeedback processing does not consider a swapped hq feedback */
27326 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 1))
27329 proc->cwSwpEnabled = TRUE;
/* Further scope for a fresh TX exists only when the other TB is ACKed
 * and the UE has pending new-TX data (hasNewTxData). */
27331 if (proc->tbInfo[1].state == HQ_TB_ACKED)
27333 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
27334 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
/* Otherwise TB1 is the RETX TB and TB0 the new-TX candidate (mirror case). */
27339 *retxTb = &proc->tbInfo[1];
27340 *txTb = &proc->tbInfo[0];
27341 /* TENB_BRDCM_TM4 - Currently disabling swapflag for TM3/TM4, since
27342 * HqFeedback processing does not consider a swapped hq feedback */
27343 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 0))
27346 proc->cwSwpEnabled = TRUE;
27348 if (proc->tbInfo[0].state == HQ_TB_ACKED)
27350 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
27351 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
27359 * @brief Determine Precoding information for TM3 2 TX Antenna.
27363 * Function: rgSCHCmnDlTM3PrecInf2
27366 * Invoked by: rgSCHCmnDlGetAttrForTM3
27368 * @param[in] RgSchUeCb *ue
27369 * @param[in] uint8_t numTxLyrs
27370 * @param[in] Bool bothCwEnbld
27375 PRIVATE uint8_t rgSCHCmnDlTM3PrecInf2
/* K&R-style alternate definition (ANSI prototype above). Per the header,
 * this derives the DCI precoding-information field for TM3 with 2 TX
 * antenna ports; body elided in this excerpt - see 36.212 for the mapping. */
27383 PRIVATE uint8_t rgSCHCmnDlTM3PrecInf2(ue, numTxLyrs, bothCwEnbld)
27396 * @brief Determine Precoding information for TM4 2 TX Antenna.
27400 * Function: rgSCHCmnDlTM4PrecInf2
27401 * Purpose: To determine a logic of deriving precoding index
27402 * information from 36.212 table 5.3.3.1.5-4
27404 * Invoked by: rgSCHCmnDlGetAttrForTM4
27406 * @param[in] RgSchUeCb *ue
27407 * @param[in] uint8_t numTxLyrs
27408 * @param[in] Bool bothCwEnbld
27413 PRIVATE uint8_t rgSCHCmnDlTM4PrecInf2
27421 PRIVATE uint8_t rgSCHCmnDlTM4PrecInf2(ue, numTxLyrs, bothCwEnbld)
27428 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Case 1: reported rank (RI) equals the number of TX layers being used. */
27432 if (ueDl->mimoInfo.ri == numTxLyrs)
27434 if (ueDl->mimoInfo.ri == 2)
27436 /* PrecInfo corresponding to 2 CW
27438 if (ue->mimoInfo.puschFdbkVld)
27444 precIdx = ueDl->mimoInfo.pmi - 1;
27449 /* PrecInfo corresponding to 1 CW
27451 if (ue->mimoInfo.puschFdbkVld)
27457 precIdx = ueDl->mimoInfo.pmi + 1;
/* Case 2: RI exceeds the TX layers - pick the precoder column with MAX-CQI. */
27461 else if (ueDl->mimoInfo.ri > numTxLyrs)
27463 /* In case of choosing among the columns of a
27464 * precoding matrix, choose the column corresponding
27465 * to the MAX-CQI */
27466 if (ue->mimoInfo.puschFdbkVld)
27472 precIdx = (ueDl->mimoInfo.pmi- 1)* 2 + 1;
27475 else /* if RI < numTxLyrs */
27477 precIdx = (ueDl->mimoInfo.pmi < 2)? 0:1;
27484 * @brief Determine Precoding information for TM3 4 TX Antenna.
27488 * Function: rgSCHCmnDlTM3PrecInf4
27489 * Purpose: To determine a logic of deriving precoding index
27490 * information from 36.212 table 5.3.3.1.5A-2
27492 * Invoked by: rgSCHCmnDlGetAttrForTM3
27494 * @param[in] RgSchUeCb *ue
27495 * @param[in] uint8_t numTxLyrs
27496 * @param[in] Bool bothCwEnbld
27501 PRIVATE uint8_t rgSCHCmnDlTM3PrecInf4
27509 PRIVATE uint8_t rgSCHCmnDlTM3PrecInf4(ue, numTxLyrs, bothCwEnbld)
/* Two-codeword branch: index derives from layer count (numTxLyrs - 2),
 * per 36.212 table 5.3.3.1.5A-2 referenced in the function header. */
27521 precIdx = numTxLyrs - 2;
27523 else /* one 1 CW transmission */
27532 * @brief Determine Precoding information for TM4 4 TX Antenna.
27536 * Function: rgSCHCmnDlTM4PrecInf4
27537 * Purpose: To determine a logic of deriving precoding index
27538 * information from 36.212 table 5.3.3.1.5-5
27540 * Invoked by: rgSCHCmnDlGetAttrForTM4
27542 * @param[in] RgSchUeCb *ue
27543 * @param[in] uint8_t numTxLyrs
27544 * @param[in] Bool bothCwEnbld
27549 PRIVATE uint8_t rgSCHCmnDlTM4PrecInf4
27557 PRIVATE uint8_t rgSCHCmnDlTM4PrecInf4(cell, ue, numTxLyrs, bothCwEnbld)
27564 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27565 uint8_t precInfoBaseIdx, precIdx;
/* Base index: 16 when valid PUSCH precoding feedback exists, else the
 * UE-reported PMI. Each layer-count band spans 17 entries (the +N*17). */
27568 precInfoBaseIdx = (ue->mimoInfo.puschFdbkVld)? (16):
27569 (ueDl->mimoInfo.pmi);
27572 precIdx = precInfoBaseIdx + (numTxLyrs-2)*17;
27574 else /* one 1 CW transmission */
27576 precInfoBaseIdx += 1;
27577 precIdx = precInfoBaseIdx + (numTxLyrs-1)*17;
27584 * @brief This function determines Transmission attributes
27585 * incase of TM3 scheduling.
27589 * Function: rgSCHCmnDlGetAttrForTM3
27590 * Purpose: Determine retx TB and tx TB based on TB states.
27591 * If forceTD enabled
27592 * perform only retx TB allocation.
27593 * If retxTB == TB2 then DCI Frmt = 2A, RA Type = 0.
27594 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
27596 * perform retxTB allocation on CW1.
27598 * Determine further Scope and Swap Flag attributes
27599 * assuming a 2 CW transmission of RetxTB and new Tx TB.
27600 * If no further scope for new TX allocation
27601 * Allocate only retx TB using 2 layers if
27602 * this TB was previously transmitted using 2 layers AND
27603 * number of Tx antenna ports == 4.
27604 * otherwise do single layer precoding.
27606 * Invoked by: rgSCHCmnDlTM3TxRetx
27608 * @param[in] RgSchUeCb *ue
27609 * @param[in] RgSchDlHqProcCb *proc
27610 * @param[out] uint8_t *numTxLyrs
27611 * @param[out] Bool *isTraDiv
27612 * @param[out] uint8_t *prcdngInf
27613 * @param[out] uint8_t *raType
27618 PRIVATE Void rgSCHCmnDlGetAttrForTM3
27622 RgSchDlHqProcCb *proc,
27623 uint8_t *numTxLyrs,
27624 TfuDciFormat *dciFrmt,
27625 uint8_t *prcdngInf,
27626 RgSchDlHqTbCb **retxTb,
27627 RgSchDlHqTbCb **txTb,
27633 PRIVATE Void rgSCHCmnDlGetAttrForTM3(cell, ue, proc, numTxLyrs, dciFrmt,\
27634 prcdngInf, retxTb, txTb, frthrScp, swpFlg, raType)
27637 RgSchDlHqProcCb *proc;
27638 uint8_t *numTxLyrs;
27639 TfuDciFormat *dciFrmt;
27640 uint8_t *prcdngInf;
27641 RgSchDlHqTbCb **retxTb;
27642 RgSchDlHqTbCb **txTb;
27648 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27649 uint8_t precInfoAntIdx;
27652 /* Avoiding Tx-Retx for LAA cell as firstSchedTime is associated with
27654 /* Integration_fix: SPS Proc shall always have only one Cw */
/* Forced-TD path: SPS proc, forceTD, or an LAA SCell restricts the grant
 * to the single NACKed TB with transmit diversity (no new-TX scope). */
27656 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
27657 (ueDl->mimoInfo.forceTD))
27659 ||(TRUE == rgSCHLaaSCellEnabled(cell))
27663 if ((ueDl->mimoInfo.forceTD)
27665 || (TRUE == rgSCHLaaSCellEnabled(cell))
27670 /* Transmit Diversity. Format based on dlfsEnabled
27671 * No further scope */
27672 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27674 *retxTb = &proc->tbInfo[0];
27675 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27679 *retxTb = &proc->tbInfo[1];
27680 *dciFrmt = TFU_DCI_FORMAT_2A;
27681 *raType = RG_SCH_CMN_RA_TYPE0;
27689 /* Determine the 2 TB transmission attributes */
27690 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
27694 /* Prefer allocation of RETX TB over 2 layers rather than combining
27695 * it with a new TX. */
/* With further scope: combine RETX TB and a new-TX TB across 2 CWs (format 2A). */
27696 if ((ueDl->mimoInfo.ri == 2)
27697 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
27699 /* Allocate TB on CW1, using 2 Lyrs,
27700 * Format 2, precoding accordingly */
27706 *numTxLyrs= ((*retxTb)->numLyrs + ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)].noLyr);
27708 if((*retxTb)->tbIdx == 0 && ((*retxTb)->numLyrs == 2 ) && *numTxLyrs ==3)
27711 proc->cwSwpEnabled = TRUE;
27713 else if((*retxTb)->tbIdx == 1 && ((*retxTb)->numLyrs == 1) && *numTxLyrs ==3)
27716 proc->cwSwpEnabled = TRUE;
27720 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27721 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])\
27722 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
27723 *dciFrmt = TFU_DCI_FORMAT_2A;
27724 *raType = RG_SCH_CMN_RA_TYPE0;
27726 else /* frthrScp == FALSE */
27728 if (cell->numTxAntPorts == 2)
27730 /* Transmit Diversity */
/* 2 antenna ports, no new-TX scope: fall back to TD; DCI format depends
 * on which TB is retransmitted (TB2 mandates format 2A). */
27732 if ((*retxTb)->tbIdx == 0)
27734 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27738 /* If retxTB is TB2 then use format 2A */
27739 *dciFrmt = TFU_DCI_FORMAT_2A;
27740 *raType = RG_SCH_CMN_RA_TYPE0;
27745 else /* NumAntPorts == 4 */
27747 if ((*retxTb)->numLyrs == 2)
27749 /* Allocate TB on CW1, using 2 Lyrs,
27750 * Format 2A, precoding accordingly */
27752 *dciFrmt = TFU_DCI_FORMAT_2A;
27753 *raType = RG_SCH_CMN_RA_TYPE0;
27754 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27755 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, *numTxLyrs, *frthrScp);
27760 /* Transmit Diversity */
27762 if ((*retxTb)->tbIdx == 0)
27764 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27768 /* If retxTB is TB2 then use format 2A */
27769 *dciFrmt = TFU_DCI_FORMAT_2A;
27770 *raType = RG_SCH_CMN_RA_TYPE0;
27784 * @brief This function determines Transmission attributes
27785 * incase of TM4 scheduling.
27789 * Function: rgSCHCmnDlGetAttrForTM4
27790 * Purpose: Determine retx TB and tx TB based on TB states.
27791 * If forceTD enabled
27792 * perform only retx TB allocation.
27793 * If retxTB == TB2 then DCI Frmt = 2, RA Type = 0.
27794 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
27796 * perform retxTB allocation on CW1.
27798 * Determine further Scope and Swap Flag attributes
27799 * assuming a 2 CW transmission of RetxTB and new Tx TB.
27800 * If no further scope for new TX allocation
27801 * Allocate only retx TB using 2 layers if
27802 * this TB was previously transmitted using 2 layers AND
27803 * number of Tx antenna ports == 4.
27804 * otherwise do single layer precoding.
27806 * Invoked by: rgSCHCmnDlTM4TxRetx
27808 * @param[in] RgSchUeCb *ue
27809 * @param[in] RgSchDlHqProcCb *proc
27810 * @param[out] uint8_t *numTxLyrs
27811 * @param[out] Bool *isTraDiv
27812 * @param[out] uint8_t *prcdngInf
27813 * @param[out] uint8_t *raType
27818 PRIVATE Void rgSCHCmnDlGetAttrForTM4
27822 RgSchDlHqProcCb *proc,
27823 uint8_t *numTxLyrs,
27824 TfuDciFormat *dciFrmt,
27825 uint8_t *prcdngInf,
27826 RgSchDlHqTbCb **retxTb,
27827 RgSchDlHqTbCb **txTb,
27833 PRIVATE Void rgSCHCmnDlGetAttrForTM4(cell, ue, proc, numTxLyrs, dciFrmt,\
27834 prcdngInf, retxTb, txTb, frthrScp, swpFlg, raType)
27837 RgSchDlHqProcCb *proc;
27838 uint8_t *numTxLyrs;
27839 TfuDciFormat *dciFrmt;
27840 uint8_t *prcdngInf;
27841 RgSchDlHqTbCb **retxTb;
27842 RgSchDlHqTbCb **txTb;
27848 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27849 uint8_t precInfoAntIdx;
27853 /* Integration_fix: SPS Proc shall always have only one Cw */
/* Forced-TD path: SPS proc, forceTD, or an LAA SCell restricts the grant
 * to the single NACKed TB with transmit diversity (no new-TX scope). */
27855 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
27856 (ueDl->mimoInfo.forceTD))
27858 ||(TRUE == rgSCHLaaSCellEnabled(cell))
27862 if ((ueDl->mimoInfo.forceTD)
27864 || (TRUE == rgSCHLaaSCellEnabled(cell))
27869 /* Transmit Diversity. Format based on dlfsEnabled
27870 * No further scope */
27871 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27873 *retxTb = &proc->tbInfo[0];
27874 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27878 *retxTb = &proc->tbInfo[1];
27879 *dciFrmt = TFU_DCI_FORMAT_2;
27880 *raType = RG_SCH_CMN_RA_TYPE0;
/* RI == 1: single-layer closed-loop SM on the lone NACKed TB, format 2,
 * precoding info fixed to 0; no scope for a second TB. */
27888 if (ueDl->mimoInfo.ri == 1)
27890 /* single layer precoding. Format 2.
27891 * No further scope */
27892 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27894 *retxTb = &proc->tbInfo[0];
27898 *retxTb = &proc->tbInfo[1];
27901 *dciFrmt = TFU_DCI_FORMAT_2;
27902 *raType = RG_SCH_CMN_RA_TYPE0;
27904 *prcdngInf = 0; /*When RI= 1*/
27908 /* Determine the 2 TB transmission attributes */
27909 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
27911 *dciFrmt = TFU_DCI_FORMAT_2;
27912 *raType = RG_SCH_CMN_RA_TYPE0;
27915 /* Prefer allocation of RETX TB over 2 layers rather than combining
27916 * it with a new TX. */
27917 if ((ueDl->mimoInfo.ri == 2)
27918 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
27920 /* Allocate TB on CW1, using 2 Lyrs,
27921 * Format 2, precoding accordingly */
/* Row 1 of getPrecInfoFunc = TM4 handlers; column indexed by antenna-port count. */
27925 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27926 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])
27927 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
27929 else /* frthrScp == FALSE */
27931 if (cell->numTxAntPorts == 2)
27933 /* single layer precoding. Format 2. */
27935 *prcdngInf = (getPrecInfoFunc[1][cell->numTxAntPorts/2 - 1])\
27936 (cell, ue, *numTxLyrs, *frthrScp);
27939 else /* NumAntPorts == 4 */
27941 if ((*retxTb)->numLyrs == 2)
27943 /* Allocate TB on CW1, using 2 Lyrs,
27944 * Format 2, precoding accordingly */
27946 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27947 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
27948 (cell, ue, *numTxLyrs, *frthrScp);
27953 /* Allocate TB with 1 lyr precoding,
27954 * Format 2, precoding info accordingly */
27956 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27957 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
27958 (cell, ue, *numTxLyrs, *frthrScp);
27969 * @brief This function handles Retx allocation in case of TM3 UEs
27970 * where previously one of the TBs was NACKED and the other
27971 * TB is either ACKED/WAITING.
27975 * Function: rgSCHCmnDlTM3TxRetx
27976 * Purpose: Determine the TX attributes for TM3 TxRetx Allocation.
27977 * If futher Scope for New Tx Allocation on other TB
27978 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
27979 * Add UE to cell wide RetxTx List.
27981 * Perform only RETX alloc'n on CW1.
27982 * Add UE to cell wide Retx List.
27984 * effBo is set to a non-zero value if allocation is
27987 * Invoked by: rgSCHCmnDlAllocRbTM3
27989 * @param[in] RgSchCellCb *cell
27990 * @param[in] RgSchDlSf *subFrm
27991 * @param[in] RgSchUeCb *ue
27992 * @param[in] uint32_t bo
27993 * @param[out] uint32_t *effBo
27994 * @param[in] RgSchDlHqProcCb *proc
27995 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28000 PRIVATE Void rgSCHCmnDlTM3TxRetx
28007 RgSchDlHqProcCb *proc,
28008 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28011 PRIVATE Void rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28017 RgSchDlHqProcCb *proc;
28018 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28022 RgSchDlRbAlloc *allocInfo;
28024 RgSchDlHqTbCb *retxTb, *txTb;
28033 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28036 /* Determine the transmission attributes */
28037 rgSCHCmnDlGetAttrForTM3(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
28038 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
28039 &allocInfo->raType);
/* NOTE(review): raw printf in the scheduling path; prefer the file's
 * RLOG_* logging (or a debug guard) to avoid per-TTI console I/O. */
28044 printf ("TX RETX called from proc %d cell %d \n",proc->procId, cell->cellId);
/* Further scope: RETX on one CW combined with a fresh TX on the other. */
28046 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
28048 if (ret == RFAILED)
28050 /* Allocation couldn't be made for Retx */
28051 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28054 /* Adding UE to RbAllocInfo RETX-TX Lst */
28055 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: allocate only the RETX TB on a single CW. */
28059 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
28060 numTxLyrs, &numRb, effBo);
28061 if (ret == RFAILED)
28063 /* Allocation couldn't be made for Retx */
28064 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* SPS HARQ processes are not added to the cell-wide RETX list. */
28068 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28071 /* Adding UE to allocInfo RETX Lst */
28072 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
28075 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
28076 prcdngInf, numTxLyrs, subFrm);
28083 * @brief This function handles Retx allocation in case of TM4 UEs
28084 * where previously one of the TBs was NACKED and the other
28085 * TB is either ACKED/WAITING.
28089 * Function: rgSCHCmnDlTM4TxRetx
28090 * Purpose: Determine the TX attributes for TM4 TxRetx Allocation.
28091 * If futher Scope for New Tx Allocation on other TB
28092 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
28093 * Add UE to cell wide RetxTx List.
28095 * Perform only RETX alloc'n on CW1.
28096 * Add UE to cell wide Retx List.
28098 * effBo is set to a non-zero value if allocation is
28101 * Invoked by: rgSCHCmnDlAllocRbTM4
28103 * @param[in] RgSchCellCb *cell
28104 * @param[in] RgSchDlSf *subFrm
28105 * @param[in] RgSchUeCb *ue
28106 * @param[in] uint32_t bo
28107 * @param[out] uint32_t *effBo
28108 * @param[in] RgSchDlHqProcCb *proc
28109 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28114 PRIVATE Void rgSCHCmnDlTM4TxRetx
28121 RgSchDlHqProcCb *proc,
28122 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28125 PRIVATE Void rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28131 RgSchDlHqProcCb *proc;
28132 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28136 RgSchDlRbAlloc *allocInfo;
28138 RgSchDlHqTbCb *retxTb, *txTb;
28146 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28149 /* Determine the transmission attributes */
28150 rgSCHCmnDlGetAttrForTM4(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
28151 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
28152 &allocInfo->raType);
/* Further scope: RETX on one CW combined with a fresh TX on the other. */
28156 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
28158 if (ret == RFAILED)
28160 /* Fix : syed If TxRetx allocation failed then add the UE along
28161 * with the proc to the nonSchdTxRetxUeLst and let spfc scheduler
28162 * take care of it during finalization. */
28163 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28166 /* Adding UE to RbAllocInfo RETX-TX Lst */
28167 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: allocate only the RETX TB on a single CW. */
28171 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
28172 numTxLyrs, &numRb, effBo);
28173 if (ret == RFAILED)
28175 /* Allocation couldn't be made for Retx */
28176 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* SPS HARQ processes are not added to the cell-wide RETX list. */
28180 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28183 /* Adding UE to allocInfo RETX Lst */
28184 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
/* NOTE(review): this macro invocation has no trailing ';' here, unlike the
 * TM3 counterpart - confirm the ';' is not simply on an elided line. */
28187 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
28188 prcdngInf, numTxLyrs, subFrm)
28195 * @brief This function handles Retx allocation in case of TM4 UEs
28196 * where previously both the TBs were ACKED and ACKED
28201 * Function: rgSCHCmnDlTM3TxTx
28202 * Purpose: Reached here for a TM3 UE's HqP's fresh allocation
28203 * where both the TBs are free for TX scheduling.
28204 * If forceTD flag is set
28205 * perform TD on CW1 with TB1.
28210 * RI layered precoding 2 TB on 2 CW.
28211 * Set precoding info.
28212 * Add UE to cellAllocInfo.
28213 * Fill ueAllocInfo.
28215 * effBo is set to a non-zero value if allocation is
28218 * Invoked by: rgSCHCmnDlAllocRbTM3
28220 * @param[in] RgSchCellCb *cell
28221 * @param[in] RgSchDlSf *subFrm
28222 * @param[in] RgSchUeCb *ue
28223 * @param[in] uint32_t bo
28224 * @param[out] uint32_t *effBo
28225 * @param[in] RgSchDlHqProcCb *proc
28226 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28231 PRIVATE Void rgSCHCmnDlTM3TxTx
28238 RgSchDlHqProcCb *proc,
28239 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28242 PRIVATE Void rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28248 RgSchDlHqProcCb *proc;
28249 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28252 RgSchCmnDlUe *ueDl;
28253 RgSchDlRbAlloc *allocInfo;
28258 uint8_t precInfoAntIdx;
28262 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28263 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28265 /* Integration_fix: SPS Proc shall always have only one Cw */
/* Under FOUR_TX_ANTENNA the guard also forces DCCH-carrying procs
 * out of spatial multiplexing (see inline comment below). */
28267 #ifdef FOUR_TX_ANTENNA
28268 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28269 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
28271 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28272 (ueDl->mimoInfo.forceTD))
28275 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
28278 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
28279 &allocInfo->raType);
28280 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28281 bo, &numRb, effBo);
28282 if (ret == RFAILED)
28284 /* If allocation couldn't be made then return */
28288 precInfo = 0; /* TD */
28290 else /* Precoding */
28292 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
28293 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
28295 /* Spatial Multiplexing using 2 CWs */
28296 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
28297 if (ret == RFAILED)
28299 /* If allocation couldn't be made then return */
/* Layers follow the UE-reported rank; precoder from the TM3 row (index 0). */
28302 noTxLyrs = ueDl->mimoInfo.ri;
28303 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
28304 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, getPrecInfoFunc[0], precInfoAntIdx);
28305 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ processes are not added to the cell-wide TX list. */
28309 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28312 /* Adding UE to RbAllocInfo TX Lst */
28313 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28315 /* Fill UE allocInfo scrath pad */
28316 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
28317 precInfo, noTxLyrs, subFrm);
28324 * @brief This function handles Retx allocation in case of TM4 UEs
28325 * where previously both the TBs were ACKED and ACKED
28330 * Function: rgSCHCmnDlTM4TxTx
28331 * Purpose: Reached here for a TM4 UE's HqP's fresh allocation
28332 * where both the TBs are free for TX scheduling.
28333 * If forceTD flag is set
28334 * perform TD on CW1 with TB1.
28340 * Single layer precoding of TB1 on CW1.
28341 * Set precoding info.
28343 * RI layered precoding 2 TB on 2 CW.
28344 * Set precoding info.
28345 * Add UE to cellAllocInfo.
28346 * Fill ueAllocInfo.
28348 * effBo is set to a non-zero value if allocation is
28351 * Invoked by: rgSCHCmnDlAllocRbTM4
28353 * @param[in] RgSchCellCb *cell
28354 * @param[in] RgSchDlSf *subFrm
28355 * @param[in] RgSchUeCb *ue
28356 * @param[in] uint32_t bo
28357 * @param[out] uint32_t *effBo
28358 * @param[in] RgSchDlHqProcCb *proc
28359 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28364 PRIVATE Void rgSCHCmnDlTM4TxTx
28371 RgSchDlHqProcCb *proc,
28372 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28375 PRIVATE Void rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28381 RgSchDlHqProcCb *proc;
28382 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28385 RgSchCmnDlUe *ueDl;
28386 RgSchDlRbAlloc *allocInfo;
28390 uint8_t precInfoAntIdx;
28395 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28396 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28398 /* Integration_fix: SPS Proc shall always have only one Cw */
/* Under FOUR_TX_ANTENNA the guard also forces DCCH-carrying procs
 * out of spatial multiplexing (see inline comment below). */
28400 #ifdef FOUR_TX_ANTENNA
28401 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28402 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
28404 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28405 (ueDl->mimoInfo.forceTD))
28408 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
28411 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
28412 &allocInfo->raType);
28414 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28415 bo, &numRb, effBo);
28416 if (ret == RFAILED)
28418 /* If allocation couldn't be made then return */
28422 precInfo = 0; /* TD */
28424 else /* Precoding */
28426 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
28427 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
/* RI == 1: one TB on one CW, precoding info 0; else 2-CW SM per rank. */
28429 if (ueDl->mimoInfo.ri == 1)
28431 /* Single Layer SM using FORMAT 2 */
28432 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28433 bo, &numRb, effBo);
28434 if (ret == RFAILED)
28436 /* If allocation couldn't be made then return */
28440 precInfo = 0; /* PrecInfo as 0 for RI=1*/
28444 /* Spatial Multiplexing using 2 CWs */
28445 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
28446 if (ret == RFAILED)
28448 /* If allocation couldn't be made then return */
28451 noTxLyrs = ueDl->mimoInfo.ri;
/* NOTE(review): the TM3 twin bound-checks the getPrecInfoFunc index
 * (RGSCH_ARRAY_BOUND_CHECK) before dereferencing; not visible here - confirm. */
28452 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
28453 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ processes are not added to the cell-wide TX list. */
28459 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28462 /* Adding UE to RbAllocInfo TX Lst */
28463 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28466 /* Fill UE allocInfo scrath pad */
28467 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
28468 precInfo, noTxLyrs, subFrm);
28475 * @brief This function determines the RBs and Bytes required for BO
28476 * transmission for UEs configured with TM 4.
28480 * Function: rgSCHCmnDlAllocTxRbTM4
28481 * Purpose: Invokes the functionality particular to the
28482 * current state of the TBs of the "proc".
28484 * Reference Parameter effBo is filled with alloced bytes.
28485 * Returns RFAILED if BO not satisfied at all.
28487 * Invoked by: rgSCHCmnDlAllocTxRb
28489 * @param[in] RgSchCellCb *cell
28490 * @param[in] RgSchDlSf *subFrm
28491 * @param[in] RgSchUeCb *ue
28492 * @param[in] uint32_t bo
28493 * @param[out] uint32_t *effBo
28494 * @param[in] RgSchDlHqProcCb *proc
28495 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28500 PRIVATE Void rgSCHCmnDlAllocTxRbTM4
28507 RgSchDlHqProcCb *proc,
28508 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28511 PRIVATE Void rgSCHCmnDlAllocTxRbTM4(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28517 RgSchDlHqProcCb *proc;
28518 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* Fresh allocation entry point: delegate to the TM4 TxTx handler;
 * effBo is an out parameter reporting the bytes actually allocated. */
28522 /* Both TBs free for TX allocation */
28523 rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo,\
28524 proc, cellWdAllocInfo);
28531 * @brief This function determines the RBs and Bytes required for BO
28532 * retransmission for UEs configured with TM 4.
28536 * Function: rgSCHCmnDlAllocRetxRbTM4
28537 * Purpose: Invokes the functionality particular to the
28538 * current state of the TBs of the "proc".
28540 * Reference Parameter effBo is filled with alloced bytes.
28541 * Returns RFAILED if BO not satisfied at all.
28543 * Invoked by: rgSCHCmnDlAllocRetxRb
28545 * @param[in] RgSchCellCb *cell
28546 * @param[in] RgSchDlSf *subFrm
28547 * @param[in] RgSchUeCb *ue
28548 * @param[in] uint32_t bo
28549 * @param[out] uint32_t *effBo
28550 * @param[in] RgSchDlHqProcCb *proc
28551 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28556 PRIVATE Void rgSCHCmnDlAllocRetxRbTM4
28563 RgSchDlHqProcCb *proc,
28564 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28567 PRIVATE Void rgSCHCmnDlAllocRetxRbTM4(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28573 RgSchDlHqProcCb *proc;
28574 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* Dispatch on the HARQ TB states: RetxRetx when both TBs were NACKed,
 * otherwise TxRetx (one TB retransmits, the other may carry a new TX). */
28578 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
28579 (proc->tbInfo[1].state == HQ_TB_NACKED))
28581 /* Both TBs require RETX allocation */
28582 rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo,\
28583 proc, cellWdAllocInfo);
28587 /* One of the TBs needs RETX allocation. The other TB may/may not
28588 * be available for new TX allocation. */
28589 rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo,\
28590 proc, cellWdAllocInfo);
28599 * @brief This function determines the RBs and Bytes required for BO
28600 * transmission for UEs configured with TM 5.
28604 * Function: rgSCHCmnDlAllocTxRbTM5
28607 * Reference Parameter effBo is filled with alloced bytes.
28608 * Returns RFAILED if BO not satisfied at all.
28610 * Invoked by: rgSCHCmnDlAllocTxRb
28612 * @param[in] RgSchCellCb *cell
28613 * @param[in] RgSchDlSf *subFrm
28614 * @param[in] RgSchUeCb *ue
28615 * @param[in] uint32_t bo
28616 * @param[out] uint32_t *effBo
28617 * @param[in] RgSchDlHqProcCb *proc
28618 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28623 PRIVATE Void rgSCHCmnDlAllocTxRbTM5
28630 RgSchDlHqProcCb *proc,
28631 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28634 PRIVATE Void rgSCHCmnDlAllocTxRbTM5(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28640 RgSchDlHqProcCb *proc;
28641 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM5 is not supported by this scheduler: no allocation is performed;
 * an error is logged only when the DEBUG error class is compiled in. */
28644 #if (ERRCLASS & ERRCLS_DEBUG)
28645 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
28652 * @brief This function determines the RBs and Bytes required for BO
28653 * retransmission for UEs configured with TM 5.
28657 * Function: rgSCHCmnDlAllocRetxRbTM5
28660 * Reference Parameter effBo is filled with alloced bytes.
28661 * Returns RFAILED if BO not satisfied at all.
28663 * Invoked by: rgSCHCmnDlAllocRetxRb
28665 * @param[in] RgSchCellCb *cell
28666 * @param[in] RgSchDlSf *subFrm
28667 * @param[in] RgSchUeCb *ue
28668 * @param[in] uint32_t bo
28669 * @param[out] uint32_t *effBo
28670 * @param[in] RgSchDlHqProcCb *proc
28671 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28676 PRIVATE Void rgSCHCmnDlAllocRetxRbTM5
28683 RgSchDlHqProcCb *proc,
28684 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28687 PRIVATE Void rgSCHCmnDlAllocRetxRbTM5(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28693 RgSchDlHqProcCb *proc;
28694 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM5 retransmission is likewise unsupported: log-only stub, no allocation. */
28697 #if (ERRCLASS & ERRCLS_DEBUG)
28698 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
28706 * @brief This function determines the RBs and Bytes required for BO
28707 * transmission for UEs configured with TM 6.
28711 * Function: rgSCHCmnDlAllocTxRbTM6
28714 * Reference Parameter effBo is filled with alloced bytes.
28715 * Returns RFAILED if BO not satisfied at all.
28717 * Invoked by: rgSCHCmnDlAllocTxRb
28719 * @param[in] RgSchCellCb *cell
28720 * @param[in] RgSchDlSf *subFrm
28721 * @param[in] RgSchUeCb *ue
28722 * @param[in] uint32_t bo
28723 * @param[out] uint32_t *effBo
28724 * @param[in] RgSchDlHqProcCb *proc
28725 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28730 PRIVATE Void rgSCHCmnDlAllocTxRbTM6
28737 RgSchDlHqProcCb *proc,
28738 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28741 PRIVATE Void rgSCHCmnDlAllocTxRbTM6(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28747 RgSchDlHqProcCb *proc;
28748 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28751 RgSchDlRbAlloc *allocInfo;
28752 RgSchCmnDlUe *ueDl;
28758 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28759 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* Forced transmit diversity falls back to DCI format 1A; otherwise TM6
 * uses single-layer closed-loop precoding signalled via DCI format 1B. */
28761 if (ueDl->mimoInfo.forceTD)
28763 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
28764 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28768 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
28769 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28770 /* Fill precoding information for FORMAT 1B */
28771 /* Bits 0-3 (least significant) carry the PMI index; bit 4 carries the
28772 * PMI confirmation (PUSCH feedback valid) flag.
28774 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
28775 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
28777 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28778 bo, &numRb, effBo);
28779 if (ret == RFAILED)
28781 /* If allocation couldn't be made then return */
/* SPS HARQ procs are managed by the SPS module; only non-SPS procs are
 * appended to the cell-wide TX list here. */
28786 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28789 /* Adding UE to RbAllocInfo TX Lst */
28790 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28792 /* Fill UE alloc Info */
28793 allocInfo->rbsReq = numRb;
28794 allocInfo->dlSf = subFrm;
28800 * @brief This function determines the RBs and Bytes required for BO
28801 * retransmission for UEs configured with TM 6.
28805 * Function: rgSCHCmnDlAllocRetxRbTM6
28808 * Reference Parameter effBo is filled with alloced bytes.
28809 * Returns RFAILED if BO not satisfied at all.
28811 * Invoked by: rgSCHCmnDlAllocRetxRb
28813 * @param[in] RgSchCellCb *cell
28814 * @param[in] RgSchDlSf *subFrm
28815 * @param[in] RgSchUeCb *ue
28816 * @param[in] uint32_t bo
28817 * @param[out] uint32_t *effBo
28818 * @param[in] RgSchDlHqProcCb *proc
28819 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28824 PRIVATE Void rgSCHCmnDlAllocRetxRbTM6
28831 RgSchDlHqProcCb *proc,
28832 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28835 PRIVATE Void rgSCHCmnDlAllocRetxRbTM6(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28841 RgSchDlHqProcCb *proc;
28842 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28845 RgSchDlRbAlloc *allocInfo;
28846 RgSchCmnDlUe *ueDl;
28852 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28853 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Same DCI-format selection as the TM6 TX path: 1A under forced transmit
 * diversity, else 1B with PMI + confirmation bit packed into precIdxInfo. */
28855 if (ueDl->mimoInfo.forceTD)
28857 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
28858 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28862 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
28863 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28864 /* Fill precoding information for FORMAT 1B */
28865 /* Bits 0-3 (least significant) carry the PMI index; bit 4 carries the
28866 * PMI confirmation (PUSCH feedback valid) flag.
28868 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
28869 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
28872 /* Get the Allocation in terms of RBs that are required for
28873 * this retx of TB1 */
28874 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
28876 if (ret == RFAILED)
28878 /* Allocation couldn't be made for Retx */
28879 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28882 /* Adding UE to allocInfo RETX Lst */
28883 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
28884 /* Fill UE alloc Info */
28885 allocInfo->rbsReq = numRb;
28886 allocInfo->dlSf = subFrm;
28892 * @brief This function determines the RBs and Bytes required for BO
28893 * transmission for UEs configured with TM 7.
28897 * Function: rgSCHCmnDlAllocTxRbTM7
28900 * Reference Parameter effBo is filled with alloced bytes.
28901 * Returns RFAILED if BO not satisfied at all.
28903 * Invoked by: rgSCHCmnDlAllocTxRb
28905 * @param[in] RgSchCellCb *cell
28906 * @param[in] RgSchDlSf *subFrm
28907 * @param[in] RgSchUeCb *ue
28908 * @param[in] uint32_t bo
28909 * @param[out] uint32_t *effBo
28910 * @param[in] RgSchDlHqProcCb *proc
28911 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28916 PRIVATE Void rgSCHCmnDlAllocTxRbTM7
28923 RgSchDlHqProcCb *proc,
28924 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28927 PRIVATE Void rgSCHCmnDlAllocTxRbTM7(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28933 RgSchDlHqProcCb *proc;
28934 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM7 (single-antenna-port 5) maps directly onto the generic
 * one-TB/one-CW new-transmission allocator. */
28937 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
28943 * @brief This function determines the RBs and Bytes required for BO
28944 * retransmission for UEs configured with TM 7.
28948 * Function: rgSCHCmnDlAllocRetxRbTM7
28951 * Reference Parameter effBo is filled with alloced bytes.
28952 * Returns RFAILED if BO not satisfied at all.
28954 * Invoked by: rgSCHCmnDlAllocRetxRb
28956 * @param[in] RgSchCellCb *cell
28957 * @param[in] RgSchDlSf *subFrm
28958 * @param[in] RgSchUeCb *ue
28959 * @param[in] uint32_t bo
28960 * @param[out] uint32_t *effBo
28961 * @param[in] RgSchDlHqProcCb *proc
28962 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28967 PRIVATE Void rgSCHCmnDlAllocRetxRbTM7
28974 RgSchDlHqProcCb *proc,
28975 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28978 PRIVATE Void rgSCHCmnDlAllocRetxRbTM7(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28984 RgSchDlHqProcCb *proc;
28985 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM7 retransmission likewise delegates to the generic
 * one-TB/one-CW retransmission allocator. */
28988 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
28994 * @brief This function invokes the TM specific DL TX RB Allocation routine.
28998 * Function: rgSCHCmnDlAllocTxRb
28999 * Purpose: This function invokes the TM specific
29000 * DL TX RB Allocation routine.
29002 * Invoked by: Specific Schedulers
29004 * @param[in] RgSchCellCb *cell
29005 * @param[in] RgSchDlSf *subFrm
29006 * @param[in] RgSchUeCb *ue
29007 * @param[in] uint32_t bo
29008 * @param[out] uint32_t *effBo
29009 * @param[in] RgSchDlHqProcCb *proc
29010 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29015 S16 rgSCHCmnDlAllocTxRb
29022 RgSchDlHqProcCb *proc,
29023 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29026 S16 rgSCHCmnDlAllocTxRb(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
29032 RgSchDlHqProcCb *proc;
29033 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* NOTE(review): despite their names, newSchBits/prevSchBits accumulate
 * tbInfo[].bytesReq (bytes); the delta is converted to bits with "* 8" below. */
29036 uint32_t newSchBits = 0;
29037 uint32_t prevSchBits = 0;
29038 RgSchDlRbAlloc *allocInfo;
/* Reset the per-TTI aggregate when this UE was last scheduled in a
 * different TTI. */
29041 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
29043 ue->dl.aggTbBits = 0;
29047 /* Calculate totals bits previously allocated */
29048 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29049 if (allocInfo->tbInfo[0].schdlngForTb)
29051 prevSchBits += allocInfo->tbInfo[0].bytesReq;
29053 if (allocInfo->tbInfo[1].schdlngForTb)
29055 prevSchBits += allocInfo->tbInfo[1].bytesReq;
/* Dispatch by transmission mode; txMode is 1-based, table is 0-based. */
29058 /* Call TM specific RB allocation routine */
29059 (dlAllocTxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
29060 proc, cellWdAllocInfo);
29064 /* Calculate totals bits newly allocated */
29065 if (allocInfo->tbInfo[0].schdlngForTb)
29067 newSchBits += allocInfo->tbInfo[0].bytesReq;
29069 if (allocInfo->tbInfo[1].schdlngForTb)
29071 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* Only grow the aggregate and stamp the scheduling time when this call
 * actually increased the allocation. */
29073 if (newSchBits > prevSchBits)
29075 ue->dl.aggTbBits += ((newSchBits - prevSchBits) * 8);
29076 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
29083 /* DwPTS Scheduling Changes Start */
29086 * @brief Retransmit decision for TDD. Retx is avoided in below cases
29087 * 1) DL Sf -> Spl Sf
29088 * 2) DL SF -> DL SF 0
29092 * Function: rgSCHCmnRetxAvoidTdd
29093 * Purpose: Avoid allocating RETX for cases 1, 2
29095 * Invoked by: rgSCHCmnRetxAvoidTdd
29097 * @param[in] RgSchDlSf *curSf
29098 * @param[in] RgSchCellCb *cell
29099 * @param[in] RgSchDlHqProcCb *proc
29104 Bool rgSCHCmnRetxAvoidTdd
29108 RgSchDlHqProcCb *proc
29111 Bool rgSCHCmnRetxAvoidTdd(curSf, cell, proc)
29114 RgSchDlHqProcCb *proc;
29117 RgSchTddSfType txSfType = 0;
29120 /* Get the RBs of TB that will be retransmitted */
29121 if (proc->tbInfo[0].state == HQ_TB_NACKED)
29123 txSfType = proc->tbInfo[0].sfType;
29125 #ifdef XEON_SPECIFIC_CHANGES
29126 #ifndef XEON_TDD_SPCL
29127 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
29128 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
29135 if (proc->tbInfo[1].state == HQ_TB_NACKED)
29137 /* Select the TxSf with the highest num of possible REs
29138 * In ascending order -> 1) SPL SF 2) DL_SF_0 3) DL_SF */
29139 txSfType = RGSCH_MAX(txSfType, proc->tbInfo[1].sfType);
29141 #ifdef XEON_SPECIFIC_CHANGES
29142 #ifndef XEON_TDD_SPCL
29143 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
29144 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
/* Avoid RETX whenever the original TX subframe type offered more REs than
 * the current subframe can provide. */
29152 if (txSfType > curSf->sfType)
29166 * @brief Avoid allocating RETX incase of collision
29167 * with reserved resources for BCH/PSS/SSS occassions.
29171 * Function: rgSCHCmnRetxAllocAvoid
29172 * Purpose: Avoid allocating RETX incase of collision
29173 * with reserved resources for BCH/PSS/SSS occassions
29175 * Invoked by: rgSCHCmnDlAllocRetxRb
29177 * @param[in] RgSchDlSf *subFrm
29178 * @param[in] RgSchUeCb *ue
29179 * @param[in] RgSchDlHqProcCb *proc
29184 Bool rgSCHCmnRetxAllocAvoid
29188 RgSchDlHqProcCb *proc
29191 Bool rgSCHCmnRetxAllocAvoid(subFrm, cell, proc)
29194 RgSchDlHqProcCb *proc;
29200 if (proc->tbInfo[0].state == HQ_TB_NACKED)
29202 reqRbs = proc->tbInfo[0].dlGrnt.numRb;
29206 reqRbs = proc->tbInfo[1].dlGrnt.numRb;
29208 /* Consider the dlGrnt.numRb of the Retransmitting proc->tbInfo
29209 * and current available RBs to determine if this RETX TB
29210 * will collide with the BCH/PSS/SSS occassion */
29211 if (subFrm->sfNum % 5 == 0)
29213 if ((subFrm->bwAssigned < cell->pbchRbEnd) &&
29214 (((subFrm->bwAssigned + reqRbs) - cell->pbchRbStart) > 0))
29226 * @brief This function invokes the TM specific DL RETX RB Allocation routine.
29230 * Function: rgSCHCmnDlAllocRetxRb
29231 * Purpose: This function invokes the TM specific
29232 * DL RETX RB Allocation routine.
29234 * Invoked by: Specific Schedulers
29236 * @param[in] RgSchCellCb *cell
29237 * @param[in] RgSchDlSf *subFrm
29238 * @param[in] RgSchUeCb *ue
29239 * @param[in] uint32_t bo
29240 * @param[out] uint32_t *effBo
29241 * @param[in] RgSchDlHqProcCb *proc
29242 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29247 S16 rgSCHCmnDlAllocRetxRb
29254 RgSchDlHqProcCb *proc,
29255 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29258 S16 rgSCHCmnDlAllocRetxRb(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
29264 RgSchDlHqProcCb *proc;
29265 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* NOTE(review): as in rgSCHCmnDlAllocTxRb, newSchBits holds bytes and is
 * converted to bits with "* 8" below. */
29268 uint32_t newSchBits = 0;
29269 RgSchDlRbAlloc *allocInfo;
29272 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
29274 ue->dl.aggTbBits = 0;
29278 /* Check for DL BW exhaustion */
29279 if (subFrm->bw <= subFrm->bwAssigned)
/* Dispatch by transmission mode; txMode is 1-based, table is 0-based. */
29283 /* Call TM specific RB allocation routine */
29284 (dlAllocRetxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
29285 proc, cellWdAllocInfo);
29289 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29290 /* Calculate totals bits newly allocated */
29291 if (allocInfo->tbInfo[0].schdlngForTb)
29293 newSchBits += allocInfo->tbInfo[0].bytesReq;
29295 if (allocInfo->tbInfo[1].schdlngForTb)
29297 newSchBits += allocInfo->tbInfo[1].bytesReq;
29299 ue->dl.aggTbBits += (newSchBits * 8);
29300 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
29308 * @brief This function determines the RBs and Bytes required for
29309 * Transmission on 1 CW.
29313 * Function: rgSCHCmnDlAlloc1CwTxRb
29314 * Purpose: This function determines the RBs and Bytes required
29315 * for Transmission of DL SVC BO on 1 CW.
29316 * Also, takes care of SVC by SVC allocation by tracking
29317 * previous SVCs allocations.
29318 * Returns RFAILED if BO not satisfied at all.
29320 * Invoked by: DL UE Allocation
29322 * @param[in] RgSchCellCb *cell
29323 * @param[in] RgSchDlSf *subFrm
29324 * @param[in] RgSchUeCb *ue
29325 * @param[in] RgSchDlHqTbCb *tbInfo
29326 * @param[in] uint32_t bo
29327 * @param[out] uint8_t *numRb
29328 * @param[out] uint32_t *effBo
29333 PRIVATE S16 rgSCHCmnDlAlloc1CwTxRb
29338 RgSchDlHqTbCb *tbInfo,
29344 PRIVATE S16 rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, tbInfo, bo, numRb, effBo)
29348 RgSchDlHqTbCb *tbInfo;
29357 RgSchCmnDlUe *ueDl;
29358 RgSchDlRbAlloc *allocInfo;
29361 /* Correcting wrap around issue.
29362 * This change has been done at mutliple places in this function.*/
29363 uint32_t tempNumRb;
29366 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29367 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29368 oldReq = ueDl->outStndAlloc;
/* NOTE(review): this path is hard-wired for 5GTF TM3 — iTbs comes from the
 * configured MCS and the TB/RB caps come from 5GTF maxima (see TODO below). */
29371 //TODO_SID: Currently setting max Tb size wrt to 5GTF TM3
29372 iTbs = ue->ue5gtfCb.mcs;
29373 ueDl->maxTbSz = MAX_5GTF_TB_SIZE * ue->ue5gtfCb.rank;
29374 ueDl->maxRb = MAX_5GTF_PRBS;
29376 ueDl->outStndAlloc += bo;
29377 /* consider Cumulative amount of this BO and bytes so far allocated */
29378 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbSz/8);
29379 /* Get the number of REs needed for this bo. */
29380 //noRes = ((bo * 8 * 1024) / eff);
29382 /* Get the number of RBs needed for this transmission */
29383 /* Number of RBs = No of REs / No of REs per RB */
29384 //tempNumRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
/* RB count is pinned at the full 5GTF bandwidth rather than derived from
 * the BO/efficiency calculation (commented out above). */
29385 tempNumRb = MAX_5GTF_PRBS;
29386 tbSz = RGSCH_MIN(bo, (rgSch5gtfTbSzTbl[iTbs]/8) * ue->ue5gtfCb.rank);
29388 /* DwPts Scheduling Changes End */
/* effBo reports only the newly satisfied bytes: total TB size minus what
 * was already outstanding, capped by the requested bytes. */
29389 *effBo = RGSCH_MIN(tbSz - oldReq, reqBytes);
29392 //RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs);
29397 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbSz, \
29398 iTbs, imcs, tbInfo, ue->ue5gtfCb.rank);
29399 *numRb = (uint8_t) tempNumRb;
29401 /* Update the subframe Allocated BW field */
29402 subFrm->bwAssigned = subFrm->bwAssigned + tempNumRb - allocInfo->rbsReq;
29409 * @brief This function is invoked in the event of any TB's allocation
29410 * being underutilized by the specific scheduler. Here we reduce iMcs
29411 * to increase redundancy and hence increase reception quality at UE.
29415 * Function: rgSCHCmnRdcImcsTxTb
29416 * Purpose: This function shall reduce the iMcs in accordance with
29417 * the total consumed bytes by the UE at allocation
29420 * Invoked by: UE DL Allocation finalization routine
29421 * of specific scheduler.
29423 * @param[in] RgSchDlRbAlloc *allocInfo
29424 * @param[in] uint8_t tbInfoIdx
29425 * @param[in] uint32_t cnsmdBytes
29430 Void rgSCHCmnRdcImcsTxTb
29432 RgSchDlRbAlloc *allocInfo,
29434 uint32_t cnsmdBytes
29437 Void rgSCHCmnRdcImcsTxTb(allocInfo, tbInfoIdx, cnsmdBytes)
29438 RgSchDlRbAlloc *allocInfo;
29440 uint32_t cnsmdBytes;
/* NOTE(review): per the comment below, the iTbs-reduction logic appears to
 * be disabled in this build; the code after it looks retained for reference. */
29444 /*The below functionality is not needed.*/
29450 iTbs = allocInfo->tbInfo[tbInfoIdx].iTbs;
29451 noLyr = allocInfo->tbInfo[tbInfoIdx].noLyr;
29452 numRb = allocInfo->rbsAlloc;
/* If the current iTbs already yields exactly the consumed bytes, map it
 * straight to iMcs; otherwise step iTbs down until the TB size fits. */
29455 if ((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) == cnsmdBytes)
29460 /* Get iTbs as suitable for the consumed bytes */
29461 while((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) > cnsmdBytes)
29465 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].\
29466 tbCb->dlGrnt.iMcs);
29472 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].tbCb->dlGrnt.iMcs);
29479 * @brief This function determines the RBs and Bytes required for
29480 * Transmission on 2 CWs.
29484 * Function: rgSCHCmnDlAlloc2CwTxRb
29485 * Purpose: This function determines the RBs and Bytes required
29486 * for Transmission of DL SVC BO on 2 CWs.
29487 * Also, takes care of SVC by SVC allocation by tracking
29488 * previous SVCs allocations.
29489 * Returns RFAILED if BO not satisfied at all.
29491 * Invoked by: TM3 and TM4 DL UE Allocation
29493 * @param[in] RgSchCellCb *cell
29494 * @param[in] RgSchDlSf *subFrm
29495 * @param[in] RgSchUeCb *ue
29496 * @param[in] RgSchDlHqProcCb *proc
29497 * @param[in] RgSchDlHqProcCb bo
29498 * @param[out] uint8_t *numRb
29499 * @param[out] uint32_t *effBo
29504 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRb
29509 RgSchDlHqProcCb *proc,
29515 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, numRbRef, effBo)
29519 RgSchDlHqProcCb *proc;
29526 uint32_t eff1, eff2;
29527 uint32_t tb1Sz, tb2Sz;
29528 uint8_t imcs1, imcs2;
29529 uint8_t noLyr1, noLyr2;
29530 uint8_t iTbs1, iTbs2;
29531 RgSchCmnDlCell *cellDl;
29532 RgSchCmnDlUe *ueDl;
29533 RgSchDlRbAlloc *allocInfo;
29536 /* Fix: MUE_PERTTI_DL */
29538 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
29539 uint8_t cfi = cellSch->dl.currCfi;
29541 uint32_t availBits = 0;
29543 uint32_t boTmp = bo;
29548 cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
29549 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29550 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29551 oldReq = ueDl->outStndAlloc;
29554 if (ueDl->maxTbBits > ue->dl.aggTbBits)
29556 availBits = ueDl->maxTbBits - ue->dl.aggTbBits;
/* Bail out early when the UE has already hit any of its per-TTI caps:
 * aggregate bits, per-TB size, or RB count. */
29558 /* check if we can further allocate to this UE */
29559 if ((ue->dl.aggTbBits >= ueDl->maxTbBits) ||
29560 (allocInfo->tbInfo[0].bytesReq >= ueDl->maxTbSz/8) ||
29561 (allocInfo->tbInfo[1].bytesReq >= ueDl->maxTbSz/8) ||
29562 (allocInfo->rbsReq >= ueDl->maxRb))
29564 RLOG_ARG0(L_DEBUG,DBG_CELLID,cell->cellId,
29565 "rgSCHCmnDlAllocRb(): UEs max allocation exceed");
29569 noLyr1 = ueDl->mimoInfo.cwInfo[0].noLyr;
29570 noLyr2 = ueDl->mimoInfo.cwInfo[1].noLyr;
29572 /* If there is no CFI change, continue to use the BLER based
29574 if (ueDl->lastCfi == cfi)
29576 iTbs1 = ueDl->mimoInfo.cwInfo[0].iTbs[noLyr1 - 1];
29577 iTbs2 = ueDl->mimoInfo.cwInfo[1].iTbs[noLyr2 - 1];
/* CFI changed: re-derive iTbs for each CW from its CQI (the two variants
 * reflect a TDD/FDD signature difference of rgSchCmnFetchItbs). */
29581 uint8_t cqi = ueDl->mimoInfo.cwInfo[0].cqi;
29583 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 0, noLyr1);
29585 iTbs1 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 0, noLyr1);
29588 cqi = ueDl->mimoInfo.cwInfo[1].cqi;
29590 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 1, noLyr2);
29592 iTbs2 = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 1, noLyr2);
29596 /*ccpu00131191 and ccpu00131317 - Fix for RRC Reconfig failure
29597 * issue for VoLTE call */
29598 //if ((proc->hasDcch) || (TRUE == rgSCHLaaSCellEnabled(cell)))
29618 else if(!cellSch->dl.isDlFreqSel)
29621 /* for Tdd reduce iTbs only for SF0. SF5 contains only
29622 * SSS and can be ignored */
29623 if (subFrm->sfNum == 0)
29625 (iTbs1 > 1)? (iTbs1 -= 1) : (iTbs1 = 0);
29626 (iTbs2 > 1)? (iTbs2 -= 1) : (iTbs2 = 0);
29628 /* For SF 3 and 8 CRC is getting failed in DL.
29629 Need to do proper fix after the replay from
29631 #ifdef CA_PHY_BRDCM_61765
29632 if ((subFrm->sfNum == 3) || (subFrm->sfNum == 8))
29634 (iTbs1 > 2)? (iTbs1 -= 2) : (iTbs1 = 0);
29635 (iTbs2 > 2)? (iTbs2 -= 2) : (iTbs2 = 0);
/* Special (DwPTS) subframes use a reduced CFI for the efficiency lookup. */
29643 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
29645 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
29649 eff1 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr1 - 1][cfi]))[iTbs1];
29650 eff2 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr2 - 1][cfi]))[iTbs2];
29653 bo = RGSCH_MIN(bo,availBits/8);
29654 ueDl->outStndAlloc += bo;
29655 /* consider Cumulative amount of this BO and bytes so far allocated */
29656 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbBits/8);
/* Split the BO across the two CWs weighted by their spectral efficiency,
 * flooring each share at the minimum grant header size. */
29657 bo = RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff1)/(eff1+eff2)),
29659 RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff2)/(eff1+eff2)),
29660 (ueDl->maxTbSz)/8) +
29661 1; /* Add 1 to adjust the truncation at weighted averaging */
29662 /* Get the number of REs needed for this bo. */
29663 noRes = ((bo * 8 * 1024) / (eff1 + eff2));
29665 /* Get the number of RBs needed for this transmission */
29666 /* Number of RBs = No of REs / No of REs per RB */
29667 numRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
29668 /* Cannot exceed the maximum number of RBs per UE */
29669 if (numRb > ueDl->maxRb)
29671 numRb = ueDl->maxRb;
29676 if(RFAILED == rgSCHLaaCmn2CwAdjustPrb(allocInfo, boTmp, &numRb, ueDl, noLyr1, noLyr2, iTbs1, iTbs2))
/* Grow numRb while both TB sizes stay within the per-TB cap and their sum
 * still fits inside the (BO-derived) byte budget. */
29679 while ((numRb <= ueDl->maxRb) &&
29680 (rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1] <= ueDl->maxTbSz) &&
29681 (rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1] <= ueDl->maxTbSz) &&
29682 ((rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8 +
29683 rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8) <= bo))
29689 availBw = subFrm->bw - subFrm->bwAssigned;
29690 /* Cannot exceed the total number of RBs in the cell */
29691 if ((S16)(numRb - allocInfo->rbsReq) > availBw)
29693 numRb = availBw + allocInfo->rbsReq;
29695 tb1Sz = rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8;
29696 tb2Sz = rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8;
29697 /* DwPts Scheduling Changes Start */
29699 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
29701 /* Max Rb for Special Sf is approximated as 4/3 of maxRb */
29702 rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, (uint8_t*)&numRb, ueDl->maxRb*4/3,
29703 &iTbs1, &iTbs2, noLyr1,
29704 noLyr2, &tb1Sz, &tb2Sz, cfi);
29705 /* Check for available Bw */
29706 if ((S16)numRb - allocInfo->rbsReq > availBw)
29708 numRb = availBw + allocInfo->rbsReq;
29709 tb1Sz = rgTbSzTbl[noLyr1-1][iTbs1][RGSCH_MAX(numRb*3/4,1)-1]/8;
29710 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs2][RGSCH_MAX(numRb*3/4,1)-1]/8;
29714 /* DwPts Scheduling Changes End */
29715 /* Update the subframe Allocated BW field */
29716 subFrm->bwAssigned = subFrm->bwAssigned + numRb - \
/* effBo reports only the newly satisfied bytes beyond what was already
 * outstanding, capped by the requested bytes. */
29719 *effBo = RGSCH_MIN((tb1Sz + tb2Sz) - oldReq, reqBytes);
29722 if (ROK != rgSCHLaaCmn2TBPrbCheck(allocInfo, tb1Sz, tb2Sz, boTmp, effBo, iTbs1, iTbs2, numRb, proc))
29728 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs1, imcs1);
29729 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs2, imcs2);
29730 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tb1Sz, \
29731 iTbs1, imcs1, &proc->tbInfo[0], noLyr1);
29732 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
29733 iTbs2, imcs2, &proc->tbInfo[1], noLyr2);
29734 *numRbRef = (uint8_t)numRb;
29742 * @brief This function determines the RBs and Bytes required for
29743 * Transmission & Retransmission on 2 CWs.
29747 * Function: rgSCHCmnDlAlloc2CwTxRetxRb
29748 * Purpose: This function determines the RBs and Bytes required
29749 * for Transmission & Retransmission on 2 CWs. Allocate
29750 * RETX TB on a better CW and restrict new TX TB by
29752 * Returns RFAILED if BO not satisfied at all.
29754 * Invoked by: TM3 and TM4 DL UE Allocation
29756 * @param[in] RgSchCellCb *cell
29757 * @param[in] RgSchDlSf *subFrm
29758 * @param[in] RgSchUeCb *ue
29759 * @param[in] RgSchDlHqTbCb *reTxTb
29760 * @param[in] RgSchDlHqTbCb *txTb
29761 * @param[out] uint8_t *numRb
29762 * @param[out] uint32_t *effBo
29767 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRetxRb
29772 RgSchDlHqTbCb *reTxTb,
29773 RgSchDlHqTbCb *txTb,
29778 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, reTxTb, txTb, numRb,\
29783 RgSchDlHqTbCb *reTxTb;
29784 RgSchDlHqTbCb *txTb;
29789 RgSchCmnDlUe *ueDl;
29790 RgSchDlRbAlloc *allocInfo;
29791 uint8_t imcs1, imcs2;
29794 RgSchCmnDlUeCwInfo *otherCw;
29796 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
29797 uint8_t cfi = cellDl->currCfi;
29801 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29802 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* The RETX TB occupies the better CW; the new TX TB goes on the other CW. */
29803 otherCw = &ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)];
29806 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
29807 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
29809 availBw = subFrm->bw - subFrm->bwAssigned;
29810 *numRb = reTxTb->dlGrnt.numRb;
29812 #ifdef XEON_TDD_SPCL
29813 *numRb = (reTxTb->initTxNumRbs);
/* Initial TX on a special (DwPTS) subframe but RETX on a normal one:
 * scale the RB count by 3/4 to match the normal subframe capacity. */
29814 if(reTxTb->sfType == RG_SCH_SPL_SF_DATA && subFrm->sfType != RG_SCH_SPL_SF_DATA)
29816 *numRb = (reTxTb->initTxNumRbs*3/4);
29820 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
29826 if ((S16)*numRb > availBw)
29830 /* Update the subframe Allocated BW field */
29831 subFrm->bwAssigned += *numRb;
29832 noLyr2 = otherCw->noLyr;
29833 RG_SCH_CMN_GET_MCS_FOR_RETX(reTxTb, imcs1);
29835 /* If there is no CFI change, continue to use the BLER based
29837 if (ueDl->lastCfi == cfi)
29839 iTbs = otherCw->iTbs[noLyr2-1];
/* CFI changed: re-derive iTbs for the new-TX CW from its CQI (the two
 * variants reflect a TDD/FDD signature difference of rgSchCmnFetchItbs). */
29844 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, subFrm, otherCw->cqi, cfi,
29845 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
29847 iTbs = (uint8_t) rgSchCmnFetchItbs(cell, ueDl, otherCw->cqi, cfi,
29848 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
29851 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs][*numRb-1]/8;
29852 /* DwPts Scheduling Changes Start */
29855 /* DwPts Scheduling Changes End */
29856 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs2);
/* For the RETX TB the iTbs field is irrelevant, hence 0. */
29858 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], reTxTb->tbSz, \
29859 0, imcs1, reTxTb, reTxTb->numLyrs);
29861 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
29862 iTbs, imcs2, txTb, noLyr2);
29864 *effBo = reTxTb->tbSz + tb2Sz;
29871 * @brief This function determines the RBs and Bytes required for BO
29872 * Retransmission on 2 CWs.
29876 * Function: rgSCHCmnDlAlloc2CwRetxRb
29877 * Purpose: This function determines the RBs and Bytes required
29878 * for BO Retransmission on 2 CWs. Allocate larger TB
29879 * on a better CW and check if the smaller TB can be
29880 * accommodated on the other CW.
29881 * Returns RFAILED if BO not satisfied at all.
29883 * Invoked by: Common Scheduler
29885 * @param[in] RgSchCellCb *cell
29886 * @param[in] RgSchDlSf *subFrm
29887 * @param[in] RgSchUeCb *ue
29888 * @param[in] RgSchDlHqProcCb *proc
29889 * @param[out] uint8_t *numRb
29890 * @param[out] Bool *swpFlg
29891 * @param[out] uint32_t *effBo
29896 PRIVATE S16 rgSCHCmnDlAlloc2CwRetxRb
29901 RgSchDlHqProcCb *proc,
29907 PRIVATE S16 rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc,\
29908 numRb, swpFlg, effBo)
29912 RgSchDlHqProcCb *proc;
29918 RgSchDlRbAlloc *allocInfo;
29921 RgSchDlHqTbCb *lrgTbInfo, *othrTbInfo;
29924 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29927 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
29928 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
29930 lrgTbInfo = &proc->tbInfo[0];
29931 othrTbInfo = &proc->tbInfo[1];
29932 *numRb = lrgTbInfo->dlGrnt.numRb;
29933 #ifdef XEON_TDD_SPCL
/* When either TB was initially sent on a special (DwPTS) subframe, base the
 * RB count on that TB's initial-TX RBs, scaled by 3/4 when the current
 * subframe is a normal one. */
29934 if((lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA || othrTbInfo->sfType == RG_SCH_SPL_SF_DATA))
29936 if(lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA)
29938 *numRb = (lrgTbInfo->initTxNumRbs);
29942 *numRb = (othrTbInfo->initTxNumRbs);
29945 if(subFrm->sfType != RG_SCH_SPL_SF_DATA)
29947 *numRb = (*numRb)*3/4;
29952 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
29957 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
29961 /* Update the subframe Allocated BW field */
29962 subFrm->bwAssigned += *numRb;
/* iTbs is irrelevant for RETX TBs, hence 0 in both fills. */
29963 RG_SCH_CMN_GET_MCS_FOR_RETX(lrgTbInfo, imcs1);
29964 RG_SCH_CMN_GET_MCS_FOR_RETX(othrTbInfo, imcs2);
29965 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], lrgTbInfo->tbSz, \
29966 0, imcs1, lrgTbInfo, lrgTbInfo->numLyrs);
29967 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], othrTbInfo->tbSz, \
29968 0, imcs2, othrTbInfo, othrTbInfo->numLyrs);
29969 *effBo = lrgTbInfo->tbSz + othrTbInfo->tbSz;
29978 * @brief This function determines the RBs and Bytes required for BO
29979 * Retransmission on 1 CW.
29983 * Function: rgSCHCmnDlAlloc1CwRetxRb
29984 * Purpose: This function determines the RBs and Bytes required
29985 * for BO Retransmission on 1 CW, the first CW.
29986 * Returns RFAILED if BO not satisfied at all.
29988 * Invoked by: Common Scheduler
29990 * @param[in] RgSchCellCb *cell
29991 * @param[in] RgSchDlSf *subFrm
29992 * @param[in] RgSchUeCb *ue
29993 * @param[in] RgSchDlHqTbCb *tbInfo
29994 * @param[in] uint8_t noLyr
29995 * @param[out] uint8_t *numRb
29996 * @param[out] uint32_t *effBo
30001 PRIVATE S16 rgSCHCmnDlAlloc1CwRetxRb
30006 RgSchDlHqTbCb *tbInfo,
30012 PRIVATE S16 rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, tbInfo, noLyr,\
30017 RgSchDlHqTbCb *tbInfo;
30023 RgSchDlRbAlloc *allocInfo;
30027 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
30030 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
30031 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
/* Reuse the original grant's RB count; fail if it no longer fits in the
 * remaining subframe bandwidth. */
30033 *numRb = tbInfo->dlGrnt.numRb;
30034 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
30038 /* Update the subframe Allocated BW field */
30039 subFrm->bwAssigned += *numRb;
30040 imcs = tbInfo->dlGrnt.iMcs;
30041 allocInfo->dciFormat = tbInfo->dlGrnt.dciFormat;
30042 /* Fix: For a RETX TB the iTbs is irrelevant, hence setting 0 */
30043 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbInfo->tbSz, \
30044 0, imcs, tbInfo, tbInfo->numLyrs);
30045 *effBo = tbInfo->tbSz;
30053 * @brief This function is called to handle Release PDCCH feedback for SPS UE
30057 * Function: rgSCHCmnDlRelPdcchFbk
30058 * Purpose: Invokes SPS module to handle release PDCCH feedback
30062 * @param[in] RgSchCellCb *cell
30063 * @param[in] RgSchUeCb *ue
30064 * @param[in] Bool isAck
/* Thin dispatch wrapper: forwards SPS release-PDCCH HARQ feedback
 * (cell, ue, isAck) to the DL SPS module. */
30069 Void rgSCHCmnDlRelPdcchFbk
30076 Void rgSCHCmnDlRelPdcchFbk(cell, ue, isAck)
30083 rgSCHCmnSpsDlRelPdcchFbk(cell, ue, isAck);
30090 * @brief This function is invoked to handle Ack processing for a HARQ proc.
30094 * Function: rgSCHCmnDlProcAck
30095 * Purpose: DTX processing for HARQ proc
30099 * @param[in] RgSchCellCb *cell
30100 * @param[in] RgSchDlHqProcCb *hqP
/* ACK processing hook for a DL HARQ process: delegates to the SPS
 * module only when the HARQ process belongs to an SPS allocation. */
30105 Void rgSCHCmnDlProcAck
30108 RgSchDlHqProcCb *hqP
30111 Void rgSCHCmnDlProcAck(cell, hqP)
30113 RgSchDlHqProcCb *hqP;
/* Only SPS HARQ processes need the extra handling */
30118 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
30120 /* Invoke SPS module if SPS service was scheduled for this HARQ proc */
30121 rgSCHCmnSpsDlProcAck(cell, hqP);
30125 #ifdef RGSCH_SPS_STATS
30126 extern uint32_t rgSchStatCrntiCeRcvCnt;
30129 * @brief This function is invoked to handle CRNTI CE reception for an UE
30133 * Function: rgSCHCmnHdlCrntiCE
30134 * Purpose: Handle CRNTI CE reception
30138 * @param[in] RgSchCellCb *cell
30139 * @param[in] RgSchDlHqProcCb *hqP
/* Handles reception of a C-RNTI CE from a UE: re-activates a UE parked
 * in the PDCCH-order inactive list and resets any configured DL/UL SPS
 * state (same handling as a UE RESET for SPS). */
30144 Void rgSCHCmnHdlCrntiCE
30150 Void rgSCHCmnHdlCrntiCE(cell, ue)
30156 #ifdef RGSCH_SPS_STATS
/* Stats-only counter of received C-RNTI CEs */
30157 rgSchStatCrntiCeRcvCnt++;
30160 /* When UL sync lost happened due to TA timer expiry UE is being moved to
30161 PDCCH order inactivity list.But when CRNTI CE received in msg3 from UE
30162 we are not moving UE into active state due to that RRC Reconfiguration is
30164 So here we are moving UE to active list whenever we receive the CRNTI CE and
30166 /* CR ccpu00144525 */
30167 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
30169 /* Activate this UE if it was inactive */
30170 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
30171 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
30174 /* Handling is same as reception of UE RESET for both DL and UL */
30175 if (ue->dl.dlSpsCfg.isDlSpsEnabled)
30177 rgSCHCmnSpsDlUeReset(cell, ue);
30179 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30181 rgSCHCmnSpsUlUeReset(cell, ue);
30189 * @brief This function is called to handle relInd from MAC for a UE
30193 * Function: rgSCHCmnUlSpsRelInd
30194 * Purpose: Invokes SPS module to handle UL SPS release for a UE
30196 * Invoked by: SCH_UTL
30198 * @param[in] RgSchCellCb *cell
30199 * @param[in] RgSchUeCb *ue
30200 * @param[in] Bool isExplRel
/* Thin dispatch wrapper: forwards an UL SPS release indication from MAC
 * (isExplRel distinguishes explicit release) to the UL SPS module. */
30205 Void rgSCHCmnUlSpsRelInd
30212 Void rgSCHCmnUlSpsRelInd(cell, ue, isExplRel)
30219 rgSCHCmnSpsUlProcRelInd(cell, ue, isExplRel);
30222 } /* end of rgSCHCmnUlSpsRelInd */
30225 * @brief This function is called to handle SPS Activate Ind from MAC for a UE
30229 * Function: rgSCHCmnUlSpsActInd
30230 * Purpose: Invokes SPS module to handle UL SPS activate for a UE
30232 * Invoked by: SCH_UTL
30234 * @param[in] RgSchCellCb *cell
30235 * @param[in] RgSchUeCb *ue
/* Forwards an UL SPS activation indication (with the SPS SDU size) to
 * the UL SPS module, but only when UL SPS is configured for the UE. */
30240 Void rgSCHCmnUlSpsActInd
30244 uint16_t spsSduSize
30247 Void rgSCHCmnUlSpsActInd(cell, ue,spsSduSize)
30250 uint16_t spsSduSize;
/* No-op unless UL SPS is enabled in the UE's configuration */
30255 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30257 rgSCHCmnSpsUlProcActInd(cell, ue,spsSduSize);
30261 } /* end of rgSCHCmnUlSpsActInd */
30264 * @brief This function is called to handle CRC in UL for UEs
30265 * undergoing SPS release
30269 * Function: rgSCHCmnUlCrcInd
30270 * Purpose: Invokes SPS module to handle CRC in UL for SPS UE
30272 * Invoked by: SCH_UTL
30274 * @param[in] RgSchCellCb *cell
30275 * @param[in] RgSchUeCb *ue
30276 * @param[in] CmLteTimingInfo crcTime
/* Forwards a successful UL CRC indication (with its timing info) to the
 * UL SPS module for UEs undergoing SPS release; no-op otherwise. */
30281 Void rgSCHCmnUlCrcInd
30285 CmLteTimingInfo crcTime
30288 Void rgSCHCmnUlCrcInd(cell, ue, crcTime)
30291 CmLteTimingInfo crcTime;
30295 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30297 rgSCHCmnSpsUlProcCrcInd(cell, ue, crcTime);
30301 } /* end of rgSCHCmnUlCrcInd */
30304 * @brief This function is called to handle CRC failure in UL
30308 * Function: rgSCHCmnUlCrcFailInd
30309 * Purpose: Invokes SPS module to handle CRC failure in UL for SPS UE
30311 * Invoked by: SCH_UTL
30313 * @param[in] RgSchCellCb *cell
30314 * @param[in] RgSchUeCb *ue
30315 * @param[in] CmLteTimingInfo crcTime
/* Forwards an UL CRC failure indication to the UL SPS module; note it
 * is treated as DTX by SPS (rgSCHCmnSpsUlProcDtxInd). */
30320 Void rgSCHCmnUlCrcFailInd
30324 CmLteTimingInfo crcTime
30327 Void rgSCHCmnUlCrcFailInd(cell, ue, crcTime)
30330 CmLteTimingInfo crcTime;
30334 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30336 rgSCHCmnSpsUlProcDtxInd(cell, ue, crcTime);
30340 } /* end of rgSCHCmnUlCrcFailInd */
30342 #endif /* LTEMAC_SPS */
30345 * @brief BCH, BCCH, PCCH Downlink Scheduling Handler.
30349 * Function: rgSCHCmnDlBcchPcchAlloc
30350 * Purpose: This function calls common scheduler APIs to
30351 * schedule for BCCH/PCCH.
30352 * It then invokes Allocator for actual RB
30353 * allocations. It processes on the actual resources allocated
30354 * against requested to the allocator module.
30356 * Invoked by: Common Scheduler
30358 * @param[in] RgSchCellCb *cell
/* Top-level BCCH/PCCH DL scheduling entry: selects SI to send, performs
 * the BCCH/PCCH scheduling pass, runs RB allocation and finalizes the
 * allocations for the next subframe's interface record. */
30362 PRIVATE Void rgSCHCmnDlBcchPcchAlloc
30367 PRIVATE Void rgSCHCmnDlBcchPcchAlloc(cell)
/* Index of the subframe-allocation record being prepared; which branch
 * compiles depends on build flags (HDFDD adds RG_SCH_CMN_HARQ_INTERVAL) */
30372 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
30374 #ifdef LTEMAC_HDFDD
30375 uint8_t nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
30377 uint8_t nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
30380 RgInfSfAlloc *nextsfAlloc = &(cell->sfAllocArr[nextSfIdx]);
30381 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
30382 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
30386 /*Reset the bitmask for BCCH/PCCH*/
30387 rgSCHUtlResetSfAlloc(nextsfAlloc,TRUE,FALSE);
30388 #ifndef DISABLE_MIB_SIB /* Not sending MIB and SIB to CL */
/* Refresh SI configuration (modification-period boundary) and choose
 * the SI to transmit in the upcoming window */
30390 rgSCHChkNUpdSiCfg(cell);
30391 rgSCHSelectSi(cell);
30394 /*Perform the scheduling for BCCH,PCCH*/
30395 rgSCHCmnDlBcchPcch(cell, allocInfo, nextsfAlloc);
30397 /* Call common allocator for RB Allocation */
30398 rgSCHBcchPcchDlRbAlloc(cell, allocInfo);
30400 /* Finalize the allocations: reconcile requested vs. allocated resources */
30401 rgSCHCmnDlBcchPcchFnlz(cell, allocInfo);
30402 #endif /* DISABLE_MIB_SIB */
30407 * @brief Handles RB allocation for BCCH/PCCH for downlink.
30411 * Function : rgSCHBcchPcchDlRbAlloc
30413 * Invoking Module Processing:
30414 * - This function is invoked for DL RB allocation of BCCH/PCCH
30416 * Processing Steps:
30417 * - If cell is frequency selective,
30418 * - Call rgSCHDlfsBcchPcchAllocRb().
30420 * - Do the processing
30422 * @param[in] RgSchCellCb *cell
30423 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* Dispatches BCCH/PCCH RB allocation to the frequency-selective (DLFS)
 * allocator when enabled for the cell, else to the non-DLFS allocator. */
30428 PRIVATE Void rgSCHBcchPcchDlRbAlloc
30431 RgSchCmnDlRbAllocInfo *allocInfo
30434 PRIVATE Void rgSCHBcchPcchDlRbAlloc(cell, allocInfo)
30436 RgSchCmnDlRbAllocInfo *allocInfo;
30439 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
30443 if (cellSch->dl.isDlFreqSel)
/* Frequency-selective path: virtual call into the DLFS API table */
30445 cellSch->apisDlfs->rgSCHDlfsBcchPcchAllocRb(cell, allocInfo);
30449 rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo);
30456 * @brief Handles RB allocation for BCCH,PCCH for frequency
30457 * non-selective cell.
30461 * Function : rgSCHCmnNonDlfsBcchPcchRbAlloc
30463 * Invoking Module Processing:
30464 * - SCH shall invoke this if downlink frequency selective is disabled for
30465 * the cell for RB allocation.
30466 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
30467 * estimate and subframe for each allocation to be made to SCH.
30469 * Processing Steps:
30470 * - Allocate sequentially for BCCH,PCCH common channels.
30472 * @param[in] RgSchCellCb *cell
30473 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* Non-frequency-selective RB allocation for common channels: allocates
 * sequentially, PCCH first then BCCH-on-DLSCH, each only if RBs were
 * requested for it. */
30478 PRIVATE Void rgSCHCmnNonDlfsBcchPcchRbAlloc
30481 RgSchCmnDlRbAllocInfo *allocInfo
30484 PRIVATE Void rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo)
30486 RgSchCmnDlRbAllocInfo *allocInfo;
30489 RgSchDlRbAlloc *reqAllocInfo;
30493 /* Allocate for PCCH */
30494 reqAllocInfo = &(allocInfo->pcchAlloc);
30495 if (reqAllocInfo->rbsReq)
30497 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
30499 /* Allocate for BCCH on DLSCH */
30500 reqAllocInfo = &(allocInfo->bcchAlloc);
30501 if (reqAllocInfo->rbsReq)
30503 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
30511 * @brief This function implements the handling to check and
30512 * update the SI cfg at the start of the modification period.
30516 * Function: rgSCHChkNUpdSiCfg
30517 * Purpose: This function implements handling for update of SI Cfg
30518 * at the start of modification period.
30520 * Invoked by: Scheduler
30522 * @param[in] RgSchCellCb* cell
/* Checks pending SI configuration updates and applies them at the start
 * of a modification period: swaps in new MIB/SIB1/SI message buffers and
 * their (mcs, nPrb, msgLen) parameters, and finally the SI config itself.
 * PWS (warning) SIB1 updates are applied immediately, without waiting for
 * the modification-period boundary. */
30528 PRIVATE Void rgSCHChkNUpdSiCfg
30533 PRIVATE Void rgSCHChkNUpdSiCfg(cell)
30537 CmLteTimingInfo pdSchTmInfo;
/* Work on a look-ahead copy of the current time */
30541 pdSchTmInfo = cell->crntTime;
30542 #ifdef LTEMAC_HDFDD
30543 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30544 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30545 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30547 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA);
30551 /* Updating the SIB1 for Warning SI message immediately after it is received
30552 * from application. No need to wait for next modification period.
/* SIB1 transmission occasion: repetition-period SFN and its fixed SF */
30554 if((pdSchTmInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
30555 && (RGSCH_SIB1_TX_SF_NUM == (pdSchTmInfo.slot % RGSCH_NUM_SUB_FRAMES)))
30557 /*Check whether SIB1 with PWS has been updated*/
30558 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_PWS_UPD)
/* Swap in the new SIB1 buffer and its transmission parameters */
30560 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
30561 cell->siCb.newSiInfo.sib1Info.sib1);
30562 cell->siCb.crntSiInfo.sib1Info.mcs =
30563 cell->siCb.newSiInfo.sib1Info.mcs;
30564 cell->siCb.crntSiInfo.sib1Info.nPrb =
30565 cell->siCb.newSiInfo.sib1Info.nPrb;
30566 cell->siCb.crntSiInfo.sib1Info.msgLen =
30567 cell->siCb.newSiInfo.sib1Info.msgLen;
/* Clear the pending-update flag now that it is applied */
30568 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_PWS_UPD;
30572 /*Check if this SFN and SF No marks the start of next modification
30573 period. If current SFN,SF No doesn't marks the start of next
30574 modification period, then return. */
30575 if(!((pdSchTmInfo.sfn % cell->siCfg.modPrd == 0)
30576 && (0 == pdSchTmInfo.slot)))
30577 /*if(!((((pdSchTmInfo.hSfn * 1024) + pdSchTmInfo.sfn) % cell->siCfg.modPrd == 0)
30578 && (0 == pdSchTmInfo.slot)))*/
30583 /*Check whether MIB has been updated*/
30584 if(cell->siCb.siBitMask & RGSCH_SI_MIB_UPD)
30586 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.mib,
30587 cell->siCb.newSiInfo.mib);
30588 cell->siCb.siBitMask &= ~RGSCH_SI_MIB_UPD;
30591 /*Check whether SIB1 has been updated*/
30592 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_UPD)
30594 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
30595 cell->siCb.newSiInfo.sib1Info.sib1);
30596 cell->siCb.crntSiInfo.sib1Info.mcs = cell->siCb.newSiInfo.sib1Info.mcs;
30597 cell->siCb.crntSiInfo.sib1Info.nPrb = cell->siCb.newSiInfo.sib1Info.nPrb;
30598 cell->siCb.crntSiInfo.sib1Info.msgLen =
30599 cell->siCb.newSiInfo.sib1Info.msgLen;
30600 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_UPD;
30603 /*Check whether SIs have been updated*/
30604 if(cell->siCb.siBitMask & RGSCH_SI_SI_UPD)
30608 /*Check if SI cfg have been modified And Check if numSi have
30609 been changed, if yes then we would need to update the
30610 pointers for all the SIs */
30611 if((cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD) &&
30612 (cell->siCfg.numSi != cell->siCb.newSiCfg.numSi))
/* numSi changed: refresh every SI entry up to the new count */
30614 for(idx = 0;idx < cell->siCb.newSiCfg.numSi;idx++)
30616 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
30617 cell->siCb.newSiInfo.siInfo[idx].si);
30618 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
30619 cell->siCb.siArray[idx].isWarningSi = FALSE;
30621 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
30622 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
30623 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
30626 /*If numSi have been reduced then we need to free the
30627 pointers at the indexes in crntSiInfo which haven't
30628 been exercised. If numSi has increased then nothing
30629 additional is requires as above handling has taken
30631 if(cell->siCfg.numSi > cell->siCb.newSiCfg.numSi)
30633 for(idx = cell->siCb.newSiCfg.numSi;
30634 idx < cell->siCfg.numSi;idx++)
30636 RGSCH_FREE_MSG(cell->siCb.crntSiInfo.siInfo[idx].si);
30637 cell->siCb.siArray[idx].si = NULLP;
30643 /*numSi has not been updated, we just need to update the
30644 pointers for the SIs which are set to NON NULLP */
30645 /*ccpu00118260 - Correct Update of SIB2 */
30646 for(idx = 0;idx < cell->siCfg.numSi;idx++)
30648 if(NULLP != cell->siCb.newSiInfo.siInfo[idx].si)
30650 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
30651 cell->siCb.newSiInfo.siInfo[idx].si);
30653 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
30654 cell->siCb.siArray[idx].isWarningSi = FALSE;
30655 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
30656 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
30657 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
30661 cell->siCb.siBitMask &= ~RGSCH_SI_SI_UPD;
30664 /*Check whether SI cfg have been updated*/
30665 if(cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD)
/* Apply the new SI configuration last, after all buffers are swapped */
30667 cell->siCfg = cell->siCb.newSiCfg;
30668 cell->siCb.siBitMask &= ~RGSCH_SI_SICFG_UPD;
30676 * @brief This function implements the selection of the SI
30677 * that is to be scheduled.
30681 * Function: rgSCHSelectSi
30682 * Purpose: This function implements the selection of SI
30683 * that is to be scheduled.
30685 * Invoked by: Scheduler
30687 * @param[in] RgSchCellCb* cell
/* Selects the SI message to transmit for the upcoming SI window.
 * Maintains the inWindow countdown so selection happens only once per
 * window; at a window boundary it maps (sfn, slot) to a window id, and
 * if a valid SI exists for that window and its periodicity is due, it
 * (re)initializes the SI transmission context (siCtx). */
30693 PRIVATE Void rgSCHSelectSi
30698 PRIVATE Void rgSCHSelectSi(cell)
30702 CmLteTimingInfo crntTmInfo;
/* Look-ahead copy of current time (DL delta; +HARQ interval for HDFDD) */
30709 crntTmInfo = cell->crntTime;
30710 #ifdef LTEMAC_HDFDD
30711 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30712 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30713 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30715 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA);
30718 siWinSize = cell->siCfg.siWinSize;
30720 /* Select SI only once at the starting of the new window */
30721 if(cell->siCb.inWindow)
30723 if ((crntTmInfo.sfn % cell->siCfg.minPeriodicity) == 0 &&
30724 crntTmInfo.slot == 0)
30726 /* Reinit inWindow at the beginning of every SI window */
30727 cell->siCb.inWindow = siWinSize - 1;
/* Still inside the current window: just count down */
30731 cell->siCb.inWindow--;
30735 else /* New window. Re-init the winSize counter with the window length */
/* Drop an unsent warning-SI PDU left over from the previous window */
30737 if((cell->siCb.siArray[cell->siCb.siCtx.siId - 1].isWarningSi == TRUE)&&
30738 (cell->siCb.siCtx.retxCntRem != 0))
30740 rgSCHUtlFreeWarningSiPdu(cell);
30741 cell->siCb.siCtx.warningSiFlag = FALSE;
30744 cell->siCb.inWindow = siWinSize - 1;
/* SI-set id: which minPeriodicity-sized set the current time falls in */
30747 x = rgSCHCmnGetSiSetId(crntTmInfo.sfn, crntTmInfo.slot,
30748 cell->siCfg.minPeriodicity);
30750 /* Window Id within a SI set. This window Id directly maps to a
30752 windowId = (((crntTmInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) +
30753 crntTmInfo.slot) - (x * (cell->siCfg.minPeriodicity * 10)))
/* Guard: window id beyond the configured maximum number of SIs */
30756 if(windowId >= RGR_MAX_NUM_SI)
30759 /* Update the siCtx if there is a valid SI and its periodicity
30761 if (NULLP != cell->siCb.siArray[windowId].si)
30763 /* Warning SI Periodicity is same as SIB2 Periodicity */
30764 if(((cell->siCb.siArray[windowId].isWarningSi == FALSE) &&
30765 (x % (cell->siCfg.siPeriodicity[windowId]
30766 /cell->siCfg.minPeriodicity) == 0)) ||
30767 ((cell->siCb.siArray[windowId].isWarningSi == TRUE) &&
30768 (x % (cell->siCfg.siPeriodicity[0]
30769 /cell->siCfg.minPeriodicity) == 0)))
/* Initialize the transmission context for the selected SI:
 * siId is 1-based, retx budget from config, window span
 * [timeToTx, maxTimeToTx] covers siWinSize subframes */
30771 cell->siCb.siCtx.siId = windowId+1;
30772 cell->siCb.siCtx.retxCntRem = cell->siCfg.retxCnt;
30773 cell->siCb.siCtx.warningSiFlag = cell->siCb.siArray[windowId].
30775 cell->siCb.siCtx.timeToTx.sfn = crntTmInfo.sfn;
30776 cell->siCb.siCtx.timeToTx.slot = crntTmInfo.slot;
30778 RG_SCH_ADD_TO_CRNT_TIME(cell->siCb.siCtx.timeToTx,
30779 cell->siCb.siCtx.maxTimeToTx, (siWinSize - 1))
30783 {/* Update the siCtx with invalid si Id */
30784 cell->siCb.siCtx.siId = 0;
30792 * @brief This function implements scheduler DL allocation for
30797 * Function: rgSCHDlSiSched
30798 * Purpose: This function implements scheduler for DL allocation
30801 * Invoked by: Scheduler
30803 * @param[in] RgSchCellCb* cell
/* DL scheduling of broadcast system information for the look-ahead
 * subframe: transmits MIB on its occasion (patching the SFN bits into
 * the MIB buffer), then schedules either SIB1 on its occasion or the
 * currently selected SI context, computing the RBs needed (with DwPTS
 * compensation for TDD special subframes) and filling bcchAlloc. */
30809 PRIVATE Void rgSCHDlSiSched
30812 RgSchCmnDlRbAllocInfo *allocInfo,
30813 RgInfSfAlloc *subfrmAlloc
30816 PRIVATE Void rgSCHDlSiSched(cell, allocInfo, subfrmAlloc)
30818 RgSchCmnDlRbAllocInfo *allocInfo;
30819 RgInfSfAlloc *subfrmAlloc;
30822 CmLteTimingInfo crntTimInfo;
30828 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
30829 /* DwPTS Scheduling Changes Start */
30832 uint8_t cfi = cellDl->currCfi;
30834 /* DwPTS Scheduling Changes End */
/* Look-ahead copy of current time (DL delta; +HARQ interval for HDFDD) */
30838 crntTimInfo = cell->crntTime;
30839 #ifdef LTEMAC_HDFDD
30840 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30841 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30842 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30844 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA);
30847 /* Compute the subframe for which allocation is being made.
30848 Essentially, we need pointer to the dl frame for this subframe */
30849 sf = rgSCHUtlSubFrmGet(cell, crntTimInfo);
30851 /*Check if scheduling of MIB is required */
30853 /* since we are adding the MIB repetition logic for EMTC UEs, checking if
30854 * emtcEnabled or not, If enabled MIB would be repeted at as part of EMTC
30855 * feature, otherwise scheduling at (n,0) */
30856 if(0 == cell->emtcEnable)
/* MIB occasion: periodicity boundary and its fixed subframe number */
30859 if((crntTimInfo.sfn % RGSCH_MIB_PERIODICITY == 0)
30860 && (RGSCH_MIB_TX_SF_NUM == crntTimInfo.slot))
30863 uint8_t sfnOctet, mibOct2 = 0;
30864 uint8_t mibOct1 = 0;
30865 /*If MIB has not been yet setup by Application, return*/
30866 if(NULLP == cell->siCb.crntSiInfo.mib)
30869 SFndLenMsg(cell->siCb.crntSiInfo.mib, &mibLen);
30870 sf->bch.tbSize = mibLen;
30871 /*Fill the interface information */
30872 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, NULLD, NULLD);
30874 /*Set the bits of MIB to reflect SFN */
30875 /*First get the Most signficant 8 bits of SFN */
30876 sfnOctet = (uint8_t)(crntTimInfo.sfn >> 2);
30877 /*Get the first two octets of MIB, and then update them
30878 using the SFN octet value obtained above.*/
30879 if(ROK != SExamMsg((Data *)(&mibOct1),
30880 cell->siCb.crntSiInfo.mib, 0))
30883 if(ROK != SExamMsg((Data *)(&mibOct2),
30884 cell->siCb.crntSiInfo.mib, 1))
30887 /* ccpu00114572- Fix for improper way of MIB Octet setting for SFN */
/* Splice the 8 MSBs of the SFN across the last 2 bits of octet 0
 * and the first 6 bits of octet 1 */
30888 mibOct1 = (mibOct1 & 0xFC) | (sfnOctet >> 6);
30889 mibOct2 = (mibOct2 & 0x03) | (sfnOctet << 2);
30890 /* ccpu00114572- Fix ends*/
30892 /*Now, replace the two octets in MIB */
30893 if(ROK != SRepMsg((Data)(mibOct1),
30894 cell->siCb.crntSiInfo.mib, 0))
30897 if(ROK != SRepMsg((Data)(mibOct2),
30898 cell->siCb.crntSiInfo.mib, 1))
30901 /*Copy the MIB msg buff into interface buffer */
30902 SCpyMsgMsg(cell->siCb.crntSiInfo.mib,
30903 rgSchCb[cell->instIdx].rgSchInit.region,
30904 rgSchCb[cell->instIdx].rgSchInit.pool,
30905 &subfrmAlloc->cmnLcInfo.bchInfo.pdu);
30906 /* Added Dl TB count for MIB message transmission
30907 * This counter is incremented 4 times to consider
30908 * the retransmission at the PHY level on PBCH channel*/
30910 cell->dlUlTbCnt.tbTransDlTotalCnt += RG_SCH_MIB_CNT;
30917 allocInfo->bcchAlloc.schdFirst = FALSE;
30918 /*Check if scheduling of SIB1 is required.
30919 Check of (crntTimInfo.sfn % RGSCH_SIB1_PERIODICITY == 0)
30920 is not required here since the below check takes care
30921 of SFNs applicable for this one too.*/
30922 if((crntTimInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
30923 && (RGSCH_SIB1_TX_SF_NUM == crntTimInfo.slot))
30925 /*If SIB1 has not been yet setup by Application, return*/
30926 if(NULLP == (cell->siCb.crntSiInfo.sib1Info.sib1))
/* SIB1 path: take its preconfigured MCS/nPrb/length */
30931 allocInfo->bcchAlloc.schdFirst = TRUE;
30932 mcs = cell->siCb.crntSiInfo.sib1Info.mcs;
30933 nPrb = cell->siCb.crntSiInfo.sib1Info.nPrb;
30934 msgLen = cell->siCb.crntSiInfo.sib1Info.msgLen;
30938 /*Check if scheduling of SI can be performed.*/
30939 Bool invalid = FALSE;
/* siId == 0 means no SI selected for this window (see rgSCHSelectSi) */
30941 if(cell->siCb.siCtx.siId == 0)
30944 /*Check if the Si-Window for the current Si-Context is completed*/
30945 invalid = rgSCHCmnChkPastWin(crntTimInfo, cell->siCb.siCtx.maxTimeToTx);
30948 /* LTE_ADV_FLAG_REMOVED_START */
/* Window expired with pending retransmissions: log and clean up */
30949 if(cell->siCb.siCtx.retxCntRem)
30951 RGSCHLOGERROR(cell->instIdx,ERRCLS_INT_PAR,ERG011,(ErrVal)cell->siCb.siCtx.siId,
30952 "rgSCHDlSiSched(): SI not scheduled and window expired");
30954 /* LTE_ADV_FLAG_REMOVED_END */
30955 if(cell->siCb.siCtx.warningSiFlag == TRUE)
30957 rgSCHUtlFreeWarningSiPdu(cell);
30958 cell->siCb.siCtx.warningSiFlag = FALSE;
30963 /*Check the timinginfo of the current SI-Context to see if its
30964 transmission can be scheduled. */
30965 if(FALSE == (rgSCHCmnChkInWin(crntTimInfo,
30966 cell->siCb.siCtx.timeToTx,
30967 cell->siCb.siCtx.maxTimeToTx)))
30972 /*Check if retransmission count has become 0*/
30973 if(0 == cell->siCb.siCtx.retxCntRem)
30978 /* LTE_ADV_FLAG_REMOVED_START */
30979 /* Check if ABS is enabled/configured */
30980 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
30982 /* The pattern type is RGR_ABS_MUTE, then eNB need to blank the subframe */
30983 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
30985 /* Determine next scheduling subframe is ABS or not */
30986 if(RG_SCH_ABS_ENABLED_ABS_SF == (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern
30987 [((crntTimInfo.sfn*RGSCH_NUM_SUB_FRAMES) + crntTimInfo.slot) % RGR_ABS_PATTERN_LEN]))
30989 /* Skip the SI scheduling to next tti */
30994 /* LTE_ADV_FLAG_REMOVED_END */
30996 /*Schedule the transmission of the current SI-Context */
30997 /*Find out the messg length for the SI message */
30998 /* warningSiFlag is to differentiate between Warning SI
31000 if((rgSCHUtlGetMcsAndNPrb(cell, &nPrb, &mcs, &msgLen)) != ROK)
/* 'i' = subframe offset of this transmission within the SI window */
31005 cell->siCb.siCtx.i = RGSCH_CALC_SF_DIFF(crntTimInfo,
31006 cell->siCb.siCtx.timeToTx);
31010 /*Get the number of rb required */
31011 /*rgSCHCmnClcRbAllocForFxdTb(cell, msgLen, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: fall back to a TBS-table walk at the lowest iTbs;
 * otherwise derive RBs directly from the per-RB bit capacity */
31012 if(cellDl->bitsPerRb==0)
31014 while ((rgTbSzTbl[0][0][rb]) < (uint32_t) (msgLen*8))
31022 rb = RGSCH_CEIL((msgLen*8), cellDl->bitsPerRb);
31024 /* DwPTS Scheduling Changes Start */
31026 if (sf->sfType == RG_SCH_SPL_SF_DATA)
31028 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
31030 /* Calculate the less RE's because of DwPTS */
31031 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
31033 /* Increase number of RBs in Spl SF to compensate for lost REs */
31034 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
31037 /* DwPTS Scheduling Changes End */
31038 /*ccpu00115595- end*/
31039 /* Additional check to see if required RBs
31040 * exceeds the available */
31041 if (rb > sf->bw - sf->bwAssigned)
31043 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHDlSiSched(): "
31044 "BW allocation failed CRNTI:%d",RGSCH_SI_RNTI);
31048 /* Update the subframe Allocated BW field */
31049 sf->bwAssigned = sf->bwAssigned + rb;
31051 /*Fill the parameters in allocInfo */
31052 allocInfo->bcchAlloc.rnti = RGSCH_SI_RNTI;
31053 allocInfo->bcchAlloc.dlSf = sf;
31054 allocInfo->bcchAlloc.rbsReq = rb;
31055 /*ccpu00116710- MCS is not getting assigned */
31056 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
31058 /* ccpu00117510 - ADD - Assignment of nPrb and other information */
31059 allocInfo->bcchAlloc.nPrb = nPrb;
31060 allocInfo->bcchAlloc.tbInfo[0].bytesReq = msgLen;
/* Broadcast SI always uses a single layer */
31061 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
31064 #endif /*RGR_SI_SCH*/
31067 /* ccpu00117452 - MOD - Changed macro name from
31068 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
31069 #ifdef RGR_CQI_REPT
31071 * @brief This function Updates the DL CQI for the UE.
31075 * Function: rgSCHCmnUeDlPwrCtColltCqiRept
31076 * Purpose: Manages PUSH N CQI reporting
31077 * Step 1: Store the CQI in the collation array
31078 * Step 2: Increment the tracking count
31079 * Step 3: Check whether it is time to send the report
31080 * Step 4: If yes, send StaInd to RRM
31081 * Step 4.1: Fill StaInd for sending the collated N CQI reports
31082 * Step 4.2: Call utility function (rgSCHUtlRgrStaInd) to send rpts to RRM
31083 * Step 4.2.1: If sending was not successful, return RFAILED
31084 * Step 4.2.2: If sending was successful, return ROK
31085 * Step 5: If no, return
31086 * Invoked by: rgSCHCmnDlCqiInd
31088 * @param[in] RgSchCellCb *cell
31089 * @param[in] RgSchUeCb *ue
31090 * @param[in] RgrUeCqiRept *ueCqiRpt
/* Collates DL CQI reports for PUSH-N reporting: stores the report in
 * the UE's collation array, and when the configured count is reached,
 * allocates and sends a StaInd carrying the collected reports to RRM.
 * Returns RFAILED on allocation/send failure (per surrounding error
 * handling; exact return paths partly outside this view). */
31095 PRIVATE S16 rgSCHCmnUeDlPwrCtColltCqiRept
31099 RgrUeCqiRept *ueCqiRpt
31102 PRIVATE S16 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, ueCqiRpt)
31105 RgrUeCqiRept *ueCqiRpt;
31108 uint8_t *cqiCount = NULLP;
31110 RgrStaIndInfo *staInfo = NULLP;
31113 /* Step 1: Store the CQI in collation array */
31114 /* Step 2: Increment the tracking count */
/* Post-increment stores at the current slot, then advances the count */
31115 cqiCount = &(ue->schCqiInfo.cqiCount);
31116 ue->schCqiInfo.cqiRept[(*cqiCount)++] =
31120 /* Step 3: Check is it time to send the report */
31121 if(RG_SCH_CQIR_IS_TIMTOSEND_CQIREPT(ue))
31123 /* Step 4: if yes, Send StaInd to RRM */
31124 retVal = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&staInfo,
31125 sizeof(RgrStaIndInfo));
31128 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
31129 "allocate memory for sending StaInd CRNTI:%d",ue->ueId);
31133 /* Step 4.1: Fill StaInd for sending collated N CQI reports */
31136 extern uint32_t gCqiReptToAppCount;
31137 gCqiReptToAppCount++;
31142 retVal = rgSCHUtlFillSndStaInd(cell, ue, staInfo,
31143 ue->cqiReptCfgInfo.numColltdCqiRept);
31149 } /* End of rgSCHCmnUeDlPwrCtColltCqiRept */
31151 #endif /* End of RGR_CQI_REPT */
31154 * @brief This function checks for the retransmisson
31155 * for a DTX scenario.
31162 * @param[in] RgSchCellCb *cell
31163 * @param[in] RgSchUeCb *ue
/* Decides whether a retransmission is allowed after a DTX scenario:
 * disallows retx (via *reTxAllwd) when TB0's feedback was DTX. */
31169 Void rgSCHCmnChkRetxAllowDtx
31173 RgSchDlHqProcCb *proc,
31177 Void rgSCHCmnChkRetxAllowDtx(cell, ueCb, proc, reTxAllwd)
31180 RgSchDlHqProcCb *proc;
31188 if ((proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX))
31190 *reTxAllwd = FALSE;
31197 * @brief API for calculating the SI Set Id
31201 * Function: rgSCHCmnGetSiSetId
31203 * This API is used for calculating the SI Set Id, as shown below
31205 * siSetId = 0 siSetId = 1
31206 * |******************|******************|---------------->
31207 * (0,0) (8,0) (16,0) (SFN, SF)
31210 * @param[in] uint16_t sfn
31211 * @param[in] uint8_t sf
31212 * @return uint16_t siSetId
/* Maps absolute time (sfn, sf) to an SI-set id: each set spans
 * minPeriodicity * 10 subframes (minPeriodicity is in frames). */
31215 uint16_t rgSCHCmnGetSiSetId
31219 uint16_t minPeriodicity
31222 uint16_t rgSCHCmnGetSiSetId(sfn, sf, minPeriodicity)
31225 uint16_t minPeriodicity;
31228 /* 80 is the minimum SI periodicity in sf. Also
31229 * all other SI periodicities are multiples of 80 */
31230 return (((sfn * RGSCH_NUM_SUB_FRAMES_5G) + sf) / (minPeriodicity * 10));
31234 * @brief API for calculating the DwPts Rb, Itbs and tbSz
31238 * Function: rgSCHCmnCalcDwPtsTbSz
31240 * @param[in] RgSchCellCb *cell
31241 * @param[in] uint32_t bo
31242 * @param[in/out] uint8_t *rb
31243 * @param[in/out] uint8_t *iTbs
31244 * @param[in] uint8_t lyr
31245 * @param[in] uint8_t cfi
31246 * @return uint32_t tbSz
/* Computes the achievable TB size in a DwPTS (special) subframe for one
 * codeword: converts the normal-subframe RB count into an equivalent
 * DwPTS RB count, adjusts iTbs via the static delta table, then grows
 * the RB count (capped at maxDlBwPerUe) until the TBS-table entry at
 * 3/4-scaled RBs satisfies the BO in bits. */
31249 PRIVATE uint32_t rgSCHCmnCalcDwPtsTbSz
31259 PRIVATE uint32_t rgSCHCmnCalcDwPtsTbSz(cell, bo, rb, iTbs, lyr, cfi)
31269 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Translate REs available at *rb normal RBs into DwPTS-equivalent RBs */
31270 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
31271 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
31274 /* DwPts Rb cannot exceed the cell Bw */
31275 numDwPtsRb = RGSCH_MIN(numDwPtsRb, cellDl->maxDlBwPerUe);
31277 /* Adjust the iTbs for optimum usage of the DwPts region.
31278 * Using the same iTbs adjustment will not work for all
31279 * special subframe configurations and iTbs levels. Hence use the
31280 * static iTbs Delta table for adjusting the iTbs */
31281 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs);
/* Grow RBs until the 3/4-scaled TBS covers the BO (bo is in bytes) */
31285 while(rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1] < bo*8 &&
31286 numDwPtsRb < cellDl->maxDlBwPerUe)
31291 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
31295 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
31303 * @brief API for calculating the DwPts Rb, Itbs and tbSz
31307 * Function: rgSCHCmnCalcDwPtsTbSz2Cw
31309 * @param[in] RgSchCellCb *cell
31310 * @param[in] uint32_t bo
31311 * @param[in/out] uint8_t *rb
31312 * @param[in] uint8_t maxRb
31313 * @param[in/out] uint8_t *iTbs1
31314 * @param[in/out] uint8_t *iTbs2
31315 * @param[in] uint8_t lyr1
31316 * @param[in] uint8_t lyr2
31317 * @return[in/out] uint32_t *tb1Sz
31318 * @return[in/out] uint32_t *tb2Sz
31319 * @param[in] uint8_t cfi
/* Two-codeword variant of the DwPTS TB-size computation: adjusts both
 * iTbs values, then grows the shared DwPTS RB count (capped at maxRb)
 * until the combined 3/4-scaled TBS of both codewords covers the BO;
 * outputs per-codeword TB sizes in bytes via *tb1Sz / *tb2Sz. */
31322 PRIVATE Void rgSCHCmnCalcDwPtsTbSz2Cw
31337 PRIVATE Void rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, rb, maxRb, iTbs1, iTbs2,
31338 lyr1, lyr2, tb1Sz, tb2Sz, cfi)
31352 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Translate REs available at *rb normal RBs into DwPTS-equivalent RBs */
31353 uint32_t numRE = *rb * cellDl->noResPerRb[cfi];
31354 uint32_t numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
31357 /* DwPts Rb cannot exceed the cell Bw */
31358 numDwPtsRb = RGSCH_MIN(numDwPtsRb, maxRb);
31360 /* Adjust the iTbs for optimum usage of the DwPts region.
31361 * Using the same iTbs adjustment will not work for all
31362 * special subframe configurations and iTbs levels. Hence use the
31363 * static iTbs Delta table for adjusting the iTbs */
31364 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs1);
31365 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs2);
/* Grow RBs until both codewords' combined TBS covers the BO in bits */
31367 while((rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1] +
31368 rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1])< bo*8 &&
31369 numDwPtsRb < maxRb)
/* TBS table is in bits; convert to bytes for the outputs */
31374 *tb1Sz = rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
31375 *tb2Sz = rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
31385 * @brief Updates the GBR LCGs when datInd is received from MAC
31389 * Function: rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
31390 * Purpose: This function updates the GBR LCGs
31391 * when datInd is received from MAC.
31395 * @param[in] RgSchCellCb *cell
31396 * @param[in] RgSchUeCb *ue
31397 * @param[in] RgInfUeDatInd *datInd
/* Updates per-LCG buffer-status accounting when a data indication is
 * received from MAC: for GBR LCGs, consumes effGbr first then
 * effDeltaMbr; for non-GBR LCGs (lcgId != 0), decrements the UE-wide
 * AMBR-related counters. All subtractions clamp at zero. Finally
 * invokes the (EMTC or regular) UL API to propagate the LCG update. */
31401 Void rgSCHCmnUpdUeDataIndLcg
31405 RgInfUeDatInd *datInd
31408 Void rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
31411 RgInfUeDatInd *datInd;
31415 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31417 Inst inst = cell->instIdx;
/* NOTE(review): loop bound is RGINF_MAX_LCG_PER_UE - 1, so the last
 * LCG slot is not visited — confirm this is intentional */
31421 for (idx = 0; (idx < RGINF_MAX_LCG_PER_UE - 1); idx++)
31423 if (datInd->lcgInfo[idx].bytesRcvd != 0)
31425 uint8_t lcgId = datInd->lcgInfo[idx].lcgId;
31426 uint32_t bytesRcvd = datInd->lcgInfo[idx].bytesRcvd;
31428 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
31430 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
31431 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* Consume the GBR budget first; spill the remainder into the
 * delta-MBR budget (both clamped at zero) */
31433 if(bytesRcvd > cmnLcg->effGbr)
31435 bytesRcvd -= cmnLcg->effGbr;
31436 cmnLcg->effDeltaMbr = (cmnLcg->effDeltaMbr > bytesRcvd) ? \
31437 (cmnLcg->effDeltaMbr - bytesRcvd) : (0);
31438 cmnLcg->effGbr = 0;
31442 cmnLcg->effGbr -= bytesRcvd;
31444 /* To keep BS updated with the amount of data received for the GBR */
31445 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31446 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31447 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr+cmnLcg->effDeltaMbr);
31449 else if(lcgId != 0)
/* Non-GBR LCG: decrement UE-wide AMBR-related counters, clamped at 0 */
31451 ue->ul.effAmbr = (ue->ul.effAmbr > datInd->lcgInfo[idx].bytesRcvd) ? \
31452 (ue->ul.effAmbr - datInd->lcgInfo[idx].bytesRcvd) : (0);
31453 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31454 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31455 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
31456 ue->ul.nonGbrLcgBs = (ue->ul.nonGbrLcgBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31457 (ue->ul.nonGbrLcgBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31459 ue->ul.nonLcg0Bs = (ue->ul.nonLcg0Bs > datInd->lcgInfo[idx].bytesRcvd) ? \
31460 (ue->ul.nonLcg0Bs - datInd->lcgInfo[idx].bytesRcvd) : (0);
/* Propagate the LCG update through the EMTC or regular UL API */
31469 if(TRUE == ue->isEmtcUe)
31471 if (cellSch->apisEmtcUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
31473 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
31480 if (cellSch->apisUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
31482 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
31488 /** @brief This function initializes DL allocation lists and prepares
31493  * Function: rgSCHCmnInitRbAlloc
31495  * @param [in] RgSchCellCb *cell
/* Per-TTI reset of the common DL RB-allocation context: clears the DL RB
 * allocation info, resolves the DL subframe being scheduled, seeds 5GTF
 * per-beam / UE-group limits, refills the per-subframe UE budget, and
 * evaluates the LTE-Adv ABS pattern for this subframe.
 * NOTE(review): this listing is elided (gaps in the embedded numbering);
 * comments describe only the visible statements. */
31501 PRIVATE Void rgSCHCmnInitRbAlloc
31506 PRIVATE Void rgSCHCmnInitRbAlloc (cell)
31510 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31511 CmLteTimingInfo frm;
/* Reset the common DL RB-allocation bookkeeping for this TTI.
 * (Original comment said "Ul" — rgSCHCmnInitDlRbAllocInfo is the DL init.) */
31516 /* Initializing RgSchCmnDlRbAllocInfo structure.*/
31517 rgSCHCmnInitDlRbAllocInfo(&cellSch->allocInfo);
/* DL scheduling time is tracked in cellSch->dl.time. */
31519 frm = cellSch->dl.time;
31521 dlSf = rgSCHUtlSubFrmGet(cell, frm);
/* 5GTF: per-TTI UE-grouping limits come from cell configuration. */
31523 dlSf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
31524 dlSf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
/* Clear per-beam VRBG usage counters for this subframe. */
31525 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
31527 dlSf->sfBeamInfo[idx].totVrbgAllocated = 0;
31528 dlSf->sfBeamInfo[idx].totVrbgRequired = 0;
31529 dlSf->sfBeamInfo[idx].vrbgStart = 0;
/* Refill the per-subframe UE budget from the configured maximum. */
31532 dlSf->remUeCnt = cellSch->dl.maxUePerDlSf;
31533 /* Updating the Subframe information in RBAllocInfo */
31534 cellSch->allocInfo.dedAlloc.dedDlSf = dlSf;
31535 cellSch->allocInfo.msg4Alloc.msg4DlSf = dlSf;
31537 /* LTE_ADV_FLAG_REMOVED_START */
31538 /* Determine next scheduling subframe is ABS or not */
31539 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
/* ABS pattern index = absolute subframe number modulo the pattern length. */
31541 cell->lteAdvCb.absPatternDlIdx =
31542 ((frm.sfn*RGSCH_NUM_SUB_FRAMES_5G) + frm.slot) % RGR_ABS_PATTERN_LEN;
31543 cell->lteAdvCb.absDlSfInfo = (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern[
31544 cell->lteAdvCb.absPatternDlIdx]);
/* ABS not configured: mark the subframe as ABS-disabled. */
31549 cell->lteAdvCb.absDlSfInfo = RG_SCH_ABS_DISABLED;
31551 /* LTE_ADV_FLAG_REMOVED_END */
31554 cellSch->allocInfo.ccchSduAlloc.ccchSduDlSf = dlSf;
31557 /* Update subframe-wide allocation information with SPS allocation */
31558 rgSCHCmnSpsDlUpdDlSfAllocWithSps(cell, frm, dlSf);
31567 * @brief Sends a transmission-mode change indication to RRM for this UE
31572 * Function: rgSCHCmnSendTxModeInd(cell, ueUl, newTxMode)
31573 * Purpose: This function sends the TX mode Change
31574 * indication to RRM
31579 * @param[in] RgSchCellCb *cell
31580 * @param[in] RgSchUeCb *ue
31581 * @param[in] uint8_t newTxMode
/* NOTE(review): elided listing — some lines (braces, ANSI/K&R wrappers,
 * early returns) are missing from this view. */
31585 PRIVATE Void rgSCHCmnSendTxModeInd
31592 PRIVATE Void rgSCHCmnSendTxModeInd(cell, ue, newTxMode)
31598 RgmTransModeInd *txModeChgInd;
31599 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Send only when a TX-mode-reconfig forced-TD is not already pending. */
31602 if(!(ueDl->mimoInfo.forceTD & RG_SCH_CMN_TD_TXMODE_RECFG))
/* Allocate the indication from the RGM SAP's pst region/pool; the failure
 * branch is elided here (presumably bails out without sending). */
31605 if(SGetSBuf(cell->rgmSap->sapCfg.sapPst.region,
31606 cell->rgmSap->sapCfg.sapPst.pool, (Data**)&txModeChgInd,
31607 sizeof(RgmTransModeInd)) != ROK)
/* Populate and deliver the indication over the RGM SAP. */
31611 RG_SCH_FILL_RGM_TRANSMODE_IND(ue->ueId, cell->cellId, newTxMode, txModeChgInd);
31612 RgUiRgmChangeTransModeInd(&(cell->rgmSap->sapCfg.sapPst),
31613 cell->rgmSap->sapCfg.suId, txModeChgInd);
/* Restart mode-change hysteresis and link-adaptation delta after signalling. */
31616 ue->mimoInfo.txModUpChgFactor = 0;
31617 ue->mimoInfo.txModDownChgFactor = 0;
31618 ueDl->laCb[0].deltaiTbs = 0;
31624 * @brief Checks and updates the TM mode change threshold based on cqi iTbs and
31629 * Function: rgSchCheckAndTriggerModeChange(cell, ueUl, iTbsNew)
31630 * Purpose: This function updates and checks the threshold for TM mode
31635 * @param[in] RgSchCellCb *cell
31636 * @param[in] RgSchUeCb *ue
31637 * @param[in] uint8_t iTbs
/* Hysteresis-based TM3<->TM4 switching: per-UE counters
 * (txModUpChgFactor / txModDownChgFactor) accumulate evidence from the
 * reported vs. previous iTbs; crossing a threshold triggers
 * rgSCHCmnSendTxModeInd() toward RRM.
 * NOTE(review): elided listing — braces/else lines are missing here. */
31641 Void rgSchCheckAndTriggerModeChange
31645 uint8_t reportediTbs,
31650 Void rgSchCheckAndTriggerModeChange(cell, ue, reportediTbs, previTbs, maxiTbs)
31653 uint8_t reportediTbs;
31658 RgrTxMode txMode; /*!< UE's Transmission Mode */
31659 RgrTxMode modTxMode; /*!< UE's Transmission Mode */
31662 txMode = ue->mimoInfo.txMode;
31664 /* Check for Step down */
31665 /* Step down only when TM4 is configured. */
31666 if(RGR_UE_TM_4 == txMode)
/* Evidence for step-down: reported iTbs exceeds the previous value by at
 * least the check factor. Otherwise the counter decays (elided else). */
31668 if((previTbs <= reportediTbs) && ((reportediTbs - previTbs) >= RG_SCH_MODE_CHNG_STEPDOWN_CHECK_FACTOR))
31670 ue->mimoInfo.txModDownChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
31674 ue->mimoInfo.txModDownChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the decayed counter so it cannot drift below -THRSHD. */
31677 ue->mimoInfo.txModDownChgFactor =
31678 RGSCH_MAX(ue->mimoInfo.txModDownChgFactor, -(RG_SCH_MODE_CHNG_STEPDOWN_THRSHD));
31680 if(ue->mimoInfo.txModDownChgFactor >= RG_SCH_MODE_CHNG_STEPDOWN_THRSHD)
31682 /* Trigger Mode step down */
31683 modTxMode = RGR_UE_TM_3;
31684 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
31688 /* Check for Step up */
31689 /* Step Up only when TM3 is configured, Max possible Mode is TM4*/
31690 if(RGR_UE_TM_3 == txMode)
/* Evidence for step-up: reported iTbs below the previous value, or the
 * previous value already saturated at maxiTbs. */
31692 if((previTbs > reportediTbs) || (maxiTbs == previTbs))
31694 ue->mimoInfo.txModUpChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
31698 ue->mimoInfo.txModUpChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the decayed counter so it cannot drift below -THRSHD. */
31701 ue->mimoInfo.txModUpChgFactor =
31702 RGSCH_MAX(ue->mimoInfo.txModUpChgFactor, -(RG_SCH_MODE_CHNG_STEPUP_THRSHD));
31704 /* Check if TM step up needs to be triggered */
31705 if(ue->mimoInfo.txModUpChgFactor >= RG_SCH_MODE_CHNG_STEPUP_THRSHD)
31707 /* Trigger mode change */
31708 modTxMode = RGR_UE_TM_4;
31709 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
31718 * @brief Returns whether CSG UEs currently have priority in the downlink
31722 * Function: rgSCHCmnIsDlCsgPrio (cell)
31723 * Purpose: This function returns if csg UEs are
31724 * having priority at current time
31726 * Invoked by: Scheduler
31728 * @param[in] RgSchCellCb *cell
/* (Original header listed ue/datInd params copied from another function;
 * only the cell pointer is taken here.) */
31734 Bool rgSCHCmnIsDlCsgPrio
31739 Bool rgSCHCmnIsDlCsgPrio(cell)
31744 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
31746 /* Calculating the percentage resource allocated */
/* CSG prioritisation only applies to hybrid-access cells; the non-hybrid
 * early-exit branch is elided from this view. */
31747 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* CSG UEs get priority while the non-CSG share of DL PRBs is below the
 * configured minimum percentage.
 * NOTE(review): divides by cmnDlCell->totPrbCnt — confirm it can never be
 * zero when this path is reached. */
31753 if(((cmnDlCell->ncsgPrbCnt * 100) / cmnDlCell->totPrbCnt) < cell->minDlResNonCsg)
31765 * @brief Returns whether CSG UEs currently have priority in the uplink
31769 * Function: rgSCHCmnIsUlCsgPrio (cell)
31770 * Purpose: This function returns if csg UEs are
31771 * having priority at current time
31773 * Invoked by: Scheduler
31775 * @param[in] RgSchCellCb *cell
/* (Original header listed ue/datInd params copied from another function;
 * only the cell pointer is taken here.) */
31781 Bool rgSCHCmnIsUlCsgPrio
31786 Bool rgSCHCmnIsUlCsgPrio(cell)
31790 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
31793 /* Calculating the percentage resource allocated */
/* CSG prioritisation only applies to hybrid-access cells; the non-hybrid
 * early-exit branch is elided from this view. */
31794 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* CSG UEs get priority while the non-CSG share of UL PRBs is below the
 * configured minimum percentage.
 * NOTE(review): divides by cmnUlCell->totPrbCnt — confirm it can never be
 * zero when this path is reached. */
31800 if (((cmnUlCell->ncsgPrbCnt * 100) /cmnUlCell->totPrbCnt) < cell->minUlResNonCsg)
31811 /** @brief Pre-scheduling hook run before the per-TTI DL scheduling pass
31815 * Function: rgSchCmnPreDlSch
31817 * @param [in] Inst schInst;
/* Runs the specific DL pre-scheduler on the primary cell, then fills
 * cellLst[] with the cells ordered (ascending, by the subframe's remaining
 * UE budget remUeCnt) using an insertion sort, so cells are processed in
 * that order by the subsequent scheduling pass.
 * NOTE(review): elided listing — braces and some branch bodies (e.g. the
 * nCell overflow guard's action) are missing from this view. */
31822 Void rgSchCmnPreDlSch
31824 RgSchCellCb **cell,
31826 RgSchCellCb **cellLst
31829 Void rgSchCmnPreDlSch(cell, nCell, cellLst)
31830 RgSchCellCb **cell;
31832 RgSchCellCb **cellLst;
31835 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell[0]);
/* Guard against more cells than the scheduler supports (action elided). */
31840 if(nCell > CM_LTE_MAX_CELLS)
31845 if (cell[0]->isDlDataAllwd && (cell[0]->stopDlSch == FALSE))
31847 /* Specific DL scheduler to perform UE scheduling */
31848 cellSch->apisDl->rgSCHDlPreSched(cell[0]);
31850 /* Rearranging the cell entries based on their remueCnt in SF.
31851 * cells will be processed in the order of number of ue scheduled
31853 for (idx = 0; idx < nCell; idx++)
31856 cellSch = RG_SCH_CMN_GET_CELL(cell[idx]);
31857 sf = cellSch->allocInfo.dedAlloc.dedDlSf;
/* Default slot: append at the current position, then search for an
 * earlier insertion point. */
31861 cellLst[idx] = cell[idx];
31865 for(j = 0; j < idx; j++)
31867 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cellLst[j]);
31868 RgSchDlSf *subfrm = cmnCell->allocInfo.dedAlloc.dedDlSf;
/* Insert before the first entry with a larger remaining-UE count,
 * shifting the tail right by one (classic insertion sort). */
31870 if(sf->remUeCnt < subfrm->remUeCnt)
31873 for(k = idx; k > j; k--)
31875 cellLst[k] = cellLst[k-1];
31880 cellLst[j] = cell[idx];
/* DL data not allowed / scheduling stopped: copy the list unsorted. */
31885 for (idx = 0; idx < nCell; idx++)
31887 cellLst[idx] = cell[idx];
31893 /** @brief Post-scheduling hook run after the per-TTI DL scheduling pass
31896 * Function: rgSchCmnPstDlSch
31898 * @param [in] Inst schInst;
/* Delegates post-scheduling cleanup to the specific DL scheduler when DL
 * data scheduling is allowed and not stopped for this cell. */
31903 Void rgSchCmnPstDlSch
31908 Void rgSchCmnPstDlSch(cell)
31912 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31915 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
/* Specific scheduler's post-sched takes the scheduler instance index. */
31917 cellSch->apisDl->rgSCHDlPstSched(cell->instIdx);
/* Computes the periodic CQI (PCQI) report payload size in bits for a UE,
 * based on the configured periodic reporting mode (1-0 / 1-1 / 2-0 / 2-1),
 * the UE's transmission mode (RI is only factored in for TM3/TM4) and the
 * number of eNB TX antennas.
 * NOTE(review): elided listing — most case bodies, breaks and the final
 * return are missing from this view; comments describe visible lines only. */
31922 uint8_t rgSCHCmnCalcPcqiBitSz
31928 uint8_t rgSCHCmnCalcPcqiBitSz(ueCb, numTxAnt)
31933 uint8_t confRepMode;
31936 RgSchUePCqiCb *cqiCb = ueCb->nPCqiCb;
31939 confRepMode = cqiCb->cqiCfg.cqiSetup.prdModeEnum;
/* RI reporting applies only to TM3/TM4; for other TMs the RI-dependent
 * sizing below is bypassed (elided branch). */
31940 if((ueCb->mimoInfo.txMode != RGR_UE_TM_3) &&
31941 (ueCb->mimoInfo.txMode != RGR_UE_TM_4))
/* Last periodically reported rank indicator. */
31947 ri = cqiCb->perRiVal;
31949 switch(confRepMode)
31951 case RGR_PRD_CQI_MOD10:
31957 case RGR_PRD_CQI_MOD11:
31970 else if(numTxAnt == 4)
31983 /* This is number of antenna case 1.
31984 * This is not applicable for Mode 1-1.
31985 * So setting it to invalid value */
31991 case RGR_PRD_CQI_MOD20:
/* Subband report: 4-bit CQI plus the subband label bits. */
31999 pcqiSz = 4 + cqiCb->label;
32004 case RGR_PRD_CQI_MOD21:
32019 else if(numTxAnt == 4)
32032 /* This might be number of antenna case 1.
32033 * For mode 2-1 wideband case only antenna port 2 or 4 is supported.
32034 * So setting invalid value.*/
/* Mode 2-1 subband sizing: 4 + label bits for rank 1, 7 + label bits
 * otherwise (second-codeword CQI included). */
32042 pcqiSz = 4 + cqiCb->label;
32046 pcqiSz = 7 + cqiCb->label;
32059 /** @brief DL scheduler for SPS, and all other downlink data
32063 * Function: rgSCHCmnDlSch
32065 * @param [in] RgSchCellCb *cell
/* Per-TTI DL scheduling entry point: checks dynamic-TDD direction for the
 * target subframe, then runs the specific DL scheduler, common RB
 * allocation, allocation finalisation, PDCCH-order allocation and PUCCH
 * group power control.
 * NOTE(review): elided listing — braces and the early-return after the
 * dyn-TDD UL-marked check are missing from this view. */
32076 Void rgSCHCmnDlSch (cell)
32081 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
32083 RgSchDynTddCb *rgSchDynTddInfo = &(rgSchCb[cell->instIdx].rgSchDynTdd);
32084 uint16_t dlCntrlSfIdx;
32088 dlSf = rgSCHUtlSubFrmGet(cell, cellSch->dl.time);
/* Dynamic TDD: if the DL control subframe for this TTI is already marked
 * as UL, DL scheduling is abandoned for it (exit path elided). */
32090 if (rgSchDynTddInfo->isDynTddEnbld)
32092 RG_SCH_DYN_TDD_GET_SFIDX(dlCntrlSfIdx, rgSchDynTddInfo->crntDTddSfIdx,
32093 RG_SCH_CMN_DL_DELTA);
32094 if(RG_SCH_DYNTDD_DLC_ULD == rgSchDynTddInfo->sfInfo[dlCntrlSfIdx].sfType)
32096 if(1 == cell->cellId)
/* NOTE(review): debug counter + printf in the TTI path — consider
 * moving this to the project's RLOG/RGSCHDBG logging instead. */
32098 ul5gtfsidDlAlreadyMarkUl++;
32100 printf("ul5gtfsidDlAlreadyMarkUl: %d, [sfn:sf] [%04d:%02d]\n",
32101 ul5gtfsidDlAlreadyMarkUl, cellSch->dl.time.sfn,
32102 cellSch->dl.time.slot);
32110 /* Specific DL scheduler to perform UE scheduling */
32111 cellSch->apisDl->rgSCHDlNewSched(cell, &cellSch->allocInfo);
32112 /* LTE_ADV_FLAG_REMOVED_END */
32114 /* call common allocator for RB Allocation */
32115 rgSCHCmnDlRbAlloc(cell, &cellSch->allocInfo);
32117 /* Finalize the Allocations for reqested Against alloced */
32118 rgSCHCmnDlAllocFnlz(cell);
32120 /* Perform Pdcch allocations for PDCCH Order Q.
32121 * As of now, giving this the least preference.
32122 * This func call could be moved above other allocations
32124 rgSCHCmnGenPdcchOrder(cell, dlSf);
32126 /* Do group power control for PUCCH */
32127 rgSCHCmnGrpPwrCntrlPucch(cell, dlSf);
32132 /**********************************************************************
32135 **********************************************************************/