1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /************************************************************************
25 Desc: C source code for Entry point functions
29 **********************************************************************/
31 /** @file rg_sch_cmn.c
32 @brief This file implements the schedulers main access to MAC layer code.
35 static const char* RLOG_MODULE_NAME="MAC";
36 static int RLOG_FILE_ID=187;
37 static int RLOG_MODULE_ID=4096;
39 /* header include files -- defines (.h) */
40 #include "common_def.h"
46 #include "rg_sch_err.h"
47 #include "rg_sch_inf.h"
49 #include "rg_sch_cmn.h"
50 #include "rl_interface.h"
51 #include "rl_common.h"
53 /* header/extern include files (.x) */
54 #include "tfu.x" /* TFU types */
55 #include "lrg.x" /* layer management typedefs for MAC */
56 #include "rgr.x" /* layer management typedefs for MAC */
57 #include "rgm.x" /* layer management typedefs for MAC */
58 #include "rg_sch_inf.x" /* typedefs for Scheduler */
59 #include "rg_sch.x" /* typedefs for Scheduler */
60 #include "rg_sch_cmn.x" /* typedefs for Scheduler */
62 #include "lrg.x" /* Stats Structures */
63 #endif /* MAC_SCH_STATS */
66 #endif /* __cplusplus */
69 EXTERN U32 emtcStatsUlTomSrInd;
70 EXTERN U32 emtcStatsUlBsrTmrTxp;
/* Absolute difference of two iTBS values. Fully parenthesised, but each
 * argument is evaluated twice — do not pass expressions with side effects. */
73 #define RG_ITBS_DIFF(_x, _y) ((_x) > (_y) ? (_x) - (_y) : (_y) - (_x))
74 EXTERN Void rgSCHSc1UlInit ARGS((RgUlSchdApis *apis));
75 #ifdef RG_PHASE2_SCHED
76 EXTERN Void rgSCHRrUlInit ARGS((RgUlSchdApis *apis));
78 EXTERN Void rgSCHEmtcHqInfoFree ARGS((RgSchCellCb *cell, RgSchDlHqProcCb *hqP));
79 EXTERN Void rgSCHEmtcRrUlInit ARGS((RgUlSchdApis *apis));
80 EXTERN Void rgSCHEmtcCmnDlInit ARGS((Void));
81 EXTERN Void rgSCHEmtcCmnUlInit ARGS((Void));
82 EXTERN Void rgSCHEmtcCmnUeNbReset ARGS((RgSchUeCb *ueCb));
83 EXTERN RgSchCmnCqiToTbs *rgSchEmtcCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
85 EXTERN Void rgSCHMaxciUlInit ARGS((RgUlSchdApis *apis));
86 EXTERN Void rgSCHPfsUlInit ARGS((RgUlSchdApis *apis));
88 EXTERN Void rgSCHSc1DlInit ARGS((RgDlSchdApis *apis));
89 #ifdef RG_PHASE2_SCHED
90 EXTERN Void rgSCHRrDlInit ARGS((RgDlSchdApis *apis));
92 EXTERN Void rgSCHEmtcRrDlInit ARGS((RgDlEmtcSchdApis *apis));
94 EXTERN Void rgSCHMaxciDlInit ARGS((RgDlSchdApis *apis));
95 EXTERN Void rgSCHPfsDlInit ARGS((RgDlSchdApis *apis));
97 EXTERN Void rgSCHDlfsInit ARGS((RgDlfsSchdApis *apis));
101 EXTERN Void rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl ARGS((RgSchCellCb *cell));
102 EXTERN Void rgSCHCmnGetEmtcDciFrmtSizes ARGS((RgSchCellCb *cell));
103 EXTERN Void rgSCHEmtcRrUlProcRmvFrmRetx ARGS((RgSchCellCb *cell, RgSchUlHqProcCb *proc));
104 EXTERN S16 rgSCHCmnPrecompEmtcMsg3Vars
106 RgSchCmnUlCell *cellUl,
112 Void rgSCHEmtcCmnUeCcchSduDel
117 EXTERN Void rgSCHEmtcRmvFrmTaLst
119 RgSchCmnDlCell *cellDl,
122 EXTERN Void rgSCHEmtcInitTaLst
124 RgSchCmnDlCell *cellDl
126 EXTERN Void rgSCHEmtcAddToTaLst
128 RgSchCmnDlCell *cellDl,
135 PRIVATE Void rgSCHDlSiSched ARGS((RgSchCellCb *cell,
136 RgSchCmnDlRbAllocInfo *allocInfo,
137 RgInfSfAlloc *subfrmAlloc));
138 PRIVATE Void rgSCHChkNUpdSiCfg ARGS((RgSchCellCb *cell));
139 PRIVATE Void rgSCHSelectSi ARGS((RgSchCellCb *cell));
140 #endif /*RGR_SI_SCH*/
141 /* LTE_ADV_FLAG_REMOVED_START */
144 PRIVATE S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
152 PRIVATE S16 rgSCHCmnBuildRntpInfo (
160 PRIVATE Void rgSCHCmnNonDlfsType0Alloc
164 RgSchDlRbAlloc *allocInfo,
167 PRIVATE U8 rgSchCmnUlRvIdxToIMcsTbl[4] = {32, 30, 31, 29};
168 PRIVATE Void rgSCHCmnUlNonadapRetx ARGS((
169 RgSchCmnUlCell *cellUl,
173 PRIVATE Void rgSCHCmnUlSfRlsRetxProcs ARGS((
179 PRIVATE S16 rgSCHCmnUlMdfyGrntForCqi ARGS((
190 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1 ARGS((
192 RgSchDlRbAlloc *rbAllocInfo,
193 RgSchDlHqProcCb *hqP,
197 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1A ARGS((
199 RgSchDlRbAlloc *rbAllocInfo,
200 RgSchDlHqProcCb *hqP,
204 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1B ARGS((
206 RgSchDlRbAlloc *rbAllocInfo,
207 RgSchDlHqProcCb *hqP,
211 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2 ARGS((
213 RgSchDlRbAlloc *rbAllocInfo,
214 RgSchDlHqProcCb *hqP,
218 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2A ARGS((
220 RgSchDlRbAlloc *rbAllocInfo,
221 RgSchDlHqProcCb *hqP,
228 Void rgSCHCmnDlSpsSch
232 /* LTE_ADV_FLAG_REMOVED_END */
234 PRIVATE Void rgSCHCmnNonDlfsBcchPcchRbAlloc ARGS((
236 RgSchCmnDlRbAllocInfo *allocInfo
238 PRIVATE Void rgSCHBcchPcchDlRbAlloc ARGS((
240 RgSchCmnDlRbAllocInfo *allocInfo
242 PRIVATE Void rgSCHCmnDlBcchPcchAlloc ARGS((
246 PRIVATE Void rgSCHCmnDlCqiOnPucchInd ARGS ((
249 TfuDlCqiPucch *pucchCqi,
250 RgrUeCqiRept *ueCqiRept,
252 Bool *is2ndCwCqiAvail
254 PRIVATE Void rgSCHCmnDlCqiOnPuschInd ARGS ((
257 TfuDlCqiPusch *puschCqi,
258 RgrUeCqiRept *ueCqiRept,
260 Bool *is2ndCwCqiAvail
263 PRIVATE Void rgSCHCmnDlCqiOnPucchInd ARGS ((
266 TfuDlCqiPucch *pucchCqi
268 PRIVATE Void rgSCHCmnDlCqiOnPuschInd ARGS ((
271 TfuDlCqiPusch *puschCqi
274 /* ccpu00117452 - MOD - Changed macro name from
275 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
277 PRIVATE S16 rgSCHCmnUeDlPwrCtColltCqiRept ARGS((
280 RgrUeCqiRept *ueCqiRept));
281 #endif /* End of RGR_CQI_REPT */
282 /* Fix: syed align multiple UEs to refresh at same time */
283 PRIVATE Void rgSCHCmnGetRefreshPer ARGS((
287 PRIVATE S16 rgSCHCmnApplyUeRefresh ARGS((
291 Void rgSCHCmnDlSetUeAllocLmtLa ARGS
296 PRIVATE Void rgSCHCheckAndSetTxScheme ARGS
304 PRIVATE U32 rgSCHCmnCalcDwPtsTbSz ARGS
314 PRIVATE Void rgSCHCmnCalcDwPtsTbSz2Cw ARGS
330 PRIVATE Void rgSCHCmnInitRbAlloc ARGS
336 #endif /* __cplusplus */
/* Common scheduler API block exposed to the MAC layer. */
340 RgSchdApis rgSchCmnApis;
/* Per-scheduler-type UL/DL API dispatch tables; the companion initialiser
 * lists rgSchUlSchdInits/rgSchDlSchdInits are defined below. */
341 PRIVATE RgUlSchdApis rgSchUlSchdTbl[RGSCH_NUM_SCHEDULERS];
342 PRIVATE RgDlSchdApis rgSchDlSchdTbl[RGSCH_NUM_SCHEDULERS];
344 PRIVATE RgUlSchdApis rgSchEmtcUlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
345 PRIVATE RgDlEmtcSchdApis rgSchEmtcDlSchdTbl[RGSCH_NUM_EMTC_SCHEDULERS];
347 #ifdef RG_PHASE2_SCHED
348 PRIVATE RgDlfsSchdApis rgSchDlfsSchdTbl[RGSCH_NUM_DLFS_SCHEDULERS];
350 PRIVATE RgUlSchdInits rgSchUlSchdInits = RGSCH_ULSCHED_INITS;
351 PRIVATE RgDlSchdInits rgSchDlSchdInits = RGSCH_DLSCHED_INITS;
353 PRIVATE RgEmtcUlSchdInits rgSchEmtcUlSchdInits = RGSCH_EMTC_ULSCHED_INITS;
354 PRIVATE RgEmtcDlSchdInits rgSchEmtcDlSchdInits = RGSCH_EMTC_DLSCHED_INITS;
356 #if (defined (RG_PHASE2_SCHED) && defined (TFU_UPGRADE))
357 PRIVATE RgDlfsSchdInits rgSchDlfsSchdInits = RGSCH_DLFSSCHED_INITS;
360 typedef Void (*RgSchCmnDlAllocRbFunc) ARGS((RgSchCellCb *cell, RgSchDlSf *subFrm,
361 RgSchUeCb *ue, U32 bo, U32 *effBo, RgSchDlHqProcCb *proc,
362 RgSchCmnDlRbAllocInfo *cellWdAllocInfo));
363 typedef U8 (*RgSchCmnDlGetPrecInfFunc) ARGS((RgSchCellCb *cell, RgSchUeCb *ue,
364 U8 numLyrs, Bool bothCwEnbld));
365 PRIVATE Void rgSCHCmnDlAllocTxRbTM1 ARGS((
371 RgSchDlHqProcCb *proc,
372 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
374 PRIVATE Void rgSCHCmnDlAllocTxRbTM2 ARGS((
380 RgSchDlHqProcCb *proc,
381 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
383 PRIVATE Void rgSCHCmnDlAllocTxRbTM3 ARGS((
389 RgSchDlHqProcCb *proc,
390 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
392 PRIVATE Void rgSCHCmnDlAllocTxRbTM4 ARGS((
398 RgSchDlHqProcCb *proc,
399 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
402 PRIVATE Void rgSCHCmnDlAllocTxRbTM5 ARGS((
408 RgSchDlHqProcCb *proc,
409 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
412 PRIVATE Void rgSCHCmnDlAllocTxRbTM6 ARGS((
418 RgSchDlHqProcCb *proc,
419 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
421 PRIVATE Void rgSCHCmnDlAllocTxRbTM7 ARGS((
427 RgSchDlHqProcCb *proc,
428 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
430 PRIVATE Void rgSCHCmnDlAllocRetxRbTM1 ARGS((
436 RgSchDlHqProcCb *proc,
437 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
439 PRIVATE Void rgSCHCmnDlAllocRetxRbTM2 ARGS((
445 RgSchDlHqProcCb *proc,
446 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
448 PRIVATE Void rgSCHCmnDlAllocRetxRbTM3 ARGS((
454 RgSchDlHqProcCb *proc,
455 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
457 PRIVATE Void rgSCHCmnDlAllocRetxRbTM4 ARGS((
463 RgSchDlHqProcCb *proc,
464 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
467 PRIVATE Void rgSCHCmnDlAllocRetxRbTM5 ARGS((
473 RgSchDlHqProcCb *proc,
474 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
477 PRIVATE Void rgSCHCmnDlAllocRetxRbTM6 ARGS((
483 RgSchDlHqProcCb *proc,
484 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
486 PRIVATE Void rgSCHCmnDlAllocRetxRbTM7 ARGS((
492 RgSchDlHqProcCb *proc,
493 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
497 PRIVATE U8 rgSchGetN1ResCount ARGS ((
501 Bool rgSchCmnChkDataOnlyOnPcell
507 U8 rgSCHCmnCalcPcqiBitSz
514 /* Functions specific to each transmission mode for DL Tx RB Allocation*/
/* Indexed by (transmission mode - 1); TM5 is unsupported (NULLP entry).
 * NOTE(review): a 9-entry variant of this same symbol appears later in the
 * file — presumably the two are alternatives under conditional compilation
 * (the #if lines are not visible in this chunk); confirm. */
515 RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[7] = {rgSCHCmnDlAllocTxRbTM1,
516 rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
517 NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7};
519 /* Functions specific to each transmission mode for DL Retx RB Allocation*/
/* Indexed by (transmission mode - 1); TM5 retransmission allocation is
 * unsupported (NULLP entry). See note on dlAllocTxRbFunc regarding the
 * 9-entry variant defined later in the file. */
520 RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[7] = {rgSCHCmnDlAllocRetxRbTM1,
521 rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
522 NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7};
524 /* Functions specific to each transmission mode for DL Tx RB Allocation*/
/* 9-entry variant: indices 0..8 map to TM1..TM9; TM5, TM8 and TM9 are
 * unsupported (NULLP). NOTE(review): duplicates the 7-entry symbol above —
 * presumably selected by conditional compilation (#if lines not visible
 * in this chunk); confirm. */
525 RgSchCmnDlAllocRbFunc dlAllocTxRbFunc[9] = {rgSCHCmnDlAllocTxRbTM1,
526 rgSCHCmnDlAllocTxRbTM2, rgSCHCmnDlAllocTxRbTM3, rgSCHCmnDlAllocTxRbTM4,
527 NULLP, rgSCHCmnDlAllocTxRbTM6, rgSCHCmnDlAllocTxRbTM7, NULLP, NULLP};
529 /* Functions specific to each transmission mode for DL Retx RB Allocation*/
/* 9-entry variant: indices 0..8 map to TM1..TM9; TM5, TM8 and TM9 are
 * unsupported (NULLP). See note on dlAllocTxRbFunc[9] about the duplicate
 * 7-entry symbol and suspected conditional compilation. */
530 RgSchCmnDlAllocRbFunc dlAllocRetxRbFunc[9] = {rgSCHCmnDlAllocRetxRbTM1,
531 rgSCHCmnDlAllocRetxRbTM2, rgSCHCmnDlAllocRetxRbTM3, rgSCHCmnDlAllocRetxRbTM4,
532 NULLP, rgSCHCmnDlAllocRetxRbTM6, rgSCHCmnDlAllocRetxRbTM7, NULLP, NULLP};
537 PRIVATE U8 rgSCHCmnDlTM3PrecInf2 ARGS((
543 PRIVATE U8 rgSCHCmnDlTM3PrecInf4 ARGS((
549 PRIVATE U8 rgSCHCmnDlTM4PrecInf2 ARGS((
555 PRIVATE U8 rgSCHCmnDlTM4PrecInf4 ARGS((
561 /* Functions specific to each transmission mode for DL RB Allocation*/
562 RgSchCmnDlGetPrecInfFunc getPrecInfoFunc[2][2] = {
563 {rgSCHCmnDlTM3PrecInf2, rgSCHCmnDlTM3PrecInf4},
564 {rgSCHCmnDlTM4PrecInf2, rgSCHCmnDlTM4PrecInf4}
567 PRIVATE S16 rgSCHCmnDlAlloc1CwRetxRb ARGS((
571 RgSchDlHqTbCb *tbInfo,
576 PRIVATE S16 rgSCHCmnDlAlloc2CwRetxRb ARGS((
580 RgSchDlHqProcCb *proc,
585 PRIVATE Void rgSCHCmnDlTM3TxTx ARGS((
591 RgSchDlHqProcCb *proc,
592 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
594 PRIVATE Void rgSCHCmnDlTM3TxRetx ARGS((
600 RgSchDlHqProcCb *proc,
601 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
603 PRIVATE Void rgSCHCmnDlTM3RetxRetx ARGS((
609 RgSchDlHqProcCb *proc,
610 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
613 PRIVATE Void rgSCHCmnNonDlfsUpdTyp2Alloc ARGS((
619 /* LTE_ADV_FLAG_REMOVED_START */
621 PRIVATE Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc ARGS((
628 /* LTE_ADV_FLAG_REMOVED_END */
629 PRIVATE Void rgSCHCmnDlRbInfoAddUeTx ARGS((
631 RgSchCmnDlRbAllocInfo *allocInfo,
633 RgSchDlHqProcCb *proc
635 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetx ARGS((
637 RgSchCmnDlRbAllocInfo *allocInfo,
641 PRIVATE Void rgSCHCmnDlAdd2NonSchdRetxLst ARGS((
642 RgSchCmnDlRbAllocInfo *allocInfo,
644 RgSchDlHqProcCb *proc
646 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRetxRb ARGS((
650 RgSchDlHqTbCb *reTxTb,
655 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRb ARGS((
659 RgSchDlHqProcCb *proc,
664 PRIVATE S16 rgSCHCmnDlAlloc1CwTxRb ARGS((
668 RgSchDlHqTbCb *tbInfo,
674 PRIVATE Void rgSCHCmnFillHqPTb ARGS((
676 RgSchDlRbAlloc *rbAllocInfo,
682 PRIVATE Void rgSCHCmnDlGetBestFitHole ARGS((
691 #ifdef RGSCH_SPS_UNUSED
692 PRIVATE U32 rgSCHCmnGetRaType1Mask ARGS((
698 PRIVATE U32 rgSCHCmnGetRaType0Mask ARGS((
702 PRIVATE U32 rgSCHCmnGetRaType2Mask ARGS((
708 Bool rgSCHCmnRetxAllocAvoid ARGS((
711 RgSchDlHqProcCb *proc
714 U16 rgSCHCmnGetSiSetId ARGS((
/* 5GTF transport block size lookup, indexed by MCS (15 entries visible).
 * NOTE(review): unit presumed to be bits for a 100-PRB allocation — confirm
 * at the use site. */
722 //TODO_SID: Currently table is only for 100 Prbs. Need to modify wrt VRBG table 8.1.5.2.1-1 V5G_213
723 U32 rgSch5gtfTbSzTbl[MAX_5GTF_MCS] =
724 {1864, 5256, 8776, 13176, 17576, 21976, 26376, 31656, 35176, 39576, 43976, 47496, 52776, 59376, 66392};
/* 5GTF UL scheduling debug/statistics counters. NOTE(review): semantics are
 * inferred from the names (SR received, BSR received, UE picked for
 * scheduling, PDCCH scheduled/sent, RB allocation outcomes, UL-marking
 * bookkeeping) — confirm at the increment sites, which are not visible in
 * this chunk. */
726 U32 gUl5gtfSrRecv = 0;
727 U32 gUl5gtfBsrRecv = 0;
728 U32 gUl5gtfUeSchPick = 0;
729 U32 gUl5gtfPdcchSchd = 0;
730 U32 gUl5gtfAllocAllocated = 0;
731 U32 gUl5gtfUeRbAllocDone = 0;
732 U32 gUl5gtfUeRmvFnlzZeroBo = 0;
733 U32 gUl5gtfUeFnlzReAdd = 0;
734 U32 gUl5gtfPdcchSend = 0;
735 U32 gUl5gtfRbAllocFail = 0;
736 U32 ul5gtfsidUlMarkUl = 0;
737 U32 ul5gtfsidDlSchdPass = 0;
738 U32 ul5gtfsidDlAlreadyMarkUl = 0;
739 U32 ul5gtfTotSchdCnt = 0;
742 /* CQI Offset Index to Beta CQI Offset value mapping,
743 * stored as parts per 1000. Reserved is set to 0.
744 * Refer 36.213 sec 8.6.3 Tbl 8.6.3-3 */
/* e.g. index 2 -> 1125 meaning beta = 1.125; indices 0 and 1 are the
 * spec's reserved entries, stored as 0. */
745 U32 rgSchCmnBetaCqiOffstTbl[16] = {0, 0, 1125,
746 1250, 1375, 1625, 1750, 2000, 2250, 2500, 2875,
747 3125, 3500, 4000, 5000, 6250};
/* HARQ-ACK delta-offset index to beta value, stored as parts per 1000
 * (36.213 sec 8.6.3 Tbl 8.6.3-1, e.g. 2000 -> 2.0); the last entry is
 * the spec's reserved value, stored as 0. */
748 U32 rgSchCmnBetaHqOffstTbl[16] = {2000, 2500, 3125,
749 4000, 5000, 6250, 8000,10000, 12625, 15875, 20000,
750 31000, 50000,80000,126000,0};
751 U32 rgSchCmnBetaRiOffstTbl[16] = {1250, 1625, 2000,
752 2500, 3125, 4000, 5000, 6250, 8000, 10000,12625,
/* 3-bit differential CQI field to signed offset: field values 0..3 map to
 * offsets 0..+3, field values 4..7 wrap to -4..-1 (two's-complement style). */
754 S8 rgSchCmnDlCqiDiffOfst[8] = {0, 1, 2, 3, -4, -3, -2, -1};
756 /* Include CRS REs while calculating Efficiency */
/* Both arrays are indexed by number of antenna ports; only 1, 2 and 4 ports
 * are meaningful (indices 0 and 3 are unused placeholders).
 * rgSchCmnAntIdx maps the port count to a compact 0..2 index;
 * rgSchCmnNumResForCrs gives the CRS resource-element count to discount —
 * NOTE(review): per-RB-pair granularity presumed, confirm at the use site. */
757 CONSTANT PRIVATE U8 rgSchCmnAntIdx[5] = {0,0,1,0,2};
758 CONSTANT PRIVATE U8 rgSchCmnNumResForCrs[5] = {0,6,12,0,16};
/* Differential CQI offsets for aperiodic CQI reporting. NOTE(review): names
 * suggest the first applies to UE-selected subband reporting and the second
 * to eNB-configured subband reporting — confirm against the decode sites,
 * which are not visible in this chunk. */
765 S8 rgSchCmnApUeSelDiffCqi[4] = {1, 2, 3, 4};
766 S8 rgSchCmnApEnbConfDiffCqi[4] = {0, 1, 2, -1};
769 typedef struct rgSchCmnDlUeDciFrmtOptns
771 TfuDciFormat spfcDciFrmt; /* TM(Transmission Mode) specific DCI format.
772 * Search space : UE Specific by C-RNTI only. */
773 U8 spfcDciRAType; /* Resource Alloctn(RA) type for spfcDciFrmt */
774 TfuDciFormat prfrdDciFrmt; /* Preferred DCI format among the available
775 * options for TD (Transmit Diversity) */
776 U8 prfrdDciRAType; /* Resource Alloctn(RA) type for prfrdDciFrmt */
777 }RgSchCmnDlUeDciFrmtOptns;
780 /* DCI Format options for each Transmission Mode */
781 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[7] = {
782 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
783 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
784 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
785 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
786 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
787 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
788 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
792 /* DCI Format options for each Transmission Mode */
793 RgSchCmnDlUeDciFrmtOptns rgSchCmnDciFrmtOptns[9] = {
794 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
795 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
796 {TFU_DCI_FORMAT_2A,RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
797 {TFU_DCI_FORMAT_2, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
798 {TFU_DCI_FORMAT_1D,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
799 {TFU_DCI_FORMAT_1B,RG_SCH_CMN_RA_TYPE2, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2},
800 {TFU_DCI_FORMAT_1, RG_SCH_CMN_RA_TYPE0, TFU_DCI_FORMAT_1A, RG_SCH_CMN_RA_TYPE2}
805 typedef struct rgSchCmnDlImcsTbl
807 U8 modOdr; /* Modulation Order */
809 }RgSchCmnDlImcsTbl[29];
811 CONSTANT struct rgSchCmnMult235Info
813 U8 match; /* Closest number satisfying 2^a.3^b.5^c, with a bias
814 * towards the smaller number */
815 U8 prvMatch; /* Closest number not greater than array index
816 * satisfying 2^a.3^b.5^c */
817 } rgSchCmnMult235Tbl[110+1] = {
819 {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {6, 6}, {8, 8},
820 {9, 9}, {10, 10}, {10, 10}, {12, 12}, {12, 12}, {15, 12}, {15, 15},
821 {16, 16}, {16, 16}, {18, 18}, {18, 18}, {20, 20}, {20, 20}, {20, 20},
822 {24, 20}, {24, 24}, {25, 25}, {25, 25}, {27, 27}, {27, 27}, {30, 27},
823 {30, 30}, {30, 30}, {32, 32}, {32, 32}, {32, 32}, {36, 32}, {36, 36},
824 {36, 36}, {36, 36}, {40, 36}, {40, 40}, {40, 40}, {40, 40}, {45, 40},
825 {45, 40}, {45, 45}, {45, 45}, {48, 45}, {48, 48}, {48, 48}, {50, 50},
826 {50, 50}, {50, 50}, {54, 50}, {54, 54}, {54, 54}, {54, 54}, {54, 54},
827 {60, 54}, {60, 54}, {60, 60}, {60, 60}, {60, 60}, {64, 60}, {64, 64},
828 {64, 64}, {64, 64}, {64, 64}, {64, 64}, {72, 64}, {72, 64}, {72, 64},
829 {72, 72}, {72, 72}, {75, 72}, {75, 75}, {75, 75}, {75, 75}, {80, 75},
830 {80, 75}, {80, 80}, {81, 81}, {81, 81}, {81, 81}, {81, 81}, {81, 81},
831 {90, 81}, {90, 81}, {90, 81}, {90, 81}, {90, 90}, {90, 90}, {90, 90},
832 {90, 90}, {96, 90}, {96, 90}, {96, 96}, {96, 96}, {96, 96}, {100, 96},
833 {100, 100}, {100, 100}, {100, 100}, {100, 100}, {100, 100}, {108, 100},
834 {108, 100}, {108, 100}, {108, 108}, {108, 108}, {108, 108}
838 /* BI table from 36.321 Table 7.2.1 */
/* Backoff Indicator index -> RACH backoff time in milliseconds. */
839 CONSTANT PRIVATE S16 rgSchCmnBiTbl[RG_SCH_CMN_NUM_BI_VAL] = {
840 0, 10, 20, 30,40,60,80,120,160,240,320,480,960};
841 RgSchCmnUlCqiInfo rgSchCmnUlCqiTbl[RG_SCH_CMN_UL_NUM_CQI] = {
843 {RGSCH_CMN_QM_CQI_1,RGSCH_CMN_UL_EFF_CQI_1 },
844 {RGSCH_CMN_QM_CQI_2,RGSCH_CMN_UL_EFF_CQI_2 },
845 {RGSCH_CMN_QM_CQI_3,RGSCH_CMN_UL_EFF_CQI_3 },
846 {RGSCH_CMN_QM_CQI_4,RGSCH_CMN_UL_EFF_CQI_4 },
847 {RGSCH_CMN_QM_CQI_5,RGSCH_CMN_UL_EFF_CQI_5 },
848 {RGSCH_CMN_QM_CQI_6,RGSCH_CMN_UL_EFF_CQI_6 },
849 {RGSCH_CMN_QM_CQI_7,RGSCH_CMN_UL_EFF_CQI_7 },
850 {RGSCH_CMN_QM_CQI_8,RGSCH_CMN_UL_EFF_CQI_8 },
851 {RGSCH_CMN_QM_CQI_9,RGSCH_CMN_UL_EFF_CQI_9 },
852 {RGSCH_CMN_QM_CQI_10,RGSCH_CMN_UL_EFF_CQI_10 },
853 {RGSCH_CMN_QM_CQI_11,RGSCH_CMN_UL_EFF_CQI_11 },
854 {RGSCH_CMN_QM_CQI_12,RGSCH_CMN_UL_EFF_CQI_12 },
855 {RGSCH_CMN_QM_CQI_13,RGSCH_CMN_UL_EFF_CQI_13 },
856 {RGSCH_CMN_QM_CQI_14,RGSCH_CMN_UL_EFF_CQI_14 },
857 {RGSCH_CMN_QM_CQI_15,RGSCH_CMN_UL_EFF_CQI_15 },
861 /* This table maps a (delta_offset * 2 + 2) to a (beta * 8)
862 * where beta is 10^-(delta_offset/10) rounded off to nearest 1/8
864 PRIVATE U16 rgSchCmnUlBeta8Tbl[29] = {
865 6, RG_SCH_CMN_UL_INVALID_BETA8, 8, 9, 10, 11, 13, 14, 16, 18, 20, 23,
866 25, 28, 32, RG_SCH_CMN_UL_INVALID_BETA8, 40, RG_SCH_CMN_UL_INVALID_BETA8,
867 50, RG_SCH_CMN_UL_INVALID_BETA8, 64, RG_SCH_CMN_UL_INVALID_BETA8, 80,
868 RG_SCH_CMN_UL_INVALID_BETA8, 101, RG_SCH_CMN_UL_INVALID_BETA8, 127,
869 RG_SCH_CMN_UL_INVALID_BETA8, 160
873 /* QCI to service priority mapping; the array index is the QCI value. */
874 PRIVATE U8 rgSchCmnDlQciPrio[RG_SCH_CMN_MAX_QCI] = RG_SCH_CMN_QCI_TO_PRIO;
876 /* The configuration is efficiency measured per 1024 REs. */
877 /* The first element stands for when CQI is not known */
878 /* This table is used to translate CQI to its corresponding */
879 /* allocation parameters. These are currently from 36.213 */
880 /* Just this table needs to be edited for modifying */
881 /* the resource allocation behaviour */
883 /* ADD CQI to MCS mapping correction
884 * single dimensional array is replaced by 2 dimensions for different CFI*/
/* Single-layer CQI (0..15) -> PDSCH efficiency, one row per CFI value 0..3
 * (efficiency measured per 1024 REs — see the table comment above). */
885 PRIVATE U16 rgSchCmnCqiPdschEff[4][16] = {RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI1,
886 RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI2,RG_SCH_CMN_CQI_TO_PDSCH_EFF_CFI3};
/* Two-layer counterpart of rgSchCmnCqiPdschEff: CQI (0..15) -> PDSCH
 * efficiency per CFI value 0..3 for 2-layer transmission. */
888 PRIVATE U16 rgSchCmn2LyrCqiPdschEff[4][16] = {RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI0 ,RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI1,
889 RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI2, RG_SCH_CMN_2LYR_CQI_TO_PDSCH_EFF_CFI3};
891 /* This configuration determines the translation of a UE's CQI to its */
892 /* PDCCH coding efficiency. This may be edited based on the installation */
/* DL HARQ redundancy version transmission order: (re)transmission attempt
 * index -> RV to use. */
893 PRIVATE U8 rgSchCmnDlRvTbl[4] = {0, 2, 3, 1}; /* RVIdx sequence is corrected*/
895 /* Indexed by [DciFrmt].
896 * Considering the following definition in determining the dciFrmt index.
/* DCI format payload sizes, indexed by DCI format (per the comment above).
 * NOTE(review): filled elsewhere — the population site and the size unit
 * (presumably bits) are not visible in this chunk; confirm. */
911 PRIVATE U16 rgSchCmnDciFrmtSizes[10];
/* CQI (0..15) -> PDCCH coding efficiency (see translation comment above). */
914 PRIVATE U16 rgSchCmnCqiPdcchEff[16] = RG_SCH_CMN_CQI_TO_PDCCH_EFF;
918 RgSchTddUlDlSubfrmTbl rgSchTddUlDlSubfrmTbl = {
919 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME},
920 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
921 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
922 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
923 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
924 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME},
925 {RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME, RG_SCH_TDD_SPL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_UL_SUBFRAME, RG_SCH_TDD_DL_SUBFRAME}
930 U8 rgSchTddSpsDlMaxRetxTbl[RGSCH_MAX_TDD_UL_DL_CFG] = {
942 /* Special Subframes in OFDM symbols */
943 /* ccpu00134197-MOD-Correct the number of symbols */
944 RgSchTddSplSubfrmInfoTbl rgSchTddSplSubfrmInfoTbl = {
948 {11, 1, 1, 10, 1, 1},
956 /* PHICH 'm' value Table */
957 RgSchTddPhichMValTbl rgSchTddPhichMValTbl = {
958 {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
959 {0, 1, 0, 0, 1, 0, 1, 0, 0, 1},
960 {0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
961 {1, 0, 0, 0, 0, 0, 0, 0, 1, 1},
962 {0, 0, 0, 0, 0, 0, 0, 0, 1, 1},
963 {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
964 {1, 1, 0, 0, 0, 1, 1, 0, 0, 1}
967 /* PHICH 'K' value Table */
968 RgSchTddKPhichTbl rgSchTddKPhichTbl = {
969 {0, 0, 4, 7, 6, 0, 0, 4, 7, 6},
970 {0, 0, 4, 6, 0, 0, 0, 4, 6, 0},
971 {0, 0, 6, 0, 0, 0, 0, 6, 0, 0},
972 {0, 0, 6, 6, 6, 0, 0, 0, 0, 0},
973 {0, 0, 6, 6, 0, 0, 0, 0, 0, 0},
974 {0, 0, 6, 0, 0, 0, 0, 0, 0, 0},
975 {0, 0, 4, 6, 6, 0, 0, 4, 7, 0}
978 /* Uplink association index 'K' value Table */
979 RgSchTddUlAscIdxKDashTbl rgSchTddUlAscIdxKDashTbl = {
980 {0, 0, 6, 4, 0, 0, 0, 6, 4, 0},
981 {0, 0, 4, 0, 0, 0, 0, 4, 0, 0},
982 {0, 0, 4, 4, 4, 0, 0, 0, 0, 0},
983 {0, 0, 4, 4, 0, 0, 0, 0, 0, 0},
984 {0, 0, 4, 0, 0, 0, 0, 0, 0, 0},
985 {0, 0, 7, 7, 5, 0, 0, 7, 7, 0}
989 /* PUSCH 'K' value Table */
990 RgSchTddPuschTxKTbl rgSchTddPuschTxKTbl = {
991 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
992 {0, 6, 0, 0, 4, 0, 6, 0, 0, 4},
993 {0, 0, 0, 4, 0, 0, 0, 0, 4, 0},
994 {4, 0, 0, 0, 0, 0, 0, 0, 4, 4},
995 {0, 0, 0, 0, 0, 0, 0, 0, 4, 4},
996 {0, 0, 0, 0, 0, 0, 0, 0, 4, 0},
997 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1000 /* PDSCH to PUCCH Table for DL Harq Feed back. Based on the
1001 Downlink association set index 'K' table */
1002 U8 rgSchTddPucchTxTbl[7][10] = {
1003 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1004 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1005 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1006 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1007 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1008 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1009 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1012 /* Table to fetch the next DL sf idx for applying the
1013 new CFI. The next Dl sf Idx at which the new CFI
1014 is applied is always the starting Sf of the next ACK/NACK
1017 Ex: In Cfg-2, sf4 and sf9 are the only subframes at which
1018 a new ACK/NACK bundle of DL subframes can start
1020 D S U D D D S U D D D S U D D D S U D D
1023 dlSf Array for Cfg-2:
1024 sfNum: 0 1 3 4 5 6 8 9 0 1 3 4 5 6 8 9
1025 sfIdx: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1027 If CFI changes at sf0, nearest DL SF bundle >= 4 TTI is sf4
1028 So at sf4 the new CFI can be applied. To arrive at sf4 from
1029 sf0, the sfIdx has to be increased by 3 */
1031 U8 rgSchTddPdcchSfIncTbl[7][10] = {
1032 /* A/N Bundl: 0,1,5,6*/ {2, 1, 0, 0, 0, 2, 1, 0, 0, 0},
1033 /* A/N Bundl: 0,4,5,9*/ {2, 2, 0, 0, 3, 2, 2, 0, 0, 3},
1034 /* A/N Bundl: 4,9*/ {3, 6, 0, 5, 4, 3, 6, 0, 5, 4},
1035 /* A/N Bundl: 1,7,9*/ {4, 3, 0, 0, 0, 4, 5, 4, 6, 5},
1036 /* A/N Bundl: 0,6*/ {4, 3, 0, 0, 6, 5, 4, 7, 6, 5},
1037 /* A/N Bundl: 9*/ {8, 7, 0, 6, 5, 4, 12, 11, 10, 9},
1038 /* A/N Bundl: 0,1,5,6,9*/ {2, 1, 0, 0, 0, 2, 2, 0, 0, 3}
1042 /* combine compilation fixes */
1044 /* subframe offset values to be used when twoIntervalsConfig is enabled in UL
1046 RgSchTddSfOffTbl rgSchTddSfOffTbl = {
1047 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1048 {0, 0, 1, -1, 0, 0, 0, 1, -1, 0},
1049 {0, 0, 5, 0, 0, 0, 0, -5, 0, 0},
1050 {0, 0, 1, 1, -2, 0, 0, 0, 0, 0},
1051 {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
1052 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
1053 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1057 /* Table to determine when uplink SPS configured grants should
1058 * explicitly be reserved in a subframe. When entries are same
1059 * as that of Msg3SubfrmTbl, indicates competition with msg3.
1060 * As of now, this is same as Msg3SubfrmTbl (leaving out uldlcfg 2),
1061 * except that all 255s are now zeros. */
1062 RgSchTddSpsUlRsrvTbl rgSchTddSpsUlRsrvTbl = {
1063 {0, 0, 0, 6, 8, 0, 0, 0, 6, 8},
1064 {0, 0, 6, 9, 0, 0, 0, 6, 9, 0},
1065 {0, 0, 10, 0, 0, 0, 0, 10, 0, 0},
1066 {0, 0, 0, 0, 8, 0, 7, 7, 14, 0},
1067 {0, 0, 0, 9, 0, 0, 7, 15, 0, 0},
1068 {0, 0, 10, 0, 0, 0, 16, 0, 0, 0},
1069 {0, 0, 0, 0, 8, 0, 0, 0, 9, 0}
1072 /* Inverse DL Assoc Set index Table */
1073 RgSchTddInvDlAscSetIdxTbl rgSchTddInvDlAscSetIdxTbl = {
1074 {4, 6, 0, 0, 0, 4, 6, 0, 0, 0},
1075 {7, 6, 0, 0, 4, 7, 6, 0, 0, 4},
1076 {7, 6, 0, 4, 8, 7, 6, 0, 4, 8},
1077 {4, 11, 0, 0, 0, 7, 6, 6, 5, 5},
1078 {12, 11, 0, 0, 8, 7, 7, 6, 5, 4},
1079 {12, 11, 0, 9, 8, 7, 6, 5, 4, 13},
1080 {7, 7, 0, 0, 0, 7, 7, 0, 0, 5}
1083 #endif /* (LTEMAC_SPS ) */
1085 /* Number of Uplink subframes Table, one entry per TDD UL/DL configuration 0..6 */
1086 PRIVATE U8 rgSchTddNumUlSf[] = {6, 4, 2, 3, 2, 1, 5};
1088 /* Uplink HARQ processes Table, per TDD UL/DL configuration 0..6.
    (Comment corrected: it previously said "Downlink", but the type and
    symbol name are UL — it had been swapped with the table below.) */
1089 RgSchTddUlNumHarqProcTbl rgSchTddUlNumHarqProcTbl = { 7, 4, 2, 3, 2, 1, 6};
1091 /* Downlink HARQ processes Table, per TDD UL/DL configuration 0..6.
    (Comment corrected: it previously said "Uplink", but the type and
    symbol name are DL — it had been swapped with the table above.) */
1092 RgSchTddDlNumHarqProcTbl rgSchTddDlNumHarqProcTbl = { 4, 7, 10, 9, 12, 15, 6};
1094 /* Downlink association index set 'K' value Table */
1095 RgSchTddDlAscSetIdxKTbl rgSchTddDlAscSetIdxKTbl = {
1096 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1098 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1100 { {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 4, 6}}, {0, {0}}, {0, {0}} },
1102 { {0, {0}}, {0, {0}}, {3, {7, 6, 11}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1104 { {0, {0}}, {0, {0}}, {4, {12, 8, 7, 11}}, {4, {6, 5, 4, 7}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1106 { {0, {0}}, {0, {0}}, {9, {13, 12, 9, 8, 7, 5, 4, 11, 6}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1108 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1111 /* ccpu132282-ADD-the table rgSchTddDlAscSetIdxKTbl is rearranged in
1112 * decreasing order of Km, this is used to calculate the NCE used for
1113 * calculating N1Pucch Resource for Harq*/
1114 RgSchTddDlAscSetIdxKTbl rgSchTddDlHqPucchResCalTbl = {
1115 { {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}}, {0, {0}}, {0, {0}}, {1, {6}}, {0, {0}}, {1, {4}} },
1117 { {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}}, {0, {0}}, {0, {0}}, {2, {7, 6}}, {1, {4}}, {0, {0}} },
1119 { {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {4, {8, 7, 6, 4}}, {0, {0}}, {0, {0}} },
1121 { {0, {0}}, {0, {0}}, {3, {11, 7, 6}}, {2, {6, 5}}, {2, {5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1123 { {0, {0}}, {0, {0}}, {4, {12, 11, 8, 7}}, {4, {7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1125 { {0, {0}}, {0, {0}}, {9, {13, 12, 11, 9, 8, 7, 6, 5, 4}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}}, {0, {0}} },
1127 { {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {1, {5}}, {0, {0}}, {0, {0}}, {1, {7}}, {1, {7}}, {0, {0}} }
1130 /* Minimum number of Ack/Nack feedback information to be
1131 stored for each UL-DL configuration (one entry per TDD cfg 0..6) */
1132 RgSchTddANFdbkMapTbl rgSchTddANFdbkMapTbl = {4, 4, 2, 3, 2, 1, 5};
1134 /* Uplink switch points and number of UL subframes Table */
1135 RgSchTddMaxUlSubfrmTbl rgSchTddMaxUlSubfrmTbl = {
1136 {2,3,3}, {2,2,2}, {2,1,1}, {1,3,0}, {1,2,0}, {1,1,0}, {2,3,2}
1139 /* Uplink switch points and number of DL subframes Table */
1140 RgSchTddMaxDlSubfrmTbl rgSchTddMaxDlSubfrmTbl = {
1141 {2,2,2}, {2,3,3}, {2,4,4}, {1,7,0}, {1,8,0}, {1,9,0}, {2,2,3}
1144 /* Number of UL subframes present before a particular subframe */
1145 RgSchTddNumUlSubfrmTbl rgSchTddNumUlSubfrmTbl = {
1146 {0, 0, 1, 2, 3, 3, 3, 4, 5, 6},
1147 {0, 0, 1, 2, 2, 2, 2, 3, 4, 4},
1148 {0, 0, 1, 1, 1, 1, 1, 2, 2, 2},
1149 {0, 0, 1, 2, 3, 3, 3, 3, 3, 3},
1150 {0, 0, 1, 2, 2, 2, 2, 2, 2, 2},
1151 {0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
1152 {0, 0, 1, 2, 3, 3, 3, 4, 5, 5}
1155 /* Number of DL subframes present till a particular subframe */
1156 RgSchTddNumDlSubfrmTbl rgSchTddNumDlSubfrmTbl = {
1157 {1, 2, 2, 2, 2, 3, 4, 4, 4, 4},
1158 {1, 2, 2, 2, 3, 4, 5, 5, 5, 6},
1159 {1, 2, 2, 3, 4, 5, 6, 6, 7, 8},
1160 {1, 2, 2, 2, 2, 3, 4, 5, 6, 7},
1161 {1, 2, 2, 2, 3, 4, 5, 6, 7, 8},
1162 {1, 2, 2, 3, 4, 5, 6, 7, 8, 9},
1163 {1, 2, 2, 2, 2, 3, 4, 4, 4, 5}
1167 /* Nearest possible UL subframe Index from UL subframe
1168 * DL Index < UL Index */
1169 RgSchTddLowDlSubfrmIdxTbl rgSchTddLowDlSubfrmIdxTbl = {
1170 {0, 1, 1, 1, 1, 5, 6, 6, 6, 6},
1171 {0, 1, 1, 1, 4, 5, 6, 6, 6, 9},
1172 {0, 1, 1, 3, 4, 5, 6, 6, 8, 9},
1173 {0, 1, 1, 1, 1, 5, 6, 7, 8, 9},
1174 {0, 1, 1, 1, 4, 5, 6, 7, 8, 9},
1175 {0, 1, 1, 3, 4, 5, 6, 7, 8, 9},
1176 {0, 1, 1, 1, 1, 5, 6, 6, 6, 9}
1179 /* Nearest possible DL subframe Index from UL subframe
1180 * DL Index > UL Index
1181 * 10 represents Next SFN low DL Idx */
1182 RgSchTddHighDlSubfrmIdxTbl rgSchTddHighDlSubfrmIdxTbl = {
1183 {0, 1, 5, 5, 5, 5, 6, 10, 10, 10},
1184 {0, 1, 4, 4, 4, 5, 6, 9, 9, 9},
1185 {0, 1, 3, 3, 4, 5, 6, 8, 8, 9},
1186 {0, 1, 5, 5, 5, 5, 6, 7, 8, 9},
1187 {0, 1, 4, 4, 4, 5, 6, 7, 8, 9},
1188 {0, 1, 3, 3, 4, 5, 6, 7, 8, 9},
1189 {0, 1, 5, 5, 5, 5, 6, 9, 9, 9}
1192 /* RACH Message3 related information */
1193 RgSchTddMsg3SubfrmTbl rgSchTddMsg3SubfrmTbl = {
1194 {7, 6, 255, 255, 255, 7, 6, 255, 255, 255},
1195 {7, 6, 255, 255, 8, 7, 6, 255, 255, 8},
1196 {7, 6, 255, 9, 8, 7, 6, 255, 9, 8},
1197 {12, 11, 255, 255, 255, 7, 6, 6, 6, 13},
1198 {12, 11, 255, 255, 8, 7, 6, 6, 14, 13},
1199 {12, 11, 255, 9, 8, 7, 6, 15, 14, 13},
1200 {7, 6, 255, 255, 255, 7, 6, 255, 255, 8}
1203 /* ccpu00132341-DEL Removed rgSchTddRlsDlSubfrmTbl and used Kset table for
1204 * releasing DL HARQs */
1206 /* DwPTS Scheduling Changes Start */
1207 /* Provides the number of Cell Reference Signals in DwPTS */
/* CRS resource-element count inside DwPTS, indexed as
 * [special-subframe-config group][antenna-port index].
 * Row 0 covers special subframe configs 1,2,3,6,7,8; row 1 covers config 4
 * (per the inline comments below).
 * NOTE(review): columns presumably correspond to 1/2/4 antenna ports —
 * confirm against the code that reads this table. */
1209 PRIVATE U8 rgSchCmnDwptsCrs[2][3] = {/* [Spl Sf cfg][Ant Port] */
1210 {4, 8, 16}, /* Spl Sf cfg 1,2,3,6,7,8 */
1211 {6, 12, 20}, /* Spl Sf cfg 4 */
/* Per-index ITBS adjustment applied when scheduling in DwPTS; the nine
 * signed deltas come from the RG_SCH_DWPTS_ITBS_ADJ macro defined
 * elsewhere in the project headers. */
1214 PRIVATE S8 rgSchCmnSplSfDeltaItbs[9] = RG_SCH_DWPTS_ITBS_ADJ;
1215 /* DwPTS Scheduling Changes End */
/* Buffer Status Report index -> buffer size in bytes (64 levels).
 * Values follow 3GPP TS 36.321 Table 6.1.3.1-1; index 63, which the spec
 * defines only as "> 150000", is represented here by the cap 220000. */
1219 PRIVATE U32 rgSchCmnBsrTbl[64] = {
1220 0, 10, 12, 14, 17, 19, 22, 26,
1221 31, 36, 42, 49, 57, 67, 78, 91,
1222 107, 125, 146, 171, 200, 234, 274, 321,
1223 376, 440, 515, 603, 706, 826, 967, 1132,
1224 1326, 1552, 1817, 2127, 2490, 2915, 3413, 3995,
1225 4677, 5476, 6411, 7505, 8787, 10287, 12043, 14099,
1226 16507, 19325, 22624, 26487, 31009, 36304, 42502, 49759,
1227 58255, 68201, 79846, 93479, 109439, 128125, 150000, 220000
/* Extended Buffer Status Report index -> buffer size in bytes (64 levels).
 * Values follow 3GPP TS 36.321 Table 6.1.3.1-2; index 63, which the spec
 * defines only as "> 3000000", is represented here by the cap 3100000. */
1230 PRIVATE U32 rgSchCmnExtBsrTbl[64] = {
1231 0, 10, 13, 16, 19, 23, 29, 35,
1232 43, 53, 65, 80, 98, 120, 147, 181,
1233 223, 274, 337, 414, 509, 625, 769, 945,
1234 1162, 1429, 1757, 2161, 2657, 3267, 4017, 4940,
1235 6074, 7469, 9185, 11294, 13888, 17077, 20999, 25822,
1236 31752, 39045, 48012, 59039, 72598, 89272, 109774, 134986,
1237 165989, 204111, 250990, 308634, 379519, 466683, 573866, 705666,
1238 867737, 1067031, 1312097, 1613447, 1984009, 2439678, 3000000, 3100000
/* UL CQI -> TBS index mapping, dimensioned by cyclic-prefix type and UL CQI
 * value. Defined without an initializer here; presumably populated during
 * scheduler initialisation — confirm where it is written before relying on
 * its contents. */
1241 U8 rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_MAX_CP][RG_SCH_CMN_UL_NUM_CQI];
1243 RgSchTbSzTbl rgTbSzTbl = {
1245 {16, 32, 56, 88, 120, 152, 176, 208, 224, 256, 288, 328, 344, 376, 392, 424, 456, 488, 504, 536, 568, 600, 616, 648, 680, 712, 744, 776, 776, 808, 840, 872, 904, 936, 968, 1000, 1032, 1032, 1064, 1096, 1128, 1160, 1192, 1224, 1256, 1256, 1288, 1320, 1352, 1384, 1416, 1416, 1480, 1480, 1544, 1544, 1608, 1608, 1608, 1672, 1672, 1736, 1736, 1800, 1800, 1800, 1864, 1864, 1928, 1928, 1992, 1992, 2024, 2088, 2088, 2088, 2152, 2152, 2216, 2216, 2280, 2280, 2280, 2344, 2344, 2408, 2408, 2472, 2472, 2536, 2536, 2536, 2600, 2600, 2664, 2664, 2728, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 2984, 2984, 3112},
1246 {24, 56, 88, 144, 176, 208, 224, 256, 328, 344, 376, 424, 456, 488, 520, 568, 600, 632, 680, 712, 744, 776, 808, 872, 904, 936, 968, 1000, 1032, 1064, 1128, 1160, 1192, 1224, 1256, 1288, 1352, 1384, 1416, 1416, 1480, 1544, 1544, 1608, 1608, 1672, 1736, 1736, 1800, 1800, 1864, 1864, 1928, 1992, 1992, 2024, 2088, 2088, 2152, 2152, 2216, 2280, 2280, 2344, 2344, 2408, 2472, 2472, 2536, 2536, 2600, 2600, 2664, 2728, 2728, 2792, 2792, 2856, 2856, 2856, 2984, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008},
1247 {32, 72, 144, 176, 208, 256, 296, 328, 376, 424, 472, 520, 568, 616, 648, 696, 744, 776, 840, 872, 936, 968, 1000, 1064, 1096, 1160, 1192, 1256, 1288, 1320, 1384, 1416, 1480, 1544, 1544, 1608, 1672, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2024, 2088, 2088, 2152, 2216, 2216, 2280, 2344, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2664, 2728, 2792, 2856, 2856, 2856, 2984, 2984, 3112, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3368, 3496, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968},
1248 {40, 104, 176, 208, 256, 328, 392, 440, 504, 568, 616, 680, 744, 808, 872, 904, 968, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1864, 1928, 1992, 2024, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6200, 6456, 6456},
1249 {56, 120, 208, 256, 328, 408, 488, 552, 632, 696, 776, 840, 904, 1000, 1064, 1128, 1192, 1288, 1352, 1416, 1480, 1544, 1608, 1736, 1800, 1864, 1928, 1992, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2600, 2664, 2728, 2792, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 4968, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992},
1250 {72, 144, 224, 328, 424, 504, 600, 680, 776, 872, 968, 1032, 1128, 1224, 1320, 1384, 1480, 1544, 1672, 1736, 1864, 1928, 2024, 2088, 2216, 2280, 2344, 2472, 2536, 2664, 2728, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3752, 3880, 4008, 4008, 4136, 4264, 4392, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9528},
1251 {328, 176, 256, 392, 504, 600, 712, 808, 936, 1032, 1128, 1224, 1352, 1480, 1544, 1672, 1736, 1864, 1992, 2088, 2216, 2280, 2408, 2472, 2600, 2728, 2792, 2984, 2984, 3112, 3240, 3368, 3496, 3496, 3624, 3752, 3880, 4008, 4136, 4136, 4264, 4392, 4584, 4584, 4776, 4776, 4968, 4968, 5160, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448},
1252 {104, 224, 328, 472, 584, 712, 840, 968, 1096, 1224, 1320, 1480, 1608, 1672, 1800, 1928, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536},
1253 {120, 256, 392, 536, 680, 808, 968, 1096, 1256, 1384, 1544, 1672, 1800, 1928, 2088, 2216, 2344, 2536, 2664, 2792, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4264, 4392, 4584, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12216, 12576, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15264},
1254 {136, 296, 456, 616, 776, 936, 1096, 1256, 1416, 1544, 1736, 1864, 2024, 2216, 2344, 2536, 2664, 2856, 2984, 3112, 3368, 3496, 3624, 3752, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 5160, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11832, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568},
1255 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8504, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080},
1256 {176, 376, 584, 776, 1000, 1192, 1384, 1608, 1800, 2024, 2216, 2408, 2600, 2792, 2984, 3240, 3496, 3624, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152},
1257 {208, 440, 680, 904, 1128, 1352, 1608, 1800, 2024, 2280, 2472, 2728, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4776, 4968, 5352, 5544, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 8760, 9144, 9528, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11064, 11448, 11832, 11832, 12216, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 23688, 24496, 24496, 24496, 24496, 25456},
1258 {224, 488, 744, 1000, 1256, 1544, 1800, 2024, 2280, 2536, 2856, 3112, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 26416, 27376, 27376, 27376, 27376, 28336, 28336},
1259 {256, 552, 840, 1128, 1416, 1736, 1992, 2280, 2600, 2856, 3112, 3496, 3752, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 5992, 6200, 6456, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704},
1260 {280, 600, 904, 1224, 1544, 1800, 2152, 2472, 2728, 3112, 3368, 3624, 4008, 4264, 4584, 4968, 5160, 5544, 5736, 6200, 6456, 6712, 6968, 7224, 7736, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008},
1261 {328, 632, 968, 1288, 1608, 1928, 2280, 2600, 2984, 3240, 3624, 3880, 4264, 4584, 4968, 5160, 5544, 5992, 6200, 6456, 6712, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 35160},
1262 {336, 696, 1064, 1416, 1800, 2152, 2536, 2856, 3240, 3624, 4008, 4392, 4776, 5160, 5352, 5736, 6200, 6456, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 39232},
1263 {376, 776, 1160, 1544, 1992, 2344, 2792, 3112, 3624, 4008, 4392, 4776, 5160, 5544, 5992, 6200, 6712, 7224, 7480, 7992, 8248, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14112, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816},
1264 {408, 840, 1288, 1736, 2152, 2600, 2984, 3496, 3880, 4264, 4776, 5160, 5544, 5992, 6456, 6968, 7224, 7736, 8248, 8504, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19080, 19848, 20616, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888},
1265 {440, 904, 1384, 1864, 2344, 2792, 3240, 3752, 4136, 4584, 5160, 5544, 5992, 6456, 6968, 7480, 7992, 8248, 8760, 9144, 9912, 10296, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 20616, 21384, 22152, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 48936, 51024, 51024, 51024},
1266 {488, 1000, 1480, 1992, 2472, 2984, 3496, 4008, 4584, 4968, 5544, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 9912, 10680, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056},
1267 {520, 1064, 1608, 2152, 2664, 3240, 3752, 4264, 4776, 5352, 5992, 6456, 6968, 7480, 7992, 8504, 9144, 9528, 10296, 10680, 11448, 11832, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 16992, 17568, 18336, 19080, 19080, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256},
1268 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776},
1269 {584, 1192, 1800, 2408, 2984, 3624, 4264, 4968, 5544, 5992, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 22920, 23688, 24496, 25456, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592},
1270 {616, 1256, 1864, 2536, 3112, 3752, 4392, 5160, 5736, 6200, 6968, 7480, 8248, 8760, 9528, 10296, 10680, 11448, 12216, 12576, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 29296, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 66592, 68808, 68808, 68808, 71112},
1271 {712, 1480, 2216, 2984, 3752, 4392, 5160, 5992, 6712, 7480, 8248, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 14688, 15264, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376, 75376}
1274 {32, 88, 152, 208, 256, 328, 376, 424, 488, 536, 600, 648, 712, 776, 808, 872, 936, 1000, 1032, 1096, 1160, 1224, 1256, 1320, 1384, 1416, 1480, 1544, 1608, 1672, 1736, 1800, 1800, 1864, 1928, 1992, 2088, 2088, 2152, 2216, 2280, 2344, 2408, 2472, 2536, 2536, 2600, 2664, 2728, 2792, 2856, 2856, 2984, 2984, 3112, 3112, 3240, 3240, 3240, 3368, 3368, 3496, 3496, 3624, 3624, 3624, 3752, 3752, 3880, 3880, 4008, 4008, 4008, 4136, 4136, 4136, 4264, 4264, 4392, 4392, 4584, 4584, 4584, 4776, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 5992, 5992, 6200},
1275 {56, 144, 208, 256, 344, 424, 488, 568, 632, 712, 776, 872, 936, 1000, 1064, 1160, 1224, 1288, 1384, 1416, 1544, 1608, 1672, 1736, 1800, 1864, 1992, 2024, 2088, 2152, 2280, 2344, 2408, 2472, 2536, 2600, 2728, 2792, 2856, 2856, 2984, 3112, 3112, 3240, 3240, 3368, 3496, 3496, 3624, 3624, 3752, 3752, 3880, 4008, 4008, 4008, 4136, 4136, 4264, 4264, 4392, 4584, 4584, 4776, 4776, 4776, 4968, 4968, 5160, 5160, 5160, 5160, 5352, 5544, 5544, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992},
1276 {72, 176, 256, 328, 424, 520, 616, 696, 776, 872, 968, 1064, 1160, 1256, 1320, 1416, 1544, 1608, 1672, 1800, 1864, 1992, 2088, 2152, 2216, 2344, 2408, 2536, 2600, 2664, 2792, 2856, 2984, 3112, 3112, 3240, 3368, 3368, 3496, 3624, 3624, 3752, 3880, 4008, 4008, 4136, 4264, 4264, 4392, 4584, 4584, 4584, 4776, 4776, 4968, 5160, 5160, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5736, 5992, 5992, 6200, 6200, 6200, 6456, 6456, 6456, 6712, 6712, 6712, 6968, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7736, 7992, 7992, 7992, 8248, 8248, 8248, 8504, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912},
1277 {104, 208, 328, 440, 568, 680, 808, 904, 1032, 1160, 1256, 1384, 1480, 1608, 1736, 1864, 1992, 2088, 2216, 2344, 2472, 2536, 2664, 2792, 2856, 2984, 3112, 3240, 3368, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4392, 4584, 4776, 4776, 4968, 4968, 5160, 5352, 5352, 5544, 5544, 5736, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6712, 6968, 6968, 7224, 7224, 7224, 7480, 7480, 7736, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12576, 12960, 12960},
1278 {120, 256, 408, 552, 696, 840, 1000, 1128, 1288, 1416, 1544, 1736, 1864, 1992, 2152, 2280, 2408, 2600, 2728, 2856, 2984, 3112, 3240, 3496, 3624, 3752, 3880, 4008, 4136, 4264, 4392, 4584, 4776, 4968, 4968, 5160, 5352, 5544, 5544, 5736, 5992, 5992, 6200, 6200, 6456, 6456, 6712, 6968, 6968, 7224, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8248, 8504, 8504, 8760, 8760, 9144, 9144, 9144, 9528, 9528, 9912, 9912, 9912, 10296, 10296, 10296, 10680, 10680, 11064, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840},
1279 {144, 328, 504, 680, 872, 1032, 1224, 1384, 1544, 1736, 1928, 2088, 2280, 2472, 2664, 2792, 2984, 3112, 3368, 3496, 3752, 3880, 4008, 4264, 4392, 4584, 4776, 4968, 5160, 5352, 5544, 5736, 5736, 5992, 6200, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7480, 7736, 7992, 7992, 8248, 8504, 8760, 8760, 9144, 9144, 9528, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11064, 11448, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12576, 12960, 12960, 13536, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19080},
1280 {176, 392, 600, 808, 1032, 1224, 1480, 1672, 1864, 2088, 2280, 2472, 2728, 2984, 3112, 3368, 3496, 3752, 4008, 4136, 4392, 4584, 4776, 4968, 5160, 5352, 5736, 5992, 5992, 6200, 6456, 6712, 6968, 6968, 7224, 7480, 7736, 7992, 8248, 8248, 8504, 8760, 9144, 9144, 9528, 9528, 9912, 9912, 10296, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 11832, 12576, 12576, 12960, 12960, 12960, 13536, 13536, 14112, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 18336, 19080, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 20616, 21384, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920},
1281 {224, 472, 712, 968, 1224, 1480, 1672, 1928, 2216, 2472, 2664, 2984, 3240, 3368, 3624, 3880, 4136, 4392, 4584, 4968, 5160, 5352, 5736, 5992, 6200, 6456, 6712, 6712, 6968, 7224, 7480, 7736, 7992, 8248, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 10680, 11064, 11448, 11448, 11832, 11832, 12216, 12576, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 14688, 14688, 15264, 15264, 15840, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376},
1282 {256, 536, 808, 1096, 1384, 1672, 1928, 2216, 2536, 2792, 3112, 3368, 3624, 3880, 4264, 4584, 4776, 4968, 5352, 5544, 5992, 6200, 6456, 6712, 6968, 7224, 7480, 7736, 7992, 8504, 8760, 9144, 9144, 9528, 9912, 9912, 10296, 10680, 11064, 11064, 11448, 11832, 12216, 12216, 12576, 12960, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15264, 15840, 15840, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19080, 19848, 19848, 19848, 20616, 20616, 21384, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 30576},
1283 {296, 616, 936, 1256, 1544, 1864, 2216, 2536, 2856, 3112, 3496, 3752, 4136, 4392, 4776, 5160, 5352, 5736, 5992, 6200, 6712, 6968, 7224, 7480, 7992, 8248, 8504, 8760, 9144, 9528, 9912, 10296, 10296, 10680, 11064, 11448, 11832, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 20616, 21384, 21384, 24264, 24264, 24264, 22920, 22920, 23688, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 27376, 28336, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160},
1284 {328, 680, 1032, 1384, 1736, 2088, 2472, 2792, 3112, 3496, 3880, 4264, 4584, 4968, 5352, 5736, 5992, 6200, 6712, 6968, 7480, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10296, 10680, 11064, 11448, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15264, 15840, 16416, 16416, 16992, 16992, 17568, 18336, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 20616, 21384, 21384, 24264, 24264, 22920, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888},
1285 {376, 776, 1192, 1608, 2024, 2408, 2792, 3240, 3624, 4008, 4392, 4776, 5352, 5736, 5992, 6456, 6968, 7224, 7736, 7992, 8504, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16416, 16992, 17568, 17568, 18336, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816},
1286 {440, 904, 1352, 1800, 2280, 2728, 3240, 3624, 4136, 4584, 4968, 5544, 5992, 6456, 6712, 7224, 7736, 8248, 8760, 9144, 9528, 9912, 10680, 11064, 11448, 11832, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15264, 15840, 16416, 16992, 17568, 17568, 18336, 19080, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22152, 22920, 23688, 23688, 24496, 24496, 25456, 25456, 25456, 25456, 27376, 27376, 28336, 28336, 28336, 29296, 29296, 30576, 30576, 30576, 31704, 31704, 32856, 32856, 32856, 34008, 34008, 35160, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 46888, 48936, 48936, 48936, 48936, 51024},
1287 {488, 1000, 1544, 2024, 2536, 3112, 3624, 4136, 4584, 5160, 5736, 6200, 6712, 7224, 7736, 8248, 8760, 9144, 9912, 10296, 10680, 11448, 11832, 12216, 12960, 13536, 14112, 14688, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 18336, 19080, 19848, 19848, 20616, 21384, 21384, 22152, 22920, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 26416, 27376, 27376, 28336, 29296, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 52752, 55056, 55056, 55056, 55056, 57336, 57336},
1288 {552, 1128, 1736, 2280, 2856, 3496, 4008, 4584, 5160, 5736, 6200, 6968, 7480, 7992, 8504, 9144, 9912, 10296, 11064, 11448, 12216, 12576, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22152, 22920, 23688, 24496, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776},
1289 {600, 1224, 1800, 2472, 3112, 3624, 4264, 4968, 5544, 6200, 6712, 7224, 7992, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15264, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 23688, 24496, 25456, 25456, 26416, 27376, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808},
1290 {632, 1288, 1928, 2600, 3240, 3880, 4584, 5160, 5992, 6456, 7224, 7736, 8504, 9144, 9912, 10296, 11064, 11832, 12216, 12960, 13536, 14112, 14688, 15840, 16416, 16992, 17568, 18336, 19080, 19848, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 24496, 25456, 26416, 26416, 27376, 28336, 28336, 29296, 30576, 30576, 31704, 31704, 32856, 32856, 34008, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 68808, 71112, 71112, 71112, 71112},
1291 {696, 1416, 2152, 2856, 3624, 4392, 5160, 5736, 6456, 7224, 7992, 8760, 9528, 10296, 10680, 11448, 12216, 12960, 13536, 14688, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 26416, 27376, 28336, 29296, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 71112, 73712, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 78704},
1292 {776, 1544, 2344, 3112, 4008, 4776, 5544, 6200, 7224, 7992, 8760, 9528, 10296, 11064, 11832, 12576, 13536, 14112, 15264, 15840, 16416, 17568, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 23688, 24496, 25456, 26416, 27376, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 32856, 34008, 35160, 35160, 36696, 37888, 37888, 39232, 39232, 40576, 40576, 42368, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936},
1293 {840, 1736, 2600, 3496, 4264, 5160, 5992, 6968, 7736, 8504, 9528, 10296, 11064, 12216, 12960, 13536, 14688, 15264, 16416, 16992, 18336, 19080, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 25456, 26416, 27376, 28336, 29296, 30576, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 61664, 63776, 63776, 66592, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800},
1294 {904, 1864, 2792, 3752, 4584, 5544, 6456, 7480, 8248, 9144, 10296, 11064, 12216, 12960, 14112, 14688, 15840, 16992, 17568, 18336, 19848, 20616, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 42368, 43816, 45352, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 97896, 101840, 101840, 101840},
1295 {1000, 1992, 2984, 4008, 4968, 5992, 6968, 7992, 9144, 9912, 11064, 12216, 12960, 14112, 15264, 15840, 16992, 18336, 19080, 19848, 21384, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 31704, 32856, 34008, 35160, 36696, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136},
1296 {1064, 2152, 3240, 4264, 5352, 6456, 7480, 8504, 9528, 10680, 11832, 12960, 14112, 15264, 16416, 16992, 18336, 19080, 20616, 21384, 22920, 23688, 24496, 25456, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 43816, 45352, 46888, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 97896, 101840, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816},
1297 {1128, 2280, 3496, 4584, 5736, 6968, 7992, 9144, 10296, 11448, 12576, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 27376, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 40576, 42368, 43816, 45352, 45352, 46888, 48936, 48936, 51024, 51024, 52752, 55056, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840,101840,101840,101840,105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496},
1298 {1192, 2408, 3624, 4968, 5992, 7224, 8504, 9912, 11064, 12216, 13536, 14688, 15840, 16992, 18336, 19848, 20616, 22152, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 42368, 43816, 45352, 46888, 46888, 48936, 51024, 51024, 52752, 52752, 55056, 57336, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 84760, 84760, 84760, 87936, 87936, 90816, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 105528, 110136, 110136, 110136, 115040, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208},
1299 {1256, 2536, 3752, 5160, 6200, 7480, 8760, 10296, 11448, 12576, 14112, 15264, 16416, 17568, 19080, 20616, 21384, 22920, 24496, 25456, 26416, 28336, 29296, 30576, 31704, 32856, 34008, 35160, 36696, 37888, 39232, 40576, 42368, 43816, 43816, 45352, 46888, 48936, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 61664, 63776, 63776, 66592, 66592, 68808, 71112, 71112, 73712, 73712, 76208, 76208, 78704, 78704, 81176, 81176, 81176, 84760, 84760, 87936, 87936, 87936, 90816, 90816, 93800, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040,115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 124464, 128496, 128496, 128496, 128496, 133208, 133208, 133208, 133208, 137792, 137792, 137792, 142248},
1300 {1480, 2984, 4392, 5992, 7480, 8760, 10296, 11832, 13536, 14688, 16416, 17568, 19080, 20616, 22152, 23688, 25456, 26416, 28336, 29296, 30576, 32856, 34008, 35160, 36696, 37888, 40576, 40576, 42368, 43816, 45352, 46888, 48936, 51024, 52752, 52752, 55056, 55056, 57336, 59256, 59256, 61664, 63776, 63776, 66592, 68808, 68808, 71112, 73712, 75376, 75376, 75376, 75376, 75376, 75376, 81176, 84760, 84760, 87936, 87936, 90816, 90816, 93800, 93800, 97896, 97896, 97896, 101840, 101840, 105528, 105528, 105528, 110136, 110136, 110136, 110136, 115040, 115040, 115040, 119816, 119816, 119816, 124464, 124464, 124464, 128496, 128496, 128496, 133208, 133208, 133208, 137792, 137792, 137792, 142248, 142248, 142248, 146856, 146856,149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776, 149776}
1303 RgSchUlIMcsTbl rgUlIMcsTbl = {
1304 {2, 0}, {2, 1}, {2, 2}, {2, 3}, {2, 4}, {2, 5},
1305 {2, 6}, {2, 7}, {2, 8}, {2, 9}, {2, 10},
1306 {4, 10}, {4, 11}, {4, 12}, {4, 13}, {4, 14},
1307 {4, 15}, {4, 16}, {4, 17}, {4, 18}, {4, 19},
1308 {6, 19}, {6, 20}, {6, 21}, {6, 22}, {6, 23},
1309 {6, 24}, {6, 25}, {6, 26}
1311 RgSchUeCatTbl rgUeCatTbl = {
1312 /*Column1:Maximum number of bits of an UL-SCH
1313 transport block transmitted within a TTI
1315 Column2:Maximum number of bits of a DLSCH
1316 transport block received within a TTI
1318 Column3:Total number of soft channel bits
1320 Column4:Support for 64QAM in UL
1322 Column5:Maximum number of DL-SCH transport
1323 block bits received within a TTI
1325 Column6:Maximum number of supported layers for
1326 spatial multiplexing in DL
1328 {5160, {10296,0}, 250368, FALSE, 10296, 1},
1329 {25456, {51024,0}, 1237248, FALSE, 51024, 2},
1330 {51024, {75376,0}, 1237248, FALSE, 102048, 2},
1331 {51024, {75376,0}, 1827072, FALSE, 150752, 2},
1332 {75376, {149776,0}, 3667200, TRUE, 299552, 4},
1333 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1334 {51024, {75376,149776}, 3654144, FALSE, 301504, 4},
1335 {149776,{299856,0}, 35982720,TRUE, 2998560, 8}
1338 /* [ccpu00138532]-ADD-The below table stores the min HARQ RTT time
1339 in Downlink for TDD and FDD. Indices 0 to 6 map to tdd UL DL config 0-6.
1340 Index 7 map to FDD */
1341 U8 rgSchCmnHarqRtt[8] = {4,7,10,9,12,15,6,8};
1342 /* Number of CFI Switchover Index is equals to 7 TDD Indexes + 1 FDD index */
1343 U8 rgSchCfiSwitchOvrWinLen[] = {7, 4, 2, 3, 2, 1, 6, 8};
1345 /* EffTbl is calculated for single layer and two layers.
1346 * CqiToTbs is calculated for single layer and two layers */
1347 RgSchCmnTbSzEff rgSchCmnNorCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1348 RgSchCmnTbSzEff rgSchCmnNorCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1349 /* New variable to store UL effiency values for normal and extended CP*/
1350 RgSchCmnTbSzEff rgSchCmnNorUlEff[1],rgSchCmnExtUlEff[1];
1351 RgSchCmnCqiToTbs rgSchCmnNorCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1352 RgSchCmnCqiToTbs rgSchCmnNorCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnNorCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1353 RgSchCmnCqiToTbs *rgSchCmnCqiToTbs[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_CFI];
1354 RgSchCmnTbSzEff rgSchCmnExtCfi1Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2Eff[RGSCH_MAX_NUM_LYR_PERCW];
1355 RgSchCmnTbSzEff rgSchCmnExtCfi3Eff[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4Eff[RGSCH_MAX_NUM_LYR_PERCW];
1356 RgSchCmnCqiToTbs rgSchCmnExtCfi1CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi2CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1357 RgSchCmnCqiToTbs rgSchCmnExtCfi3CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW], rgSchCmnExtCfi4CqiToTbs[RGSCH_MAX_NUM_LYR_PERCW];
1358 /* Include CRS REs while calculating Efficiency */
1359 RgSchCmnTbSzEff *rgSchCmnEffTbl[RGSCH_MAX_NUM_LYR_PERCW][RG_SCH_CMN_MAX_CP][RG_SCH_CMN_MAX_ANT_CONF][RG_SCH_CMN_MAX_CFI];
1360 RgSchCmnTbSzEff *rgSchCmnUlEffTbl[RG_SCH_CMN_MAX_CP];
1362 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3, 1};
1364 /* Added matrix 'rgRaPrmblToRaFrmTbl'for computation of RA sub-frames from RA preamble */
1365 RgSchRaPrmblToRaFrmTbl rgRaPrmblToRaFrmTbl = {1, 2, 2, 3};
1368 EXTERN RgUlSchdInits rgSchUlSchdInits;
1369 EXTERN RgDlSchdInits rgSchDlSchdInits;
1370 EXTERN RgDlfsSchdInits rgSchDlfsSchdInits;
1372 EXTERN RgEmtcUlSchdInits rgSchEmtcUlSchdInits;
1373 EXTERN RgEmtcDlSchdInits rgSchEmtcDlSchdInits;
1377 PRIVATE S16 rgSCHCmnUeIdleExdThrsld ARGS((
1381 RgSchUeCb* rgSCHCmnGetHoUe ARGS((
1385 PRIVATE Void rgSCHCmnDelDedPreamble ARGS((
1389 RgSchUeCb* rgSCHCmnGetPoUe ARGS((
1392 CmLteTimingInfo timingInfo
1394 PRIVATE Void rgSCHCmnDelRachInfo ARGS((
1398 PRIVATE S16 rgSCHCmnUlRbAllocForPoHoUe ARGS((
1404 PRIVATE Void rgSCHCmnHdlHoPo ARGS((
1406 CmLListCp *raRspLst,
1407 RgSchRaReqInfo *raReq
1409 PRIVATE Void rgSCHCmnAllocPoHoGrnt ARGS((
1411 CmLListCp *raRspLst,
1413 RgSchRaReqInfo *raReq
1415 PRIVATE Void rgSCHCmnFillPdcchOdr2Sf ARGS((
1422 PRIVATE Void rgSCHCmnDlAdd2PdcchOdrQ ARGS((
1426 PRIVATE Void rgSCHCmnDlRmvFrmPdcchOdrQ ARGS((
1430 PRIVATE Void rgSCHCmnUpdNxtPrchMskIdx ARGS((
1433 PRIVATE Void rgSCHCmnUpdRachParam ARGS((
1436 PRIVATE S16 rgSCHCmnAllocPOParam ARGS((
1444 PRIVATE Void rgSCHCmnGenPdcchOrder ARGS((
1448 PRIVATE Void rgSCHCmnCfgRachDedPrm ARGS((
1453 PRIVATE Void rgSCHCmnHdlUlInactUes ARGS((
1456 PRIVATE Void rgSCHCmnHdlDlInactUes ARGS((
1459 PRIVATE Void rgSCHCmnUlInit ARGS((Void
1461 PRIVATE Void rgSCHCmnDlInit ARGS((Void
1463 PRIVATE Void rgSCHCmnInitDlRbAllocInfo ARGS((
1464 RgSchCmnDlRbAllocInfo *allocInfo
1466 PRIVATE Void rgSCHCmnUpdUlCompEffBsr ARGS((
1470 PRIVATE Void rgSCHCmnUlSetAllUnSched ARGS((
1471 RgSchCmnUlRbAllocInfo *allocInfo
1473 PRIVATE Void rgSCHCmnUlUpdSf ARGS((
1475 RgSchCmnUlRbAllocInfo *allocInfo,
1478 PRIVATE Void rgSCHCmnUlHndlAllocRetx ARGS((
1480 RgSchCmnUlRbAllocInfo *allocInfo,
1485 PRIVATE Void rgSCHCmnGrpPwrCntrlPucch ARGS((
1489 PRIVATE Void rgSCHCmnGrpPwrCntrlPusch ARGS((
1493 PRIVATE Void rgSCHCmnDelUeFrmRefreshQ ARGS((
1497 PRIVATE S16 rgSCHCmnTmrExpiry ARGS((
1498 PTR cb, /* Pointer to timer control block */
1499 S16 tmrEvnt /* Timer Event */
1501 PRIVATE S16 rgSCHCmnTmrProc ARGS((
1504 PRIVATE Void rgSCHCmnAddUeToRefreshQ ARGS((
1509 PRIVATE Void rgSCHCmnDlCcchRetx ARGS((
1511 RgSchCmnDlRbAllocInfo *allocInfo
1513 PRIVATE Void rgSCHCmnUpdUeMimoInfo ARGS((
1517 RgSchCmnCell *cellSchd
1519 PRIVATE Void rgSCHCmnUpdUeUlCqiInfo ARGS((
1523 RgSchCmnUe *ueSchCmn,
1524 RgSchCmnCell *cellSchd,
1528 PRIVATE Void rgSCHCmnDlCcchSduRetx ARGS((
1530 RgSchCmnDlRbAllocInfo *allocInfo
1532 PRIVATE Void rgSCHCmnDlCcchSduTx ARGS((
1534 RgSchCmnDlRbAllocInfo *allocInfo
1536 PRIVATE S16 rgSCHCmnCcchSduAlloc ARGS((
1539 RgSchCmnDlRbAllocInfo *allocInfo
1541 PRIVATE S16 rgSCHCmnCcchSduDedAlloc ARGS((
1545 PRIVATE S16 rgSCHCmnNonDlfsCcchSduRbAlloc ARGS((
1551 PRIVATE Void rgSCHCmnInitVars ARGS((
1555 /*ccpu00117180 - DEL - Moved rgSCHCmnUpdVars to .x as its access is now */
1556 PRIVATE Void rgSCHCmnUlRbAllocForLst ARGS((
1562 CmLListCp *nonSchdLst,
1565 PRIVATE S16 rgSCHCmnUlRbAllocForUe ARGS((
1572 PRIVATE Void rgSCHCmnMsg3GrntReq ARGS((
1576 RgSchUlHqProcCb *hqProc,
1577 RgSchUlAlloc **ulAllocRef,
1580 PRIVATE Void rgSCHCmnDlCcchRarAlloc ARGS((
1583 PRIVATE Void rgSCHCmnDlCcchTx ARGS((
1585 RgSchCmnDlRbAllocInfo *allocInfo
1587 PRIVATE Void rgSCHCmnDlBcchPcch ARGS((
1589 RgSchCmnDlRbAllocInfo *allocInfo,
1590 RgInfSfAlloc *subfrmAlloc
1592 Bool rgSCHCmnChkInWin ARGS((
1593 CmLteTimingInfo frm,
1594 CmLteTimingInfo start,
1597 Bool rgSCHCmnChkPastWin ARGS((
1598 CmLteTimingInfo frm,
1601 PRIVATE Void rgSCHCmnClcAlloc ARGS((
1604 RgSchClcDlLcCb *lch,
1606 RgSchCmnDlRbAllocInfo *allocInfo
1609 PRIVATE Void rgSCHCmnClcRbAlloc ARGS((
1620 PRIVATE S16 rgSCHCmnMsg4Alloc ARGS((
1623 RgSchCmnDlRbAllocInfo *allocInfo
1625 PRIVATE S16 rgSCHCmnMsg4DedAlloc ARGS((
1629 PRIVATE Void rgSCHCmnDlRaRsp ARGS((
1631 RgSchCmnDlRbAllocInfo *allocInfo
1633 PRIVATE S16 rgSCHCmnRaRspAlloc ARGS((
1639 RgSchCmnDlRbAllocInfo *allocInfo
1641 PRIVATE Void rgSCHCmnUlUeDelAllocs ARGS((
1645 PRIVATE Void rgSCHCmnDlSetUeAllocLmt ARGS((
1650 PRIVATE S16 rgSCHCmnDlRgrCellCfg ARGS((
1655 PRIVATE Void rgSCHCmnUlAdapRetx ARGS((
1656 RgSchUlAlloc *alloc,
1657 RgSchUlHqProcCb *proc
1659 PRIVATE Void rgSCHCmnUlUpdAllocRetx ARGS((
1663 PRIVATE Void rgSCHCmnUlSfReTxAllocs ARGS((
1667 /* Fix: syed Adaptive Msg3 Retx crash. */
1669 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg ARGS
1673 RgrUeRecfg *ueRecfg,
1677 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg ARGS
1687 * DL RB allocation specific functions
1690 PRIVATE Void rgSCHCmnDlRbAlloc ARGS((
1692 RgSchCmnDlRbAllocInfo *allocInfo
1694 PRIVATE Void rgSCHCmnNonDlfsRbAlloc ARGS((
1696 RgSchCmnDlRbAllocInfo *allocInfo
1698 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc ARGS((
1700 RgSchDlRbAlloc *cmnAllocInfo));
1703 PRIVATE Void rgSCHCmnNonDlfsPbchRbAllocAdj ARGS((
1705 RgSchDlRbAlloc *cmnAllocInfo,
1709 /* Added function to adjust TBSize*/
1710 PRIVATE Void rgSCHCmnNonDlfsPbchTbSizeAdj ARGS((
1711 RgSchDlRbAlloc *allocInfo,
1712 U8 numOvrlapgPbchRb,
1718 /* Added function to find num of overlapping PBCH rb*/
1719 PRIVATE Void rgSCHCmnFindNumPbchOvrlapRbs ARGS((
1722 RgSchDlRbAlloc *allocInfo,
1723 U8 *numOvrlapgPbchRb
1726 PRIVATE U8 rgSCHCmnFindNumAddtlRbsAvl ARGS((
1729 RgSchDlRbAlloc *allocInfo
1733 PRIVATE Void rgSCHCmnFindCodeRate ARGS((
1736 RgSchDlRbAlloc *allocInfo,
1742 PRIVATE Void rgSCHCmnNonDlfsMsg4Alloc ARGS((
1744 RgSchCmnMsg4RbAlloc *msg4AllocInfo,
1747 PRIVATE S16 rgSCHCmnNonDlfsMsg4RbAlloc ARGS((
1753 PRIVATE S16 rgSCHCmnNonDlfsUeRbAlloc ARGS((
1760 PRIVATE U32 rgSCHCmnCalcRiv ARGS(( U8 bw,
1766 PRIVATE Void rgSCHCmnUpdHqAndDai ARGS((
1767 RgSchDlHqProcCb *hqP,
1769 RgSchDlHqTbCb *tbCb,
1772 PRIVATE S16 rgSCHCmnUlCalcAvailBw ARGS((
1774 RgrCellCfg *cellCfg,
1779 PRIVATE S16 rgSCHCmnDlKdashUlAscInit ARGS((
1782 PRIVATE S16 rgSCHCmnDlANFdbkInit ARGS((
1785 PRIVATE S16 rgSCHCmnDlNpValInit ARGS((
1788 PRIVATE S16 rgSCHCmnDlCreateRachPrmLst ARGS((
1791 PRIVATE S16 rgSCHCmnDlCpyRachInfo ARGS((
1793 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
1796 PRIVATE S16 rgSCHCmnDlRachInfoInit ARGS((
1799 PRIVATE S16 rgSCHCmnDlPhichOffsetInit ARGS((
1804 PRIVATE Void rgSCHCmnFindUlCqiUlTxAnt ARGS
1810 PRIVATE RgSchCmnRank rgSCHCmnComputeRank ARGS
1817 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode3 ARGS
1822 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode3 ARGS
1827 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode4 ARGS
1832 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode4 ARGS
1837 PRIVATE U8 rgSCHCmnCalcWcqiFrmSnr ARGS
1844 /* comcodsepa : start */
1847 * @brief This function computes efficiency and stores in a table.
1851 * Function: rgSCHCmnCompEff
1852 * Purpose: this function computes the efficiency as number of
1853 * bytes per 1024 symbols. The CFI table is also filled
1854 * with the same information such that comparison is valid
1856 * Invoked by: Scheduler
1858 * @param[in] U8 noPdcchSym
1859 * @param[in] U8 cpType
1860 * @param[in] U8 txAntIdx
1861 * @param[in] RgSchCmnTbSzEff* effTbl
/* Computes DL spectral efficiency (TB bits per 1024 REs, averaged over all
 * RB counts) for a given CFI (noPdcchSym), CP type and Tx-antenna index,
 * storing one value per TBS index into *effTbl.
 * NOTE(review): this chunk is an extraction with interior lines dropped
 * (ANSI/K&R #ifdef scaffolding, switch bodies, braces) and a stray
 * original-line-number token at the start of every line; the visible code
 * text is preserved byte-for-byte below. */
1866 PRIVATE Void rgSCHCmnCompEff
1871 RgSchCmnTbSzEff *effTbl
1874 PRIVATE Void rgSCHCmnCompEff(noPdcchSym, cpType, txAntIdx, effTbl)
1878 RgSchCmnTbSzEff *effTbl;
1883 U8 resOfCrs; /* Effective REs occupied by CRS */
/* cpType selects symbols per RB (normal vs extended CP); the assignments
 * were dropped by the extraction -- only the case labels remain. */
1889 case RG_SCH_CMN_NOR_CP:
1892 case RG_SCH_CMN_EXT_CP:
1896 /* Generate a log error. This case should never be executed */
1900 /* Depending on the Tx Antenna Index, deduct the
1901 * Resource elements for the CRS */
1905 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
1908 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
1911 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
1914 /* Generate a log error. This case should never be executed */
/* Usable REs per RB = (symbols left after PDCCH) * subcarriers - CRS REs */
1917 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
1918 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1921 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1923 /* This line computes the coding efficiency per 1024 REs */
/* rgTbSzTbl[0] = single-layer TB sizes; j+1 is the RB count for column j */
1924 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
/* Average the accumulated per-RB-count efficiencies */
1926 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1931 * @brief This function computes efficiency and stores in a table.
1935 * Function: rgSCHCmnCompUlEff
1936 * Purpose: this function computes the efficiency as number of
1937 * bytes per 1024 symbols. The CFI table is also filled
1938 * with the same information such that comparison is valid
1940 * Invoked by: Scheduler
1942 * @param[in] U8 noUlRsSym
1943 * @param[in] U8 cpType
1944 * @param[in] U8 txAntIdx
1945 * @param[in] RgSchCmnTbSzEff* effTbl
/* Computes UL spectral efficiency (TB bits per 1024 REs, averaged over all
 * RB counts) for a given CP type, deducting the UL reference-signal symbols
 * (noUlRsSym) instead of PDCCH symbols; result goes into *effTbl.
 * NOTE(review): extraction dropped interior lines (scaffolding, switch
 * bodies) and prefixed each line with a stray original-line-number token;
 * visible code text preserved byte-for-byte. */
1950 PRIVATE Void rgSCHCmnCompUlEff
1954 RgSchCmnTbSzEff *effTbl
1957 PRIVATE Void rgSCHCmnCompUlEff(noUlRsSym, cpType, effTbl)
1960 RgSchCmnTbSzEff *effTbl;
/* cpType selects symbols per RB; assignment lines were dropped here. */
1970 case RG_SCH_CMN_NOR_CP:
1973 case RG_SCH_CMN_EXT_CP:
1977 /* Generate a log error. This case should never be executed */
/* Usable REs per RB = (symbols left after UL RS) * subcarriers; no CRS in UL */
1981 noResPerRb = ((noSymPerRb - noUlRsSym) * RB_SCH_CMN_NUM_SCS_PER_RB);
1982 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
1985 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
1987 /* This line computes the coding efficiency per 1024 REs */
1988 (*effTbl)[i] += (rgTbSzTbl[0][i][j] * 1024) / (noResPerRb * (j+1));
/* Average over all RB counts */
1990 (*effTbl)[i] /= RG_SCH_CMN_NUM_RBS;
1996 * @brief This function computes efficiency for 2 layers and stores in a table.
2000 * Function: rgSCHCmn2LyrCompEff
2001 * Purpose: this function computes the efficiency as number of
2002 * bytes per 1024 symbols. The CFI table is also filled
2003 * with the same information such that comparison is valid
2005 * Invoked by: Scheduler
2007 * @param[in] U8 noPdcchSym
2008 * @param[in] U8 cpType
2009 * @param[in] U8 txAntIdx
2010 * @param[in] RgSchCmnTbSzEff* effTbl2Lyr
/* Two-layer counterpart of rgSCHCmnCompEff: computes DL efficiency per
 * 1024 REs using the two-layer TB size table (rgTbSzTbl[1]) and stores the
 * per-TBS averages into *effTbl2Lyr.
 * NOTE(review): extraction dropped interior lines and prefixed each line
 * with a stray original-line-number token; visible code text preserved
 * byte-for-byte. */
2015 PRIVATE Void rgSCHCmn2LyrCompEff
2020 RgSchCmnTbSzEff *effTbl2Lyr
2023 PRIVATE Void rgSCHCmn2LyrCompEff(noPdcchSym, cpType, txAntIdx, effTbl2Lyr)
2027 RgSchCmnTbSzEff *effTbl2Lyr;
2032 U8 resOfCrs; /* Effective REs occupied by CRS */
/* cpType selects symbols per RB; assignment lines were dropped here. */
2038 case RG_SCH_CMN_NOR_CP:
2041 case RG_SCH_CMN_EXT_CP:
2045 /* Generate a log error. This case should never be executed */
2049 /* Depending on the Tx Antenna Index, deduct the
2050 * Resource elements for the CRS */
2054 resOfCrs = RG_SCH_CMN_EFF_CRS_ONE_ANT_PORT;
2057 resOfCrs = RG_SCH_CMN_EFF_CRS_TWO_ANT_PORT;
2060 resOfCrs = RG_SCH_CMN_EFF_CRS_FOUR_ANT_PORT;
2063 /* Generate a log error. This case should never be executed */
2067 noResPerRb = ((noSymPerRb - noPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - resOfCrs;
2068 for (i = 0; i < RG_SCH_CMN_NUM_TBS; i++)
/* Explicit reset before accumulation (unlike the 1-layer variant, where
 * the reset line was lost in extraction) */
2070 (*effTbl2Lyr)[i] = 0;
2071 for (j = 0; j < RG_SCH_CMN_NUM_RBS; j++)
2073 /* This line computes the coding efficiency per 1024 REs */
/* rgTbSzTbl[1] = two-layer TB sizes; j+1 is the RB count for column j */
2074 (*effTbl2Lyr)[i] += (rgTbSzTbl[1][i][j] * 1024) / (noResPerRb * (j+1));
2076 (*effTbl2Lyr)[i] /= RG_SCH_CMN_NUM_RBS;
2083 * @brief This function initializes the rgSchCmnDciFrmtSizes table.
2087 * Function: rgSCHCmnGetDciFrmtSizes
2088 * Purpose: This function determines the sizes of all
2089 * the available DCI Formats. The order of
2090 * bits addition for each format is inaccordance
2092 * Invoked by: rgSCHCmnRgrCellCfg
/* Fills rgSchCmnDciFrmtSizes[0..9] with the bit-size of each DCI format
 * (0, 1, 1A, 1B, 1C, 1D, 2, 2A, 3, 3A) for this cell's bandwidth, RBG size
 * and antenna-port count. Invoked by rgSCHCmnRgrCellCfg.
 * NOTE(review): extraction dropped many interior summand lines, so each
 * size expression below is only partially visible; every remaining line
 * also carries a stray original-line-number token. Visible code text is
 * preserved byte-for-byte. */
2098 PRIVATE Void rgSCHCmnGetDciFrmtSizes
2103 PRIVATE Void rgSCHCmnGetDciFrmtSizes(cell)
2109 /* DCI Format 0 size determination */
/* RB assignment field: ceil(log2(N_UL_RB * (N_UL_RB + 1) / 2)) bits */
2110 rgSchCmnDciFrmtSizes[0] = 1 +
2112 rgSCHUtlLog32bitNbase2((cell->bwCfg.ulTotalBw * \
2113 (cell->bwCfg.ulTotalBw + 1))/2) +
2123 /* DCI Format 1 size determination */
/* Type-0 resource allocation bitmap: one bit per RBG */
2124 rgSchCmnDciFrmtSizes[1] = 1 +
2125 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
2130 4 + 2 + /* HqProc Id and DAI */
2136 /* DCI Format 1A size determination */
2137 rgSchCmnDciFrmtSizes[2] = 1 + /* Flag for format0/format1a differentiation */
2138 1 + /* Localized/distributed VRB assignment flag */
2141 3 + /* Harq process Id */
2143 4 + /* Harq process Id */
2144 2 + /* UL Index or DAI */
2146 1 + /* New Data Indicator */
2149 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2150 (cell->bwCfg.dlTotalBw + 1))/2);
2151 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
2152 Since VRB is local */
2154 /* DCI Format 1B size determination */
2155 rgSchCmnDciFrmtSizes[3] = 1 +
2156 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2157 (cell->bwCfg.dlTotalBw + 1))/2) +
/* TPMI precoding info: 4 bits for 4 Tx ports, else 2 */
2167 ((cell->numTxAntPorts == 4)? 4:2) +
2170 /* DCI Format 1C size determination */
2171 /* Approximation: NDLVrbGap1 ~= Nprb for DL */
/* NOTE(review): the two conditional expressions below chain unparenthesized
 * "?:" with "+"; given ?:'s low precedence this parses as
 * a ? 0 : (1 + (b ? x : y) + ...), which may not be the intended grouping.
 * Confirm against 36.212 and add parentheses if wrong. */
2172 rgSchCmnDciFrmtSizes[4] = (cell->bwCfg.dlTotalBw < 50)? 0:1 +
2173 (cell->bwCfg.dlTotalBw < 50)?
2174 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/2 * \
2175 (cell->bwCfg.dlTotalBw/2 + 1))/2)) :
2176 (rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw/4 * \
2177 (cell->bwCfg.dlTotalBw/4 + 1))/2)) +
2180 /* DCI Format 1D size determination */
2181 rgSchCmnDciFrmtSizes[5] = 1 +
2182 rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
2183 (cell->bwCfg.dlTotalBw + 1))/2) +
2192 ((cell->numTxAntPorts == 4)? 4:2) +
2195 /* DCI Format 2 size determination */
2196 rgSchCmnDciFrmtSizes[6] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2197 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
/* Precoding information: 6 bits for 4 Tx ports, else 3 */
2205 ((cell->numTxAntPorts == 4)? 6:3);
2207 /* DCI Format 2A size determination */
2208 rgSchCmnDciFrmtSizes[7] = ((cell->bwCfg.dlTotalBw < 10)?0:1) +
2209 RGSCH_CEIL(cell->bwCfg.dlTotalBw, cell->rbgSize) +
/* Precoding information: 2 bits for 4 Tx ports, absent otherwise */
2217 ((cell->numTxAntPorts == 4)? 2:0);
2219 /* DCI Format 3 size determination */
/* Formats 3/3A (TPC) are sized equal to format 0 by definition */
2220 rgSchCmnDciFrmtSizes[8] = rgSchCmnDciFrmtSizes[0];
2222 /* DCI Format 3A size determination */
2223 rgSchCmnDciFrmtSizes[9] = rgSchCmnDciFrmtSizes[0];
2230 * @brief This function initializes the cmnCell->dciAggrLvl table.
2234 * Function: rgSCHCmnGetCqiDciFrmt2AggrLvl
2235 * Purpose: This function determines the Aggregation level
2236 * for each CQI level against each DCI format.
2237 * Invoked by: rgSCHCmnRgrCellCfg
/* Builds cellSch->dciAggrLvl[cqi][format]: for every CQI level and each of
 * the 10 DCI formats, picks the smallest PDCCH aggregation level whose
 * capacity covers the format's size scaled by the CQI's PDCCH efficiency.
 * Invoked by rgSCHCmnRgrCellCfg, after rgSCHCmnGetDciFrmtSizes.
 * NOTE(review): extraction dropped interior lines (braces, else branches)
 * and prefixed each line with a stray original-line-number token; visible
 * code text preserved byte-for-byte. The 192/384/768 thresholds are
 * presumably the usable-bit capacities of aggregation levels 2/4/8 --
 * confirm against the PDCCH CCE sizing used in this tree. */
2243 PRIVATE Void rgSCHCmnGetCqiDciFrmt2AggrLvl
2248 PRIVATE Void rgSCHCmnGetCqiDciFrmt2AggrLvl(cell)
2252 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2257 for (i = 0; i < RG_SCH_CMN_MAX_CQI; i++)
2259 for (j = 0; j < 10; j++)
2261 U32 pdcchBits; /* Actual number of phy bits needed for a given DCI Format
2262 * for a given CQI Level */
/* Scale the raw DCI size by the per-CQI coding efficiency (per-1024 basis) */
2263 pdcchBits = (rgSchCmnDciFrmtSizes[j] * 1024)/rgSchCmnCqiPdcchEff[i];
2265 if (pdcchBits < 192)
2267 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL2;
2270 if (pdcchBits < 384)
2272 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL4;
2275 if (pdcchBits < 768)
2277 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL8;
/* Fallback: largest aggregation level */
2280 cellSch->dciAggrLvl[i][j] = CM_LTE_AGGR_LVL16;
2287 * @brief This function initializes all the data for the scheduler.
2291 * Function: rgSCHCmnDlInit
2292 * Purpose: This function initializes the following information:
2293 * 1. Efficiency table
2294 * 2. CQI to table index - It is one row for upto 3 RBs
2295 * and another row for greater than 3 RBs
2296 * currently extended prefix is compiled out.
2297 * Invoked by: MAC intialization code..may be ActvInit
2303 PRIVATE Void rgSCHCmnDlInit
2307 PRIVATE Void rgSCHCmnDlInit()
2314 RgSchCmnTbSzEff *effTbl;
2315 RgSchCmnCqiToTbs *tbsTbl;
2318 /* 0 corresponds to Single layer case, 1 corresponds to 2 layers case*/
2319 /* Init Efficiency table for normal cyclic prefix */
2320 /*Initialize Efficiency table for Layer Index 0 */
2321 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2322 /*Initialize Efficiency table for each of the CFI indices. The
2323 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2324 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[0];
2325 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[0];
2326 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[0];
2327 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[0];
2328 /*Initialize Efficency table for Tx Antenna Port Index 1 */
2329 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[0];
2330 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[0];
2331 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[0];
2332 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[0];
2333 /*Initialize Efficency table for Tx Antenna Port Index 2 */
2334 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[0];
2335 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[0];
2336 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[0];
2337 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[0];
2339 /*Initialize CQI to TBS table for Layer Index 0 for Normal CP */
2340 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[0];
2341 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[0];
2342 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[0];
2343 rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[0];
2345 /*Intialize Efficency table for Layer Index 1 */
2346 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2347 /*Initialize Efficiency table for each of the CFI indices. The
2348 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2349 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][0] = &rgSchCmnNorCfi1Eff[1];
2350 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][1] = &rgSchCmnNorCfi2Eff[1];
2351 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][2] = &rgSchCmnNorCfi3Eff[1];
2352 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][0][3] = &rgSchCmnNorCfi4Eff[1];
2353 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2354 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][0] = &rgSchCmnNorCfi1Eff[1];
2355 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][1] = &rgSchCmnNorCfi2Eff[1];
2356 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][2] = &rgSchCmnNorCfi3Eff[1];
2357 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][1][3] = &rgSchCmnNorCfi4Eff[1];
2358 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2359 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][0] = &rgSchCmnNorCfi1Eff[1];
2360 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][1] = &rgSchCmnNorCfi2Eff[1];
2361 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][2] = &rgSchCmnNorCfi3Eff[1];
2362 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][2][3] = &rgSchCmnNorCfi4Eff[1];
2364 /*Initialize CQI to TBS table for Layer Index 1 for Normal CP */
2365 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][0] = &rgSchCmnNorCfi1CqiToTbs[1];
2366 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][1] = &rgSchCmnNorCfi2CqiToTbs[1];
2367 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][2] = &rgSchCmnNorCfi3CqiToTbs[1];
2368 rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][3] = &rgSchCmnNorCfi4CqiToTbs[1];
2370 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2372 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2374 /* EfficiencyTbl calculation incase of 2 layers for normal CP */
2375 rgSCHCmnCompEff((U8)(i + 1), RG_SCH_CMN_NOR_CP, idx,\
2376 rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i]);
2377 rgSCHCmn2LyrCompEff((U8)(i + 1), RG_SCH_CMN_NOR_CP, idx, \
2378 rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i]);
2382 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2384 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2386 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_NOR_CP][idx][i];
2387 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_NOR_CP][i];
2388 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2389 (j >= 0) && (k > 0); --j)
2391 /* ADD CQI to MCS mapping correction
2392 * single dimensional array is replaced by 2 dimensions for different CFI*/
2393 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2395 (*tbsTbl)[k--] = (U8)j;
2402 /* effTbl,tbsTbl calculation incase of 2 layers for normal CP */
2403 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_NOR_CP][idx][i];
2404 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_NOR_CP][i];
2405 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2406 (j >= 0) && (k > 0); --j)
2408 /* ADD CQI to MCS mapping correction
2409 * single dimensional array is replaced by 2 dimensions for different CFI*/
2410 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2412 (*tbsTbl)[k--] = (U8)j;
2422 /* Efficiency Table for Extended CP */
2423 /*Initialize Efficiency table for Layer Index 0 */
2424 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2425 /*Initialize Efficiency table for each of the CFI indices. The
2426 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2427 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[0];
2428 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[0];
2429 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[0];
2430 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[0];
2431 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2432 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[0];
2433 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[0];
2434 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[0];
2435 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[0];
2436 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2437 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[0];
2438 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[0];
2439 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[0];
2440 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[0];
2442 /*Initialize CQI to TBS table for Layer Index 0 for Extended CP */
2443 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[0];
2444 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[0];
2445 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[0];
2446 rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[0];
2448 /*Initialize Efficiency table for Layer Index 1 */
2449 /*Initialize Efficiency table for each of the CFI indices. The
2450 * 4th Dimension of the rgSCHCmnEffTbl table refers to the CFI Index*/
2451 /*Initialize Efficiency table for Tx Antenna Port Index 0 */
2452 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][0] = &rgSchCmnExtCfi1Eff[1];
2453 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][1] = &rgSchCmnExtCfi2Eff[1];
2454 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][2] = &rgSchCmnExtCfi3Eff[1];
2455 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][0][3] = &rgSchCmnExtCfi4Eff[1];
2456 /*Initialize Efficiency table for Tx Antenna Port Index 1 */
2457 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][0] = &rgSchCmnExtCfi1Eff[1];
2458 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][1] = &rgSchCmnExtCfi2Eff[1];
2459 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][2] = &rgSchCmnExtCfi3Eff[1];
2460 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][1][3] = &rgSchCmnExtCfi4Eff[1];
2461 /*Initialize Efficiency table for Tx Antenna Port Index 2 */
2462 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][0] = &rgSchCmnExtCfi1Eff[1];
2463 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][1] = &rgSchCmnExtCfi2Eff[1];
2464 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][2] = &rgSchCmnExtCfi3Eff[1];
2465 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][2][3] = &rgSchCmnExtCfi4Eff[1];
2467 /*Initialize CQI to TBS table for Layer Index 1 for Extended CP */
2468 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][0] = &rgSchCmnExtCfi1CqiToTbs[1];
2469 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][1] = &rgSchCmnExtCfi2CqiToTbs[1];
2470 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][2] = &rgSchCmnExtCfi3CqiToTbs[1];
2471 rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][3] = &rgSchCmnExtCfi4CqiToTbs[1];
2472 /* Activate this code when extended cp is supported */
2473 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2475 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2477 /* EfficiencyTbl calculation in case of 2 layers for extended CP */
2478 rgSCHCmnCompEff( (U8)(i + 1 ), (U8)RG_SCH_CMN_EXT_CP, idx,\
2479 rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i]);
2480 rgSCHCmn2LyrCompEff((U8)(i + 1), (U8) RG_SCH_CMN_EXT_CP,idx, \
2481 rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i]);
2485 for (idx = 0; idx < RG_SCH_CMN_MAX_ANT_CONF; idx++)
2487 for (i = 0; i < RG_SCH_CMN_MAX_CFI; i++)
2489 effTbl = rgSchCmnEffTbl[0][RG_SCH_CMN_EXT_CP][idx][i];
2490 tbsTbl = rgSchCmnCqiToTbs[0][RG_SCH_CMN_EXT_CP][i];
2491 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2492 (j >= 0) && (k > 0); --j)
2494 /* ADD CQI to MCS mapping correction
2495 * single dimensional array is replaced by 2 dimensions for different CFI*/
2496 if ((*effTbl)[j] <= rgSchCmnCqiPdschEff[i][k])
2498 (*tbsTbl)[k--] = (U8)j;
2505 /* effTbl,tbsTbl calculation incase of 2 layers for extended CP */
2506 effTbl = rgSchCmnEffTbl[1][RG_SCH_CMN_EXT_CP][idx][i];
2507 tbsTbl = rgSchCmnCqiToTbs[1][RG_SCH_CMN_EXT_CP][i];
2508 for (j = RG_SCH_CMN_NUM_TBS - 1, k = RG_SCH_CMN_MAX_CQI - 1;
2509 (j >= 0) && (k > 0); --j)
2511 /* ADD CQI to MCS mapping correction
2512 * single dimensional array is replaced by 2 dimensions for different CFI*/
2513 if ((*effTbl)[j] <= rgSchCmn2LyrCqiPdschEff[i][k])
2515 (*tbsTbl)[k--] = (U8)j;
2528 * @brief This function initializes all the data for the scheduler.
2532 * Function: rgSCHCmnUlInit
2533 * Purpose: This function initializes the following information:
2534 * 1. Efficiency table
2535 * 2. CQI to table index - It is one row for upto 3 RBs
2536 * and another row for greater than 3 RBs
2537 * currently extended prefix is compiled out.
2538 * Invoked by: MAC intialization code..may be ActvInit
2544 PRIVATE Void rgSCHCmnUlInit
2548 PRIVATE Void rgSCHCmnUlInit()
2551 U8 *mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_NOR_CP][0];
2552 RgSchCmnTbSzEff *effTbl = &rgSchCmnNorUlEff[0];
2553 CONSTANT RgSchCmnUlCqiInfo *cqiTbl = &rgSchCmnUlCqiTbl[0];
2557 /* Initaializing new variable added for UL eff */
2558 rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP] = &rgSchCmnNorUlEff[0];
2559 /* Reason behind using 3 as the number of symbols to rule out for
2560 * efficiency table computation would be that we are using 2 symbols for
2561 * DMRS(1 in each slot) and 1 symbol for SRS*/
2562 rgSCHCmnCompUlEff(RGSCH_UL_SYM_DMRS_SRS,RG_SCH_CMN_NOR_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_NOR_CP]);
2564 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2565 i >= 0 && j > 0; --i)
2567 if ((*effTbl)[i] <= cqiTbl[j].eff)
2569 mapTbl[j--] = (U8)i;
2576 effTbl = &rgSchCmnExtUlEff[0];
2577 mapTbl = &rgSchCmnUlCqiToTbsTbl[RG_SCH_CMN_EXT_CP][0];
2579 /* Initaializing new variable added for UL eff */
2580 rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP] = &rgSchCmnExtUlEff[0];
2581 /* Reason behind using 3 as the number of symbols to rule out for
2582 * efficiency table computation would be that we are using 2 symbols for
2583 * DMRS(1 in each slot) and 1 symbol for SRS*/
2584 rgSCHCmnCompUlEff(3,RG_SCH_CMN_EXT_CP,rgSchCmnUlEffTbl[RG_SCH_CMN_EXT_CP]);
2586 for (i = RGSCH_NUM_ITBS - 1, j = RG_SCH_CMN_UL_NUM_CQI - 1;
2587 i >= 0 && j > 0; --i)
2589 if ((*effTbl)[i] <= cqiTbl[j].eff)
2591 mapTbl[j--] = (U8)i;
2603 * @brief This function initializes all the data for the scheduler.
2607 * Function: rgSCHCmnInit
2608 * Purpose: This function initializes the following information:
2609 * 1. Efficiency table
2610 * 2. CQI to table index - It is one row for upto 3 RBs
2611 * and another row for greater than 3 RBs
2612 * currently extended prefix is compiled out.
2613 * Invoked by: MAC initialization code..may be ActvInit
/* EMTC-specific DL/UL initialization. */
2631 rgSCHEmtcCmnDlInit();
2632 rgSCHEmtcCmnUlInit();
2638 /* Init the function pointers */
/* Register every common-scheduler entry point in the rgSchCmnApis
 * dispatch table; the scheduler framework invokes these callbacks. */
2639 rgSchCmnApis.rgSCHRgrUeCfg = rgSCHCmnRgrUeCfg;
2640 rgSchCmnApis.rgSCHRgrUeRecfg = rgSCHCmnRgrUeRecfg;
2641 rgSchCmnApis.rgSCHFreeUe = rgSCHCmnUeDel;
2642 rgSchCmnApis.rgSCHRgrCellCfg = rgSCHCmnRgrCellCfg;
2643 rgSchCmnApis.rgSCHRgrCellRecfg = rgSCHCmnRgrCellRecfg;
2644 rgSchCmnApis.rgSCHFreeCell = rgSCHCmnCellDel;
2645 rgSchCmnApis.rgSCHRgrLchCfg = rgSCHCmnRgrLchCfg;
2646 rgSchCmnApis.rgSCHRgrLcgCfg = rgSCHCmnRgrLcgCfg;
2647 rgSchCmnApis.rgSCHRgrLchRecfg = rgSCHCmnRgrLchRecfg;
2648 rgSchCmnApis.rgSCHRgrLcgRecfg = rgSCHCmnRgrLcgRecfg;
2649 rgSchCmnApis.rgSCHFreeDlLc = rgSCHCmnFreeDlLc;
2650 rgSchCmnApis.rgSCHFreeLcg = rgSCHCmnLcgDel;
2651 rgSchCmnApis.rgSCHRgrLchDel = rgSCHCmnRgrLchDel;
2652 rgSchCmnApis.rgSCHActvtUlUe = rgSCHCmnActvtUlUe;
2653 rgSchCmnApis.rgSCHActvtDlUe = rgSCHCmnActvtDlUe;
2654 rgSchCmnApis.rgSCHHdlUlTransInd = rgSCHCmnHdlUlTransInd;
2655 rgSchCmnApis.rgSCHDlDedBoUpd = rgSCHCmnDlDedBoUpd;
2656 rgSchCmnApis.rgSCHUlRecMsg3Alloc = rgSCHCmnUlRecMsg3Alloc;
2657 rgSchCmnApis.rgSCHUlCqiInd = rgSCHCmnUlCqiInd;
2658 rgSchCmnApis.rgSCHPucchDeltaPwrInd = rgSCHPwrPucchDeltaInd;
2659 rgSchCmnApis.rgSCHUlHqProcForUe = rgSCHCmnUlHqProcForUe;
2661 rgSchCmnApis.rgSCHUpdUlHqProc = rgSCHCmnUpdUlHqProc;
2663 rgSchCmnApis.rgSCHUpdBsrShort = rgSCHCmnUpdBsrShort;
2664 rgSchCmnApis.rgSCHUpdBsrTrunc = rgSCHCmnUpdBsrTrunc;
2665 rgSchCmnApis.rgSCHUpdBsrLong = rgSCHCmnUpdBsrLong;
2666 rgSchCmnApis.rgSCHUpdPhr = rgSCHCmnUpdPhr;
2667 rgSchCmnApis.rgSCHUpdExtPhr = rgSCHCmnUpdExtPhr;
2668 rgSchCmnApis.rgSCHContResUlGrant = rgSCHCmnContResUlGrant;
2669 rgSchCmnApis.rgSCHSrRcvd = rgSCHCmnSrRcvd;
2670 rgSchCmnApis.rgSCHFirstRcptnReq = rgSCHCmnFirstRcptnReq;
2671 rgSchCmnApis.rgSCHNextRcptnReq = rgSCHCmnNextRcptnReq;
2672 rgSchCmnApis.rgSCHFirstHqFdbkAlloc = rgSCHCmnFirstHqFdbkAlloc;
2673 rgSchCmnApis.rgSCHNextHqFdbkAlloc = rgSCHCmnNextHqFdbkAlloc;
2674 rgSchCmnApis.rgSCHDlProcAddToRetx = rgSCHCmnDlProcAddToRetx;
2675 rgSchCmnApis.rgSCHDlCqiInd = rgSCHCmnDlCqiInd;
2677 rgSchCmnApis.rgSCHUlProcAddToRetx = rgSCHCmnEmtcUlProcAddToRetx;
2680 rgSchCmnApis.rgSCHSrsInd = rgSCHCmnSrsInd;
2682 rgSchCmnApis.rgSCHDlTARpt = rgSCHCmnDlTARpt;
2683 rgSchCmnApis.rgSCHDlRlsSubFrm = rgSCHCmnDlRlsSubFrm;
2684 rgSchCmnApis.rgSCHUeReset = rgSCHCmnUeReset;
2686 rgSchCmnApis.rgSCHHdlCrntiCE = rgSCHCmnHdlCrntiCE;
2687 rgSchCmnApis.rgSCHDlProcAck = rgSCHCmnDlProcAck;
2688 rgSchCmnApis.rgSCHDlRelPdcchFbk = rgSCHCmnDlRelPdcchFbk;
2689 rgSchCmnApis.rgSCHUlSpsRelInd = rgSCHCmnUlSpsRelInd;
2690 rgSchCmnApis.rgSCHUlSpsActInd = rgSCHCmnUlSpsActInd;
2691 rgSchCmnApis.rgSCHUlCrcFailInd = rgSCHCmnUlCrcFailInd;
2692 rgSchCmnApis.rgSCHUlCrcInd = rgSCHCmnUlCrcInd;
2694 rgSchCmnApis.rgSCHDrxStrtInActvTmrInUl = rgSCHCmnDrxStrtInActvTmrInUl;
2695 rgSchCmnApis.rgSCHUpdUeDataIndLcg = rgSCHCmnUpdUeDataIndLcg;
/* Run the init routine of every registered UL and DL scheduler. */
2697 for (idx = 0; idx < RGSCH_NUM_SCHEDULERS; ++idx)
2699 rgSchUlSchdInits[idx](&rgSchUlSchdTbl[idx]);
2700 rgSchDlSchdInits[idx](&rgSchDlSchdTbl[idx]);
/* Likewise for the EMTC-specific schedulers. */
2703 for (idx = 0; idx < RGSCH_NUM_EMTC_SCHEDULERS; ++idx)
2705 rgSchEmtcUlSchdInits[idx](&rgSchEmtcUlSchdTbl[idx]);
2706 rgSchEmtcDlSchdInits[idx](&rgSchEmtcDlSchdTbl[idx]);
/* DLFS (frequency-selective) scheduler init, compiled in only for
 * Phase-2 scheduling with TFU upgrade. */
2709 #if (defined (RG_PHASE2_SCHED) && defined(TFU_UPGRADE))
2710 for (idx = 0; idx < RGSCH_NUM_DLFS_SCHEDULERS; ++idx)
2712 rgSchDlfsSchdInits[idx](&rgSchDlfsSchdTbl[idx]);
/* Secondary-cell (carrier aggregation) UE configure/delete hooks. */
2716 rgSchCmnApis.rgSCHRgrSCellUeCfg = rgSCHCmnRgrSCellUeCfg;
2717 rgSchCmnApis.rgSCHRgrSCellUeDel = rgSCHCmnRgrSCellUeDel;
2724 * @brief This function is a wrapper to call scheduler specific API.
2728 * Function: rgSCHCmnDlRlsSubFrm
2729 * Purpose: Releases scheduler Information from DL SubFrm.
2733 * @param[in] RgSchCellCb *cell
2734 * @param[out] CmLteTimingInfo frm
2739 Void rgSCHCmnDlRlsSubFrm
2745 Void rgSCHCmnDlRlsSubFrm(cell, frm)
2747 CmLteTimingInfo frm;
2750 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2754 /* Get the pointer to the subframe */
2755 sf = rgSCHUtlSubFrmGet(cell, frm);
/* Return the subframe's allocations to the pool. */
2757 rgSCHUtlSubFrmPut(cell, sf);
2760 /* Re-initialize DLFS specific information for the sub-frame */
2761 cellSch->apisDlfs->rgSCHDlfsReinitSf(cell, sf);
2769 * @brief This function is the starting function for DL allocation.
2773 * Function: rgSCHCmnDlCcchRarAlloc
2774 * Purpose: Scheduling for downlink. It performs allocation in the order
2775 * of priority with BCCH/PCH first, CCCH, Random Access and TA.
2777 * Invoked by: Scheduler
2779 * @param[in] RgSchCellCb* cell
2780 * @param[out] RgSchCmnDlRbAllocInfo* allocInfo
2785 PRIVATE Void rgSCHCmnDlCcchRarAlloc
2790 PRIVATE Void rgSCHCmnDlCcchRarAlloc(cell)
2794 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* CCCH (Msg4) retransmissions take priority over new transmissions. */
2797 rgSCHCmnDlCcchRetx(cell, &cellSch->allocInfo);
2798 /* LTE_ADV_FLAG_REMOVED_START */
/* ABS (almost-blank subframe) handling: skip new CCCH TX when this
 * subframe must be muted for eICIC. */
2799 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2801 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2803 /*eNodeB need to blank the subframe */
2807 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2812 rgSCHCmnDlCcchTx(cell, &cellSch->allocInfo);
2814 /* LTE_ADV_FLAG_REMOVED_END */
2818 /*Added these function calls for processing CCCH SDU arriving
2819 * after guard timer expiry.Functions differ from above two functions
2820 * in using ueCb instead of raCb.*/
2821 rgSCHCmnDlCcchSduRetx(cell, &cellSch->allocInfo);
2822 /* LTE_ADV_FLAG_REMOVED_START */
2823 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2825 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2827 /*eNodeB need to blank the subframe */
2831 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2836 rgSCHCmnDlCcchSduTx(cell, &cellSch->allocInfo);
2838 /* LTE_ADV_FLAG_REMOVED_END */
/* Random Access Response scheduling (carries the Msg3 UL grant). */
2842 if(cellSch->ul.msg3SchdIdx != RGSCH_INVALID_INFO)
2844 /* Do not schedule msg3 if there is a CFI change ongoing */
2845 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2847 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2851 /* LTE_ADV_FLAG_REMOVED_START */
2852 if(RG_SCH_ABS_ENABLED_ABS_SF == cell->lteAdvCb.absDlSfInfo)
2854 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
2856 /*eNodeB need to blank the subframe */
2860 /* Do not schedule msg3 if there is a CFI change ongoing */
2861 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2863 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2869 /* Do not schedule msg3 if there is a CFI change ongoing */
2870 if (cellSch->dl.currCfi == cellSch->dl.newCfi)
2872 rgSCHCmnDlRaRsp(cell, &cellSch->allocInfo);
2875 /* LTE_ADV_FLAG_REMOVED_END */
2883 * @brief Scheduling for CCCH SDU.
2887 * Function: rgSCHCmnCcchSduAlloc
2888 * Purpose: Scheduling for CCCH SDU
2890 * Invoked by: Scheduler
2892 * @param[in] RgSchCellCb* cell
2893 * @param[in] RgSchUeCb* ueCb
2894 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2899 PRIVATE S16 rgSCHCmnCcchSduAlloc
2903 RgSchCmnDlRbAllocInfo *allocInfo
2906 PRIVATE S16 rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)
2909 RgSchCmnDlRbAllocInfo *allocInfo;
2912 RgSchDlRbAlloc *rbAllocInfo;
2913 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2914 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2917 /* Return if subframe BW exhausted */
2918 if (allocInfo->ccchSduAlloc.ccchSduDlSf->bw <=
2919 allocInfo->ccchSduAlloc.ccchSduDlSf->bwAssigned)
2921 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2922 "bw<=bwAssigned for UEID:%d",ueCb->ueId);
/* Acquire a DL HARQ process for this CCCH SDU transmission. */
2926 if (rgSCHDhmGetCcchSduHqProc(ueCb, cellSch->dl.time, &(ueDl->proc)) != ROK)
2928 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2929 "rgSCHDhmGetCcchSduHqProc failed UEID:%d",ueCb->ueId);
2933 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
2934 rbAllocInfo->dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
/* Dedicated RB allocation for the CCCH SDU. */
2936 if (rgSCHCmnCcchSduDedAlloc(cell, ueCb) != ROK)
2938 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
2939 rgSCHDhmRlsHqpTb(ueDl->proc, 0, FALSE);
2940 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
2941 "rgSCHCmnCcchSduDedAlloc failed UEID:%d",ueCb->ueId);
/* Queue the HARQ process on the CCCH SDU TX list and bump the
 * per-subframe scheduled-CCCH-UE counter. */
2944 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduTxLst, &ueDl->proc->reqLnk);
2945 ueDl->proc->reqLnk.node = (PTR)ueDl->proc;
2946 allocInfo->ccchSduAlloc.ccchSduDlSf->schdCcchUe++;
2951 * @brief This function schedules downlink CCCH messages.
2955 * Function: rgSCHCmnDlCcchSduTx
2956 * Purpose: Scheduling for downlink CCCH
2958 * Invoked by: Scheduler
2960 * @param[in] RgSchCellCb *cell
2961 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
2966 PRIVATE Void rgSCHCmnDlCcchSduTx
2969 RgSchCmnDlRbAllocInfo *allocInfo
2972 PRIVATE Void rgSCHCmnDlCcchSduTx(cell, allocInfo)
2974 RgSchCmnDlRbAllocInfo *allocInfo;
2979 RgSchCmnDlUe *ueCmnDl;
2980 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
2982 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
/* Iterate over UEs pending a CCCH SDU transmission. */
2985 node = cell->ccchSduUeLst.first;
/* Stop once the per-subframe CCCH UE budget is spent. */
2988 if(cellSch->dl.maxCcchPerDlSf &&
2989 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
2995 ueCb = (RgSchUeCb *)(node->node);
2996 ueCmnDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
2998 /* Fix : syed postpone scheduling for this
2999 * until msg4 is done */
3000 /* Fix : syed RLC can erroneously send CCCH SDU BO
3001 * twice. Hence an extra guard to avoid if already
3002 * scheduled for RETX */
3003 if ((!(ueCb->dl.dlInactvMask & RG_HQENT_INACTIVE)) &&
3006 if ((rgSCHCmnCcchSduAlloc(cell, ueCb, allocInfo)) != ROK)
3013 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"ERROR!! THIS SHOULD "
3014 "NEVER HAPPEN for UEID:%d", ueCb->ueId);
3024 * @brief This function schedules downlink CCCH (Msg4) messages.
3028 * Function: rgSCHCmnDlCcchTx
3029 * Purpose: Scheduling for downlink CCCH
3031 * Invoked by: Scheduler
3033 * @param[in] RgSchCellCb *cell
3034 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3039 PRIVATE Void rgSCHCmnDlCcchTx
3042 RgSchCmnDlRbAllocInfo *allocInfo
3045 PRIVATE Void rgSCHCmnDlCcchTx(cell, allocInfo)
3047 RgSchCmnDlRbAllocInfo *allocInfo;
3052 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3053 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
/* Iterate over RA control blocks waiting for Msg4 scheduling. */
3056 node = cell->raInfo.toBeSchdLst.first;
/* Stop once the per-subframe CCCH UE budget is spent. */
3059 if(cellSch->dl.maxCcchPerDlSf &&
3060 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3067 raCb = (RgSchRaCb *)(node->node);
3069 /* Address allocation for this UE for MSG 4 */
3070 /* Allocation for Msg4 */
3071 if ((rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)) != ROK)
3082 * @brief This function schedules downlink CCCH SDU retransmissions.
3086 * Function: rgSCHCmnDlCcchSduRetx
3087 * Purpose: Scheduling for downlink CCCH
3089 * Invoked by: Scheduler
3091 * @param[in] RgSchCellCb *cell
3092 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3097 PRIVATE Void rgSCHCmnDlCcchSduRetx
3100 RgSchCmnDlRbAllocInfo *allocInfo
3103 PRIVATE Void rgSCHCmnDlCcchSduRetx(cell, allocInfo)
3105 RgSchCmnDlRbAllocInfo *allocInfo;
3108 RgSchDlRbAlloc *rbAllocInfo;
3110 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3112 RgSchDlHqProcCb *hqP;
3115 RgSchDlSf *dlSf = allocInfo->ccchSduAlloc.ccchSduDlSf;
/* Walk the CCCH SDU retransmission list of HARQ processes. */
3118 node = cellSch->dl.ccchSduRetxLst.first;
3121 if(cellSch->dl.maxCcchPerDlSf &&
3122 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3129 hqP = (RgSchDlHqProcCb *)(node->node);
3132 /* DwPts Scheduling Changes Start */
3134 if (rgSCHCmnRetxAvoidTdd(allocInfo->ccchSduAlloc.ccchSduDlSf,
3140 /* DwPts Scheduling Changes End */
/* Skip if the retransmission does not fit in the remaining BW. */
3142 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3146 ueCb = (RgSchUeCb*)(hqP->hqE->ue);
3147 ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
3149 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
3150 /* Fill RB Alloc Info */
3151 rbAllocInfo->dlSf = dlSf;
3152 rbAllocInfo->tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3153 rbAllocInfo->rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3154 /* Fix : syed iMcs setting did not correspond to RETX */
3155 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3156 rbAllocInfo->tbInfo[0].imcs);
3157 rbAllocInfo->rnti = ueCb->ueId;
3158 rbAllocInfo->tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3159 /* Fix : syed Copying info in entirety without depending on stale TX information */
3160 rbAllocInfo->tbInfo[0].tbCb = &hqP->tbInfo[0];
3161 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
3162 /* Fix : syed Assigning proc to scratchpad */
/* Accumulate the RBs consumed by scheduled retransmissions. */
3165 retxBw += rbAllocInfo->rbsReq;
3167 cmLListAdd2Tail(&allocInfo->ccchSduAlloc.ccchSduRetxLst, \
3169 hqP->reqLnk.node = (PTR)hqP;
/* Commit the retransmission bandwidth to the subframe. */
3173 dlSf->bwAssigned += retxBw;
3179 * @brief This function schedules downlink Msg4 retransmissions.
3183 * Function: rgSCHCmnDlCcchRetx
3184 * Purpose: Scheduling for downlink CCCH
3186 * Invoked by: Scheduler
3188 * @param[in] RgSchCellCb *cell
3189 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3194 PRIVATE Void rgSCHCmnDlCcchRetx
3197 RgSchCmnDlRbAllocInfo *allocInfo
3200 PRIVATE Void rgSCHCmnDlCcchRetx(cell, allocInfo)
3202 RgSchCmnDlRbAllocInfo *allocInfo;
3206 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3208 RgSchDlHqProcCb *hqP;
3210 RgSchDlSf *dlSf = allocInfo->msg4Alloc.msg4DlSf;
/* Walk the Msg4 retransmission list of HARQ processes. */
3213 node = cellSch->dl.msg4RetxLst.first;
3216 if(cellSch->dl.maxCcchPerDlSf &&
3217 dlSf->schdCcchUe == cellSch->dl.maxCcchPerDlSf)
3223 hqP = (RgSchDlHqProcCb *)(node->node);
3227 /* DwPts Scheduling Changes Start */
3229 if (rgSCHCmnRetxAvoidTdd(allocInfo->msg4Alloc.msg4DlSf,
3235 /* DwPts Scheduling Changes End */
/* Skip if the retransmission does not fit in the remaining BW. */
3237 if (hqP->tbInfo[0].dlGrnt.numRb > (dlSf->bw - dlSf->bwAssigned))
3241 raCb = (RgSchRaCb*)(hqP->hqE->raCb);
3242 /* Fill RB Alloc Info */
3243 raCb->rbAllocInfo.dlSf = dlSf;
3244 raCb->rbAllocInfo.tbInfo[0].bytesReq = hqP->tbInfo[0].ccchSchdInfo.totBytes;
3245 raCb->rbAllocInfo.rbsReq = hqP->tbInfo[0].dlGrnt.numRb;
3246 /* Fix : syed iMcs setting did not correspond to RETX */
3247 RG_SCH_CMN_GET_MCS_FOR_RETX((&hqP->tbInfo[0]),
3248 raCb->rbAllocInfo.tbInfo[0].imcs);
3249 raCb->rbAllocInfo.rnti = raCb->tmpCrnti;
3250 raCb->rbAllocInfo.tbInfo[0].noLyr = hqP->tbInfo[0].numLyrs;
3251 /* Fix; syed Copying info in entirety without depending on stale TX information */
3252 raCb->rbAllocInfo.tbInfo[0].tbCb = &hqP->tbInfo[0];
3253 raCb->rbAllocInfo.tbInfo[0].schdlngForTb = TRUE;
3255 retxBw += raCb->rbAllocInfo.rbsReq;
3257 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4RetxLst, \
3259 hqP->reqLnk.node = (PTR)hqP;
/* Commit the retransmission bandwidth to the subframe. */
3263 dlSf->bwAssigned += retxBw;
3269 * @brief This function implements scheduler DL allocation
3270 * for broadcast (on PDSCH) and paging.
3274 * Function: rgSCHCmnDlBcchPcch
3275 * Purpose: This function implements scheduler for DL allocation
3276 * for broadcast (on PDSCH) and paging.
3278 * Invoked by: Scheduler
3280 * @param[in] RgSchCellCb* cell
3286 PRIVATE Void rgSCHCmnDlBcchPcch
3289 RgSchCmnDlRbAllocInfo *allocInfo,
3290 RgInfSfAlloc *subfrmAlloc
3293 PRIVATE Void rgSCHCmnDlBcchPcch(cell, allocInfo, subfrmAlloc)
3295 RgSchCmnDlRbAllocInfo *allocInfo;
3296 RgInfSfAlloc *subfrmAlloc;
3299 CmLteTimingInfo frm;
3301 RgSchClcDlLcCb *pcch;
3305 RgSchClcDlLcCb *bcch, *bch;
3306 #endif/*RGR_SI_SCH*/
/* Schedule ahead of current time by the DL delta (and HARQ interval
 * for HDFDD). */
3310 frm = cell->crntTime;
3312 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
3313 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
3314 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
3316 // RGSCH_SUBFRAME_INDEX(frm);
3317 //RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
3320 /* Compute the subframe for which allocation is being made */
3321 /* essentially, we need pointer to the dl frame for this subframe */
3322 sf = rgSCHUtlSubFrmGet(cell, frm);
/* ---- BCCH carried on BCH (MIB) ---- */
3326 bch = rgSCHDbmGetBcchOnBch(cell);
3327 #if (ERRCLASS & ERRCLS_DEBUG)
3330 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on BCH is not configured");
3334 if (bch->boLst.first != NULLP)
3336 bo = (RgSchClcBoRpt *)(bch->boLst.first->node);
3337 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3339 sf->bch.tbSize = bo->bo;
3340 cmLListDelFrm(&bch->boLst, bch->boLst.first);
3341 /* ccpu00117052 - MOD - Passing double pointer
3342 for proper NULLP assignment*/
3343 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(*bo));
3344 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, bch->lcId,TRUE);
/* MIB repetition window boundary: SFN multiple of 4, subframe 0. */
3349 if ((frm.sfn % 4 == 0) && (frm.subframe == 0))
/* ---- BCCH carried on DL-SCH (SIB1 / other SIs) ---- */
3354 allocInfo->bcchAlloc.schdFirst = FALSE;
3355 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
3356 #if (ERRCLASS & ERRCLS_DEBUG)
3359 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3363 if (bcch->boLst.first != NULLP)
3365 bo = (RgSchClcBoRpt *)(bcch->boLst.first->node);
3367 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3369 allocInfo->bcchAlloc.schdFirst = TRUE;
3370 /* Time to perform allocation for this BCCH transmission */
3371 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3375 if(!allocInfo->bcchAlloc.schdFirst)
3378 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
3379 #if (ERRCLASS & ERRCLS_DEBUG)
3382 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"BCCH on DLSCH is not configured");
3386 lnk = bcch->boLst.first;
3387 while (lnk != NULLP)
3389 bo = (RgSchClcBoRpt *)(lnk->node);
/* Transmit while inside the SI window; free expired reports. */
3391 valid = rgSCHCmnChkInWin(frm, bo->timeToTx, bo->maxTimeToTx);
3395 bo->i = RGSCH_CALC_SF_DIFF(frm, bo->timeToTx);
3396 /* Time to perform allocation for this BCCH transmission */
3397 rgSCHCmnClcAlloc(cell, sf, bcch, RGSCH_SI_RNTI, allocInfo);
3402 valid = rgSCHCmnChkPastWin(frm, bo->maxTimeToTx);
3405 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
3406 /* ccpu00117052 - MOD - Passing double pointer
3407 for proper NULLP assignment*/
3408 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo,
3409 sizeof(RgSchClcBoRpt));
/* When SI scheduling (RGR_SI_SCH) is compiled in, the SI scheduler
 * replaces the inline BCCH handling above. */
3415 rgSCHDlSiSched(cell, allocInfo, subfrmAlloc);
3416 #endif/*RGR_SI_SCH*/
/* ---- PCCH (paging) ---- */
3418 pcch = rgSCHDbmGetPcch(cell);
3422 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"PCCH on DLSCH is not configured");
3426 if (pcch->boLst.first != NULLP)
3428 bo = (RgSchClcBoRpt *)(pcch->boLst.first->node);
3430 if (RGSCH_TIMEINFO_SAME(frm, bo->timeToTx))
3432 /* Time to perform allocation for this PCCH transmission */
3433 rgSCHCmnClcAlloc(cell, sf, pcch, RGSCH_P_RNTI, allocInfo);
3441 * Fun: rgSCHCmnChkInWin
3443 * Desc: This function checks if frm occurs in window
3445 * Ret: TRUE - if in window
3450 * File: rg_sch_cmn.c
3454 Bool rgSCHCmnChkInWin
3456 CmLteTimingInfo frm,
3457 CmLteTimingInfo start,
3461 Bool rgSCHCmnChkInWin(frm, start, end)
3462 CmLteTimingInfo frm;
3463 CmLteTimingInfo start;
3464 CmLteTimingInfo end;
3470 if (end.sfn > start.sfn)
3472 if (frm.sfn > start.sfn
3473 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3475 if (frm.sfn < end.sfn
3477 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3479 || (frm.sfn == end.sfn && frm.slot <= start.slot))
3486 /* Testing for wrap around, sfn wraparound check should be enough */
3487 else if (end.sfn < start.sfn)
3489 if (frm.sfn > start.sfn
3490 || (frm.sfn == start.sfn && frm.slot >= start.slot))
3496 if (frm.sfn < end.sfn
3497 || (frm.sfn == end.sfn && frm.slot <= end.slot))
3503 else /* start.sfn == end.sfn */
3505 if (frm.sfn == start.sfn
3506 && (frm.slot >= start.slot
3507 && frm.slot <= end.slot))
3514 } /* end of rgSCHCmnChkInWin*/
3518 * Fun: rgSCHCmnChkPastWin
3520 * Desc: This function checks if frm has gone past window edge
3522 * Ret: TRUE - if past window edge
3527 * File: rg_sch_cmn.c
3531 Bool rgSCHCmnChkPastWin
3533 CmLteTimingInfo frm,
3537 Bool rgSCHCmnChkPastWin(frm, end)
3538 CmLteTimingInfo frm;
3539 CmLteTimingInfo end;
3542 CmLteTimingInfo refFrm = end;
/* Build a reference window starting one subframe after 'end' and
 * ending one full frame later; frm lies past the original edge iff
 * it falls inside that window. */
3546 RGSCH_INCR_FRAME(refFrm.sfn);
3547 RGSCH_INCR_SUB_FRAME(end, 1);
3548 pastWin = rgSCHCmnChkInWin(frm, end, refFrm);
3551 } /* end of rgSCHCmnChkPastWin*/
3554 * @brief This function implements allocation of the resources for common
3555 * channels BCCH, PCCH.
3559 * Function: rgSCHCmnClcAlloc
3560 * Purpose: This function implements selection of number of RBs based
3561 * the allowed grant for the service. It is also responsible
3562 * for selection of MCS for the transmission.
3564 * Invoked by: Scheduler
3566 * @param[in] RgSchCellCb *cell,
3567 * @param[in] RgSchDlSf *sf,
3568 * @param[in] RgSchClcDlLcCb *lch,
3569 * @param[in] U16 rnti,
3570 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3575 PRIVATE Void rgSCHCmnClcAlloc
3579 RgSchClcDlLcCb *lch,
3581 RgSchCmnDlRbAllocInfo *allocInfo
3584 PRIVATE Void rgSCHCmnClcAlloc(cell, sf, lch, rnti, allocInfo)
3587 RgSchClcDlLcCb *lch;
3589 RgSchCmnDlRbAllocInfo *allocInfo;
3592 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
3599 U8 cfi = cellDl->currCfi;
/* Head of the logical channel's buffer-occupancy report list drives
 * this allocation. */
3603 bo = (RgSchClcBoRpt *)(lch->boLst.first->node);
3607 /* rgSCHCmnClcRbAllocForFxdTb(cell, bo->bo, cellDl->ccchCqi, &rb);*/
/* Derive the RB count: scan the TB-size table when no per-RB bit
 * capacity is cached, otherwise divide directly. */
3608 if(cellDl->bitsPerRb==0)
3610 while ((rgTbSzTbl[0][0][rb]) < (tbs*8))
3618 rb = RGSCH_CEIL((tbs*8), cellDl->bitsPerRb);
3620 /* DwPTS Scheduling Changes Start */
3622 if(sf->sfType == RG_SCH_SPL_SF_DATA)
3624 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
3626 /* Calculate the less RE's because of DwPTS */
3627 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
3629 /* Increase number of RBs in Spl SF to compensate for lost REs */
3630 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
3633 /* DwPTS Scheduling Changes End */
3634 /*ccpu00115595- end*/
3635 /* additional check to see if required RBs
3636 * exceeds the available */
3637 if (rb > sf->bw - sf->bwAssigned)
3639 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"BW allocation "
3640 "failed for CRNTI:%d",rnti);
3644 /* Update the subframe Allocated BW field */
3645 sf->bwAssigned = sf->bwAssigned + rb;
3646 /* Fill in the BCCH/PCCH transmission info to the RBAllocInfo struct */
3647 if (rnti == RGSCH_SI_RNTI)
3649 allocInfo->bcchAlloc.rnti = rnti;
3650 allocInfo->bcchAlloc.dlSf = sf;
3651 allocInfo->bcchAlloc.tbInfo[0].bytesReq = tbs;
3652 allocInfo->bcchAlloc.rbsReq = rb;
3653 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
3654 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
3655 /* Nprb indication at PHY for common Ch */
3656 allocInfo->bcchAlloc.nPrb = bo->nPrb;
/* Otherwise this is paging: fill the PCCH allocation instead. */
3660 allocInfo->pcchAlloc.rnti = rnti;
3661 allocInfo->pcchAlloc.dlSf = sf;
3662 allocInfo->pcchAlloc.tbInfo[0].bytesReq = tbs;
3663 allocInfo->pcchAlloc.rbsReq = rb;
3664 allocInfo->pcchAlloc.tbInfo[0].imcs = mcs;
3665 allocInfo->pcchAlloc.tbInfo[0].noLyr = 1;
3666 allocInfo->pcchAlloc.nPrb = bo->nPrb;
3673 * @brief This function implements PDCCH allocation for common channels.
3677 * Function: rgSCHCmnCmnPdcchAlloc
3678 * Purpose: This function implements allocation of PDCCH for a UE.
3679 * 1. This uses index 0 of PDCCH table for efficiency.
3680 * 2. Uses the candidate PDCCH count for the aggr level.
3681 * 3. Look for availability for each candidate and choose
3682 * the first one available.
3684 * Invoked by: Scheduler
3686 * @param[in] RgSchCellCb *cell
3687 * @param[in] RgSchDlSf *sf
3688 * @return RgSchPdcch *
3689 * -# NULLP when unsuccessful
3693 RgSchPdcch *rgSCHCmnCmnPdcchAlloc
3699 RgSchPdcch *rgSCHCmnCmnPdcchAlloc(cell, subFrm)
3704 CmLteAggrLvl aggrLvl;
3705 RgSchPdcchInfo *pdcchInfo;
3707 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3708 U8 numCce; /*store num CCEs based on
3709 aggregation level */
/* Common channels always use the cell-configured common-channel
 * aggregation level. */
3711 aggrLvl = cellSch->dl.cmnChAggrLvl;
3713 pdcchInfo = &(subFrm->pdcchInfo);
3715 /* Updating the no. of nCce in pdcchInfo, in case if CFI
3718 if(subFrm->nCce != pdcchInfo->nCce)
3720 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
3723 if(cell->nCce != pdcchInfo->nCce)
3725 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Map aggregation level to the CCE count it occupies. */
3731 case CM_LTE_AGGR_LVL4:
3734 case CM_LTE_AGGR_LVL8:
3737 case CM_LTE_AGGR_LVL16:
3744 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
3747 pdcch->isSpsRnti = FALSE;
3749 /* Increment the CCE used counter in the current subframe */
3750 subFrm->cceCnt += numCce;
3751 pdcch->pdcchSearchSpace = RG_SCH_CMN_SEARCH_SPACE;
3756 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
3757 subFrm->isCceFailure = TRUE;
3759 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
3760 "PDCCH ERR: NO PDDCH AVAIL IN COMMON SEARCH SPACE aggr:%u",
3767 * @brief This function implements bandwidth allocation for common channels.
3771 * Function: rgSCHCmnClcRbAlloc
3772 * Purpose: This function implements bandwidth allocation logic
3773 * for common control channels.
3775 * Invoked by: Scheduler
3777 * @param[in] RgSchCellCb* cell
3781 * @param[in] U32 *tbs
3782 * @param[in] U8 *mcs
3783 * @param[in] RgSchDlSf *sf
3789 Void rgSCHCmnClcRbAlloc
3802 Void rgSCHCmnClcRbAlloc(cell, bo, cqi, rb, tbs, mcs, iTbs, isSpsBo)
3815 PRIVATE Void rgSCHCmnClcRbAlloc
3826 PRIVATE Void rgSCHCmnClcRbAlloc(cell, bo, cqi, rb, tbs, mcs, sf)
3835 #endif /* LTEMAC_SPS */
3838 RgSchCmnTbSzEff *effTbl;
3841 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3842 U8 cfi = cellSch->dl.currCfi;
3845 /* first get the CQI to MCS table and determine the number of RBs */
3846 effTbl = (RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]);
3847 iTbsVal = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[cqi];
3848 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3850 /* Efficiency is number of bits per 1024 REs */
3851 eff = (*effTbl)[iTbsVal];
3853 /* Get the number of REs needed for this bo */
3854 noRes = ((bo * 8 * 1024) / eff );
3856 /* Get the number of RBs needed for this transmission */
3857 /* Number of RBs = No of REs / No of REs per RB */
3858 tmpRb = RGSCH_CEIL(noRes, cellSch->dl.noResPerRb[cfi]);
3859 /* KWORK_FIX: added check to see if rb has crossed maxRb*/
3860 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
/* Cap the allocation at the per-UE DL bandwidth limit. */
3861 if (tmpRb > cellSch->dl.maxDlBwPerUe)
3863 tmpRb = cellSch->dl.maxDlBwPerUe;
/* Grow the RB count until the TB size covers the BO (or the cap is
 * reached). */
3865 while ((rgTbSzTbl[0][iTbsVal][tmpRb-1]/8) < bo &&
3866 (tmpRb < cellSch->dl.maxDlBwPerUe))
3869 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, rgTbSzTbl[0][0], (tmpRb-1));
3871 *tbs = rgTbSzTbl[0][iTbsVal][tmpRb-1]/8;
3873 RG_SCH_CMN_DL_TBS_TO_MCS(iTbsVal, *mcs);
3881 * @brief Scheduling for MSG4.
3885 * Function: rgSCHCmnMsg4Alloc
3886 * Purpose: Scheduling for MSG4
3888 * Invoked by: Scheduler
3890 * @param[in] RgSchCellCb* cell
3891 * @param[in] RgSchRaCb* raCb
3892 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
3897 PRIVATE S16 rgSCHCmnMsg4Alloc
3901 RgSchCmnDlRbAllocInfo *allocInfo
3904 PRIVATE S16 rgSCHCmnMsg4Alloc(cell, raCb, allocInfo)
3907 RgSchCmnDlRbAllocInfo *allocInfo;
3910 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
3913 /* SR_RACH_STATS : MSG4 TO BE TXED */
3915 /* Return if subframe BW exhausted */
3916 if (allocInfo->msg4Alloc.msg4DlSf->bw <=
3917 allocInfo->msg4Alloc.msg4DlSf->bwAssigned)
3919 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId ,
/* Acquire a HARQ process for MSG4 transmission; fail if none free. */
3924 if (rgSCHDhmGetMsg4HqProc(raCb, cellSch->dl.time) != ROK)
3926 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3927 "rgSCHDhmGetMsg4HqProc failed");
3931 raCb->rbAllocInfo.dlSf = allocInfo->msg4Alloc.msg4DlSf;
/* Perform the dedicated BW allocation; on failure release the HARQ
 * process obtained above so it is not leaked. */
3933 if (rgSCHCmnMsg4DedAlloc(cell, raCb) != ROK)
3935 /* Fix : syed Minor failure handling, release hqP if Unsuccessful */
3936 rgSCHDhmRlsHqpTb(raCb->dlHqE->msg4Proc, 0, FALSE);
3937 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
3938 "rgSCHCmnMsg4DedAlloc failed.");
/* Success: queue the MSG4 HARQ proc for transmission and count the
 * scheduled CCCH UE in this DL subframe. */
3941 cmLListAdd2Tail(&allocInfo->msg4Alloc.msg4TxLst, &raCb->dlHqE->msg4Proc->reqLnk);
3942 raCb->dlHqE->msg4Proc->reqLnk.node = (PTR)raCb->dlHqE->msg4Proc;
3943 allocInfo->msg4Alloc.msg4DlSf->schdCcchUe++;
3950 * @brief This function implements PDCCH allocation for an UE.
3954 * Function: PdcchAlloc
3955 * Purpose: This function implements allocation of PDCCH for an UE.
3956 * 1. Get the aggregation level for the CQI of the UE.
3957 * 2. Get the candidate PDCCH count for the aggr level.
3958 * 3. Look for availability for each candidate and choose
3959 * the first one available.
3961 * Invoked by: Scheduler
3966 * @param[in] dciFrmt
3967 * @return RgSchPdcch *
3968 * -# NULLP when unsuccessful
3972 RgSchPdcch *rgSCHCmnPdcchAlloc
3978 TfuDciFormat dciFrmt,
3982 RgSchPdcch *rgSCHCmnPdcchAlloc(cell, subFrm, cqi, dciFrmt, isDtx)
3987 TfuDciFormat dciFrmt;
3991 CmLteAggrLvl aggrLvl;
3992 RgSchPdcchInfo *pdcchInfo;
3996 /* 3.1 consider the selected DCI format size in determining the
3997 * aggregation level */
3998 //TODO_SID Need to update. Currently using 4 aggregation level
/* Aggregation level is currently hard-coded; the CQI/format based
 * lookup is commented out pending the TODO above. */
3999 aggrLvl = CM_LTE_AGGR_LVL2;//cellSch->dciAggrLvl[cqi][dciFrmt];
/* DCI format 1A with the common-search-space flag set: allocate from
 * the common search space instead of the UE-specific one. */
4002 if((dciFrmt == TFU_DCI_FORMAT_1A) &&
4003 ((ue) && (ue->allocCmnUlPdcch)) )
4005 pdcch = rgSCHCmnCmnPdcchAlloc(cell, subFrm);
4006 /* Since CRNTI Scrambled */
4009 pdcch->dciNumOfBits = ue->dciSize.cmnSize[dciFrmt];
4010 // prc_trace_format_string(PRC_TRACE_GROUP_PS, PRC_TRACE_INFO_LOW,"Forcing alloc in CMN search spc size %d fmt %d \n",
4011 // pdcch->dciNumOfBits, dciFrmt);
4017 /* Incrementing aggrLvl by 1 step if it is not AGGR_LVL16 (MAX SIZE)
4018 * in order to increase the redundancy bits for better decoding at UE */
4021 if (aggrLvl != CM_LTE_AGGR_LVL16)
4025 case CM_LTE_AGGR_LVL2:
4026 aggrLvl = CM_LTE_AGGR_LVL4;
4028 case CM_LTE_AGGR_LVL4:
4029 aggrLvl = CM_LTE_AGGR_LVL8;
4031 case CM_LTE_AGGR_LVL8:
4032 aggrLvl = CM_LTE_AGGR_LVL16;
4041 pdcchInfo = &subFrm->pdcchInfo;
4043 /* Updating the no. of nCce in pdcchInfo, in case if CFI
4046 if(subFrm->nCce != pdcchInfo->nCce)
4048 rgSCHUtlPdcchInit(cell, subFrm, subFrm->nCce);
4051 if(cell->nCce != pdcchInfo->nCce)
4053 rgSCHUtlPdcchInit(cell, subFrm, cell->nCce);
/* Not enough CCEs left in this subframe for the chosen aggregation
 * level (aggrLvl encodes the CCE count as 2^(lvl-1)). */
4057 if (pdcchInfo->nCce < (1 << (aggrLvl - 1)))
4059 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
4060 subFrm->isCceFailure = TRUE;
4061 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
4062 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
4068 if (rgSCHUtlPdcchAvail(cell, pdcchInfo, aggrLvl, &pdcch) == TRUE)
4070 /* SR_RACH_STATS : Reset isTBMsg4 */
4071 pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4= FALSE;
4072 pdcch->dci.u.format0Info.isSrGrant = FALSE;
4074 pdcch->isSpsRnti = FALSE;
4076 /* Increment the CCE used counter in the current subframe */
4077 subFrm->cceCnt += aggrLvl;
4078 pdcch->pdcchSearchSpace = RG_SCH_UE_SPECIFIC_SEARCH_SPACE;
/* DCI size selection: secondary-cell UEs and dedicated allocations
 * both use the fixed 5GTF size for now (see TODO_SID below). */
4082 if (ue->cell != cell)
4084 /* Secondary Cell */
4085 //pdcch->dciNumOfBits = ue->dciSize.noUlCcSize[dciFrmt];
4086 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
4091 //pdcch->dciNumOfBits = ue->dciSize.dedSize[dciFrmt];
4092 //TODO_SID Need to update dci size.
4093 pdcch->dciNumOfBits = MAX_5GTF_DCIA1B1_SIZE;
4099 pdcch->dciNumOfBits = cell->dciSize.size[dciFrmt];
4104 /* PDCCH Allocation Failed, Mark cceFailure flag as TRUE */
4105 subFrm->isCceFailure = TRUE;
4107 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
4108 "PDCCH ERR: NO PDDCH AVAIL IN UE SEARCH SPACE :aggr(%u)",
4115 * @brief This function implements BW allocation for CCCH SDU
4119 * Function: rgSCHCmnCcchSduDedAlloc
4120 * Purpose: Downlink bandwidth Allocation for CCCH SDU.
4122 * Invoked by: Scheduler
4124 * @param[in] RgSchCellCb* cell
4125 * @param[out] RgSchUeCb *ueCb
4130 PRIVATE S16 rgSCHCmnCcchSduDedAlloc
4136 PRIVATE S16 rgSCHCmnCcchSduDedAlloc(cell, ueCb)
4141 RgSchDlHqEnt *hqE = NULLP;
4143 RgSchDlRbAlloc *rbAllocinfo = NULLP;
4144 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4148 U8 cfi = cellDl->currCfi;
4152 rbAllocinfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
/* Effective BO = pending CCCH SDU bytes plus MAC header overhead. */
4154 effBo = ueCb->dlCcchInfo.bo + RGSCH_CCCH_SDU_HDRSIZE;
/* Two call variants below are selected by the LTEMAC_SPS branch. */
4157 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
4158 &rbAllocinfo->tbInfo[0].bytesReq,
4159 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
4160 #else /* LTEMAC_SPS */
4161 rgSCHCmnClcRbAlloc(cell, effBo, cellDl->ccchCqi, &rbAllocinfo->rbsReq, \
4162 &rbAllocinfo->tbInfo[0].bytesReq,\
4163 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
4165 #endif /* LTEMAC_SPS */
4168 /* Cannot exceed the total number of RBs in the cell */
4169 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
4170 rbAllocinfo->dlSf->bwAssigned)))
4172 /* Check if atleast one allocation was possible.
4173 This may be the case where the Bw is very less and
4174 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
4175 if (rbAllocinfo->dlSf->bwAssigned == 0)
4177 numRb = rbAllocinfo->dlSf->bw;
4178 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
/* Raise iTbs until the TB size over the whole bandwidth fits effBo. */
4179 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
4183 rbAllocinfo->rbsReq = numRb;
4184 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
4185 /* DwPTS Scheduling Changes Start */
4187 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4189 rbAllocinfo->tbInfo[0].bytesReq =
4190 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1,cfi);
4193 /* DwPTS Scheduling Changes End */
4194 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4202 /* Update the subframe Allocated BW field */
4203 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4204 rbAllocinfo->rbsReq;
/* Bind the allocation to the UE's CCCH SDU HARQ process and RNTI. */
4205 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
4206 rbAllocinfo->tbInfo[0].tbCb = &hqE->ccchSduProc->tbInfo[0];
4207 rbAllocinfo->rnti = ueCb->ueId;
4208 rbAllocinfo->tbInfo[0].noLyr = 1;
4215 * @brief This function implements BW allocation for MSG4
4219 * Function: rgSCHCmnMsg4DedAlloc
4220 * Purpose: Downlink bandwidth Allocation for MSG4.
4222 * Invoked by: Scheduler
4224 * @param[in] RgSchCellCb* cell
4225 * @param[out] RgSchRaCb *raCb
4230 PRIVATE S16 rgSCHCmnMsg4DedAlloc
4236 PRIVATE S16 rgSCHCmnMsg4DedAlloc(cell, raCb)
4242 RgSchDlRbAlloc *rbAllocinfo = &raCb->rbAllocInfo;
4246 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4247 U8 cfi = cellDl->currCfi;
/* Effective BO = CCCH bytes + MSG4 header + contention-resolution ID. */
4251 effBo = raCb->dlCcchInfo.bo + RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE;
/* Two call variants below are selected by the LTEMAC_SPS branch. */
4254 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
4255 &rbAllocinfo->tbInfo[0].bytesReq,\
4256 &rbAllocinfo->tbInfo[0].imcs, rbAllocinfo->dlSf);
4257 #else /* LTEMAC_SPS */
4258 rgSCHCmnClcRbAlloc(cell, effBo, raCb->ccchCqi, &rbAllocinfo->rbsReq, \
4259 &rbAllocinfo->tbInfo[0].bytesReq,\
4260 &rbAllocinfo->tbInfo[0].imcs, &iTbs, FALSE,
4262 #endif /* LTEMAC_SPS */
4265 /* Cannot exceed the total number of RBs in the cell */
4266 if ((S16)rbAllocinfo->rbsReq > ((S16)(rbAllocinfo->dlSf->bw - \
4267 rbAllocinfo->dlSf->bwAssigned)))
4269 /* Check if atleast one allocation was possible.
4270 This may be the case where the Bw is very less and
4271 with the configured CCCH CQI, CCCH SDU exceeds the min Bw */
4272 if (rbAllocinfo->dlSf->bwAssigned == 0)
4274 numRb = rbAllocinfo->dlSf->bw;
4275 RG_SCH_CMN_DL_MCS_TO_TBS(rbAllocinfo->tbInfo[0].imcs, iTbs);
/* Raise iTbs until the TB size over the whole bandwidth fits effBo. */
4276 while (rgTbSzTbl[0][++iTbs][numRb-1]/8 < effBo)
4280 rbAllocinfo->rbsReq = numRb;
4281 rbAllocinfo->tbInfo[0].bytesReq = rgTbSzTbl[0][iTbs][numRb-1]/8;
4282 /* DwPTS Scheduling Changes Start */
4284 if(rbAllocinfo->dlSf->sfType == RG_SCH_SPL_SF_DATA)
4286 rbAllocinfo->tbInfo[0].bytesReq =
4287 rgSCHCmnCalcDwPtsTbSz(cell, effBo, &numRb, &iTbs, 1, cfi);
4290 /* DwPTS Scheduling Changes End */
4291 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, rbAllocinfo->tbInfo[0].imcs);
4299 /* Update the subframe Allocated BW field */
4300 rbAllocinfo->dlSf->bwAssigned = rbAllocinfo->dlSf->bwAssigned + \
4301 rbAllocinfo->rbsReq;
/* MSG4 goes out scrambled with the temporary C-RNTI on the MSG4 HARQ
 * process tied to this RA control block. */
4302 rbAllocinfo->rnti = raCb->tmpCrnti;
4303 rbAllocinfo->tbInfo[0].tbCb = &raCb->dlHqE->msg4Proc->tbInfo[0];
4304 rbAllocinfo->tbInfo[0].schdlngForTb = TRUE;
4305 rbAllocinfo->tbInfo[0].noLyr = 1;
4312 * @brief This function implements scheduling for RA Response.
4316 * Function: rgSCHCmnDlRaRsp
4317 * Purpose: Downlink scheduling for RA responses.
/* This is the TDD variant (uses the TDD RACH-response tables); an FDD
 * variant of the same name follows under a different compile branch. */
4319 * Invoked by: Scheduler
4321 * @param[in] RgSchCellCb* cell
4326 PRIVATE Void rgSCHCmnDlRaRsp
4329 RgSchCmnDlRbAllocInfo *allocInfo
4332 PRIVATE Void rgSCHCmnDlRaRsp(cell, allocInfo)
4334 RgSchCmnDlRbAllocInfo *allocInfo;
4337 CmLteTimingInfo frm;
4338 CmLteTimingInfo schFrm;
4344 RgSchTddRachRspLst *rachRsp;
4345 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
/* Scheduling time = current time advanced by the DL delta. */
4350 frm = cell->crntTime;
4351 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4353 /* Compute the subframe for which allocation is being made */
4354 /* essentially, we need pointer to the dl frame for this subframe */
4355 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4357 /* Get the RACH Response scheduling related information
4358 * for the subframe with RA index */
4359 raIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][frm.subframe]-1;
4361 rachRsp = &cell->rachRspLst[raIdx];
4363 for(sfnIdx = 0; sfnIdx < rachRsp->numRadiofrms; sfnIdx++)
4365 /* For all scheduled RACH Responses in SFNs */
4367 RG_SCH_CMN_DECR_FRAME(schFrm.sfn, rachRsp->rachRsp[sfnIdx].sfnOffset);
4368 /* For all scheduled RACH Responses in subframes */
4370 subfrmIdx < rachRsp->rachRsp[sfnIdx].numSubfrms; subfrmIdx++)
4372 schFrm.subframe = rachRsp->rachRsp[sfnIdx].subframe[subfrmIdx];
4373 /* compute the last RA RNTI used in the previous subframe */
4374 raIdx = (((schFrm.sfn % cell->raInfo.maxRaSize) * \
4375 RGSCH_NUM_SUB_FRAMES * RGSCH_MAX_RA_RNTI_PER_SUBFRM) \
4378 /* For all RA RNTIs within a subframe */
4380 for(i=0; (i < RGSCH_MAX_RA_RNTI_PER_SUBFRM) && \
4381 (noRaRnti < RGSCH_MAX_TDD_RA_RSP_ALLOC); i++)
4383 rarnti = (schFrm.subframe + RGSCH_NUM_SUB_FRAMES*i + 1);
4384 rntiIdx = (raIdx + RGSCH_NUM_SUB_FRAMES*i);
/* Only attempt allocation when RA requests are pending for this RNTI. */
4386 if (cell->raInfo.raReqLst[rntiIdx].first != NULLP)
4388 /* compute the next RA RNTI */
4389 if (rgSCHCmnRaRspAlloc(cell, subFrm, rntiIdx,
4390 rarnti, noRaRnti, allocInfo) != ROK)
4392 /* The resources are exhausted */
4406 * @brief This function implements scheduling for RA Response.
4410 * Function: rgSCHCmnDlRaRsp
4411 * Purpose: Downlink scheduling for RA responses.
4413 * Invoked by: Scheduler
4415 * @param[in] RgSchCellCb* cell
4416 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4421 PRIVATE Void rgSCHCmnDlRaRsp //FDD
4424 RgSchCmnDlRbAllocInfo *allocInfo
4427 PRIVATE Void rgSCHCmnDlRaRsp(cell, allocInfo)
4429 RgSchCmnDlRbAllocInfo *allocInfo;
4432 CmLteTimingInfo frm;
4433 CmLteTimingInfo winStartFrm;
4439 RgSchCmnCell *sched;
/* Scheduling time = current time advanced by the DL delta. */
4442 frm = cell->crntTime;
4443 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
4445 /* Compute the subframe for which allocation is being made */
4446 /* essentially, we need pointer to the dl frame for this subframe */
4447 subFrm = rgSCHUtlSubFrmGet(cell, frm);
4448 sched = RG_SCH_CMN_GET_CELL(cell);
4450 /* ccpu00132523 - Window Start calculated by considering RAR window size,
4451 * RAR Wait period, Subframes occupied for respective preamble format*/
4452 winGap = (sched->dl.numRaSubFrms-1) + (cell->rachCfg.raWinSize-1)
4453 +RGSCH_RARSP_WAIT_PERIOD;
4455 /* Window starting occasion is retrieved using the gap and tried to
4456 * fit to the size of raReqLst array*/
4457 RGSCHDECRFRMCRNTTIME(frm, winStartFrm, winGap);
4459 //5G_TODO TIMING update. Need to check
4460 winStartIdx = (winStartFrm.sfn & 1) * RGSCH_MAX_RA_RNTI+ winStartFrm.slot;
/* Walk the RAR window; stop once the common-PDCCH budget is used up. */
4462 for(i = 0; ((i < cell->rachCfg.raWinSize) && (noRaRnti < RG_SCH_CMN_MAX_CMN_PDCCH)); i++)
4464 raIdx = (winStartIdx + i) % RGSCH_RAREQ_ARRAY_SIZE;
4466 if (cell->raInfo.raReqLst[raIdx].first != NULLP)
/* Backoff Indicator sub-header is only estimated for the first
 * (oldest) occasion in the window (i == 0). */
4468 allocInfo->raRspAlloc[noRaRnti].biEstmt = \
4469 (!i * RGSCH_ONE_BIHDR_SIZE);
4470 rarnti = raIdx % RGSCH_MAX_RA_RNTI+ 1;
4471 if (rgSCHCmnRaRspAlloc(cell, subFrm, raIdx,
4472 rarnti, noRaRnti, allocInfo) != ROK)
4474 /* The resources are exhausted */
4477 /* ccpu00132523- If all the RAP ids are not scheduled then need not
4478 * proceed for next RA RNTIs*/
4479 if(allocInfo->raRspAlloc[noRaRnti].numRapids < cell->raInfo.raReqLst[raIdx].count)
4483 noRaRnti++; /* Max of RG_SCH_CMN_MAX_CMN_PDCCH RARNTIs
4484 for response allocation */
4493 * @brief This function allocates the resources for an RARNTI.
4497 * Function: rgSCHCmnRaRspAlloc
4498 * Purpose: Allocate resources to a RARNTI.
4499 * 0. Allocate PDCCH for sending the response.
4500 * 1. Locate the number of RA requests pending for the RARNTI.
4501 * 2. Compute the size of data to be built.
4502 * 3. Using common channel CQI, compute the number of RBs.
4504 * Invoked by: Scheduler
4506 * @param[in] RgSchCellCb *cell,
4507 * @param[in] RgSchDlSf *subFrm,
4508 * @param[in] U16 rarnti,
4509 * @param[in] U8 noRaRnti
4510 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
4515 PRIVATE S16 rgSCHCmnRaRspAlloc
4522 RgSchCmnDlRbAllocInfo *allocInfo
4525 PRIVATE S16 rgSCHCmnRaRspAlloc(cell,subFrm,raIndex,rarnti,noRaRnti,allocInfo)
4531 RgSchCmnDlRbAllocInfo *allocInfo;
4534 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4535 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4539 /*ccpu00116700,ccpu00116708- Corrected the wrong type for mcs*/
4542 /* RACH handling related changes */
4543 Bool isAlloc = FALSE;
/* NOTE(review): function-local static carries the per-subframe RAPID
 * count across calls — not reentrant and not multi-cell safe; confirm
 * single-threaded scheduler invocation. */
4544 static U8 schdNumRapid = 0;
4550 U8 cfi = cellDl->currCfi;
4557 /* ccpu00132523: Resetting the schdRap Id count in every scheduling subframe*/
/* Guard: no remaining bandwidth in this subframe. */
4564 if (subFrm->bw == subFrm->bwAssigned)
4566 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4567 "bw == bwAssigned RARNTI:%d",rarnti);
4571 reqLst = &cell->raInfo.raReqLst[raIndex];
4572 if (reqLst->count == 0)
4574 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4575 "reqLst Count=0 RARNTI:%d",rarnti);
4578 remNumRapid = reqLst->count;
4581 /* Limit number of rach rsps to maxMsg3PerUlsf */
4582 if ( schdNumRapid+remNumRapid > cellUl->maxMsg3PerUlSf )
4584 remNumRapid = cellUl->maxMsg3PerUlSf-schdNumRapid;
4590 /* Try allocating for as many RAPIDs as possible */
4591 /* BI sub-header size to the tbSize requirement */
4592 noBytes = RGSCH_GET_RAR_BYTES(remNumRapid) +\
4593 allocInfo->raRspAlloc[noRaRnti].biEstmt;
4594 if ((allwdTbSz = rgSCHUtlGetAllwdCchTbSz(noBytes*8, &nPrb, &mcs)) == -1)
4600 /* rgSCHCmnClcRbAllocForFxdTb(cell, allwdTbSz/8, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0 means the per-RB capacity is unknown: fall back to a
 * linear scan of the TB-size table; otherwise compute RBs directly. */
4601 if(cellDl->bitsPerRb==0)
4603 while ((rgTbSzTbl[0][0][rb]) <(U32) allwdTbSz)
4611 rb = RGSCH_CEIL(allwdTbSz, cellDl->bitsPerRb);
4613 /* DwPTS Scheduling Changes Start */
4615 if (subFrm->sfType == RG_SCH_SPL_SF_DATA)
4617 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
4619 /* Calculate the less RE's because of DwPTS */
4620 lostRe = rb * (cellDl->noResPerRb[cfi] -
4621 cellDl->numReDwPts[cfi]);
4623 /* Increase number of RBs in Spl SF to compensate for lost REs */
4624 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
4627 /* DwPTS Scheduling Changes End */
4629 /*ccpu00115595- end*/
4630 if (rb > subFrm->bw - subFrm->bwAssigned)
4635 /* Allocation succeeded for 'remNumRapid' */
4638 printf("\n!!!RAR alloc noBytes:%u,allwdTbSz:%u,tbs:%u,rb:%u\n",
4639 noBytes,allwdTbSz,tbs,rb);
4644 RLOG_ARG0(L_INFO,DBG_CELLID,cell->cellId,"BW alloc Failed");
4648 subFrm->bwAssigned = subFrm->bwAssigned + rb;
4650 /* Fill AllocInfo structure */
4651 allocInfo->raRspAlloc[noRaRnti].rnti = rarnti;
4652 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].bytesReq = tbs;
4653 allocInfo->raRspAlloc[noRaRnti].rbsReq = rb;
4654 allocInfo->raRspAlloc[noRaRnti].dlSf = subFrm;
4655 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].imcs = mcs;
4656 allocInfo->raRspAlloc[noRaRnti].raIndex = raIndex;
4657 /* RACH changes for multiple RAPID handling */
4658 allocInfo->raRspAlloc[noRaRnti].numRapids = remNumRapid;
4659 allocInfo->raRspAlloc[noRaRnti].nPrb = nPrb;
4660 allocInfo->raRspAlloc[noRaRnti].tbInfo[0].noLyr = 1;
4661 allocInfo->raRspAlloc[noRaRnti].vrbgReq = RGSCH_CEIL(nPrb,MAX_5GTF_VRBG_SIZE);
4662 schdNumRapid += remNumRapid;
4666 /***********************************************************
4668 * Func : rgSCHCmnUlAllocFillRbInfo
4670 * Desc : Fills the start RB and the number of RBs for
4671 * uplink allocation.
4679 **********************************************************/
4681 Void rgSCHCmnUlAllocFillRbInfo
4688 Void rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc)
4691 RgSchUlAlloc *alloc;
4694 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4695 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4696 U8 cfi = cellDl->currCfi;
4699 alloc->grnt.rbStart = (alloc->sbStart * cellUl->sbSize) +
4700 cell->dynCfiCb.bwInfo[cfi].startRb;
4702 /* Num RBs = numSbAllocated * sbSize - less RBs in the last SB */
4703 alloc->grnt.numRb = (alloc->numSb * cellUl->sbSize);
4709 * @brief Grant request for Msg3.
4713 * Function : rgSCHCmnMsg3GrntReq
4715 * This is invoked by downlink scheduler to request allocation
4718 * - Attempt to allocate msg3 in the current msg3 subframe
4719 * Allocation attempt based on whether preamble is from group A
4720 * and the value of MESSAGE_SIZE_GROUP_A
4721 * - Link allocation with passed RNTI and msg3 HARQ process
4722 * - Set the HARQ process ID (*hqProcIdRef)
4724 * @param[in] RgSchCellCb *cell
4725 * @param[in] CmLteRnti rnti
4726 * @param[in] Bool preamGrpA
4727 * @param[in] RgSchUlHqProcCb *hqProc
4728 * @param[out] RgSchUlAlloc **ulAllocRef
4729 * @param[out] U8 *hqProcIdRef
4733 PRIVATE Void rgSCHCmnMsg3GrntReq
4738 RgSchUlHqProcCb *hqProc,
4739 RgSchUlAlloc **ulAllocRef,
4743 PRIVATE Void rgSCHCmnMsg3GrntReq(cell, rnti, preamGrpA, hqProc,
4744 ulAllocRef, hqProcIdRef)
4748 RgSchUlHqProcCb *hqProc;
4749 RgSchUlAlloc **ulAllocRef;
4753 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
4754 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
4756 RgSchUlAlloc *alloc;
/* Output defaults to "no allocation"; callers test for NULLP. */
4761 *ulAllocRef = NULLP;
4763 /* Fix: ccpu00120610 Use remAllocs from subframe during msg3 allocation */
4764 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
/* Pick subband count and MCS per preamble group (A vs B). */
4768 if (preamGrpA == FALSE)
4770 numSb = cellUl->ra.prmblBNumSb;
4771 iMcs = cellUl->ra.prmblBIMcs;
4775 numSb = cellUl->ra.prmblANumSb;
4776 iMcs = cellUl->ra.prmblAIMcs;
4779 if ((hole = rgSCHUtlUlHoleFirst(sf)) != NULLP)
4781 if(*sf->allocCountRef == 0)
4783 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
4784 /* Reinitialize the hole */
4785 if (sf->holeDb->count == 1 && (hole->start == 0)) /* Sanity check of holeDb */
4787 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4788 /* Re-Initialize available subbands because of CFI change*/
/* NOTE(review): the assignment below duplicates the one two lines
 * above (same lhs, same rhs) — the second write appears redundant;
 * confirm against the unabridged source before removing. */
4789 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
4793 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
4794 "Error! holeDb sanity check failed RNTI:%d",rnti);
4797 if (numSb <= hole->num)
4800 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
4801 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
4802 alloc->grnt.iMcs = iMcs;
4803 alloc->grnt.iMcsCrnt = iMcs;
4804 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
4805 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
4806 /* To include the length and ModOrder in DataRecp Req.*/
4807 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
4808 RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
4809 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
4810 alloc->grnt.nDmrs = 0;
4811 alloc->grnt.hop = 0;
4812 alloc->grnt.delayBit = 0;
4813 alloc->grnt.isRtx = FALSE;
4814 *ulAllocRef = alloc;
4815 *hqProcIdRef = (cellUl->msg3SchdHqProcIdx);
4816 hqProc->procId = *hqProcIdRef;
4817 hqProc->ulSfIdx = (cellUl->msg3SchdIdx);
4820 alloc->pdcch = FALSE;
4821 alloc->forMsg3 = TRUE;
4822 alloc->hqProc = hqProc;
/* Start a new HARQ transmission; remaining tx count excludes this one. */
4823 rgSCHUhmNewTx(hqProc, (U8)(cell->rachCfg.maxMsg3Tx - 1), alloc);
4824 //RLOG_ARG4(L_DEBUG,DBG_CELLID,cell->cellId,
4826 "\nRNTI:%d MSG3 ALLOC proc(%lu)procId(%d)schdIdx(%d)\n",
4828 ((PTR)alloc->hqProc),
4829 alloc->hqProc->procId,
4830 alloc->hqProc->ulSfIdx);
4831 RLOG_ARG2(L_DEBUG,DBG_CELLID,cell->cellId,
4832 "alloc(%p)maxMsg3Tx(%d)",
4834 cell->rachCfg.maxMsg3Tx);
4843 * @brief This function determines the allocation limits and
4844 * parameters that aid in DL scheduling.
4848 * Function: rgSCHCmnDlSetUeAllocLmt
4849 * Purpose: This function determines the Maximum RBs
4850 * a UE is eligible to get based on softbuffer
4851 * limitation and cell->>>maxDlBwPerUe. The Codeword
4852 * specific parameters like iTbs, eff and noLyrs
4853 * are also set in this function. This function
4854 * is called while UE configuration and UeDlCqiInd.
4856 * Invoked by: Scheduler
4858 * @param[in] RgSchCellCb *cellCb
4859 * @param[in] RgSchCmnDlUe *ueDl
4864 PRIVATE Void rgSCHCmnDlSetUeAllocLmt
4871 PRIVATE Void rgSCHCmnDlSetUeAllocLmt(cell, ueDl, isEmtcUe)
4879 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
4880 U8 cfi = cellSch->dl.currCfi;
/* eMTC UEs use the eMTC CQI->TBS tables; legacy UEs use the normal
 * tables. Both branches fill the same per-codeword iTbs/eff fields
 * for 1-layer ([0]) and 2-layer ([1]) transmission. */
4884 if(TRUE == isEmtcUe)
4886 /* ITbs for CW0 for 1 Layer Tx */
4887 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4888 [ueDl->mimoInfo.cwInfo[0].cqi];
4889 /* ITbs for CW0 for 2 Layer Tx */
4890 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4891 [ueDl->mimoInfo.cwInfo[0].cqi];
4892 /* Eff for CW0 for 1 Layer Tx */
4893 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4894 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4895 /* Eff for CW0 for 2 Layer Tx */
4896 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4897 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4899 /* ITbs for CW1 for 1 Layer Tx */
4900 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[0][cfi]))\
4901 [ueDl->mimoInfo.cwInfo[1].cqi];
4902 /* ITbs for CW1 for 2 Layer Tx */
4903 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchEmtcCmnCqiToTbs *)(cellSch->dl.emtcCqiToTbsTbl[1][cfi]))\
4904 [ueDl->mimoInfo.cwInfo[1].cqi];
4905 /* Eff for CW1 for 1 Layer Tx */
4906 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4907 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4908 /* Eff for CW1 for 2 Layer Tx */
4909 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4910 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4915 /* ITbs for CW0 for 1 Layer Tx */
4916 ueDl->mimoInfo.cwInfo[0].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4917 [ueDl->mimoInfo.cwInfo[0].cqi];
4918 /* ITbs for CW0 for 2 Layer Tx */
4919 ueDl->mimoInfo.cwInfo[0].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4920 [ueDl->mimoInfo.cwInfo[0].cqi];
4921 /* Eff for CW0 for 1 Layer Tx */
4922 ueDl->mimoInfo.cwInfo[0].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4923 [ueDl->mimoInfo.cwInfo[0].iTbs[0]];
4924 /* Eff for CW0 for 2 Layer Tx */
4925 ueDl->mimoInfo.cwInfo[0].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4926 [ueDl->mimoInfo.cwInfo[0].iTbs[1]];
4928 /* ITbs for CW1 for 1 Layer Tx */
4929 ueDl->mimoInfo.cwInfo[1].iTbs[0] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
4930 [ueDl->mimoInfo.cwInfo[1].cqi];
4931 /* ITbs for CW1 for 2 Layer Tx */
4932 ueDl->mimoInfo.cwInfo[1].iTbs[1] = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[1][cfi]))\
4933 [ueDl->mimoInfo.cwInfo[1].cqi];
4934 /* Eff for CW1 for 1 Layer Tx */
4935 ueDl->mimoInfo.cwInfo[1].eff[0] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[0][cfi]))\
4936 [ueDl->mimoInfo.cwInfo[1].iTbs[0]];
4937 /* Eff for CW1 for 2 Layer Tx */
4938 ueDl->mimoInfo.cwInfo[1].eff[1] = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[1][cfi]))\
4939 [ueDl->mimoInfo.cwInfo[1].iTbs[1]];
4943 // ueDl->laCb.cqiBasediTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0] * 100;
4945 /* Assigning noLyrs to each CW assuming optimal Spatial multiplexing
/* Layer split: CW0 gets ri/2 layers (min 1), CW1 gets the remainder. */
4947 (ueDl->mimoInfo.ri/2 == 0)? (ueDl->mimoInfo.cwInfo[0].noLyr = 1) : \
4948 (ueDl->mimoInfo.cwInfo[0].noLyr = ueDl->mimoInfo.ri/2);
4949 ueDl->mimoInfo.cwInfo[1].noLyr = ueDl->mimoInfo.ri - ueDl->mimoInfo.cwInfo[0].noLyr;
4950 /* rg002.101:ccpu00102106: correcting DL harq softbuffer limitation logic.
4951 * The maxTbSz is the maximum number of PHY bits a harq process can
4952 * hold. Hence we limit our allocation per harq process based on this.
4953 * Earlier implementation we misinterpreted the maxTbSz to be per UE
4954 * per TTI, but in fact it is per Harq per TTI. */
4955 /* rg002.101:ccpu00102106: cannot exceed the harq Tb Size
4956 * and harq Soft Bits limit.*/
4958 /* Considering iTbs corresponding to 2 layer transmission for
4959 * codeword0(approximation) and the maxLayers supported by
4960 * this UE at this point of time. */
4961 RG_SCH_CMN_TBS_TO_MODODR(ueDl->mimoInfo.cwInfo[0].iTbs[1], modOrder);
4963 /* Bits/modOrder gives #REs, #REs/noResPerRb gives #RBs */
4964 /* rg001.301 -MOD- [ccpu00119213] : avoiding wraparound */
4965 maxRb = ((ueDl->maxSbSz)/(cellSch->dl.noResPerRb[cfi] * modOrder *\
4966 ueDl->mimoInfo.ri));
4967 if (cellSch->dl.isDlFreqSel)
4969 /* Rounding off to left nearest multiple of RBG size */
4970 maxRb -= maxRb % cell->rbgSize;
4972 ueDl->maxRb = RGSCH_MIN(maxRb, cellSch->dl.maxDlBwPerUe);
4973 if (cellSch->dl.isDlFreqSel)
4975 /* Rounding off to right nearest multiple of RBG size */
4976 if (ueDl->maxRb % cell->rbgSize)
4978 ueDl->maxRb += (cell->rbgSize -
4979 (ueDl->maxRb % cell->rbgSize));
4983 /* Set the index of the cwInfo, which is better in terms of
4984 * efficiency. If RI<2, only 1 CW, hence btrCwIdx shall be 0 */
4985 if (ueDl->mimoInfo.ri < 2)
4987 ueDl->mimoInfo.btrCwIdx = 0;
4991 if (ueDl->mimoInfo.cwInfo[0].eff[ueDl->mimoInfo.cwInfo[0].noLyr-1] <\
4992 ueDl->mimoInfo.cwInfo[1].eff[ueDl->mimoInfo.cwInfo[1].noLyr-1])
4994 ueDl->mimoInfo.btrCwIdx = 1;
4998 ueDl->mimoInfo.btrCwIdx = 0;
5008 * @brief This function updates TX Scheme.
5012 * Function: rgSCHCheckAndSetTxScheme
5013 * Purpose: Compares the UE's link-adaptation (CQI-based) iTbs
5014 * with the iTbs currently in use for codeword 0. If the
5015 * in-use iTbs lags the CQI-based value by more than the
5016 * configured threshold, transmit diversity is forced for
5017 * the UE; once the in-use iTbs reaches the max, the
5018 * forced-TD state is cleared again.
5020 * Invoked by: Scheduler
5022 * @param[in] RgSchCellCb *cell
5023 * @param[in] RgSchUeCb *ue
5028 PRIVATE Void rgSCHCheckAndSetTxScheme
5034 PRIVATE Void rgSCHCheckAndSetTxScheme(cell, ue)
5039 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
5040 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue ,cell);
5041 U8 cfi = cellSch->dl.currCfi;
/* Highest achievable iTbs for the best CQI at the current CFI. */
5047 maxiTbs = (*(RgSchCmnCqiToTbs*)(cellSch->dl.cqiToTbsTbl[0][cfi]))\
5048 [RG_SCH_CMN_MAX_CQI - 1];
/* cqiBasediTbs is stored scaled by 100 in the LA control block. */
5049 cqiBasediTbs = (ueDl->laCb[0].cqiBasediTbs)/100;
5050 actualiTbs = ueDl->mimoInfo.cwInfo[0].iTbs[0];
5052 if((actualiTbs < RG_SCH_TXSCHEME_CHNG_ITBS_FACTOR) && (cqiBasediTbs >
5053 actualiTbs) && ((cqiBasediTbs - actualiTbs) > RG_SCH_TXSCHEME_CHNG_THRSHD))
5055 RG_SCH_CMN_SET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
5058 if(actualiTbs >= maxiTbs)
5060 RG_SCH_CMN_UNSET_FORCE_TD(ue,cell, RG_SCH_CMN_TD_TXSCHEME_CHNG);
5067 * @brief This function determines the allocation limits and
5068 * parameters that aid in DL scheduling.
5072 * Function: rgSCHCmnDlSetUeAllocLmtLa
5073 * Purpose: This function determines the Maximum RBs
5074 * a UE is eligible to get based on softbuffer
5075 * limitation and cell->>>maxDlBwPerUe. The Codeword
5076 * specific parameters like iTbs, eff and noLyrs
5077 * are also set in this function. This function
5078 * is called while UE configuration and UeDlCqiInd.
5080 * Invoked by: Scheduler
5082 * @param[in] RgSchCellCb *cell
5083 * @param[in] RgSchUeCb *ue
5088 Void rgSCHCmnDlSetUeAllocLmtLa
5094 Void rgSCHCmnDlSetUeAllocLmtLa(cell, ue)
5102 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
5103 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
5104 U8 cfi = cellSch->dl.currCfi;
5109 maxiTbs = (*(RgSchCmnCqiToTbs *)(cellSch->dl.cqiToTbsTbl[0][cfi]))[RG_SCH_CMN_MAX_CQI - 1];
/* Run link adaptation only when a fresh CQI report is pending. */
5110 if(ueDl->cqiFlag == TRUE)
5112 for(cwIdx=0; cwIdx < RG_SCH_CMN_MAX_CW_PER_UE; cwIdx++)
5116 /* Calcluating the reported iTbs for code word 0 */
5117 reportediTbs = ue->ue5gtfCb.mcs;
5119 iTbsNew = (S32) reportediTbs;
5121 if(!ueDl->laCb[cwIdx].notFirstCqi)
5123 /* This is the first CQI report from UE */
5124 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
5125 ueDl->laCb[cwIdx].notFirstCqi = TRUE;
5127 else if ((RG_ITBS_DIFF(reportediTbs, ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0])) > 5)
5129 /* Ignore this iTBS report and mark that last iTBS report was */
5130 /* ignored so that subsequently we reset the LA algorithm */
5131 ueDl->laCb[cwIdx].lastiTbsIgnored = TRUE;
5132 ueDl->laCb[cwIdx].numLastiTbsIgnored++;
5133 if( ueDl->laCb[cwIdx].numLastiTbsIgnored > 10)
5135 /* CQI reported by UE is not catching up. Reset the LA algorithm */
5136 ueDl->laCb[cwIdx].cqiBasediTbs = (iTbsNew * 100);
5137 ueDl->laCb[cwIdx].deltaiTbs = 0;
5138 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
5139 ueDl->laCb[cwIdx].numLastiTbsIgnored = 0;
5144 if (ueDl->laCb[cwIdx].lastiTbsIgnored != TRUE)
/* Exponential moving average: 20% new report, 80% history
 * (values kept scaled by 100 to avoid fractions). */
5146 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
5147 (80 * ueDl->laCb[cwIdx].cqiBasediTbs))/100;
5151 /* Reset the LA as iTbs in use caught up with the value */
5152 /* reported by UE. */
5153 ueDl->laCb[cwIdx].cqiBasediTbs = ((20 * iTbsNew * 100) +
5154 (80 * ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] * 100))/100;
5155 ueDl->laCb[cwIdx].deltaiTbs = 0;
5156 ueDl->laCb[cwIdx].lastiTbsIgnored = FALSE;
5160 iTbsNew = (ueDl->laCb[cwIdx].cqiBasediTbs + ueDl->laCb[cwIdx].deltaiTbs)/100;
5162 RG_SCH_CHK_ITBS_RANGE(iTbsNew, maxiTbs);
5164 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0] = RGSCH_MIN(iTbsNew, cell->thresholds.maxDlItbs);
5165 //ueDl->mimoInfo.cwInfo[cwIdx].iTbs[1] = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
5167 ue->ue5gtfCb.mcs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[0];
5169 printf("reportediTbs[%d] cqiBasediTbs[%d] deltaiTbs[%d] iTbsNew[%d] mcs[%d] cwIdx[%d]\n",
5170 reportediTbs, ueDl->laCb[cwIdx].cqiBasediTbs, ueDl->laCb[cwIdx].deltaiTbs,
5171 iTbsNew, ue->ue5gtfCb.mcs, cwIdx);
5175 if((ue->mimoInfo.txMode != RGR_UE_TM_3) && (ue->mimoInfo.txMode != RGR_UE_TM_4))
/* Mark the pending CQI report as consumed. */
5180 ueDl->cqiFlag = FALSE;
5187 /***********************************************************
5189 * Func : rgSCHCmnDlUeResetTemp
5191 * Desc : Reset whatever variables where temporarily used
5192 * during UE scheduling.
5200 **********************************************************/
5202 Void rgSCHCmnDlHqPResetTemp
5204 RgSchDlHqProcCb *hqP
5207 Void rgSCHCmnDlHqPResetTemp(hqP)
5208 RgSchDlHqProcCb *hqP;
5213 /* Fix: syed having a hqP added to Lists for RB assignment rather than
5214 * a UE, as adding UE was limiting handling some scenarios */
5215 hqP->reqLnk.node = (PTR)NULLP;
5216 hqP->schdLstLnk.node = (PTR)NULLP;
5219 } /* rgSCHCmnDlHqPResetTemp */
5221 /***********************************************************
5223 * Func : rgSCHCmnDlUeResetTemp
5225 * Desc : Reset whatever variables where temporarily used
5226 * during UE scheduling.
5234 **********************************************************/
5236 Void rgSCHCmnDlUeResetTemp
5239 RgSchDlHqProcCb *hqP
5242 Void rgSCHCmnDlUeResetTemp(ue, hqP)
5244 RgSchDlHqProcCb *hqP;
5247 RgSchDlRbAlloc *allocInfo;
5248 RgSchCmnDlUe *cmnUe = RG_SCH_CMN_GET_DL_UE(ue,hqP->hqE->cell);
5254 /* Fix : syed check for UE's existence was useless.
5255 * Instead we need to check that reset is done only for the
5256 * information of a scheduled harq proc, which is cmnUe->proc.
5257 * Reset should not be done for non-scheduled hqP */
5258 if((cmnUe->proc == hqP) || (cmnUe->proc == NULLP))
5260 cmnUe->proc = NULLP;
5261 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, hqP->hqE->cell);
5263 tmpCb = allocInfo->laaCb;
5265 memset(allocInfo, 0, sizeof(RgSchDlRbAlloc));
5266 allocInfo->rnti = ue->ueId;
5268 allocInfo->laaCb = tmpCb;
5270 /* Fix: syed moving this to a common function for both scheduled
5271 * and non-scheduled UEs */
5272 cmnUe->outStndAlloc = 0;
5274 rgSCHCmnDlHqPResetTemp(hqP);
5277 } /* rgSCHCmnDlUeResetTemp */
5279 /***********************************************************
5281 * Func : rgSCHCmnUlUeResetTemp
5283 * Desc : Reset the UL allocation state that was temporarily
5284 * used for this UE during a scheduling run.
5292 **********************************************************/
5294 Void rgSCHCmnUlUeResetTemp
5300 Void rgSCHCmnUlUeResetTemp(cell, ue)
5305 RgSchCmnUlUe *cmnUlUe = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Zero the whole per-UE UL allocation record; nothing is preserved
 * across scheduling runs here (contrast with the DL variant, which
 * keeps laaCb). */
5308 memset(&cmnUlUe->alloc, 0, sizeof(cmnUlUe->alloc));
5311 } /* rgSCHCmnUlUeResetTemp */
5316 * @brief This function fills the PDCCH information from dlProc.
5320 * Function: rgSCHCmnFillPdcch
5321 * Purpose: This function fills in the PDCCH information
5322 * obtained from the RgSchDlRbAlloc
5323 * during common channel scheduling(P, SI, RA - RNTI's).
5325 * Invoked by: Downlink Scheduler
5327 * @param[out] RgSchPdcch* pdcch
5328 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5333 Void rgSCHCmnFillPdcch
5337 RgSchDlRbAlloc *rbAllocInfo
5340 Void rgSCHCmnFillPdcch(cell, pdcch, rbAllocInfo)
5343 RgSchDlRbAlloc *rbAllocInfo;
5348 /* common channel pdcch filling,
5349 * only 1A and Local is supported */
5350 pdcch->rnti = rbAllocInfo->rnti;
5351 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
5352 switch(rbAllocInfo->dciFormat)
5354 #ifdef RG_5GTF /* ANOOP: ToDo: DCI format B1/B2 filling */
5355 case TFU_DCI_FORMAT_B1:
/* 5GTF DCI format B1: grant-specific values come from tbInfo[0];
 * most remaining fields are hard-coded placeholders (see the
 * TODO_SID notes below). */
5358 pdcch->dci.u.formatB1Info.formatType = 0;
5359 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].cmnGrnt.xPDSCHRange;
5360 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].cmnGrnt.rbAssign;
5361 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = 0;
5362 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5363 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = 0;
5364 //pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].ndi;
5365 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].cmnGrnt.rv;
5366 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5367 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5368 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5369 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5370 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5371 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5372 //TODO_SID: Need to update
5373 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5374 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5375 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5376 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5377 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5378 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5379 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].cmnGrnt.SCID;
5380 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5381 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5382 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5384 break; /* case TFU_DCI_FORMAT_B1: */
5387 case TFU_DCI_FORMAT_B2:
5389 //printf(" RG_5GTF:: Pdcch filling with DCI format B2\n");
5391 break; /* case TFU_DCI_FORMAT_B2: */
5394 case TFU_DCI_FORMAT_1A:
5395 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
5397 /*Nprb indication at PHY for common Ch
5398 *setting least significant bit of tpc field to 1 if
5399 nPrb=3 and 0 otherwise. */
5400 if (rbAllocInfo->nPrb == 3)
5402 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 1;
5406 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = 0;
/* Common-channel 1A grants use localized type-2 allocation with
 * ndi/rv fixed to 0; the RIV encodes (rbStart, numRb). */
5408 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
5409 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
5410 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
5411 rbAllocInfo->tbInfo[0].imcs;
5412 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = 0;
5413 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = 0;
5415 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
5417 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
5418 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
5419 rbAllocInfo->allocInfo.raType2.rbStart,
5420 rbAllocInfo->allocInfo.raType2.numRb);
5423 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = \
5426 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
5427 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
5430 break; /* case TFU_DCI_FORMAT_1A: */
5431 case TFU_DCI_FORMAT_1:
5432 pdcch->dci.u.format1Info.tpcCmd = 0;
5433 /* Avoiding this check,as we dont support Type1 RA */
5435 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Unpack the 32-bit type-0 allocation bitmask into the 4-byte
 * resource-allocation map, most significant byte first. */
5438 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
5439 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
5440 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
5442 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
5443 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
5445 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
5446 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
5448 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
5449 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
5453 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
5454 pdcch->dci.u.format1Info.allocInfo.ndi = 0;
5455 pdcch->dci.u.format1Info.allocInfo.mcs = rbAllocInfo->tbInfo[0].imcs;
5456 pdcch->dci.u.format1Info.allocInfo.rv = 0;
5458 pdcch->dci.u.format1Info.dai = 1;
/* Fix(review): corrected typos in the error log text
 * ("icorrect" -> "incorrect", "dciForamt" -> "dciFormat"). */
5462 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Allocator's incorrect "
5463 "dciFormat Fill RNTI:%d",rbAllocInfo->rnti);
5471 * @brief This function finds whether the subframe is special subframe or not.
5475 * Function: rgSCHCmnIsSplSubfrm
5476 * Purpose: This function finds the subframe index of the special subframe
5477 * and finds whether the current DL index matches it or not.
5479 * Invoked by: Scheduler
5481 * @param[in] U8 splfrmCnt
5482 * @param[in] U8 curSubfrmIdx
5483 * @param[in] U8 periodicity
5484 * @param[in] RgSchTddSubfrmInfo *subfrmInfo
5489 PRIVATE Bool rgSCHCmnIsSplSubfrm
5494 RgSchTddSubfrmInfo *subfrmInfo
5497 PRIVATE Bool rgSCHCmnIsSplSubfrm(splfrmCnt, curSubfrmIdx, periodicity, subfrmInfo)
5501 RgSchTddSubfrmInfo *subfrmInfo;
/* 5 ms switch-point periodicity: count the DL subframes preceding
 * this special-subframe occurrence; odd and even occurrences fall in
 * different half-frames (numFrmHf1 / numFrmHf2). */
5510 if(periodicity == RG_SCH_CMN_5_MS_PRD)
5514 dlSfCnt = ((splfrmCnt-1)/2) *\
5515 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
5516 dlSfCnt = dlSfCnt + subfrmInfo->numFrmHf1;
5520 dlSfCnt = (splfrmCnt/2) * \
5521 (subfrmInfo->numFrmHf1 + subfrmInfo->numFrmHf2);
/* Other periodicity: presumably one special subframe per period —
 * only the first half-frame count contributes. */
5526 dlSfCnt = splfrmCnt * subfrmInfo->numFrmHf1;
/* Derive the candidate special-subframe index from the count of DL
 * subframes consumed so far within the periodicity window. */
5528 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1 +\
5529 (periodicity*splfrmCnt - dlSfCnt);
5533 splfrmIdx = RG_SCH_CMN_SPL_SUBFRM_1;
/* Result: TRUE iff the current DL subframe index is the special one. */
5536 if(splfrmIdx == curSubfrmIdx)
5545 * @brief This function updates DAI or UL index.
5549 * Function: rgSCHCmnUpdHqAndDai
5550 * Purpose: Updates the DAI based on UL-DL Configuration
5551 * index and UE. It also updates the HARQ feedback
5552 * time and 'm' index.
5556 * @param[in] RgDlHqProcCb *hqP
5557 * @param[in] RgSchDlSf *subFrm
5558 * @param[in] RgSchDlHqTbCb *tbCb
5559 * @param[in] U8 tbAllocIdx
5564 PRIVATE Void rgSCHCmnUpdHqAndDai
5566 RgSchDlHqProcCb *hqP,
5568 RgSchDlHqTbCb *tbCb,
5572 PRIVATE Void rgSCHCmnUpdHqAndDai(hqP, subFrm, tbCb,tbAllocIdx)
5573 RgSchDlHqProcCb *hqP;
5575 RgSchDlHqTbCb *tbCb;
5579 RgSchUeCb *ue = hqP->hqE->ue;
5584 /* set the time at which UE shall send the feedback
5585 * for this process */
5586 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5587 subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5588 tbCb->fdbkTime.subframe = subFrm->dlFdbkInfo.subframe;
5589 tbCb->m = subFrm->dlFdbkInfo.m;
/* Alternate branch (its condition lies outside this excerpt): derive
 * the feedback time from the subframe stored on the HARQ process. */
5593 /* set the time at which UE shall send the feedback
5594 * for this process */
5595 tbCb->fdbkTime.sfn = (tbCb->timingInfo.sfn + \
5596 hqP->subFrm->dlFdbkInfo.sfnOffset) % RGSCH_MAX_SFN;
5597 tbCb->fdbkTime.subframe = hqP->subFrm->dlFdbkInfo.subframe;
5598 tbCb->m = hqP->subFrm->dlFdbkInfo.m;
5601 /* ccpu00132340-MOD- DAI need to be updated for first TB only*/
5602 if(ue && !tbAllocIdx)
5604 Bool havePdcch = (tbCb->hqP->pdcch ? TRUE : FALSE);
5607 dlDai = rgSCHCmnUpdDai(ue, &tbCb->fdbkTime, tbCb->m, havePdcch,tbCb->hqP,
5610 {/* Non SPS occasions */
5611 tbCb->hqP->pdcch->dlDai = dlDai;
5612 /* hqP->ulDai is used for N1 resource filling
5613 * when SPS occasions present in a bundle */
5614 tbCb->hqP->ulDai = tbCb->dai;
5615 tbCb->hqP->dlDai = dlDai;
5619 /* Updating pucchFdbkIdx for both PUCCH or PUSCH
5621 tbCb->pucchFdbkIdx = tbCb->hqP->ulDai;
5628 * @brief This function updates DAI or UL index.
5632 * Function: rgSCHCmnUpdDai
5633 * Purpose: Updates the DAI in the ack-nack info, a valid
5634 * ue should be passed
* Returns: the updated DL DAI (see the return at the end);
* optionally writes the UL DAI through *ulDai.
5638 * @param[in] RgDlHqProcCb *hqP
5639 * @param[in] RgSchDlSf *subFrm
5640 * @param[in] RgSchDlHqTbCb *tbCb
5648 CmLteTimingInfo *fdbkTime,
5651 RgSchDlHqProcCb *hqP,
5655 U8 rgSCHCmnUpdDai(ue, fdbkTime, m, havePdcch,tbCb,servCellId,hqP,ulDai)
5657 CmLteTimingInfo *fdbkTime;
5660 RgSchDlHqProcCb *hqP;
5664 RgSchTddANInfo *anInfo;
5666 U8 ackNackFdbkArrSize;
/* Resolve the serving-cell index for this HARQ entity (falls back to
 * the primary cell index; the selecting condition lies outside this
 * excerpt). */
5673 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
5674 hqP->hqE->cell->cellId,
5677 servCellIdx = RGSCH_PCELL_INDEX;
5679 ackNackFdbkArrSize = hqP->hqE->cell->ackNackFdbkArrSize;
5681 {/* SPS on primary cell */
5682 servCellIdx = RGSCH_PCELL_INDEX;
5683 ackNackFdbkArrSize = ue->cell->ackNackFdbkArrSize;
5687 anInfo = rgSCHUtlGetUeANFdbkInfo(ue, fdbkTime,servCellIdx);
5689 /* If no ACK/NACK feedback already present, create a new one */
5692 anInfo = &ue->cellInfo[servCellIdx]->anInfo[ue->cellInfo[servCellIdx]->nextFreeANIdx];
5693 anInfo->sfn = fdbkTime->sfn;
5694 anInfo->subframe = fdbkTime->subframe;
5695 anInfo->latestMIdx = m;
5696 /* Fixing DAI value - ccpu00109162 */
5697 /* Handle TDD case as in MIMO definition of the function */
5703 anInfo->isSpsOccasion = FALSE;
5704 /* set the free Index to store Ack/Nack Information*/
5705 ue->cellInfo[servCellIdx]->nextFreeANIdx = (ue->cellInfo[servCellIdx]->nextFreeANIdx + 1) %
/* Existing feedback entry: bump the UL/DL DAI counters in place. */
5711 anInfo->latestMIdx = m;
5712 /* Fixing DAI value - ccpu00109162 */
5713 /* Handle TDD case as in MIMO definition of the function */
5714 anInfo->ulDai = anInfo->ulDai + 1;
5717 anInfo->dlDai = anInfo->dlDai + 1;
5721 /* ignoring the Scell check,
5722 * for primary cell this field is unused*/
5725 anInfo->n1ResTpcIdx = hqP->tpc;
5729 {/* As this not required for release pdcch */
5730 *ulDai = anInfo->ulDai;
5733 return (anInfo->dlDai);
5736 #endif /* ifdef LTE_TDD */
/* Debug counters indexed [redundancy version 0..3][transport block 0..1];
 * incremented in rgSCHCmnFillHqPTb on DTX retransmissions. */
5738 U32 rgHqRvRetxCnt[4][2];
5742 * @brief This function fills the HqP TB with rbAllocInfo.
5746 * Function: rgSCHCmnFillHqPTb
5747 * Purpose: This function fills in the HqP TB with rbAllocInfo.
* Handles three cases: disabled TB, retransmission, and
* first transmission; then queues the TB on the subframe.
5749 * Invoked by: rgSCHCmnFillHqPTb
5751 * @param[in] RgSchCellCb* cell
5752 * @param[in] RgSchDlRbAlloc *rbAllocInfo,
5753 * @param[in] U8 tbAllocIdx
5754 * @param[in] RgSchPdcch *pdcch
/* NOTE(review): two signature variants are visible (public and
 * PRIVATE); they are presumably selected by the LTEMAC_SPS
 * conditional whose #ifdef line was dropped from this excerpt. */
5760 Void rgSCHCmnFillHqPTb
5763 RgSchDlRbAlloc *rbAllocInfo,
5768 Void rgSCHCmnFillHqPTb(cell, rbAllocInfo, tbAllocIdx, pdcch)
5770 RgSchDlRbAlloc *rbAllocInfo;
5776 PRIVATE Void rgSCHCmnFillHqPTb
5779 RgSchDlRbAlloc *rbAllocInfo,
5784 PRIVATE Void rgSCHCmnFillHqPTb(cell, rbAllocInfo, tbAllocIdx, pdcch)
5786 RgSchDlRbAlloc *rbAllocInfo;
5790 #endif /* LTEMAC_SPS */
5792 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
5793 RgSchDlTbAllocInfo *tbAllocInfo = &rbAllocInfo->tbInfo[tbAllocIdx];
5794 RgSchDlHqTbCb *tbInfo = tbAllocInfo->tbCb;
5795 RgSchDlHqProcCb *hqP = tbInfo->hqP;
5798 /*ccpu00120365-ADD-if tb is disabled, set mcs=0,rv=1.
5799 * Relevant for DCI format 2 & 2A as per 36.213-7.1.7.2
5801 if ( tbAllocInfo->isDisabled)
5804 tbInfo->dlGrnt.iMcs = 0;
5805 tbInfo->dlGrnt.rv = 1;
5807 /* Fill for TB retransmission */
5808 else if (tbInfo->txCntr > 0)
5811 tbInfo->timingInfo = cmnCellDl->time;
/* On DTX keep the same iMcs and count the retransmission per RV/TB;
 * otherwise advance the RV through the cyclic RV table. */
5813 if ((tbInfo->isAckNackDtx == TFU_HQFDB_DTX))
5815 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5816 rgHqRvRetxCnt[tbInfo->dlGrnt.rv][tbInfo->tbIdx]++;
5820 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[++(tbInfo->ccchSchdInfo.rvIdx) & 0x03];
5823 /* fill the scheduler information of hqProc */
5824 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5825 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx,hqP->tbInfo,tbInfo->tbIdx );
5826 rgSCHDhmHqTbRetx(hqP->hqE, tbInfo->timingInfo, hqP, tbInfo->tbIdx);
5828 /* Fill for TB transmission */
5831 /* Fill the HqProc */
5832 tbInfo->dlGrnt.iMcs = tbAllocInfo->imcs;
5833 tbInfo->tbSz = tbAllocInfo->bytesAlloc;
5834 tbInfo->timingInfo = cmnCellDl->time;
5836 tbInfo->dlGrnt.rv = rgSchCmnDlRvTbl[0];
5837 /* fill the scheduler information of hqProc */
5838 tbInfo->ccchSchdInfo.rvIdx = 0;
5839 tbInfo->ccchSchdInfo.totBytes = tbAllocInfo->bytesAlloc;
5840 /* DwPts Scheduling Changes Start */
5841 /* DwPts Scheduling Changes End */
5842 cell->measurements.dlBytesCnt += tbAllocInfo->bytesAlloc;
5845 /*ccpu00120365:-ADD-only add to subFrm list if tb is not disabled */
5846 if ( tbAllocInfo->isDisabled == FALSE )
5848 /* Set the number of transmitting SM layers for this TB */
5849 tbInfo->numLyrs = tbAllocInfo->noLyr;
5850 /* Set the TB state as WAITING to indicate TB has been
5851 * considered for transmission */
5852 tbInfo->state = HQ_TB_WAITING;
5853 hqP->subFrm = rbAllocInfo->dlSf;
5854 tbInfo->hqP->pdcch = pdcch;
5855 //tbInfo->dlGrnt.numRb = rbAllocInfo->rbsAlloc;
5856 rgSCHUtlDlHqPTbAddToTx(hqP->subFrm, hqP, tbInfo->tbIdx);
5862 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
5866 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
5867 * Purpose: This function fills in the PDCCH information
5868 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5869 * for dedicated service scheduling. It also
5870 * obtains TPC to be filled in from the power module.
5871 * Assign the PDCCH to HQProc.
5873 * Invoked by: Downlink Scheduler
5875 * @param[in] RgSchCellCb* cell
5876 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5877 * @param[in] RgDlHqProc* hqP
5878 * @param[out] RgSchPdcch *pdcch
5884 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmtB1B2
5887 RgSchDlRbAlloc *rbAllocInfo,
5888 RgSchDlHqProcCb *hqP,
5893 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, pdcch, tpc)
5895 RgSchDlRbAlloc *rbAllocInfo;
5896 RgSchDlHqProcCb *hqP;
/* Fill the HARQ TB first, then populate the 5GTF B1/B2 DCI fields
 * from the grant; many fields remain hard-coded (TODO_SID notes). */
5903 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
5904 //Currently hardcoding values here.
5905 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
5906 switch(rbAllocInfo->dciFormat)
5908 case TFU_DCI_FORMAT_B1:
5910 pdcch->dci.u.formatB1Info.formatType = 0;
5911 pdcch->dci.u.formatB1Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5912 pdcch->dci.u.formatB1Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5913 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5914 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5915 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5916 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5917 pdcch->dci.u.formatB1Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5918 pdcch->dci.u.formatB1Info.CSI_BSI_BRI_Req = 0;
5919 pdcch->dci.u.formatB1Info.CSIRS_BRRS_TxTiming = 0;
5920 pdcch->dci.u.formatB1Info.CSIRS_BRRS_SymbIdx = 0;
5921 pdcch->dci.u.formatB1Info.CSIRS_BRRS_ProcInd = 0;
5922 pdcch->dci.u.formatB1Info.xPUCCH_TxTiming = 0;
5923 //TODO_SID: Need to update
5924 pdcch->dci.u.formatB1Info.freqResIdx_xPUCCH = 0;
5925 pdcch->dci.u.formatB1Info.beamSwitch = 0;
5926 pdcch->dci.u.formatB1Info.SRS_Config = 0;
5927 pdcch->dci.u.formatB1Info.SRS_Symbol = 0;
5928 //TODO_SID: Need to check.Currently setting 0(1 layer, ports(8) w/o OCC).
5929 pdcch->dci.u.formatB1Info.AntPorts_numLayers = 0;
5930 pdcch->dci.u.formatB1Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5931 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5932 pdcch->dci.u.formatB1Info.tpcCmd = 1; //tpc;
5933 pdcch->dci.u.formatB1Info.DL_PCRS = 0;
5936 case TFU_DCI_FORMAT_B2:
5938 pdcch->dci.u.formatB2Info.formatType = 1;
5939 pdcch->dci.u.formatB2Info.xPDSCHRange = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange;
5940 pdcch->dci.u.formatB2Info.RBAssign = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign;
5941 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.hqProcId = hqP->procId;
5942 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.mcs = rbAllocInfo->tbInfo[0].imcs;
5943 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
5944 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.RV = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
5945 pdcch->dci.u.formatB2Info.u.rbAssignB1Val324.bmiHqAckNack = 0;
5946 pdcch->dci.u.formatB2Info.CSI_BSI_BRI_Req = 0;
5947 pdcch->dci.u.formatB2Info.CSIRS_BRRS_TxTiming = 0;
5948 pdcch->dci.u.formatB2Info.CSIRS_BRRS_SymbIdx = 0;
5949 pdcch->dci.u.formatB2Info.CSIRS_BRRS_ProcInd = 0;
5950 pdcch->dci.u.formatB2Info.xPUCCH_TxTiming = 0;
5951 //TODO_SID: Need to update
5952 pdcch->dci.u.formatB2Info.freqResIdx_xPUCCH = 0;
5953 pdcch->dci.u.formatB2Info.beamSwitch = 0;
5954 pdcch->dci.u.formatB2Info.SRS_Config = 0;
5955 pdcch->dci.u.formatB2Info.SRS_Symbol = 0;
5956 //TODO_SID: Need to check.Currently setting 4(2 layer, ports(8,9) w/o OCC).
5957 pdcch->dci.u.formatB2Info.AntPorts_numLayers = 4;
5958 pdcch->dci.u.formatB2Info.SCID = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.SCID;
5959 //TODO_SID: Hardcoding TPC command to 1 i.e. No change
5960 pdcch->dci.u.formatB2Info.tpcCmd = 1; //tpc;
5961 pdcch->dci.u.formatB2Info.DL_PCRS = 0;
/* Fix(review): corrected typos in the error log text
 * ("icorrect" -> "incorrect", "dciForamt" -> "dciFormat"). */
5965 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId," 5GTF_ERROR Allocator's incorrect "
5966 "dciFormat Fill RNTI:%d",rbAllocInfo->rnti);
/* Debug/diagnostic throughput counters (bits) defined in another
 * translation unit; accumulated in rgSCHCmnFillHqPPdcch below. */
5973 extern U32 totPcellSCell;
5974 extern U32 addedForScell;
5975 extern U32 addedForScell1;
5976 extern U32 addedForScell2;
5978 * @brief This function fills the PDCCH information from dlProc.
5982 * Function: rgSCHCmnFillHqPPdcch
5983 * Purpose: This function fills in the PDCCH information
5984 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
5985 * for dedicated service scheduling. It also
5986 * obtains TPC to be filled in from the power module.
5987 * Assign the PDCCH to HQProc.
5989 * Invoked by: Downlink Scheduler
5991 * @param[in] RgSchCellCb* cell
5992 * @param[in] RgSchDlRbAlloc* rbAllocInfo
5993 * @param[in] RgDlHqProc* hqP
5998 Void rgSCHCmnFillHqPPdcch
6001 RgSchDlRbAlloc *rbAllocInfo,
6002 RgSchDlHqProcCb *hqP
6005 Void rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP)
6007 RgSchDlRbAlloc *rbAllocInfo;
6008 RgSchDlHqProcCb *hqP;
6011 RgSchCmnDlCell *cmnCell = RG_SCH_CMN_GET_DL_CELL(cell);
6012 RgSchPdcch *pdcch = rbAllocInfo->pdcch;
6019 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6026 tpc = rgSCHPwrPucchTpcForUe(cell, hqP->hqE->ue);
6028 /* Fix: syed moving this to a common function for both scheduled
6029 * and non-scheduled UEs */
6031 pdcch->ue = hqP->hqE->ue;
/* PRB accounting: non-CSG members are tracked separately from the
 * cell-wide total. */
6032 if (hqP->hqE->ue->csgMmbrSta == FALSE)
6034 cmnCell->ncsgPrbCnt += rbAllocInfo->rbsAlloc;
6036 cmnCell->totPrbCnt += rbAllocInfo->rbsAlloc;
6039 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlPrbUsg +=
6040 rbAllocInfo->rbsAlloc;
6041 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw0iTbs +=
6042 rbAllocInfo->tbInfo[0].iTbs;
6043 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw0iTbs ++;
6044 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
6045 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6048 totPcellSCell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6049 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6051 addedForScell += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
6052 addedForScell1 += (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
/* NOTE(review): %lu with U32 counters is a format mismatch on LP64
 * platforms (debug printf only) — confirm U32's underlying type. */
6054 printf (" Hqp %d cell %d addedForScell %lu addedForScell1 %lu sfn:sf %d:%d \n",
6056 hqP->hqE->cell->cellId,
6060 cell->crntTime.slot);
6064 hqP->hqE->cell->tenbStats->sch.dlPrbUsage[0] +=
6065 rbAllocInfo->rbsAlloc;
6066 hqP->hqE->cell->tenbStats->sch.dlSumCw0iTbs +=
6067 rbAllocInfo->tbInfo[0].iTbs;
6068 hqP->hqE->cell->tenbStats->sch.dlNumCw0iTbs ++;
6069 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
6070 (rbAllocInfo->tbInfo[0].bytesAlloc << 3);
/* Second codeword (TB1) statistics, only when TB1 was scheduled. */
6071 if (rbAllocInfo->tbInfo[1].schdlngForTb)
6073 hqP->hqE->cell->tenbStats->sch.dlSumCw1iTbs +=
6074 rbAllocInfo->tbInfo[1].iTbs;
6075 hqP->hqE->cell->tenbStats->sch.dlNumCw1iTbs ++;
6076 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlSumCw1iTbs +=
6077 rbAllocInfo->tbInfo[1].iTbs;
6078 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlNumCw1iTbs ++;
6079 hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt +=
6080 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6084 if(RG_SCH_IS_CELL_SEC(hqP->hqE->ue, cell))
6086 addedForScell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6087 addedForScell2 += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6089 printf (" Hqp %d cell %d addedForScell %lu addedForScell2 %lu \n",
6091 hqP->hqE->cell->cellId,
6096 totPcellSCell += (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6100 hqP->hqE->cell->tenbStats->sch.dlTtlTpt +=
6101 (rbAllocInfo->tbInfo[1].bytesAlloc << 3);
6104 printf ("add DL TPT is %lu sfn:sf %d:%d \n", hqP->hqE->ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(hqP->hqE->cell)].dlTpt ,
6106 cell->crntTime.slot);
6112 pdcch->rnti = rbAllocInfo->rnti;
6113 pdcch->dci.dciFormat = rbAllocInfo->dciFormat;
6114 /* Update subframe and pdcch info in HqTb control block */
6115 switch(rbAllocInfo->dciFormat)
6118 case TFU_DCI_FORMAT_B1:
6119 case TFU_DCI_FORMAT_B2:
6121 // printf(" RG_5GTF:: Pdcch filling with DCI format B1/B2\n");
6122 rgSCHCmnFillHqPPdcchDciFrmtB1B2(cell, rbAllocInfo, hqP, \
/* Fix(review): corrected typo "dciForamt" -> "dciFormat" in the
 * error log text. */
6128 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6129 "Allocator's incorrect dciFormat Fill for RNTI:%d",rbAllocInfo->rnti);
6136 * @brief This function fills the PDCCH DCI format 1 information from dlProc.
6140 * Function: rgSCHCmnFillHqPPdcchDciFrmt1
6141 * Purpose: This function fills in the PDCCH information
6142 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6143 * for dedicated service scheduling. It also
6144 * obtains TPC to be filled in from the power module.
6145 * Assign the PDCCH to HQProc.
6147 * Invoked by: Downlink Scheduler
6149 * @param[in] RgSchCellCb* cell
6150 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6151 * @param[in] RgDlHqProc* hqP
6152 * @param[out] RgSchPdcch *pdcch
6159 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1
6162 RgSchDlRbAlloc *rbAllocInfo,
6163 RgSchDlHqProcCb *hqP,
6168 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1(cell, rbAllocInfo, hqP, pdcch, tpc)
6170 RgSchDlRbAlloc *rbAllocInfo;
6171 RgSchDlHqProcCb *hqP;
6178 RgSchTddANInfo *anInfo;
6182 /* For activation or reactivation,
6183 * Harq ProcId should be 0 */
6184 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6188 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6189 pdcch->dci.u.format1Info.tpcCmd = tpc;
6190 /* Avoiding this check,as we dont support Type1 RA */
6192 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Unpack the 32-bit type-0 allocation bitmask into the 4-byte
 * resource-allocation map, most significant byte first. */
6195 pdcch->dci.u.format1Info.allocInfo.isAllocType0 = TRUE;
6196 pdcch->dci.u.format1Info.allocInfo.resAllocMap[0] =
6197 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6199 pdcch->dci.u.format1Info.allocInfo.resAllocMap[1] =
6200 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6202 pdcch->dci.u.format1Info.allocInfo.resAllocMap[2] =
6203 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6205 pdcch->dci.u.format1Info.allocInfo.resAllocMap[3] =
6206 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* First transmission of an SPS activation/reactivation grant must
 * signal HARQ process 0; otherwise the real process id is used. */
6211 if ((!(hqP->tbInfo[0].txCntr)) &&
6212 (cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6213 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6214 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV)))
6217 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
6221 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
6224 pdcch->dci.u.format1Info.allocInfo.harqProcId = hqP->procId;
6227 pdcch->dci.u.format1Info.allocInfo.ndi =
6228 rbAllocInfo->tbInfo[0].tbCb->ndi;
6229 pdcch->dci.u.format1Info.allocInfo.mcs =
6230 rbAllocInfo->tbInfo[0].imcs;
6231 pdcch->dci.u.format1Info.allocInfo.rv =
6232 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
/* TDD DAI derivation from the UE's ACK/NACK feedback record; falls
 * back to RG_SCH_MAX_DAI_IDX when no record exists. */
6234 if(hqP->hqE->ue != NULLP)
6237 U8 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6238 hqP->hqE->cell->cellId,
6241 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6242 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6244 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6245 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6250 pdcch->dci.u.format1Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6254 /* Fixing DAI value - ccpu00109162 */
6255 pdcch->dci.u.format1Info.dai = RG_SCH_MAX_DAI_IDX;
6261 /* always 0 for RACH */
6262 pdcch->dci.u.format1Info.allocInfo.harqProcId = 0;
6264 /* Fixing DAI value - ccpu00109162 */
6265 pdcch->dci.u.format1Info.dai = 1;
6274 * @brief This function fills the PDCCH DCI format 1A information from dlProc.
6278 * Function: rgSCHCmnFillHqPPdcchDciFrmt1A
6279 * Purpose: This function fills in the PDCCH information
6280 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6281 * for dedicated service scheduling. It also
6282 * obtains TPC to be filled in from the power module.
6283 * Assign the PDCCH to HQProc.
6285 * Invoked by: Downlink Scheduler
6287 * @param[in] RgSchCellCb* cell
6288 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6289 * @param[in] RgDlHqProc* hqP
6290 * @param[out] RgSchPdcch *pdcch
6296 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1A
6299 RgSchDlRbAlloc *rbAllocInfo,
6300 RgSchDlHqProcCb *hqP,
6305 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1A(cell, rbAllocInfo, hqP, pdcch, tpc)
6307 RgSchDlRbAlloc *rbAllocInfo;
6308 RgSchDlHqProcCb *hqP;
6315 RgSchTddANInfo *anInfo;
6319 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6323 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6324 pdcch->dci.u.format1aInfo.isPdcchOrder = FALSE;
6325 pdcch->dci.u.format1aInfo.t.pdschInfo.tpcCmd = tpc;
6326 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.mcs = \
6327 rbAllocInfo->tbInfo[0].imcs;
6328 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres = TRUE;
/* First transmission of an SPS activation/reactivation grant must
 * signal HARQ process 0; otherwise the real process id is used. */
6330 if ((!(hqP->tbInfo[0].txCntr)) &&
6331 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6332 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6333 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6336 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val = 0;
6340 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val
6344 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.val =
6347 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.ndi = \
6348 rbAllocInfo->tbInfo[0].tbCb->ndi;
6349 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv = \
6350 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6351 /* As of now, we do not support Distributed allocations */
6352 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.isLocal = TRUE;
6353 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.nGap2.pres = NOTPRSNT;
6354 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.type =
6356 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.alloc.u.riv =
6357 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
6358 rbAllocInfo->allocInfo.raType2.rbStart,
6359 rbAllocInfo->allocInfo.raType2.numRb);
/* TDD DAI derivation from the UE's ACK/NACK feedback record; falls
 * back to RG_SCH_MAX_DAI_IDX (with an error log) when absent. */
6361 if(hqP->hqE->ue != NULLP)
6364 U8 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6365 hqP->hqE->cell->cellId,
6367 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6368 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6370 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6371 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6374 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
6377 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val =
6378 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6382 /* Fixing DAI value - ccpu00109162 */
6383 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = RG_SCH_MAX_DAI_IDX;
/* Fix(review): corrected log grammar "is been" -> "has been". */
6384 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6385 "PDCCH has been scheduled without updating anInfo RNTI:%d",
6392 /* always 0 for RACH */
6393 pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.harqProcId.pres
6396 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.pres = TRUE;
6397 /* Fixing DAI value - ccpu00109162 */
6398 pdcch->dci.u.format1aInfo.t.pdschInfo.dai.val = 1;
6406 * @brief This function fills the PDCCH DCI format 1B information from dlProc.
6410 * Function: rgSCHCmnFillHqPPdcchDciFrmt1B
6411 * Purpose: This function fills in the PDCCH information
6412 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6413 * for dedicated service scheduling. It also
6414 * obtains TPC to be filled in from the power module.
6415 * Assign the PDCCH to HQProc.
6417 * Invoked by: Downlink Scheduler
6419 * @param[in] RgSchCellCb* cell
6420 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6421 * @param[in] RgDlHqProc* hqP
6422 * @param[out] RgSchPdcch *pdcch
6428 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1B
6431 RgSchDlRbAlloc *rbAllocInfo,
6432 RgSchDlHqProcCb *hqP,
6437 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt1B(cell, rbAllocInfo, hqP, pdcch, tpc)
6439 RgSchDlRbAlloc *rbAllocInfo;
6440 RgSchDlHqProcCb *hqP;
6447 RgSchTddANInfo *anInfo;
6451 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
6455 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6456 pdcch->dci.u.format1bInfo.tpcCmd = tpc;
6457 pdcch->dci.u.format1bInfo.allocInfo.mcs = \
6458 rbAllocInfo->tbInfo[0].imcs;
/* First transmission of an SPS activation/reactivation grant must
 * signal HARQ process 0; otherwise the real process id is used. */
6460 if ((!(hqP->tbInfo[0].txCntr)) &&
6461 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6462 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6463 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6466 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = 0;
6470 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
6473 pdcch->dci.u.format1bInfo.allocInfo.harqProcId = hqP->procId;
6475 pdcch->dci.u.format1bInfo.allocInfo.ndi = \
6476 rbAllocInfo->tbInfo[0].tbCb->ndi;
6477 pdcch->dci.u.format1bInfo.allocInfo.rv = \
6478 rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6479 /* As of now, we do not support Distributed allocations */
6480 pdcch->dci.u.format1bInfo.allocInfo.isLocal = TRUE;
6481 pdcch->dci.u.format1bInfo.allocInfo.nGap2.pres = NOTPRSNT;
6482 pdcch->dci.u.format1bInfo.allocInfo.alloc.type =
6484 pdcch->dci.u.format1bInfo.allocInfo.alloc.u.riv =
6485 rgSCHCmnCalcRiv (cell->bwCfg.dlTotalBw,
6486 rbAllocInfo->allocInfo.raType2.rbStart,
6487 rbAllocInfo->allocInfo.raType2.numRb);
6488 /* Fill precoding Info */
6489 pdcch->dci.u.format1bInfo.allocInfo.pmiCfm = \
6490 rbAllocInfo->mimoAllocInfo.precIdxInfo >> 4;
6491 pdcch->dci.u.format1bInfo.allocInfo.tPmi = \
6492 rbAllocInfo->mimoAllocInfo.precIdxInfo & 0x0F;
/* TDD DAI derivation from the UE's ACK/NACK feedback record; falls
 * back to RG_SCH_MAX_DAI_IDX (with an error log) when absent. */
6494 if(hqP->hqE->ue != NULLP)
6497 U8 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6498 hqP->hqE->cell->cellId,
6500 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6501 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6503 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6504 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6509 pdcch->dci.u.format1bInfo.dai =
6510 RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6514 pdcch->dci.u.format1bInfo.dai = RG_SCH_MAX_DAI_IDX;
/* Fix(review): corrected log grammar "is been" -> "has been". */
6515 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6516 "PDCCH has been scheduled without updating anInfo RNTI:%d",
6527 * @brief This function fills the PDCCH DCI format 2 information from dlProc.
6531 * Function: rgSCHCmnFillHqPPdcchDciFrmt2
6532 * Purpose: This function fills in the PDCCH information
6533 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6534 * for dedicated service scheduling. It also
6535 * obtains TPC to be filled in from the power module.
6536 * Assign the PDCCH to HQProc.
6538 * Invoked by: Downlink Scheduler
6540 * @param[in] RgSchCellCb* cell
6541 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6542 * @param[in] RgDlHqProc* hqP
6543 * @param[out] RgSchPdcch *pdcch
/*
 * rgSCHCmnFillHqPPdcchDciFrmt2: fills the DCI format 2 payload of a PDCCH
 * from the DL HARQ process (hqP) and RB allocation (rbAllocInfo): TPC,
 * RA-type-0 bitmap, HARQ process id, per-TB NDI/MCS/RV, TB swap flag,
 * precoding info and (TDD) the DAI.
 * NOTE(review): this extract is partial -- #ifdef(ANSI)/K&R scaffolding,
 * braces and some lines are missing; code is left byte-identical.
 */
6549 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2
6552 RgSchDlRbAlloc *rbAllocInfo,
6553 RgSchDlHqProcCb *hqP,
6558 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2(cell, rbAllocInfo, hqP, pdcch, tpc)
6560 RgSchDlRbAlloc *rbAllocInfo;
6561 RgSchDlHqProcCb *hqP;
6568 RgSchTddANInfo *anInfo;
6572 /* ccpu00119023-ADD-For activation or reactivation,
6573 * Harq ProcId should be 0 */
6574 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Fill TB0 always; TB1 only when it is scheduled or explicitly disabled. */
6578 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6579 /*ccpu00120365:-ADD-call also if tb is disabled */
6580 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6581 rbAllocInfo->tbInfo[1].isDisabled)
6583 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6585 pdcch->dci.u.format2Info.tpcCmd = tpc;
6586 /* Avoiding this check,as we dont support Type1 RA */
6588 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* RA type 0: serialize the 32-bit allocation bitmask MSB-first into the
 * 4-byte resource-allocation map. */
6591 pdcch->dci.u.format2Info.allocInfo.isAllocType0 = TRUE;
6592 pdcch->dci.u.format2Info.allocInfo.resAllocMap[0] =
6593 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6595 pdcch->dci.u.format2Info.allocInfo.resAllocMap[1] =
6596 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6598 pdcch->dci.u.format2Info.allocInfo.resAllocMap[2] =
6599 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6601 pdcch->dci.u.format2Info.allocInfo.resAllocMap[3] =
6602 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* First Tx of an SPS activation/reactivation uses HARQ proc id 0;
 * otherwise the process's own id is signalled. */
6607 if ((!(hqP->tbInfo[0].txCntr)) &&
6608 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6609 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6610 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6613 pdcch->dci.u.format2Info.allocInfo.harqProcId = 0;
6617 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6620 pdcch->dci.u.format2Info.allocInfo.harqProcId = hqP->procId;
6622 /* Initialize the TB info for both the TBs */
/* mcs=0 / rv=1 defaults flag a TB as "disabled" until overwritten below. */
6623 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].mcs = 0;
6624 pdcch->dci.u.format2Info.allocInfo.tbInfo[0].rv = 1;
6625 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].mcs = 0;
6626 pdcch->dci.u.format2Info.allocInfo.tbInfo[1].rv = 1;
6627 /* Fill tbInfo for scheduled TBs */
6628 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6629 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6630 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6631 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6632 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6633 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6634 /* If we reach this function. It is safely assumed that
6635 * rbAllocInfo->tbInfo[0] always has non default valid values.
6636 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6637 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6639 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6640 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6641 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6642 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6643 pdcch->dci.u.format2Info.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6644 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6646 pdcch->dci.u.format2Info.allocInfo.transSwap =
6647 rbAllocInfo->mimoAllocInfo.swpFlg;
6648 pdcch->dci.u.format2Info.allocInfo.precoding =
6649 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD only (guards stripped in this extract): look up the ACK/NACK
 * feedback record to derive the DAI; the second lookup variant
 * (serving-cell index 0) is presumably the non-CA build -- TODO confirm. */
6651 if(hqP->hqE->ue != NULLP)
6655 U8 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6656 hqP->hqE->cell->cellId,
6658 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6659 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6661 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6662 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6667 pdcch->dci.u.format2Info.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
/* No feedback info found: signal the max DAI index and log the anomaly. */
6671 pdcch->dci.u.format2Info.dai = RG_SCH_MAX_DAI_IDX;
6672 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6673 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6683 * @brief This function fills the PDCCH DCI format 2A information from dlProc.
6687 * Function: rgSCHCmnFillHqPPdcchDciFrmt2A
6688 * Purpose: This function fills in the PDCCH information
6689 * obtained from the RgSchDlHqProcCb and RgSchDlRbAlloc
6690 * for dedicated service scheduling. It also
6691 * obtains TPC to be filled in from the power module.
6692 * Assign the PDCCH to HQProc.
6694 * Invoked by: Downlink Scheduler
6696 * @param[in] RgSchCellCb* cell
6697 * @param[in] RgSchDlRbAlloc* rbAllocInfo
6698 * @param[in] RgDlHqProc* hqP
6699 * @param[out] RgSchPdcch *pdcch
/*
 * rgSCHCmnFillHqPPdcchDciFrmt2A: identical flow to the format 2 filler but
 * targets the format2AInfo union member (open-loop spatial multiplexing).
 * NOTE(review): partial extract -- #ifdef/brace scaffolding missing; code
 * left byte-identical.
 */
6705 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2A
6708 RgSchDlRbAlloc *rbAllocInfo,
6709 RgSchDlHqProcCb *hqP,
6714 PRIVATE Void rgSCHCmnFillHqPPdcchDciFrmt2A(cell, rbAllocInfo, hqP, pdcch, tpc)
6716 RgSchDlRbAlloc *rbAllocInfo;
6717 RgSchDlHqProcCb *hqP;
6723 RgSchTddANInfo *anInfo;
6727 RgSchCmnDlHqProc *cmnHqDl = RG_SCH_CMN_GET_DL_HQP(hqP);
/* Fill TB0 always; TB1 only when scheduled or explicitly disabled. */
6731 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 0, pdcch);
6732 /*ccpu00120365:-ADD-call also if tb is disabled */
6733 if (rbAllocInfo->tbInfo[1].schdlngForTb ||
6734 rbAllocInfo->tbInfo[1].isDisabled)
6737 rgSCHCmnFillHqPTb(cell, rbAllocInfo, 1, pdcch);
6740 pdcch->dci.u.format2AInfo.tpcCmd = tpc;
6741 /* Avoiding this check,as we dont support Type1 RA */
6743 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* RA type 0: 32-bit allocation bitmask serialized MSB-first. */
6746 pdcch->dci.u.format2AInfo.allocInfo.isAllocType0 = TRUE;
6747 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[0] =
6748 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 24)
6750 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[1] =
6751 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 16)
6753 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[2] =
6754 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask >> 8)
6756 pdcch->dci.u.format2AInfo.allocInfo.resAllocMap[3] =
6757 ((rbAllocInfo->allocInfo.raType0.dlAllocBitMask & 0x000000ff));
/* SPS activation/reactivation on first Tx uses HARQ proc id 0. */
6762 if ((!(hqP->tbInfo[0].txCntr)) &&
6763 ( cmnHqDl != (RgSchCmnDlHqProc*)NULLP &&
6764 ((cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_ACTV) ||
6765 (cmnHqDl->spsAction & RG_SCH_CMN_SPS_DL_REACTV))
6768 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = 0;
6772 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6775 pdcch->dci.u.format2AInfo.allocInfo.harqProcId = hqP->procId;
6777 /* Initialize the TB info for both the TBs */
/* mcs=0 / rv=1 are the "TB disabled" defaults until overwritten. */
6778 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].mcs = 0;
6779 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[0].rv = 1;
6780 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].mcs = 0;
6781 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[1].rv = 1;
6782 /* Fill tbInfo for scheduled TBs */
6783 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6784 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[0].tbCb->ndi;
6785 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6786 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[0].imcs;
6787 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[0].\
6788 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[0].tbCb->dlGrnt.rv;
6789 /* If we reach this function. It is safely assumed that
6790 * rbAllocInfo->tbInfo[0] always has non default valid values.
6791 * rbAllocInfo->tbInfo[1]'s scheduling is optional */
6793 if (rbAllocInfo->tbInfo[1].schdlngForTb == TRUE)
6795 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6796 tbCb->tbIdx].ndi = rbAllocInfo->tbInfo[1].tbCb->ndi;
6797 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6798 tbCb->tbIdx].mcs = rbAllocInfo->tbInfo[1].imcs;
6799 pdcch->dci.u.format2AInfo.allocInfo.tbInfo[rbAllocInfo->tbInfo[1].\
6800 tbCb->tbIdx].rv = rbAllocInfo->tbInfo[1].tbCb->dlGrnt.rv;
6803 pdcch->dci.u.format2AInfo.allocInfo.transSwap =
6804 rbAllocInfo->mimoAllocInfo.swpFlg;
6805 pdcch->dci.u.format2AInfo.allocInfo.precoding =
6806 rbAllocInfo->mimoAllocInfo.precIdxInfo;
/* TDD only: derive DAI from the UE's ACK/NACK feedback record; fall back
 * to the max DAI index and log if the record was never updated. */
6808 if(hqP->hqE->ue != NULLP)
6811 U8 servCellIdx = rgSchUtlGetServCellIdx(hqP->hqE->cell->instIdx,
6812 hqP->hqE->cell->cellId,
6814 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6815 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),servCellIdx);
6817 anInfo = rgSCHUtlGetUeANFdbkInfo(hqP->hqE->ue,
6818 &(rbAllocInfo->tbInfo[0].tbCb->fdbkTime),0);
6823 pdcch->dci.u.format2AInfo.dai = RG_SCH_GET_DAI_VALUE(anInfo->dlDai);
6827 pdcch->dci.u.format2AInfo.dai = RG_SCH_MAX_DAI_IDX;
6828 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
6829 "PDCCH is been scheduled without updating anInfo RNTI:%d",
6841 * @brief init of Sch vars.
6845 * Function: rgSCHCmnInitVars
6846 Purpose: Initialization of various UL subframe indices
6848 * @param[in] RgSchCellCb *cell
/*
 * rgSCHCmnInitVars: one-time initialization of the common UL scheduler's
 * per-cell subframe indices; every index starts as RGSCH_INVALID_INFO and
 * is computed per TTI by rgSCHCmnUpdVars().
 */
6853 PRIVATE Void rgSCHCmnInitVars
6858 PRIVATE Void rgSCHCmnInitVars(cell)
6862 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6865 cellUl->idx = RGSCH_INVALID_INFO;
6866 cellUl->schdIdx = RGSCH_INVALID_INFO;
6867 cellUl->schdHqProcIdx = RGSCH_INVALID_INFO;
6868 cellUl->msg3SchdIdx = RGSCH_INVALID_INFO;
/* emtcMsg3SchdIdx is presumably guarded by an EMTC build flag stripped
 * from this extract -- TODO confirm. */
6870 cellUl->emtcMsg3SchdIdx = RGSCH_INVALID_INFO;
6872 cellUl->msg3SchdHqProcIdx = RGSCH_INVALID_INFO;
6873 cellUl->rcpReqIdx = RGSCH_INVALID_INFO;
6874 cellUl->hqFdbkIdx[0] = RGSCH_INVALID_INFO;
6875 cellUl->hqFdbkIdx[1] = RGSCH_INVALID_INFO;
6876 cellUl->reTxIdx[0] = RGSCH_INVALID_INFO;
6877 cellUl->reTxIdx[1] = RGSCH_INVALID_INFO;
6878 /* Stack Crash problem for TRACE5 Changes. Added the return below */
6885 * @brief Updation of Sch vars per TTI.
6889 * Function: rgSCHCmnUpdVars
6890 * Purpose: Updation of Sch vars per TTI.
6892 * @param[in] RgSchCellCb *cell
/*
 * rgSCHCmnUpdVars: recomputes, every TTI, the rolling UL subframe indices
 * (current, scheduling, msg3, reception-request, HARQ-feedback, retx) and
 * the associated UL HARQ process ids from the cell's current time.
 * NOTE(review): the printf() calls below are per-TTI debug leftovers and
 * should be removed or guarded by a debug macro.
 */
6897 Void rgSCHCmnUpdVars
6902 Void rgSCHCmnUpdVars(cell)
6906 CmLteTimingInfo timeInfo;
6907 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Absolute subframe number -> rolling index into the UL subframe ring. */
6911 idx = (cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot);
6912 cellUl->idx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6914 printf("idx %d cellUl->idx %d RGSCH_NUM_SUB_FRAMES_5G %d time(%d %d) \n",idx,cellUl->idx ,RGSCH_NUM_SUB_FRAMES_5G,cell->crntTime.sfn,cell->crntTime.slot);
6916 /* Need to scheduler for after SCHED_DELTA */
6917 /* UL allocation has been advanced by 1 subframe
6918 * so that we do not wrap around and send feedback
6919 * before the data is even received by the PHY */
6920 /* Introduced timing delta for UL control */
6921 idx = (cellUl->idx + TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA);
6922 cellUl->schdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6924 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6925 TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA)
6926 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6928 /* ccpu00127193 filling schdTime for logging and enhancement purpose*/
6929 cellUl->schdTime = timeInfo;
6931 /* msg3 scheduling two subframes after general scheduling */
6932 idx = (cellUl->idx + RG_SCH_CMN_DL_DELTA + RGSCH_RARSP_MSG3_DELTA);
6933 cellUl->msg3SchdIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6935 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,
6936 RG_SCH_CMN_DL_DELTA+ RGSCH_RARSP_MSG3_DELTA)
6937 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
6939 idx = (cellUl->idx + TFU_RECPREQ_DLDELTA);
6941 cellUl->rcpReqIdx = ((idx) % (RG_SCH_CMN_UL_NUM_SF));
6943 /* Downlink harq feedback is sometime after data reception / harq failure */
6944 /* Since feedback happens prior to scheduling being called, we add 1 to */
6945 /* take care of getting the correct subframe for feedback */
/* Subtract the CRC-indication delta; +RG_SCH_CMN_UL_NUM_SF keeps the
 * intermediate value non-negative before the modulo. */
6946 idx = (cellUl->idx - TFU_CRCIND_ULDELTA + RG_SCH_CMN_UL_NUM_SF);
6948 printf("Finally setting cellUl->hqFdbkIdx[0] = %d TFU_CRCIND_ULDELTA %d RG_SCH_CMN_UL_NUM_SF %d\n",idx,TFU_CRCIND_ULDELTA,RG_SCH_CMN_UL_NUM_SF);
6950 cellUl->hqFdbkIdx[0] = (idx % (RG_SCH_CMN_UL_NUM_SF));
6952 idx = ((cellUl->schdIdx) % (RG_SCH_CMN_UL_NUM_SF));
6954 cellUl->reTxIdx[0] = (U8) idx;
6956 printf("cellUl->hqFdbkIdx[0] %d cellUl->reTxIdx[0] %d \n",cellUl->hqFdbkIdx[0], cellUl->reTxIdx[0] );
6958 /* RACHO: update cmn sched specific RACH variables,
6959 * mainly the prachMaskIndex */
6960 rgSCHCmnUpdRachParam(cell);
6969 * @brief To get uplink subframe index associated with current PHICH
6974 * Function: rgSCHCmnGetPhichUlSfIdx
6975 * Purpose: Gets uplink subframe index associated with current PHICH
6976 * transmission based on SFN and subframe no
6978 * @param[in] CmLteTimingInfo *timeInfo
6979 * @param[in] RgSchCellCb *cell
/*
 * rgSCHCmnGetPhichUlSfIdx (TDD): maps the current PHICH transmission time
 * back to the UL subframe index it acknowledges, using the per-DL-subframe
 * PHICH offset info. Returns RGSCH_INVALID_INFO when no offset is recorded.
 * NOTE(review): declarations of dlsf/subframe/sfn/numUlSf/idx were dropped
 * from this extract; code left byte-identical.
 */
6984 U8 rgSCHCmnGetPhichUlSfIdx
6986 CmLteTimingInfo *timeInfo,
6990 U8 rgSCHCmnGetPhichUlSfIdx(timeInfo, cell)
6991 CmLteTimingInfo *timeInfo;
6995 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
6997 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
7004 dlsf = rgSCHUtlSubFrmGet(cell, *timeInfo);
7006 if(dlsf->phichOffInfo.sfnOffset == RGSCH_INVALID_INFO)
7008 return (RGSCH_INVALID_INFO);
7010 subframe = dlsf->phichOffInfo.subframe;
/* +RGSCH_MAX_SFN before the modulo avoids a negative SFN on wrap-around. */
7012 sfn = (RGSCH_MAX_SFN + timeInfo->sfn -
7013 dlsf->phichOffInfo.sfnOffset) % RGSCH_MAX_SFN;
7015 /* ccpu00130980: numUlSf(U16) parameter added to avoid integer
7016 * wrap case such that idx will be proper*/
7017 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7018 numUlSf = ((numUlSf * sfn) + rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subframe]) - 1;
7019 idx = numUlSf % (cellUl->numUlSubfrms);
7025 * @brief To get uplink subframe index.
7030 * Function: rgSCHCmnGetUlSfIdx
7031 * Purpose: Gets uplink subframe index based on SFN and subframe number.
7033 * @param[in] CmLteTimingInfo *timeInfo
7034 * @param[in] U8 ulDlCfgIdx
7039 U8 rgSCHCmnGetUlSfIdx
7041 CmLteTimingInfo *timeInfo,
7045 U8 rgSCHCmnGetUlSfIdx(timeInfo, cell)
7046 CmLteTimingInfo *timeInfo;
7050 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
7051 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
7056 /* ccpu00130980: numUlSf(U16) parameter added to avoid integer
7057 * wrap case such that idx will be proper*/
7058 numUlSf = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7059 numUlSf = ((numUlSf * timeInfo->sfn) + \
7060 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->subframe]) - 1;
7061 idx = numUlSf % (cellUl->numUlSubfrms);
7069 * @brief To get uplink hq index.
7074 * Function: rgSCHCmnGetUlHqProcIdx
7075 * Purpose: Gets uplink subframe index based on SFN and subframe number.
7077 * @param[in] CmLteTimingInfo *timeInfo
7078 * @param[in] U8 ulDlCfgIdx
/*
 * rgSCHCmnGetUlHqProcIdx: derives the UL HARQ process id for a given time.
 * NOTE(review): this extract interleaves two build variants (a 5G/FDD path
 * using RGSCH_NUM_SUB_FRAMES_5G and a TDD path using the SFN-cycle logic);
 * the #ifdef guards separating them were stripped. Code left byte-identical.
 */
7083 U8 rgSCHCmnGetUlHqProcIdx
7085 CmLteTimingInfo *timeInfo,
7089 U8 rgSCHCmnGetUlHqProcIdx(timeInfo, cell)
7090 CmLteTimingInfo *timeInfo;
/* Variant 1: absolute subframe number modulo the number of UL HARQ procs. */
7098 numUlSf = (timeInfo->sfn * RGSCH_NUM_SUB_FRAMES_5G + timeInfo->slot);
7099 procId = numUlSf % RGSCH_NUM_UL_HQ_PROC;
/* Variant 2 (TDD): account for the SFN cycle so the proc id stays aligned
 * across 1024-frame wrap-arounds. */
7101 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
7102 /*ccpu00130639 - MOD - To get correct UL HARQ Proc IDs for all UL/DL Configs*/
7104 S8 sfnCycle = cell->tddHqSfnCycle;
7105 U8 numUlHarq = rgSchTddUlNumHarqProcTbl[ulDlCfgIdx]
7107 /* TRACE 5 Changes */
7109 /* Calculate the number of UL SF in one SFN */
7110 numUlSfInSfn = RGSCH_NUM_SUB_FRAMES -
7111 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
7113 /* Check for the SFN wrap around case */
7114 if(cell->crntTime.sfn == 1023 && timeInfo->sfn == 0)
7118 else if(cell->crntTime.sfn == 0 && timeInfo->sfn == 1023)
7120 /* sfnCycle decremented by 1 */
/* +numUlHarq-1 implements a modular decrement without going negative. */
7121 sfnCycle = (sfnCycle + numUlHarq-1) % numUlHarq;
7123 /* Calculate the total number of UL sf */
7124 /* -1 is done since uplink sf are counted from 0 */
7125 numUlSf = numUlSfInSfn * (timeInfo->sfn + (sfnCycle*1024)) +
7126 rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][timeInfo->slot] - 1;
7128 procId = numUlSf % numUlHarq;
7134 /* UL_ALLOC_CHANGES */
7135 /***********************************************************
7137 * Func : rgSCHCmnUlFreeAlloc
7139 * Desc : Free an allocation - invokes UHM and releases
7140 * alloc for the scheduler
7141 * Doesn't need the subframe as an argument
7149 **********************************************************/
/*
 * rgSCHCmnUlFreeAlloc: releases a UL allocation and frees its HARQ process.
 * Special case: when MSG3 has exhausted its retransmissions without a CRC
 * pass (non-handover RACH), the whole RA control block (and its RNTI) is
 * torn down as well.
 */
7151 Void rgSCHCmnUlFreeAlloc
7157 Void rgSCHCmnUlFreeAlloc(cell, alloc)
7159 RgSchUlAlloc *alloc;
7162 RgSchUlHqProcCb *hqProc;
7166 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
7167 if ((alloc->hqProc->remTx == 0) &&
7168 (alloc->hqProc->rcvdCrcInd == FALSE) &&
/* MSG3 failed for good: free proc, release alloc, then delete the RaCb
 * (raCb cached first because the release invalidates alloc). */
7171 RgSchRaCb *raCb = alloc->raCb;
7172 rgSCHUhmFreeProc(alloc->hqProc, cell);
7173 rgSCHUtlUlAllocRelease(alloc);
7174 rgSCHRamDelRaCb(cell, raCb, TRUE);
/* Normal path: cache hqProc before the alloc is released. */
7179 hqProc = alloc->hqProc;
7180 rgSCHUtlUlAllocRelease(alloc);
7181 rgSCHUhmFreeProc(hqProc, cell);
7186 /***********************************************************
7188 * Func : rgSCHCmnUlFreeAllocation
7190 * Desc : Free an allocation - invokes UHM and releases
7191 * alloc for the scheduler
7199 **********************************************************/
/*
 * rgSCHCmnUlFreeAllocation: same intent as rgSCHCmnUlFreeAlloc but releases
 * the allocation against an explicit UL subframe (rgSCHUtlUlAllocRls) and
 * resets the subframe's PRB accounting.
 */
7201 Void rgSCHCmnUlFreeAllocation
7208 Void rgSCHCmnUlFreeAllocation(cell, sf, alloc)
7211 RgSchUlAlloc *alloc;
7214 RgSchUlHqProcCb *hqProc;
7219 /* Fix : Release RNTI upon MSG3 max TX failure for non-HO UEs */
7220 if ((alloc->hqProc->remTx == 0) &&
7221 (alloc->hqProc->rcvdCrcInd == FALSE) &&
/* MSG3 max-retx failure: tear down the RA control block too. */
7224 RgSchRaCb *raCb = alloc->raCb;
7225 rgSCHUhmFreeProc(alloc->hqProc, cell);
7226 rgSCHUtlUlAllocRls(sf, alloc);
7227 rgSCHRamDelRaCb(cell, raCb, TRUE);
7232 hqProc = alloc->hqProc;
7233 rgSCHUhmFreeProc(hqProc, cell);
7235 /* re-setting the PRB count while freeing the allocations */
7238 rgSCHUtlUlAllocRls(sf, alloc);
7244 * @brief This function implements PDCCH allocation for an UE
7245 * in the currently running subframe.
7249 * Function: rgSCHCmnPdcchAllocCrntSf
7250 * Purpose: This function determines current DL subframe
7251 * and UE DL CQI to call the actual pdcch allocator
7253 * Note that this function is called only
7254 * when PDCCH request needs to be made during
7255 * uplink scheduling.
7257 * Invoked by: Scheduler
7259 * @param[in] RgSchCellCb *cell
7260 * @param[in] RgSchUeCb *ue
7261 * @return RgSchPdcch *
7262 * -# NULLP when unsuccessful
/*
 * rgSCHCmnPdcchAllocCrntSf: allocates a PDCCH for a UE in the DL subframe
 * currently used for UL scheduling (current time + TFU_ULCNTRL_DLDELTA).
 * Returns NULLP when no PDCCH is available.
 */
7265 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf
7271 RgSchPdcch *rgSCHCmnPdcchAllocCrntSf(cell, ue)
7276 CmLteTimingInfo frm = cell->crntTime;
7277 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7279 RgSchPdcch *pdcch = NULLP;
7281 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
7282 sf = rgSCHUtlSubFrmGet(cell, frm);
/* Common search space is used until SCell readiness (see rgSCHCmnUeReset);
 * DCI size then comes from the common-size table. */
7285 if (ue->allocCmnUlPdcch)
7287 pdcch = rgSCHCmnCmnPdcchAlloc(cell, sf);
7288 /* Since CRNTI Scrambled */
7291 pdcch->dciNumOfBits = ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
/* UE search space: DCI format hardcoded to the 5GTF A1 format here. */
7297 //pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, y, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_0, FALSE);
7298 pdcch = rgSCHCmnPdcchAlloc(cell, ue, sf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_A1, FALSE);
7303 /***********************************************************
7305 * Func : rgSCHCmnUlAllocFillNdmrs
7307 * Desc : Determines and fills N_dmrs for a UE uplink
7312 * Notes: N_dmrs determination is straightforward, so
7313 * it is configured per subband
7317 **********************************************************/
7319 Void rgSCHCmnUlAllocFillNdmrs
7321 RgSchCmnUlCell *cellUl,
7325 Void rgSCHCmnUlAllocFillNdmrs(cellUl, alloc)
7326 RgSchCmnUlCell *cellUl;
7327 RgSchUlAlloc *alloc;
7330 alloc->grnt.nDmrs = cellUl->dmrsArr[alloc->sbStart];
7334 /***********************************************************
7336 * Func : rgSCHCmnUlAllocLnkHqProc
7338 * Desc : Links a new allocation for an UE with the
7339 * appropriate HARQ process of the UE.
7347 **********************************************************/
/*
 * rgSCHCmnUlAllocLnkHqProc: links a new UL allocation to the UE's HARQ
 * process -- adaptive retransmission path when isRetx, otherwise a fresh
 * transmission registered with UHM.
 * NOTE(review): the if/else around the two calls was stripped from this
 * extract; code left byte-identical.
 */
7349 Void rgSCHCmnUlAllocLnkHqProc
7352 RgSchUlAlloc *alloc,
7353 RgSchUlHqProcCb *proc,
7357 Void rgSCHCmnUlAllocLnkHqProc(ue, alloc, proc, isRetx)
7359 RgSchUlAlloc *alloc;
7360 RgSchUlHqProcCb *proc;
7367 rgSCHCmnUlAdapRetx(alloc, proc);
7371 #ifdef LTE_L2_MEAS /* L2_COUNTERS */
/* New Tx: register with UHM using the HARQ entity's max retx budget. */
7374 rgSCHUhmNewTx(proc, (((RgUeUlHqCb*)proc->hqEnt)->maxHqRetx), alloc);
7380 * @brief This function releases a PDCCH in the subframe that is
7381 * currently being allocated for.
7385 * Function: rgSCHCmnPdcchRlsCrntSf
7386 * Purpose: This function determines current DL subframe
7387 * which is considered for PDCCH allocation,
7388 * and then calls the actual function that
7389 * releases a PDCCH in a specific subframe.
7390 * Note that this function is called only
7391 * when PDCCH release needs to be made during
7392 * uplink scheduling.
7394 * Invoked by: Scheduler
7396 * @param[in] RgSchCellCb *cell
7397 * @param[in] RgSchPdcch *pdcch
7401 Void rgSCHCmnPdcchRlsCrntSf
7407 Void rgSCHCmnPdcchRlsCrntSf(cell, pdcch)
7412 CmLteTimingInfo frm = cell->crntTime;
7416 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
7417 sf = rgSCHUtlSubFrmGet(cell, frm);
7418 rgSCHUtlPdcchPut(cell, &sf->pdcchInfo, pdcch);
7421 /***********************************************************
7423 * Func : rgSCHCmnUlFillPdcchWithAlloc
7425 * Desc : Fills a PDCCH with format 0 information.
7433 **********************************************************/
/*
 * rgSCHCmnUlFillPdcchWithAlloc: fills a PDCCH with the 5GTF UL DCI (format
 * A1 or A2, chosen by the grant) from the UL allocation: RB assignment,
 * HARQ proc id, MCS, NDI, UCI-on-xPUSCH, SCID/PMI and TPC. Fields set to 0
 * are fixed (unused) in this build. Logs an error for any other format.
 */
7435 Void rgSCHCmnUlFillPdcchWithAlloc
7438 RgSchUlAlloc *alloc,
7442 Void rgSCHCmnUlFillPdcchWithAlloc(pdcch, alloc, ue)
7444 RgSchUlAlloc *alloc;
7451 pdcch->rnti = alloc->rnti;
7452 //pdcch->dci.dciFormat = TFU_DCI_FORMAT_A2;
7453 pdcch->dci.dciFormat = alloc->grnt.dciFrmt;
7455 //Currently hardcoding values here.
7456 //printf("Filling 5GTF UL DCI for rnti %d \n",alloc->rnti);
7457 switch(pdcch->dci.dciFormat)
7459 case TFU_DCI_FORMAT_A1:
/* formatType 0 identifies A1 on the air interface. */
7461 pdcch->dci.u.formatA1Info.formatType = 0;
7462 pdcch->dci.u.formatA1Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
7463 pdcch->dci.u.formatA1Info.xPUSCH_TxTiming = 0;
7464 pdcch->dci.u.formatA1Info.RBAssign = alloc->grnt.rbAssign;
7465 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
7466 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
7467 pdcch->dci.u.formatA1Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
7468 pdcch->dci.u.formatA1Info.CSI_BSI_BRI_Req = 0;
7469 pdcch->dci.u.formatA1Info.CSIRS_BRRS_TxTiming = 0;
7470 pdcch->dci.u.formatA1Info.CSIRS_BRRS_SymbIdx = 0;
7471 pdcch->dci.u.formatA1Info.CSIRS_BRRS_ProcInd = 0;
7472 pdcch->dci.u.formatA1Info.numBSI_Reports = 0;
7473 pdcch->dci.u.formatA1Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
7474 pdcch->dci.u.formatA1Info.beamSwitch = 0;
7475 pdcch->dci.u.formatA1Info.SRS_Config = 0;
7476 pdcch->dci.u.formatA1Info.SRS_Symbol = 0;
7477 pdcch->dci.u.formatA1Info.REMapIdx_DMRS_PCRS_numLayers = 0;
7478 pdcch->dci.u.formatA1Info.SCID = alloc->grnt.SCID;
7479 pdcch->dci.u.formatA1Info.PMI = alloc->grnt.PMI;
7480 pdcch->dci.u.formatA1Info.UL_PCRS = 0;
7481 pdcch->dci.u.formatA1Info.tpcCmd = alloc->grnt.tpc;
7484 case TFU_DCI_FORMAT_A2:
/* formatType 1 identifies A2; field layout mirrors A1. */
7486 pdcch->dci.u.formatA2Info.formatType = 1;
7487 pdcch->dci.u.formatA2Info.xPUSCHRange = alloc->grnt.xPUSCHRange;
7488 pdcch->dci.u.formatA2Info.xPUSCH_TxTiming = 0;
7489 pdcch->dci.u.formatA2Info.RBAssign = alloc->grnt.rbAssign;
7490 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.hqProcId = alloc->grnt.hqProcId;
7491 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.mcs = alloc->grnt.iMcsCrnt;
7492 pdcch->dci.u.formatA2Info.u.rbAssignA1Val324.ndi = alloc->hqProc->ndi;
7493 pdcch->dci.u.formatA2Info.CSI_BSI_BRI_Req = 0;
7494 pdcch->dci.u.formatA2Info.CSIRS_BRRS_TxTiming = 0;
7495 pdcch->dci.u.formatA2Info.CSIRS_BRRS_SymbIdx = 0;
7496 pdcch->dci.u.formatA2Info.CSIRS_BRRS_ProcInd = 0;
7497 pdcch->dci.u.formatA2Info.numBSI_Reports = 0;
7498 pdcch->dci.u.formatA2Info.uciOnxPUSCH = alloc->grnt.uciOnxPUSCH;
7499 pdcch->dci.u.formatA2Info.beamSwitch = 0;
7500 pdcch->dci.u.formatA2Info.SRS_Config = 0;
7501 pdcch->dci.u.formatA2Info.SRS_Symbol = 0;
7502 pdcch->dci.u.formatA2Info.REMapIdx_DMRS_PCRS_numLayers = 0;
7503 pdcch->dci.u.formatA2Info.SCID = alloc->grnt.SCID;
7504 pdcch->dci.u.formatA2Info.PMI = alloc->grnt.PMI;
7505 pdcch->dci.u.formatA2Info.UL_PCRS = 0;
7506 pdcch->dci.u.formatA2Info.tpcCmd = alloc->grnt.tpc;
/* Any other DCI format is a misconfiguration in this UL allocator. */
7510 RLOG1(L_ERROR," 5GTF_ERROR UL Allocator's icorrect "
7511 "dciForamt Fill RNTI:%d",alloc->rnti);
7519 /***********************************************************
7521 * Func : rgSCHCmnUlAllocFillTpc
7523 * Desc : Determines and fills TPC for an UE allocation.
7531 **********************************************************/
7533 Void rgSCHCmnUlAllocFillTpc
7540 Void rgSCHCmnUlAllocFillTpc(cell, ue, alloc)
7543 RgSchUlAlloc *alloc;
7546 alloc->grnt.tpc = rgSCHPwrPuschTpcForUe(cell, ue);
7551 /***********************************************************
7553 * Func : rgSCHCmnAddUeToRefreshQ
7555 * Desc : Adds a UE to refresh queue, so that the UE is
7556 * periodically triggered to refresh it's GBR and
7565 **********************************************************/
/*
 * rgSCHCmnAddUeToRefreshQ: registers the UE on the common scheduler's timer
 * queue so its GBR/AMBR accounting is refreshed after `wait` ticks.
 * NOTE(review): the declaration of `arg` and the trailing fields/timer
 * start call were dropped from this extract; code left byte-identical.
 */
7567 PRIVATE Void rgSCHCmnAddUeToRefreshQ
7574 PRIVATE Void rgSCHCmnAddUeToRefreshQ(cell, ue, wait)
7580 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
7582 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
/* Build the timer registration argument from scratch. */
7586 memset(&arg, 0, sizeof(arg));
7587 arg.tqCp = &sched->tmrTqCp;
7588 arg.tq = sched->tmrTq;
7589 arg.timers = &ueSchd->tmr;
7593 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
7600 * @brief Perform UE reset procedure.
7604 * Function : rgSCHCmnUlUeReset
7606 * This functions performs BSR resetting and
7607 * triggers UL specific scheduler
7608 * to Perform UE reset procedure.
7610 * @param[in] RgSchCellCb *cell
7611 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnUlUeReset: UL part of the UE reset procedure -- clears BSR and
 * AMBR bookkeeping, restores per-LCG GBR state, releases outstanding UL
 * allocations and finally delegates to the configured UL scheduler.
 * NOTE(review): loop bodies/braces around the alloc-list walk were dropped
 * from this extract; code left byte-identical.
 */
7615 PRIVATE Void rgSCHCmnUlUeReset
7621 PRIVATE Void rgSCHCmnUlUeReset(cell, ue)
7626 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7627 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
7629 RgSchCmnLcg *lcgCmn;
7631 RgSchCmnAllocRecord *allRcd;
/* Clear buffer-status and AMBR accounting back to configured values. */
7633 ue->ul.minReqBytes = 0;
7634 ue->ul.totalBsr = 0;
7636 ue->ul.nonGbrLcgBs = 0;
7637 ue->ul.effAmbr = ue->ul.cfgdAmbr;
7639 node = ueUl->ulAllocLst.first;
7642 allRcd = (RgSchCmnAllocRecord *)node->node;
/* Reset every LCG's reported BS and effective GBR/MBR to configuration. */
7646 for(lcgCnt = 0; lcgCnt < RGSCH_MAX_LCG_PER_UE; lcgCnt++)
7648 lcgCmn = RG_SCH_CMN_GET_UL_LCG(&ue->ul.lcgArr[lcgCnt]);
7650 lcgCmn->reportedBs = 0;
7651 lcgCmn->effGbr = lcgCmn->cfgdGbr;
7652 lcgCmn->effDeltaMbr = lcgCmn->deltaMbr;
7654 rgSCHCmnUlUeDelAllocs(cell, ue);
7656 ue->isSrGrant = FALSE;
/* Specific UL scheduler gets the reset notification last. */
7658 cellSchd->apisUl->rgSCHUlUeReset(cell, ue);
7660 /* Stack Crash problem for TRACE5 changes. Added the return below */
7666 * @brief RESET UL CQI and DL CQI&RI to conservative values
7667 * for a reestablishing UE.
7671 * Function : rgSCHCmnResetRiCqi
7673 * RESET UL CQI and DL CQI&RI to conservative values
7674 * for a reestablishing UE
7676 * @param[in] RgSchCellCb *cell
7677 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnResetRiCqi: on UE re-establishment, resets UL CQI and DL CQI/RI
 * to conservative defaults (CCCH CQI, RI=1), re-applies forced transmit
 * diversity for TM3/4/6, recomputes allocation limits and requests an
 * early aperiodic CQI.
 */
7681 PRIVATE Void rgSCHCmnResetRiCqi
7687 PRIVATE Void rgSCHCmnResetRiCqi(cell, ue)
7692 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7693 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
7694 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7695 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
7698 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
7699 cell->isCpUlExtend);
/* Conservative DL defaults: CCCH-level CQI on both codewords, rank 1. */
7701 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
7702 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
7703 ueDl->mimoInfo.ri = 1;
/* Closed-loop modes (TM4/TM6) lose their PMI on reset; TM3 forced to RI 1. */
7704 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
7705 (ue->mimoInfo.txMode == RGR_UE_TM_6))
7707 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
7709 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
7711 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
/* Two build variants of the alloc-limit call (EMTC vs plain) -- the
 * #ifdef separating them was stripped from this extract. */
7714 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
7716 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
7720 /* Request for an early Aper CQI in case of reest */
7721 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
7722 if(acqiCb && acqiCb->aCqiCfg.pres)
7724 acqiCb->aCqiTrigWt = 0;
7732 * @brief Perform UE reset procedure.
7736 * Function : rgSCHCmnDlUeReset
7738 * This functions performs BO resetting and
7739 * triggers DL specific scheduler
7740 * to Perform UE reset procedure.
7742 * @param[in] RgSchCellCb *cell
7743 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnDlUeReset: DL part of the UE reset procedure -- removes the UE
 * from the PDCCH-order queue and the TA list (or stops the TA timer), then
 * delegates to the configured DL scheduler and the SCell reset handler.
 */
7747 PRIVATE Void rgSCHCmnDlUeReset
7753 PRIVATE Void rgSCHCmnDlUeReset(cell, ue)
7758 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7759 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
7760 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
7763 if (ueDl->rachInfo.poLnk.node != NULLP)
7765 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
7768 /* Fix: syed Remove from TA List if this UE is there.
7769 * If TA Timer is running. Stop it */
7770 if (ue->dlTaLnk.node)
7772 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
7773 ue->dlTaLnk.node = (PTR)NULLP;
7775 else if (ue->taTmr.tmrEvnt != TMR_NONE)
7777 rgSCHTmrStopTmr(cell, ue->taTmr.tmrEvnt, ue);
7780 cellSchd->apisDl->rgSCHDlUeReset(cell, ue);
/* Carrier-aggregation cleanup (guard stripped in this extract). */
7784 rgSCHSCellDlUeReset(cell,ue);
7790 * @brief Perform UE reset procedure.
7794 * Function : rgSCHCmnUeReset
7796 * This functions triggers specific scheduler
7797 * to Perform UE reset procedure.
7799 * @param[in] RgSchCellCb *cell
7800 * @param[in] RgSchUeCb *ue
/*
 * rgSCHCmnUeReset: full UE reset entry point -- clears RACH associations
 * and power state, runs the UL and DL resets, restores conservative CQI/RI
 * on every configured cell, triggers an aperiodic CQI on the PCell and
 * finally tells MAC to reset the UE's HARQ entity.
 * NOTE(review): declarations of idx and pst were dropped from this
 * extract; code left byte-identical.
 */
7806 Void rgSCHCmnUeReset
7812 Void rgSCHCmnUeReset(cell, ue)
7819 RgInfResetHqEnt hqEntRstInfo;
7821 /* RACHO: remove UE from pdcch, handover and rapId assoc Qs */
7822 rgSCHCmnDelRachInfo(cell, ue);
7824 rgSCHPwrUeReset(cell, ue);
7826 rgSCHCmnUlUeReset(cell, ue);
7827 rgSCHCmnDlUeReset(cell, ue);
7830 /* Making allocCmnUlPdcch TRUE to allocate DCI0/1A from Common search space.
7831 As because multiple cells are added hence 2 bits CqiReq is there
7832 This flag will be set to FALSE once we will get Scell READY */
7833 ue->allocCmnUlPdcch = TRUE;
7836 /* Fix : syed RESET UL CQI and DL CQI&RI to conservative values
7837 * for a reestablishing UE */
7838 /*Reset Cqi Config for all the configured cells*/
7839 for (idx = 0;idx < CM_LTE_MAX_CELLS; idx++)
7841 if (ue->cellInfo[idx] != NULLP)
7843 rgSCHCmnResetRiCqi(ue->cellInfo[idx]->cell, ue);
7846 /*After Reset Trigger APCQI for Pcell*/
7847 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
7848 if(pCellInfo->acqiCb.aCqiCfg.pres)
7850 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
7853 /* sending HqEnt reset to MAC */
7854 hqEntRstInfo.cellId = cell->cellId;
7855 hqEntRstInfo.crnti = ue->ueId;
7857 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
7858 RgSchMacRstHqEnt(&pst,&hqEntRstInfo);
7864 * @brief UE out of MeasGap or AckNackReptn.
7868 * Function : rgSCHCmnActvtUlUe
7870 * This functions triggers specific scheduler
7871 * to start considering it for scheduling.
7873 * @param[in] RgSchCellCb *cell
7874 * @param[in] RgSchUeCb *ue
7880 Void rgSCHCmnActvtUlUe
7886 Void rgSCHCmnActvtUlUe(cell, ue)
7891 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7893 /* : take care of this in UL retransmission */
7894 cellSchd->apisUl->rgSCHUlActvtUe(cell, ue);
7899 * @brief UE out of MeasGap or AckNackReptn.
7903 * Function : rgSCHCmnActvtDlUe
7905 * This functions triggers specific scheduler
7906 * to start considering it for scheduling.
7908 * @param[in] RgSchCellCb *cell
7909 * @param[in] RgSchUeCb *ue
7915 Void rgSCHCmnActvtDlUe
7921 Void rgSCHCmnActvtDlUe(cell, ue)
7926 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
7928 cellSchd->apisDl->rgSCHDlActvtUe(cell, ue);
7933 * @brief This API is invoked to indicate scheduler of a CRC indication.
7937 * Function : rgSCHCmnHdlUlTransInd
7938 * This API is invoked to indicate scheduler of a CRC indication.
7940 * @param[in] RgSchCellCb *cell
7941 * @param[in] RgSchUeCb *ue
7942 * @param[in] CmLteTimingInfo timingInfo
/*
 * rgSCHCmnHdlUlTransInd: notes the latest UL data/signal transmission time
 * for the UE and, if the UE was parked inactive pending a PDCCH order,
 * clears that inactivity mask in both directions.
 */
7947 Void rgSCHCmnHdlUlTransInd
7951 CmLteTimingInfo timingInfo
7954 Void rgSCHCmnHdlUlTransInd(cell, ue, timingInfo)
7957 CmLteTimingInfo timingInfo;
7961 /* Update the latest UL dat/sig transmission time */
7962 RGSCHCPYTIMEINFO(timingInfo, ue->ul.ulTransTime);
7963 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
7965 /* Some UL Transmission from this UE.
7966 * Activate this UE if it was inactive */
7967 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7968 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
7976 * @brief Compute the minimum Rank based on Codebook subset
7977 * restriction configuration for 4 Tx Ports and Tx Mode 4.
7981 * Function : rgSCHCmnComp4TxMode4
7983 * Depending on BitMap set at CBSR during Configuration
7984 * - return the least possible Rank
7987 * @param[in] U32 *pmiBitMap
7988 * @return RgSchCmnRank
7991 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode4
7996 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode4(pmiBitMap)
8000 U32 bitMap0, bitMap1;
8001 bitMap0 = pmiBitMap[0];
8002 bitMap1 = pmiBitMap[1];
/* Scan the two-word CBSR bitmap 16 bits at a time; the first non-zero
 * field yields the lowest permitted rank (field-to-rank layout assumed
 * per 36.213 4-port TM4 codebook -- TODO confirm ordering). */
8003 if((bitMap1) & 0xFFFF)
8005 return (RG_SCH_CMN_RANK_1);
8007 else if((bitMap1>>16) & 0xFFFF)
8009 return (RG_SCH_CMN_RANK_2);
8011 else if((bitMap0) & 0xFFFF)
8013 return (RG_SCH_CMN_RANK_3);
8015 else if((bitMap0>>16) & 0xFFFF)
8017 return (RG_SCH_CMN_RANK_4);
/* No restriction bit set: default to rank 1. */
8021 return (RG_SCH_CMN_RANK_1);
8027 * @brief Compute the minimum Rank based on Codebook subset
8028 * restriction configuration for 2 Tx Ports and Tx Mode 4.
8032 * Function : rgSCHCmnComp2TxMode4
8034 * Depending on BitMap set at CBSR during Configuration
8035 * - return the least possible Rank
8038 * @param[in] U32 *pmiBitMap
8039 * @return RgSchCmnRank
8042 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode4
8047 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode4(pmiBitMap)
8052 bitMap0 = pmiBitMap[0];
/* Bits 26-29 correspond to rank-1 codewords, bits 30-31 to rank-2
 * (2-port TM4 CBSR layout presumed per 36.213 -- TODO confirm). */
8053 if((bitMap0>>26)& 0x0F)
8055 return (RG_SCH_CMN_RANK_1);
8057 else if((bitMap0>>30) & 3)
8059 return (RG_SCH_CMN_RANK_2);
/* No restriction bit set: default to rank 1. */
8063 return (RG_SCH_CMN_RANK_1);
8068 * @brief Compute the minimum Rank based on Codebook subset
8069 * restriction configuration for 4 Tx Ports and Tx Mode 3.
8073 * Function : rgSCHCmnComp4TxMode3
8075 * Depending on BitMap set at CBSR during Configuration
8076 * - return the least possible Rank
8079 * @param[in] U32 *pmiBitMap
8080 * @return RgSchCmnRank
8083 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode3
8088 PRIVATE RgSchCmnRank rgSCHCmnComp4TxMode3(pmiBitMap)
8093 bitMap0 = pmiBitMap[0];
/* 4-port TM3 uses one bit per rank: bit 28 -> rank 1 ... bit 31 -> rank 4;
 * the lowest-set rank wins. */
8094 if((bitMap0>>28)& 1)
8096 return (RG_SCH_CMN_RANK_1);
8098 else if((bitMap0>>29) &1)
8100 return (RG_SCH_CMN_RANK_2);
8102 else if((bitMap0>>30) &1)
8104 return (RG_SCH_CMN_RANK_3);
8106 else if((bitMap0>>31) &1)
8108 return (RG_SCH_CMN_RANK_4);
/* No restriction bit set: default to rank 1. */
8112 return (RG_SCH_CMN_RANK_1);
8117 * @brief Compute the minimum Rank based on Codebook subset
8118 * restriction configuration for 2 Tx Ports and Tx Mode 3.
8122 * Function : rgSCHCmnComp2TxMode3
8124 * Depending on BitMap set at CBSR during Configuration
8125 * - return the least possible Rank
8128 * @param[in] U32 *pmiBitMap
8129 * @return RgSchCmnRank
8132 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode3
8137 PRIVATE RgSchCmnRank rgSCHCmnComp2TxMode3(pmiBitMap)
8142 bitMap0 = pmiBitMap[0];
/* 2-port TM3 uses one bit per rank: bit 30 -> rank 1, bit 31 -> rank 2. */
8143 if((bitMap0>>30)& 1)
8145 return (RG_SCH_CMN_RANK_1);
8147 else if((bitMap0>>31) &1)
8149 return (RG_SCH_CMN_RANK_2);
/* No restriction bit set: default to rank 1. */
8153 return (RG_SCH_CMN_RANK_1);
8158 * @brief Compute the minimum Rank based on Codebook subset
8159 * restriction configuration.
8163 * Function : rgSCHCmnComputeRank
8165 * Depending on Num Tx Ports and Transmission mode
8166 * - return the least possible Rank
8169 * @param[in] RgrTxMode txMode
8170 * @param[in] U32 *pmiBitMap
8171 * @param[in] U8 numTxPorts
8172 * @return RgSchCmnRank
8175 PRIVATE RgSchCmnRank rgSCHCmnComputeRank
8182 PRIVATE RgSchCmnRank rgSCHCmnComputeRank(txMode, pmiBitMap, numTxPorts)
/* Dispatch to the (numTxPorts, txMode)-specific CBSR decoder; only the
 * 2/4-port TM3/TM4 combinations are supported. */
8189 if (numTxPorts ==2 && txMode == RGR_UE_TM_3)
8191 return (rgSCHCmnComp2TxMode3(pmiBitMap));
8193 else if (numTxPorts ==4 && txMode == RGR_UE_TM_3)
8195 return (rgSCHCmnComp4TxMode3(pmiBitMap));
8197 else if (numTxPorts ==2 && txMode == RGR_UE_TM_4)
8199 return (rgSCHCmnComp2TxMode4(pmiBitMap));
8201 else if (numTxPorts ==4 && txMode == RGR_UE_TM_4)
8203 return (rgSCHCmnComp4TxMode4(pmiBitMap));
/* Unsupported port/TM combination: fall back to rank 1. */
8207 return (RG_SCH_CMN_RANK_1);
8214 * @brief Harq Entity Deinitialization for CMN SCH.
8218 * Function : rgSCHCmnDlDeInitHqEnt
8220 * Harq Entity Deinitialization for CMN SCH
8222 * @param[in] RgSchCellCb *cell
8223 * @param[in] RgSchDlHqEnt *hqE
8226 /*KWORK_FIX:Changed function return type to void */
8228 Void rgSCHCmnDlDeInitHqEnt
8234 Void rgSCHCmnDlDeInitHqEnt(cell, hqE)
8239 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8240 RgSchDlHqProcCb *hqP;
/* Let the specific DL scheduler release its own HARQ-entity state first. */
8245 ret = cellSchd->apisDl->rgSCHDlUeHqEntDeInit(cell, hqE);
8246 /* Free only If the Harq proc are created*/
/* Free the per-HARQ-process common-scheduler blocks allocated at init. */
8251 for(cnt = 0; cnt < hqE->numHqPrcs; cnt++)
8253 hqP = &hqE->procs[cnt];
8254 if ((RG_SCH_CMN_GET_DL_HQP(hqP)))
8256 rgSCHUtlFreeSBuf(cell->instIdx,
8257 (Data**)(&(hqP->sch)), (sizeof(RgSchCmnDlHqProc)));
/* LAA-specific HARQ process cleanup (presumably under an LAA feature
 * flag in the full file -- TODO confirm). */
8261 rgSCHLaaDeInitDlHqProcCb (cell, hqE);
8268 * @brief Harq Entity initialization for CMN SCH.
8272 * Function : rgSCHCmnDlInitHqEnt
8274 * Harq Entity initialization for CMN SCH
8276 * @param[in] RgSchCellCb *cell
8277 * @param[in] RgSchUeCb *ue
8283 S16 rgSCHCmnDlInitHqEnt
8289 S16 rgSCHCmnDlInitHqEnt(cell, hqEnt)
8291 RgSchDlHqEnt *hqEnt;
8295 RgSchDlHqProcCb *hqP;
8298 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Allocate a common-scheduler control block for each DL HARQ process. */
8300 for(cnt = 0; cnt < hqEnt->numHqPrcs; cnt++)
8302 hqP = &hqEnt->procs[cnt];
8303 if (rgSCHUtlAllocSBuf(cell->instIdx,
8304 (Data**)&(hqP->sch), (sizeof(RgSchCmnDlHqProc))) != ROK)
/* eMTC UEs on an eMTC-enabled cell use the eMTC DL scheduler API;
 * everyone else uses the legacy DL API. */
8310 if((cell->emtcEnable) &&(hqEnt->ue->isEmtcUe))
8312 if(ROK != cellSchd->apisEmtcDl->rgSCHDlUeHqEntInit(cell, hqEnt))
8321 if(ROK != cellSchd->apisDl->rgSCHDlUeHqEntInit(cell, hqEnt))
8328 } /* rgSCHCmnDlInitHqEnt */
8331 * @brief This function computes distribution of refresh period
8335 * Function: rgSCHCmnGetRefreshDist
8336 * Purpose: This function computes distribution of refresh period
8337 * This is required to align set of UEs refresh
8338 * around the different consecutive subframe.
8340 * Invoked by: rgSCHCmnGetRefreshPerDist
8342 * @param[in] RgSchCellCb *cell
8343 * @param[in] RgSchUeCb *ue
8348 PRIVATE U8 rgSCHCmnGetRefreshDist
8354 PRIVATE U8 rgSCHCmnGetRefreshDist(cell, ue)
8361 Inst inst = cell->instIdx;
/* Pick the first refresh offset that still has capacity (keeps at most
 * RGSCH_MAX_REFRESH_GRPSZ UEs refreshing in the same subframe offset). */
8364 for(refOffst = 0; refOffst < RGSCH_MAX_REFRESH_OFFSET; refOffst++)
8366 if(cell->refreshUeCnt[refOffst] < RGSCH_MAX_REFRESH_GRPSZ)
8368 cell->refreshUeCnt[refOffst]++;
8369 ue->refreshOffset = refOffst;
8370 /* printf("UE[%d] refresh offset[%d]. Cell refresh ue count[%d].\n", ue->ueId, refOffst, cell->refreshUeCnt[refOffst]); */
/* All offsets full: log and over-subscribe the last offset rather than fail. */
8375 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Allocation of refresh distribution failed\n"));
8376 /* We should not enter here normally, but incase of failure, allocating from last offset*/
8377 cell->refreshUeCnt[refOffst-1]++;
8378 ue->refreshOffset = refOffst-1;
8380 return (refOffst-1);
8383 * @brief This function computes initial Refresh Wait Period.
8387 * Function: rgSCHCmnGetRefreshPer
8388 * Purpose: This function computes initial Refresh Wait Period.
8389 * This is required to align multiple UEs refresh
8390 * around the same time.
8392 * Invoked by: rgSCHCmnGetRefreshPer
8394 * @param[in] RgSchCellCb *cell
8395 * @param[in] RgSchUeCb *ue
8396 * @param[in] U32 *waitPer
8401 PRIVATE Void rgSCHCmnGetRefreshPer
8408 PRIVATE Void rgSCHCmnGetRefreshPer(cell, ue, waitPer)
/* Refresh period expressed in subframes (refresh time * timer resolution). */
8418 refreshPer = RG_SCH_CMN_REFRESH_TIME * RG_SCH_CMN_REFRESH_TIMERES;
8419 crntSubFrm = cell->crntTime.sfn * RGSCH_NUM_SUB_FRAMES_5G + cell->crntTime.slot;
8420 /* Fix: syed align multiple UEs to refresh at same time */
/* Wait until the next refresh-period boundary, converted back to timer
 * resolution units, then add the per-UE stagger offset so groups of UEs
 * refresh on consecutive subframes. */
8421 *waitPer = refreshPer - (crntSubFrm % refreshPer);
8422 *waitPer = RGSCH_CEIL(*waitPer, RG_SCH_CMN_REFRESH_TIMERES);
8423 *waitPer = *waitPer + rgSCHCmnGetRefreshDist(cell, ue);
8431 * @brief UE initialisation for scheduler.
8435 * Function : rgSCHCmnRgrSCellUeCfg
8437 * This functions intialises UE specific scheduler
8438 * information for SCELL
8439 * 0. Perform basic validations
8440 * 1. Allocate common sched UE cntrl blk
8441 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
8443 * 4. Perform DLFS cfg
8445 * @param[in] RgSchCellCb *cell
8446 * @param[in] RgSchUeCb *ue
8447 * @param[out] RgSchErrInfo *err
8453 S16 rgSCHCmnRgrSCellUeCfg
8457 RgrUeSecCellCfg *sCellInfoCfg,
8461 S16 rgSCHCmnRgrSCellUeCfg(sCell, ue, sCellInfoCfg, err)
8464 RgrUeSecCellCfg *sCellInfoCfg;
8471 RgSchCmnAllocRecord *allRcd;
8472 RgSchDlRbAlloc *allocInfo;
8473 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
8475 RgSchCmnUlUe *ueUlPcell;
8476 RgSchCmnUe *pCellUeSchCmn;
8477 RgSchCmnUe *ueSchCmn;
8479 RgSchCmnDlUe *pCellUeDl;
8481 Inst inst = ue->cell->instIdx;
/* Map the SCell's cellId to the UE's per-cell index (masked into the
 * CM_LTE_MAX_CELLS range; assumes power-of-two table size -- TODO confirm). */
8483 U32 idx = (U8)((sCell->cellId - rgSchCb[sCell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
8485 pCellUeSchCmn = RG_SCH_CMN_GET_UE(ue,ue->cell);
8486 pCellUeDl = &pCellUeSchCmn->dl;
8488 /* 1. Allocate Common sched control block */
8489 if((rgSCHUtlAllocSBuf(sCell->instIdx,
8490 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
8492 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Memory allocation FAILED\n"));
8493 err->errCause = RGSCHERR_SCH_CFG;
8496 ueSchCmn = RG_SCH_CMN_GET_UE(ue,sCell);
8498 /*2. Perform UEs downlink configuration */
8499 ueDl = &ueSchCmn->dl;
/* SCell DL MIMO state starts as a copy of the PCell's. */
8502 ueDl->mimoInfo = pCellUeDl->mimoInfo;
/* Force transmit diversity until valid PMI/RI feedback exists for TM3/4/6. */
8504 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) ||
8505 (ue->mimoInfo.txMode == RGR_UE_TM_6))
8507 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_NO_PMI);
8509 if (ue->mimoInfo.txMode == RGR_UE_TM_3)
8511 RG_SCH_CMN_SET_FORCE_TD(ue, sCell, RG_SCH_CMN_TD_RI_1);
8513 RGSCH_ARRAY_BOUND_CHECK(sCell->instIdx, rgUeCatTbl, pCellUeSchCmn->cmn.ueCat);
8514 ueDl->maxTbBits = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlTbBits;
8517 ri = RGSCH_MIN(ri, sCell->numTxAntPorts);
/* UE categories 6/7 support a larger TB size only with 4-layer spatial mux. */
8518 if(((CM_LTE_UE_CAT_6 == pCellUeSchCmn->cmn.ueCat )
8519 ||(CM_LTE_UE_CAT_7 == pCellUeSchCmn->cmn.ueCat))
8522 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[1];
8526 ueDl->maxTbSz = rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxDlBits[0];
8529 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer size per HARQ process: TDD path divides by the cfg-dependent
 * HARQ process count, FDD path by RGSCH_NUM_DL_HQ_PROC. */
8531 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
8532 rgSchTddDlNumHarqProcTbl[sCell->ulDlCfgIdx]);
8534 ueDl->maxSbSz = (rgUeCatTbl[pCellUeSchCmn->cmn.ueCat].maxSftChBits/
8535 RGSCH_NUM_DL_HQ_PROC);
8538 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, ue->isEmtcUe);
8540 rgSCHCmnDlSetUeAllocLmt(sCell, ueDl, FALSE);
8544 /* ambrCfgd config moved to ueCb.dl, as it's not needed for per cell wise*/
8546 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, sCell);
8547 allocInfo->rnti = ue->ueId;
8549 /* Initializing the lastCfi value to current cfi value */
8550 ueDl->lastCfi = cellSchd->dl.currCfi;
8552 if ((cellSchd->apisDl->rgSCHRgrSCellDlUeCfg(sCell, ue, err)) != ROK)
8554 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "Spec Sched DL UE CFG FAILED\n"));
8558 /* TODO: enhance for DLFS RB Allocation for SCELLs in future dev */
8560 /* DLFS UE Config */
8561 if (cellSchd->dl.isDlFreqSel)
8563 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeCfg(sCell, ue, sCellInfoCfg, err)) != ROK)
8565 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS UE config FAILED\n"));
8570 /* TODO: Do UL SCELL CFG during UL CA dev */
8572 ueUl = RG_SCH_CMN_GET_UL_UE(ue, sCell);
8574 /* TODO_ULCA: SRS for SCELL needs to be handled in the below function call */
8575 rgSCHCmnUpdUeUlCqiInfo(sCell, ue, ueUl, ueSchCmn, cellSchd,
8576 sCell->isCpUlExtend);
8578 ret = rgSCHUhmHqEntInit(sCell, ue);
8581 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL UHM HARQ Ent Init "
8582 "Failed for CRNTI:%d", ue->ueId);
/* Mirror the PCell's UL HARQ limits, then build the free/inUse process lists. */
8586 ueUlPcell = RG_SCH_CMN_GET_UL_UE(ue, ue->cell);
8587 /* Initialize uplink HARQ related information for UE */
8588 ueUl->hqEnt.maxHqRetx = ueUlPcell->hqEnt.maxHqRetx;
8589 cmLListInit(&ueUl->hqEnt.free);
8590 cmLListInit(&ueUl->hqEnt.inUse);
8591 for(i=0; i < ueUl->hqEnt.numHqPrcs; i++)
8593 ueUl->hqEnt.hqProcCb[i].hqEnt = (void*)(&ueUl->hqEnt);
8594 ueUl->hqEnt.hqProcCb[i].procId = i;
8595 ueUl->hqEnt.hqProcCb[i].ulSfIdx = RGSCH_INVALID_INFO;
8596 ueUl->hqEnt.hqProcCb[i].alloc = NULLP;
8598 /* ccpu00139513- Initializing SPS flags*/
8599 ueUl->hqEnt.hqProcCb[i].isSpsActvnHqP = FALSE;
8600 ueUl->hqEnt.hqProcCb[i].isSpsOccnHqP = FALSE;
8602 cmLListAdd2Tail(&ueUl->hqEnt.free, &ueUl->hqEnt.hqProcCb[i].lnk);
8603 ueUl->hqEnt.hqProcCb[i].lnk.node = (PTR)&ueUl->hqEnt.hqProcCb[i];
8606 /* Allocate UL BSR allocation tracking List */
8607 cmLListInit(&ueUl->ulAllocLst);
/* Pre-allocate the fixed pool of UL allocation-tracking records. */
8609 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8611 if((rgSCHUtlAllocSBuf(sCell->instIdx,
8612 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord) != ROK))
8614 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId,"SCELL Memory allocation FAILED"
8615 "for CRNTI:%d",ue->ueId);
8616 err->errCause = RGSCHERR_SCH_CFG;
8619 allRcd->allocTime = sCell->crntTime;
8620 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8621 allRcd->lnk.node = (PTR)allRcd;
8624 /* After initialising UL part, do power related init */
8625 ret = rgSCHPwrUeSCellCfg(sCell, ue, sCellInfoCfg);
8628 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Could not do "
8629 "power config for UE CRNTI:%d",ue->ueId);
/* Spec-scheduler UL config: eMTC UEs take the eMTC API, others the legacy one. */
8634 if(TRUE == ue->isEmtcUe)
8636 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
8638 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
8639 "for CRNTI:%d",ue->ueId);
8646 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(sCell, ue, NULL, err)) != ROK)
8648 RLOG_ARG1(L_ERROR,DBG_CELLID,sCell->cellId, "Spec Sched UL UE CFG FAILED"
8649 "for CRNTI:%d",ue->ueId);
8654 ue->ul.isUlCaEnabled = TRUE;
8658 } /* rgSCHCmnRgrSCellUeCfg */
8662 * @brief UE initialisation for scheduler.
8666 * Function : rgSCHCmnRgrSCellUeDel
8668 * This functions Delete UE specific scheduler
8669 * information for SCELL
8671 * @param[in] RgSchCellCb *cell
8672 * @param[in] RgSchUeCb *ue
8678 S16 rgSCHCmnRgrSCellUeDel
8680 RgSchUeCellInfo *sCellInfo,
8684 S16 rgSCHCmnRgrSCellUeDel(sCellInfo, ue)
8685 RgSchUeCellInfo *sCellInfo;
8689 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(ue->cell);
8690 Inst inst = ue->cell->instIdx;
/* Tear down in reverse order of rgSCHCmnRgrSCellUeCfg: DL spec sched,
 * UL allocations, UL spec sched, DLFS, then the common control block. */
8693 cellSchd->apisDl->rgSCHRgrSCellDlUeDel(sCellInfo, ue);
8696 rgSCHCmnUlUeDelAllocs(sCellInfo->cell, ue);
8699 if(TRUE == ue->isEmtcUe)
8701 cellSchd->apisEmtcUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
8706 cellSchd->apisUl->rgSCHFreeUlUe(sCellInfo->cell, ue);
8709 /* DLFS UE Config */
8710 if (cellSchd->dl.isDlFreqSel)
8712 if ((cellSchd->apisDlfs->rgSCHDlfsSCellUeDel(sCellInfo->cell, ue)) != ROK)
8714 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "DLFS Scell del FAILED\n"));
/* Release the common-scheduler UE block allocated during SCell config. */
8719 rgSCHUtlFreeSBuf(sCellInfo->cell->instIdx,
8720 (Data**)(&(sCellInfo->sch)), (sizeof(RgSchCmnUe)));
8724 } /* rgSCHCmnRgrSCellUeDel */
8730 * @brief Handles 5gtf configuration for a UE
8734 * Function : rgSCHCmn5gtfUeCfg
8740 * @param[in] RgSchCellCb *cell
8741 * @param[in] RgSchUeCb *ue
8742 * @param[in] RgrUeCfg *cfg
8748 S16 rgSCHCmn5gtfUeCfg
8755 S16 rgSCHCmn5gtfUeCfg(cell, ue, cfg)
8762 RgSchUeGrp *ue5gtfGrp;
/* Copy the configured 5GTF parameters into the UE control block. */
8763 ue->ue5gtfCb.grpId = cfg->ue5gtfCfg.grpId;
8764 ue->ue5gtfCb.BeamId = cfg->ue5gtfCfg.BeamId;
8765 ue->ue5gtfCb.numCC = cfg->ue5gtfCfg.numCC;
8766 ue->ue5gtfCb.mcs = cfg->ue5gtfCfg.mcs;
8767 ue->ue5gtfCb.maxPrb = cfg->ue5gtfCfg.maxPrb;
/* Hard-coded CQI/RI reporting defaults (period 100, first occasion at
 * SFN 10 slot 0, rank 1). */
8769 ue->ue5gtfCb.cqiRiPer = 100;
8770 /* 5gtf TODO: CQIs to start from (10,0)*/
8771 ue->ue5gtfCb.nxtCqiRiOccn.sfn = 10;
8772 ue->ue5gtfCb.nxtCqiRiOccn.slot = 0;
8773 ue->ue5gtfCb.rank = 1;
8775 printf("\nschd cfg at mac,%u,%u,%u,%u,%u\n",ue->ue5gtfCb.grpId,ue->ue5gtfCb.BeamId,ue->ue5gtfCb.numCC,
8776 ue->ue5gtfCb.mcs,ue->ue5gtfCb.maxPrb);
8778 ue5gtfGrp = &(cell->cell5gtfCb.ueGrp5gConf[ue->ue5gtfCb.BeamId]);
8780 /* TODO_5GTF: Currently handling 1 group only. Need to update when multi group
8781 scheduling comes into picture */
/* Reject the config if this beam id is already claimed in the group's mask;
 * otherwise record it. */
8782 if(ue5gtfGrp->beamBitMask & (1 << ue->ue5gtfCb.BeamId))
8784 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8785 "5GTF_ERROR Invalid beam id CRNTI:%d",cfg->crnti);
8788 ue5gtfGrp->beamBitMask |= (1 << ue->ue5gtfCb.BeamId);
8795 * @brief UE initialisation for scheduler.
8799 * Function : rgSCHCmnRgrUeCfg
8801 * This functions intialises UE specific scheduler
8803 * 0. Perform basic validations
8804 * 1. Allocate common sched UE cntrl blk
8805 * 2. Perform DL cfg (allocate Hq Procs Cmn sched cntrl blks)
8807 * 4. Perform DLFS cfg
8809 * @param[in] RgSchCellCb *cell
8810 * @param[in] RgSchUeCb *ue
8811 * @param[int] RgrUeCfg *ueCfg
8812 * @param[out] RgSchErrInfo *err
8818 S16 rgSCHCmnRgrUeCfg
8826 S16 rgSCHCmnRgrUeCfg(cell, ue, ueCfg, err)
8833 RgSchDlRbAlloc *allocInfo;
8835 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
8836 RgSchCmnUe *ueSchCmn;
8840 RgSchCmnAllocRecord *allRcd;
/* Map cellId to the UE's per-cell index (masked into CM_LTE_MAX_CELLS). */
8842 U32 idx = (U8)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId)&(CM_LTE_MAX_CELLS-1));
8843 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
8846 /* 1. Allocate Common sched control block */
8847 if((rgSCHUtlAllocSBuf(cell->instIdx,
8848 (Data**)&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch)), (sizeof(RgSchCmnUe))) != ROK))
8850 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8851 "Memory allocation FAILED for CRNTI:%d",ueCfg->crnti);
8852 err->errCause = RGSCHERR_SCH_CFG;
8855 ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
8856 ue->dl.ueDlCqiCfg = ueCfg->ueDlCqiCfg;
8857 pCellInfo->acqiCb.aCqiCfg = ueCfg->ueDlCqiCfg.aprdCqiCfg;
/* RGR UE category enum is 1-based; internal table index is 0-based. */
8858 if(ueCfg->ueCatEnum > 0 )
8860 /*KWORK_FIX removed NULL chk for ueSchCmn*/
8861 ueSchCmn->cmn.ueCat = ueCfg->ueCatEnum - 1;
8865 ueSchCmn->cmn.ueCat = 0; /* Assuming enum values correctly set */
8867 cmInitTimers(&ueSchCmn->cmn.tmr, 1);
8869 /*2. Perform UEs downlink configuration */
8870 ueDl = &ueSchCmn->dl;
8871 /* RACHO : store the rapId assigned for HandOver UE.
8872 * Append UE to handover list of cmnCell */
8873 if (ueCfg->dedPreambleId.pres == PRSNT_NODEF)
8875 rgSCHCmnDelDedPreamble(cell, ueCfg->dedPreambleId.val);
8876 ueDl->rachInfo.hoRapId = ueCfg->dedPreambleId.val;
8877 cmLListAdd2Tail(&cellSchd->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
8878 ueDl->rachInfo.hoLnk.node = (PTR)ue;
8881 rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd);
/* Force transmit diversity until valid PMI/RI feedback exists for TM3/4/6. */
8883 if (ueCfg->txMode.pres == TRUE)
8885 if ((ueCfg->txMode.txModeEnum == RGR_UE_TM_4) ||
8886 (ueCfg->txMode.txModeEnum == RGR_UE_TM_6))
8888 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
8890 if (ueCfg->txMode.txModeEnum == RGR_UE_TM_3)
8892 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
8895 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
8896 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
8899 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
/* UE categories 6/7 support a larger TB size only with 4-layer spatial mux. */
8900 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
8901 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
8904 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
8908 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
8911 /* Fix : syed Assign hqEnt to UE only if msg4 is done */
/* Soft-buffer size per HARQ process: TDD path divides by the cfg-dependent
 * HARQ process count, FDD path by RGSCH_NUM_DL_HQ_PROC. */
8913 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8914 rgSchTddDlNumHarqProcTbl[cell->ulDlCfgIdx]);
8916 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
8917 RGSCH_NUM_DL_HQ_PROC);
8920 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, ue->isEmtcUe);
8922 rgSCHCmnDlSetUeAllocLmt(cell, ueDl, FALSE);
8924 /* if none of the DL and UL AMBR are configured then fail the configuration
8926 if((ueCfg->ueQosCfg.dlAmbr == 0) && (ueCfg->ueQosCfg.ueBr == 0))
8928 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"UL Ambr and DL Ambr are"
8929 "configured as 0 for CRNTI:%d",ueCfg->crnti);
8930 err->errCause = RGSCHERR_SCH_CFG;
/* AMBR is scaled to bytes per refresh period (configured value presumably
 * in units per 100ms -- TODO confirm units). */
8934 ue->dl.ambrCfgd = (ueCfg->ueQosCfg.dlAmbr * RG_SCH_CMN_REFRESH_TIME)/100;
8936 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
8937 allocInfo->rnti = ue->ueId;
8939 /* Initializing the lastCfi value to current cfi value */
8940 ueDl->lastCfi = cellSchd->dl.currCfi;
/* Spec-scheduler DL config: eMTC UEs take the eMTC API, others the legacy one. */
8942 if(cell->emtcEnable && ue->isEmtcUe)
8944 if ((cellSchd->apisEmtcDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8946 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8947 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8955 if ((cellSchd->apisDl->rgSCHRgrDlUeCfg(cell, ue, ueCfg, err)) != ROK)
8957 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
8958 "Spec Sched DL UE CFG FAILED for CRNTI:%d",ueCfg->crnti);
8965 /* 3. Initialize ul part */
8966 ueUl = &ueSchCmn->ul;
8968 rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd,
8969 cell->isCpUlExtend);
8971 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
8972 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
8974 ue->ul.cfgdAmbr = (ueCfg->ueQosCfg.ueBr * RG_SCH_CMN_REFRESH_TIME)/100;
8975 ue->ul.effAmbr = ue->ul.cfgdAmbr;
8976 RGSCHCPYTIMEINFO(cell->crntTime, ue->ul.ulTransTime);
8978 /* Allocate UL BSR allocation tracking List */
8979 cmLListInit(&ueUl->ulAllocLst);
/* Pre-allocate the fixed pool of UL allocation-tracking records. */
8981 for (cnt = 0; cnt < RG_SCH_CMN_MAX_ALLOC_TRACK; cnt++)
8983 if((rgSCHUtlAllocSBuf(cell->instIdx,
8984 (Data**)&(allRcd),sizeof(RgSchCmnAllocRecord)) != ROK))
8986 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation FAILED"
8987 "for CRNTI:%d",ueCfg->crnti);
8988 err->errCause = RGSCHERR_SCH_CFG;
8991 allRcd->allocTime = cell->crntTime;
8992 cmLListAdd2Tail(&ueUl->ulAllocLst, &allRcd->lnk);
8993 allRcd->lnk.node = (PTR)allRcd;
8995 /* Allocate common sch cntrl blocks for LCGs */
8996 for (cnt=0; cnt<RGSCH_MAX_LCG_PER_UE; cnt++)
8998 ret = rgSCHUtlAllocSBuf(cell->instIdx,
8999 (Data**)&(ue->ul.lcgArr[cnt].sch), (sizeof(RgSchCmnLcg)));
9002 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9003 "SCH struct alloc failed for CRNTI:%d",ueCfg->crnti);
9004 err->errCause = RGSCHERR_SCH_CFG;
9008 /* After initialising UL part, do power related init */
9009 ret = rgSCHPwrUeCfg(cell, ue, ueCfg);
9012 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
9013 "power config for UE CRNTI:%d",ueCfg->crnti);
9017 ret = rgSCHCmnSpsUeCfg(cell, ue, ueCfg, err);
9020 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not do "
9021 "SPS config for CRNTI:%d",ueCfg->crnti);
9024 #endif /* LTEMAC_SPS */
/* Spec-scheduler UL config: eMTC UEs take the eMTC API, others the legacy one. */
9027 if(TRUE == ue->isEmtcUe)
9029 if ((cellSchd->apisEmtcUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
9031 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
9032 "for CRNTI:%d",ueCfg->crnti);
9039 if ((cellSchd->apisUl->rgSCHRgrUlUeCfg(cell, ue, ueCfg, err)) != ROK)
9041 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Spec Sched UL UE CFG FAILED"
9042 "for CRNTI:%d",ueCfg->crnti);
9047 /* DLFS UE Config */
9048 if (cellSchd->dl.isDlFreqSel)
9050 if ((cellSchd->apisDlfs->rgSCHDlfsUeCfg(cell, ue, ueCfg, err)) != ROK)
9052 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "DLFS UE config FAILED"
9053 "for CRNTI:%d",ueCfg->crnti);
9058 /* Fix: syed align multiple UEs to refresh at same time */
9059 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
9060 /* Start UE Qos Refresh Timer */
9061 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
9063 rgSCHCmn5gtfUeCfg(cell, ue, ueCfg);
9067 } /* rgSCHCmnRgrUeCfg */
9070 * @brief UE TX mode reconfiguration handler.
9074 * Function : rgSCHCmnDlHdlTxModeRecfg
9076 * This functions updates UE specific scheduler
9077 * information upon UE reconfiguration.
9079 * @param[in] RgSchUeCb *ue
9080 * @param[in] RgrUeRecfg *ueRecfg
/* NOTE(review): two variants of this function appear below (with and
 * without numTxPorts); the selecting #ifdef (apparently TFU_UPGRADE,
 * given the #endif at the bottom) is missing from this view -- confirm. */
9085 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg
9089 RgrUeRecfg *ueRecfg,
9093 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, numTxPorts)
9096 RgrUeRecfg *ueRecfg;
9101 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg
9108 PRIVATE Void rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg)
9111 RgrUeRecfg *ueRecfg;
9115 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
9117 if (ueRecfg->txMode.pres != PRSNT_NODEF)
9121 /* ccpu00140894- Starting Timer for TxMode Transition Completion*/
9122 ue->txModeTransCmplt =FALSE;
9123 rgSCHTmrStartTmr (ue->cell, ue, RG_SCH_TMR_TXMODE_TRNSTN, RG_SCH_TXMODE_TRANS_TIMER);
/* Transition COMPLETE: clear the recfg force-TD bit, reset RI to 1, then
 * re-apply force-TD masks required by the new TX mode. */
9124 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_CMPLT)
9126 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell,
9127 RG_SCH_CMN_TD_TXMODE_RECFG);
9128 /* MS_WORKAROUND for ccpu00123186 MIMO Fix Start: need to set FORCE TD bitmap based on TX mode */
9129 ueDl->mimoInfo.ri = 1;
9130 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
9131 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
9133 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
9135 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
9137 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
9139 /* MIMO Fix End: need to set FORCE TD bitmap based on TX mode */
/* Transition START: force TD for the duration of the reconfiguration and
 * reinitialize the UE's MIMO rank from CBSR (or default to 1). */
9142 if (ueRecfg->txMode.tmTrnstnState == RGR_TXMODE_RECFG_START)
9144 /* start afresh forceTD masking */
9145 RG_SCH_CMN_INIT_FORCE_TD(ue, cell, 0);
9146 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_TXMODE_RECFG);
9147 /* Intialize MIMO related parameters of UE */
9150 if(ueRecfg->txMode.pres)
9152 if((ueRecfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
9153 (ueRecfg->txMode.txModeEnum ==RGR_UE_TM_4))
9155 if(ueRecfg->ueCodeBookRstRecfg.pres)
9158 rgSCHCmnComputeRank(ueRecfg->txMode.txModeEnum,
9159 ueRecfg->ueCodeBookRstRecfg.pmiBitMap, numTxPorts)
9163 ueDl->mimoInfo.ri = 1;
9168 ueDl->mimoInfo.ri = 1;
9173 ueDl->mimoInfo.ri = 1;
9176 ueDl->mimoInfo.ri = 1;
9177 #endif /* TFU_UPGRADE */
9178 if ((ueRecfg->txMode.txModeEnum == RGR_UE_TM_4) ||
9179 (ueRecfg->txMode.txModeEnum == RGR_UE_TM_6))
9181 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
9183 if (ueRecfg->txMode.txModeEnum == RGR_UE_TM_3)
9185 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
9190 /***********************************************************
9192 * Func : rgSCHCmnUpdUeMimoInfo
9194 * Desc : Updates UL and DL Ue Information
9202 **********************************************************/
9204 PRIVATE Void rgSCHCmnUpdUeMimoInfo
9209 RgSchCmnCell *cellSchd
9212 PRIVATE Void rgSCHCmnUpdUeMimoInfo(ueCfg, ueDl, cell, cellSchd)
9216 RgSchCmnCell *cellSchd;
/* For TM3/TM4 with a codebook-subset restriction present, derive the
 * initial RI from the CBSR bitmap; in every other case default RI to 1.
 * (The multiple "ri = 1" lines below presumably belong to else-branches
 * split by missing #if/#else lines -- TODO confirm against full file.) */
9220 if(ueCfg->txMode.pres)
9222 if((ueCfg->txMode.txModeEnum ==RGR_UE_TM_3) ||
9223 (ueCfg->txMode.txModeEnum ==RGR_UE_TM_4))
9225 if(ueCfg->ueCodeBookRstCfg.pres)
9228 rgSCHCmnComputeRank(ueCfg->txMode.txModeEnum,
9229 ueCfg->ueCodeBookRstCfg.pmiBitMap, cell->numTxAntPorts);
9233 ueDl->mimoInfo.ri = 1;
9238 ueDl->mimoInfo.ri = 1;
9243 ueDl->mimoInfo.ri = 1;
9247 ueDl->mimoInfo.ri = 1;
9248 #endif /*TFU_UPGRADE */
/* Both codewords start at the cell's CCCH CQI until real CQI reports arrive. */
9249 ueDl->mimoInfo.cwInfo[0].cqi = cellSchd->dl.ccchCqi;
9250 ueDl->mimoInfo.cwInfo[1].cqi = cellSchd->dl.ccchCqi;
9254 /***********************************************************
9256 * Func : rgSCHCmnUpdUeUlCqiInfo
9258 * Desc : Updates UL and DL Ue Information
9266 **********************************************************/
9268 PRIVATE Void rgSCHCmnUpdUeUlCqiInfo
9273 RgSchCmnUe *ueSchCmn,
9274 RgSchCmnCell *cellSchd,
9278 PRIVATE Void rgSCHCmnUpdUeUlCqiInfo(cell, ue, ueUl, ueSchCmn, cellSchd, isEcp)
9282 RgSchCmnUe *ueSchCmn;
9283 RgSchCmnCell *cellSchd;
/* With SRS configured: seed the UL CQI per selected antenna (when UL TX
 * antenna selection is present) from the cell default; without SRS just
 * use the cell default directly. */
9290 if(ue->srsCb.srsCfg.type == RGR_SCH_SRS_SETUP)
9292 if(ue->ul.ulTxAntSel.pres)
9294 ueUl->crntUlCqi[ue->srsCb.selectedAnt] = cellSchd->ul.dfltUlCqi;
9295 ueUl->validUlCqi = ueUl->crntUlCqi[ue->srsCb.selectedAnt];
9299 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
9300 ueUl->validUlCqi = ueUl->crntUlCqi[0];
9302 ue->validTxAnt = ue->srsCb.selectedAnt;
9306 ueUl->validUlCqi = cellSchd->ul.dfltUlCqi;
/* Seed the UL link-adaptation iTbs from the CQI->iTbs table (indexed by
 * extended-CP flag), scaled by 100; start with zero delta. */
9310 ueUl->ulLaCb.cqiBasediTbs = rgSchCmnUlCqiToTbsTbl[isEcp]
9311 [ueUl->validUlCqi] * 100;
9312 ueUl->ulLaCb.deltaiTbs = 0;
9316 ueUl->crntUlCqi[0] = cellSchd->ul.dfltUlCqi;
9317 #endif /*TFU_UPGRADE */
9318 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgUeCatTbl, ueSchCmn->cmn.ueCat);
/* Cap the usable UL CQI for categories without UL 64QAM support. */
9319 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
9321 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
9325 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
9330 /***********************************************************
9332 * Func : rgSCHCmnUpdUeCatCfg
9334 * Desc : Updates UL and DL Ue Information
9342 **********************************************************/
9344 PRIVATE Void rgSCHCmnUpdUeCatCfg
9350 PRIVATE Void rgSCHCmnUpdUeCatCfg(ue, cell)
9355 RgSchDlHqEnt *hqE = NULLP;
9356 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
9357 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
9358 RgSchCmnUe *ueSchCmn = RG_SCH_CMN_GET_UE(ue,cell);
9359 RgSchCmnCell *cellSchd = RG_SCH_CMN_GET_CELL(cell);
/* Recompute all UE-category-derived limits after a category change. */
9362 ueDl->maxTbBits = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlTbBits;
9364 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
9367 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
/* UE categories 6/7 support a larger TB size only with 4-layer spatial mux. */
9368 if(((CM_LTE_UE_CAT_6 == ueSchCmn->cmn.ueCat )
9369 ||(CM_LTE_UE_CAT_7 == ueSchCmn->cmn.ueCat))
9370 && (RG_SCH_MAX_TX_LYRS_4 == ri))
9372 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[1];
9376 ueDl->maxTbSz = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxDlBits[0];
9379 ueDl->maxSbSz = (rgUeCatTbl[ueSchCmn->cmn.ueCat].maxSftChBits/
/* Cap the usable UL CQI for categories without UL 64QAM support. */
9381 if (rgUeCatTbl[ueSchCmn->cmn.ueCat].ul64qamSup == FALSE)
9383 ueUl->maxUlCqi = cellSchd->ul.max16qamCqi;
9387 ueUl->maxUlCqi = RG_SCH_CMN_UL_NUM_CQI - 1;
9389 ue->ul.maxBytesPerUePerTti = rgUeCatTbl[ueSchCmn->cmn.ueCat].maxUlBits * \
9390 RG_SCH_CMN_MAX_BITS_RATIO / (RG_SCH_CMN_UL_COM_DENOM*8);
9395 * @brief UE reconfiguration for scheduler.
9399 * Function : rgSChCmnRgrUeRecfg
9401 * This functions updates UE specific scheduler
9402 * information upon UE reconfiguration.
9404 * @param[in] RgSchCellCb *cell
9405 * @param[in] RgSchUeCb *ue
9406 * @param[int] RgrUeRecfg *ueRecfg
9407 * @param[out] RgSchErrInfo *err
9413 S16 rgSCHCmnRgrUeRecfg
9417 RgrUeRecfg *ueRecfg,
9421 S16 rgSCHCmnRgrUeRecfg(cell, ue, ueRecfg, err)
9424 RgrUeRecfg *ueRecfg;
9428 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
9431 /* Basic validations */
/* Each RGR_UE_*_RECFG bit in ueRecfgTypes selects one reconfiguration
 * aspect; handle each requested aspect in turn. */
9432 if (ueRecfg->ueRecfgTypes & RGR_UE_TXMODE_RECFG)
9435 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg, cell->numTxAntPorts);
9437 rgSCHCmnDlHdlTxModeRecfg(cell, ue, ueRecfg);
9438 #endif /* TFU_UPGRADE */
9440 if(ueRecfg->ueRecfgTypes & RGR_UE_CSG_PARAM_RECFG)
9442 ue->csgMmbrSta = ueRecfg->csgMmbrSta;
9444 /* Changes for UE Category reconfiguration feature */
9445 if(ueRecfg->ueRecfgTypes & RGR_UE_UECAT_RECFG)
9447 rgSCHCmnUpdUeCatCfg(ue, cell);
9449 if (ueRecfg->ueRecfgTypes & RGR_UE_APRD_DLCQI_RECFG)
9451 RgSchUeCellInfo *pCellInfo = RG_SCH_CMN_GET_PCELL_INFO(ue);
9452 pCellInfo->acqiCb.aCqiCfg = ueRecfg->aprdDlCqiRecfg;
/* Only periodic CQI modes MOD10 and MOD20 are accepted. */
9455 if (ueRecfg->ueRecfgTypes & RGR_UE_PRD_DLCQI_RECFG)
9457 if ((ueRecfg->prdDlCqiRecfg.pres == TRUE)
9458 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD10)
9459 && (ueRecfg->prdDlCqiRecfg.prdModeEnum != RGR_PRD_CQI_MOD20))
9461 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unsupported periodic CQI "
9462 "reporting mode %d for old CRNIT:%d",
9463 (int)ueRecfg->prdDlCqiRecfg.prdModeEnum,ueRecfg->oldCrnti);
9464 err->errCause = RGSCHERR_SCH_CFG;
9467 ue->dl.ueDlCqiCfg.prdCqiCfg = ueRecfg->prdDlCqiRecfg;
9471 if (ueRecfg->ueRecfgTypes & RGR_UE_ULPWR_RECFG)
9473 if (rgSCHPwrUeRecfg(cell, ue, ueRecfg) != ROK)
9475 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9476 "Power Reconfiguration Failed for OLD CRNTI:%d",ueRecfg->oldCrnti);
9481 if (ueRecfg->ueRecfgTypes & RGR_UE_QOS_RECFG)
9483 /* Uplink Sched related Initialization */
9484 if ((ueRecfg->ueQosRecfg.dlAmbr == 0) && (ueRecfg->ueQosRecfg.ueBr == 0))
9486 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Ul Ambr and DL Ambr "
9487 "configured as 0 for OLD CRNTI:%d",ueRecfg->oldCrnti);
9488 err->errCause = RGSCHERR_SCH_CFG;
/* AMBR is scaled to bytes per refresh period, matching rgSCHCmnRgrUeCfg. */
9491 ue->ul.cfgdAmbr = (ueRecfg->ueQosRecfg.ueBr * \
9492 RG_SCH_CMN_REFRESH_TIME)/100;
9493 /* Downlink Sched related Initialization */
9494 ue->dl.ambrCfgd = (ueRecfg->ueQosRecfg.dlAmbr * \
9495 RG_SCH_CMN_REFRESH_TIME)/100;
9496 /* Fix: syed Update the effAmbr and effUeBR fields w.r.t the
9497 * new QOS configuration */
9498 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
9499 /* Fix: syed align multiple UEs to refresh at same time */
9500 rgSCHCmnGetRefreshPer(cell, ue, &waitPer);
9501 rgSCHCmnApplyUeRefresh(cell, ue);
9502 rgSCHCmnAddUeToRefreshQ(cell, ue, waitPer);
/* Spec-scheduler reconfig: eMTC UEs take the eMTC APIs, others the legacy ones. */
9505 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
9507 if ((cellSchCmn->apisEmtcUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9509 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9510 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9513 if ((cellSchCmn->apisEmtcDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9515 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9516 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9523 if ((cellSchCmn->apisUl->rgSCHRgrUlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9525 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9526 "Spec Sched UL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9529 if ((cellSchCmn->apisDl->rgSCHRgrDlUeRecfg(cell, ue, ueRecfg, err)) != ROK)
9531 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9532 "Spec Sched DL UE ReCFG FAILED for CRNTI:%d",ue->ueId);
9536 /* DLFS UE Config */
9537 if (cellSchCmn->dl.isDlFreqSel)
9539 if ((cellSchCmn->apisDlfs->rgSCHDlfsUeRecfg(cell, ue, \
9540 ueRecfg, err)) != ROK)
9542 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9543 "DLFS UE re-config FAILED for CRNTI:%d",ue->ueId);
9549 /* Invoke re-configuration on SPS module */
9550 if (rgSCHCmnSpsUeRecfg(cell, ue, ueRecfg, err) != ROK)
9552 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
9553 "DL SPS ReCFG FAILED for UE CRNTI:%d", ue->ueId);
9559 } /* rgSCHCmnRgrUeRecfg*/
9561 /***********************************************************
9563 * Func : rgSCHCmnUlUeDelAllocs
9565 * Desc : Deletion of all UE allocations.
9573 **********************************************************/
/* Walks every UL HARQ process of the UE and releases any PUSCH
 * allocation still held, clears SPS bookkeeping pointers and the
 * adaptive-retx list link, so nothing dangles after UE deletion.
 * NOTE(review): several lines (loop braces, param decls) are not
 * visible in this excerpt — confirm against full file before edits. */
9575 PRIVATE Void rgSCHCmnUlUeDelAllocs
9581 PRIVATE Void rgSCHCmnUlUeDelAllocs(cell, ue)
9586 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
9587 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
9590 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
/* One iteration per configured UL HARQ process of this UE. */
9593 for (i = 0; i < ueUl->hqEnt.numHqPrcs; ++i)
9595 RgSchUlHqProcCb *proc = rgSCHUhmGetUlHqProc(cell, ue, i);
9598 /* proc can't be NULL here */
9606 /* Added Insure Fixes Of reading Dangling memory.NULLed crntAlloc */
/* If the SPS context still points at this process's allocation,
 * clear it before the allocation is freed below. */
9608 if(proc->alloc == ulSpsUe->ulSpsSchdInfo.crntAlloc)
9610 ulSpsUe->ulSpsSchdInfo.crntAlloc = NULLP;
9611 ulSpsUe->ulSpsSchdInfo.crntAllocSf = NULLP;
/* Return the allocation to the UL subframe it was drawn from. */
9615 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
9616 proc->alloc,ue->isEmtcUe);
9618 rgSCHCmnUlFreeAllocation(cell, &cellUl->ulSfArr[proc->ulSfIdx],
9621 /* PHY probably needn't be intimated since
9622 * whatever intimation it needs happens at the last minute
9625 /* Fix: syed Adaptive Msg3 Retx crash. Remove the harqProc
9626 * from adaptive retx List. */
9627 if (proc->reTxLnk.node)
9630 //TODO_SID: Need to take care
9631 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
9632 proc->reTxLnk.node = (PTR)NULLP;
9640 /***********************************************************
9642 * Func : rgSCHCmnDelUeFrmRefreshQ
9644 * Desc : Removes a UE from the refresh queue, so that the UE
9645 * is no longer periodically triggered to refresh its GBR and
9654 **********************************************************/
/* Counterpart of rgSCHCmnAddUeToRefreshQ: deregisters the UE's
 * refresh timer from the common scheduler's timer queue. */
9656 PRIVATE Void rgSCHCmnDelUeFrmRefreshQ
9662 PRIVATE Void rgSCHCmnDelUeFrmRefreshQ(cell, ue)
9667 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
9669 RgSchCmnUeInfo *ueSchd = RG_SCH_CMN_GET_CMN_UE(ue);
9672 #ifdef RGL_SPECIFIC_CHANGES
/* Keep the per-offset UE count in step with the removal; guard
 * against underflow when the count is already zero. */
9673 if(ue->refreshOffset < RGSCH_MAX_REFRESH_GRPSZ)
9675 if(cell->refreshUeCnt[ue->refreshOffset])
9677 cell->refreshUeCnt[ue->refreshOffset]--;
/* Build the timer-deregistration request for the common timer queue. */
9683 memset(&arg, 0, sizeof(arg));
9684 arg.tqCp = &sched->tmrTqCp;
9685 arg.tq = sched->tmrTq;
9686 arg.timers = &ueSchd->tmr;
9690 arg.evnt = RG_SCH_CMN_EVNT_UE_REFRESH;
9696 /***********************************************************
9698 * Func : rgSCHCmnUeCcchSduDel
9700 * Desc : Clear CCCH SDU scheduling context.
9708 **********************************************************/
/* Detaches the UE from CCCH SDU scheduling: removes it from the
 * pending-Tx list, or — if a CCCH HARQ process exists — releases
 * its PDCCH, pulls it off the retx/Tx lists and frees the TB. */
9710 PRIVATE Void rgSCHCmnUeCcchSduDel
9716 PRIVATE Void rgSCHCmnUeCcchSduDel(cell, ueCb)
9721 RgSchDlHqEnt *hqE = NULLP;
9722 RgSchDlHqProcCb *ccchSduHqP = NULLP;
9723 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
9726 hqE = RG_SCH_CMN_GET_UE_HQE(ueCb, cell);
9731 ccchSduHqP = hqE->ccchSduProc;
9732 if(ueCb->ccchSduLnk.node != NULLP)
9734 /* Remove the ccchSduProc if it is in the Tx list */
9735 cmLListDelFrm(&(cell->ccchSduUeLst), &(ueCb->ccchSduLnk));
9736 ueCb->ccchSduLnk.node = NULLP;
9738 else if(ccchSduHqP != NULLP)
9740 /* Fix for crash due to stale pdcch. Release ccch pdcch*/
/* Return the PDCCH to the cell's free list so a later subframe
 * does not reference a stale control channel entry. */
9741 if(ccchSduHqP->pdcch)
9743 cmLListDelFrm(&ccchSduHqP->subFrm->pdcchInfo.pdcchs,
9744 &ccchSduHqP->pdcch->lnk);
9745 cmLListAdd2Tail(&cell->pdcchLst, &ccchSduHqP->pdcch->lnk);
9746 ccchSduHqP->pdcch = NULLP;
9748 if(ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node != NULLP)
9750 /* Remove the ccchSduProc if it is in the retx list */
9751 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
9752 &ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk);
9753 /* ccchSduHqP->tbInfo[0].ccchSchdInfo.retxLnk.node = NULLP; */
9754 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
9756 else if ((ccchSduHqP->subFrm != NULLP) &&
9757 (ccchSduHqP->hqPSfLnk.node != NULLP))
/* Process is queued for transmission in a subframe: pull it out
 * of that subframe before releasing transport block 0. */
9759 rgSCHUtlDlHqPTbRmvFrmTx(ccchSduHqP->subFrm,
9760 ccchSduHqP, 0, FALSE);
9761 rgSCHDhmRlsHqpTb(ccchSduHqP, 0, TRUE);
9771 * @brief UE deletion for scheduler.
9775 * Function : rgSCHCmnUeDel
9777 * This functions deletes all scheduler information
9778 * pertaining to an UE.
9780 * @param[in] RgSchCellCb *cell
9781 * @param[in] RgSchUeCb *ue
/* Tears down all common-scheduler state for a UE: CCCH context,
 * refresh queue entry, UL allocations, RACH info, UL/DL specific
 * scheduler contexts (EMTC-aware), SCells, power and SPS state,
 * pending UL allocation records, LCG blocks and the per-cell
 * RgSchCmnUe block itself. Safe to call before MSG4 completion. */
9791 Void rgSCHCmnUeDel(cell, ue)
9796 RgSchDlHqEnt *hqE = NULLP;
9797 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
9799 RgSchCmnAllocRecord *allRcd;
9801 RgSchCmnCell *cellSchCmn = RG_SCH_CMN_GET_CELL(cell);
9804 if (RG_SCH_CMN_GET_UE(ue,cell) == NULLP)
9806 /* Common scheduler config has not happened yet */
9809 hqE = RG_SCH_CMN_GET_UE_HQE(ue, cell);
9812 /* UE Free can be triggered before MSG4 done when dlHqE is not updated */
9816 rgSCHEmtcCmnUeCcchSduDel(cell, ue);
9821 rgSCHCmnUeCcchSduDel(cell, ue);
9824 rgSCHCmnDelUeFrmRefreshQ(cell, ue);
9826 rgSCHCmnUlUeDelAllocs(cell, ue);
9828 rgSCHCmnDelRachInfo(cell, ue);
/* Release UL scheduler context via the EMTC or legacy API table. */
9831 if(TRUE == ue->isEmtcUe)
9833 cellSchCmn->apisEmtcUl->rgSCHFreeUlUe(cell, ue);
9838 cellSchCmn->apisUl->rgSCHFreeUlUe(cell, ue);
/* Delete any configured secondary cells (index 0 is the PCell). */
9843 for(idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
9845 if(ue->cellInfo[idx] != NULLP)
9847 rgSCHSCellDelUeSCell(cell,ue,idx);
9854 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
9856 cellSchCmn->apisEmtcDl->rgSCHFreeDlUe(cell, ue);
9861 cellSchCmn->apisDl->rgSCHFreeDlUe(cell, ue);
9863 rgSCHPwrUeDel(cell, ue);
9866 rgSCHCmnSpsUeDel(cell, ue);
9867 #endif /* LTEMAC_SPS*/
9870 rgSchCmnDlSfHqDel(ue, cell);
9872 /* DLFS UE delete */
9873 if (cellSchCmn->dl.isDlFreqSel)
9875 cellSchCmn->apisDlfs->rgSCHDlfsUeDel(cell, ue);
/* Drain and free the UE's outstanding UL allocation records. */
9877 node = ueUl->ulAllocLst.first;
9879 /* ccpu00117052 - MOD - Passing double pointer in all the places of
9880 rgSCHUtlFreeSBuf function call for proper NULLP assignment*/
9883 allRcd = (RgSchCmnAllocRecord *)node->node;
9885 cmLListDelFrm(&ueUl->ulAllocLst, &allRcd->lnk);
9886 rgSCHUtlFreeSBuf(cell->instIdx,
9887 (Data**)(&allRcd), (sizeof(RgSchCmnAllocRecord)));
/* Free every configured logical channel group scheduler block. */
9890 for(cnt = 0; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
9892 if (ue->ul.lcgArr[cnt].sch != NULLP)
9894 rgSCHUtlFreeSBuf(cell->instIdx,
9895 (Data**)(&(ue->ul.lcgArr[cnt].sch)), (sizeof(RgSchCmnLcg)));
9899 /* Fix : syed Moved hqEnt deinit to rgSCHCmnDlDeInitHqEnt */
9900 idx = (U8)((cell->cellId - rgSchCb[cell->instIdx].genCfg.startCellId) & (CM_LTE_MAX_CELLS - 1));
9901 rgSCHUtlFreeSBuf(cell->instIdx,
9902 (Data**)(&(((ue->cellInfo[ue->cellIdToCellIdxMap[idx]])->sch))), (sizeof(RgSchCmnUe)));
9904 } /* rgSCHCmnUeDel */
9908 * @brief This function handles the common code rate configurations
9909 * done as part of RgrCellCfg/RgrCellRecfg.
9913 * Function: rgSCHCmnDlCnsdrCmnRt
9914 * Purpose: This function handles the common code rate configurations
9915 * done as part of RgrCellCfg/RgrCellRecfg.
9917 * Invoked by: Scheduler
9919 * @param[in] RgSchCellCb *cell
9920 * @param[in] RgrDlCmnCodeRateCfg *dlCmnCodeRate
/* Derives, from the configured BCCH/PCCH/RA and PDCCH code rates:
 * bits-per-RB for common channels, the ITBS to use for 2-RB and
 * 3-RB common allocations, the common-channel PDCCH aggregation
 * level, and the CCCH CQI. Returns ROK/RFAILED (S16). */
9925 PRIVATE S16 rgSCHCmnDlCnsdrCmnRt
9928 RgrDlCmnCodeRateCfg *dlCmnCodeRate
9931 PRIVATE S16 rgSCHCmnDlCnsdrCmnRt(cell, dlCmnCodeRate)
9933 RgrDlCmnCodeRateCfg *dlCmnCodeRate;
9936 RgSchCmnCell *cellDl = RG_SCH_CMN_GET_CELL(cell);
9944 /* code rate is bits per 1024 phy bits, since modl'n scheme is 2. it is
9945 * bits per 1024/2 REs */
9946 if (dlCmnCodeRate->bcchPchRaCodeRate != 0)
9948 bitsPerRb = ((dlCmnCodeRate->bcchPchRaCodeRate * 2) *
9949 cellDl->dl.noResPerRb[3])/1024;
9953 bitsPerRb = ((RG_SCH_CMN_DEF_BCCHPCCH_CODERATE * 2) *
9954 cellDl->dl.noResPerRb[3])/1024;
9956 /* Store bitsPerRb in cellDl->dl to use later to determine
9957 * Number of RBs for UEs with SI-RNTI, P-RNTI and RA-RNTI */
9958 cellDl->dl.bitsPerRb = bitsPerRb;
9959 /* ccpu00115595 end*/
9960 /* calculate the ITbs for 2 RBs. Initialize ITbs to MAX value */
9963 bitsPer2Rb = bitsPerRb * rbNum;
/* Scan rgTbSzTbl (QPSK row) for the largest ITBS whose TB size
 * still fits in the available bits; i==0/1 collapses to ITBS 0. */
9964 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer2Rb))
9967 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs2Rbs = 0) :
9968 (cellDl->dl.cmnChITbs.iTbs2Rbs = i-1);
9970 /* calculate the ITbs for 3 RBs. Initialize ITbs to MAX value */
9973 bitsPer3Rb = bitsPerRb * rbNum;
9974 while ((i < 9) && (rgTbSzTbl[0][i][rbNum - 1] <= bitsPer3Rb))
9977 (i <= 1)? (cellDl->dl.cmnChITbs.iTbs3Rbs = 0) :
9978 (cellDl->dl.cmnChITbs.iTbs3Rbs = i-1);
/* Build up the raw DCI payload size field-by-field (format 1A style). */
9981 pdcchBits = 1 + /* Flag for format0/format1a differentiation */
9982 1 + /* Localized/distributed VRB assignment flag */
9985 3 + /* Harq process Id */
9987 4 + /* Harq process Id */
9988 2 + /* UL Index or DAI */
9990 1 + /* New Data Indicator */
9993 1 + rgSCHUtlLog32bitNbase2((cell->bwCfg.dlTotalBw * \
9994 (cell->bwCfg.dlTotalBw + 1))/2);
9995 /* Resource block assignment ceil[log2(bw(bw+1)/2)] : \
9996 Since VRB is local */
9997 /* For TDD consider DAI */
9999 /* Convert the pdcchBits to actual pdcchBits required for transmission */
10000 if (dlCmnCodeRate->pdcchCodeRate != 0)
10002 pdcchBits = (pdcchBits * 1024)/dlCmnCodeRate->pdcchCodeRate;
10003 if (pdcchBits <= 288) /* 288 : Num of pdcch bits for aggrLvl=4 */
10005 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
10007 else /* 576 : Num of pdcch bits for aggrLvl=8 */
10009 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL8;
/* No PDCCH code rate configured: fall back to aggregation level 4. */
10014 cellDl->dl.cmnChAggrLvl = CM_LTE_AGGR_LVL4;
10016 if (dlCmnCodeRate->ccchCqi == 0)
10022 cellDl->dl.ccchCqi = dlCmnCodeRate->ccchCqi;
10029 * @brief This function handles the configuration of cell for the first
10030 * time by the scheduler.
10034 * Function: rgSCHCmnDlRgrCellCfg
10035 * Purpose: Configuration received is stored into the data structures
10036 * Also, update the scheduler with the number of frames of
10037 * RACH preamble transmission.
10039 * Invoked by: BO and Scheduler
10041 * @param[in] RgSchCellCb* cell
10042 * @param[in] RgrCellCfg* cfg
/* TDD variant (see the FDD variant under the matching #else).
 * Initialises DL common-scheduler cell state: Msg4 HARQ limits,
 * special-subframe handling (DwPTS/UpPTS), per-subframe CCE
 * counts, RACH/PHICH/ACK-NACK tables, CQI->TBS/efficiency tables,
 * CFI state, per-UE BW caps, common code rates, DLFS and power. */
10047 PRIVATE S16 rgSCHCmnDlRgrCellCfg
10054 PRIVATE S16 rgSCHCmnDlRgrCellCfg(cell, cfg, err)
10060 RgSchCmnCell *cellSch;
10065 U8 maxDlSubfrms = cell->numDlSubfrms;
10066 U8 splSubfrmIdx = cfg->spclSfCfgIdx;
10069 RgSchTddSubfrmInfo subfrmInfo = rgSchTddMaxUlSubfrmTbl[cell->ulDlCfgIdx];
10082 cellSch = RG_SCH_CMN_GET_CELL(cell);
10083 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->\
10084 rachCfg.preambleFormat];
10085 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
10086 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
10088 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
10089 3 TTI (MAX L1+L2 processing delay at the UE) */
10090 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
10091 rgSchCmnHarqRtt[cell->ulDlCfgIdx] + 3;
10092 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10093 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10094 if (cfg->maxUePerDlSf == 0)
10096 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10098 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
10104 if (cell->bwCfg.dlTotalBw <= 10)
10114 /* DwPTS Scheduling Changes Start */
10115 cellSch->dl.splSfCfg = splSubfrmIdx;
/* Whether DL data is allowed in the special subframe depends on
 * the special-subframe config index and CP length (36.213 7.1.7). */
10117 if (cfg->isCpDlExtend == TRUE)
10119 if((0 == splSubfrmIdx) || (4 == splSubfrmIdx) ||
10120 (7 == splSubfrmIdx) || (8 == splSubfrmIdx)
10123 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
10127 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
10132 /* Refer to 36.213 Section 7.1.7 */
10133 if((0 == splSubfrmIdx) || (5 == splSubfrmIdx))
10135 cell->splSubfrmCfg.isDlDataAllowed = FALSE;
10139 cell->splSubfrmCfg.isDlDataAllowed = TRUE;
10142 /* DwPTS Scheduling Changes End */
10144 splSfCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
10145 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
/* Classify each DL subframe (special-with-data / special-no-data /
 * normal / subframe 0) and size its CCE space accordingly. */
10147 for (sfCount = 0; sfCount < maxDlSubfrms; sfCount++)
10149 sf = cell->subFrms[sfCount];
10150 /* Sfcount matches the first special subframe occurs at Index 0
10151 * or subsequent special subframes */
10152 if(subfrmInfo.switchPoints == 1)
10154 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
10155 RG_SCH_CMN_10_MS_PRD, &subfrmInfo);
10159 isSplfrm = rgSCHCmnIsSplSubfrm(swPtCnt, sfCount,
10160 RG_SCH_CMN_5_MS_PRD, &subfrmInfo);
10162 if(isSplfrm == TRUE)
10165 /* DwPTS Scheduling Changes Start */
10166 if (cell->splSubfrmCfg.isDlDataAllowed == TRUE)
10168 sf->sfType = RG_SCH_SPL_SF_DATA;
10172 sf->sfType = RG_SCH_SPL_SF_NO_DATA;
10174 /* DwPTS Scheduling Changes End */
10178 /* DwPTS Scheduling Changes Start */
10179 if (sf->sfNum != 0)
10181 sf->sfType = RG_SCH_DL_SF;
10185 sf->sfType = RG_SCH_DL_SF_0;
10187 /* DwPTS Scheduling Changes End */
10190 /* Calculate the number of CCEs per subframe in the cell */
10191 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][sf->sfNum];
10192 if(cell->dynCfiCb.isDynCfiEnb == TRUE)
10194 /* In case if Dynamic CFI feature is enabled, default CFI
10195 * value 1 is used */
10196 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][1];
10200 if (sf->sfType == RG_SCH_SPL_SF_DATA)
10202 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
10206 sf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi)];
10211 /* Initialize the RACH response scheduling related information */
10212 if(rgSCHCmnDlRachInfoInit(cell) != ROK)
10217 /* Allocate PRACH preamble list */
10218 rgSCHCmnDlCreateRachPrmLst(cell);
10220 /* Initialize PHICH offset information */
10221 rgSCHCmnDlPhichOffsetInit(cell);
10223 /* Update the size of HARQ ACK/NACK feedback table */
10224 /* The array size is increased by 2 to have enough free indices, where other
10225 * indices are busy waiting for HARQ feedback */
10226 cell->ackNackFdbkArrSize = rgSchTddANFdbkMapTbl[cell->ulDlCfgIdx] + 2;
10228 /* Initialize expected HARQ ACK/NACK feedback time */
10229 rgSCHCmnDlANFdbkInit(cell);
10231 /* Initialize UL association set index */
10232 if(cell->ulDlCfgIdx != 0)
10234 rgSCHCmnDlKdashUlAscInit(cell);
/* Pick DwPTS/UpPTS symbol counts from the special-subframe table
 * based on DL/UL cyclic prefix configuration. */
10237 if (cfg->isCpDlExtend == TRUE)
10239 cp = RG_SCH_CMN_EXT_CP;
10241 cell->splSubfrmCfg.dwPts =
10242 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlDwPts;
10244 if ( cell->splSubfrmCfg.dwPts == 0 )
10246 cell->isDwPtsCnted = FALSE;
10250 cell->isDwPtsCnted = TRUE;
10253 if(cfg->isCpUlExtend == TRUE)
10255 cell->splSubfrmCfg.upPts =
10256 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlExtUpPts;
10260 cell->splSubfrmCfg.upPts =
10261 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].extDlNorUpPts;
10266 cp = RG_SCH_CMN_NOR_CP;
10268 cell->splSubfrmCfg.dwPts =
10269 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlDwPts;
10270 cell->isDwPtsCnted = TRUE;
10272 if(cfg->isCpUlExtend == TRUE)
10274 cell->splSubfrmCfg.upPts =
10275 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlExtUpPts;
10279 cell->splSubfrmCfg.upPts =
10280 rgSchTddSplSubfrmInfoTbl[splSubfrmIdx].norDlNorUpPts;
10284 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
10285 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++,cfiIdx++)
10287 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
10288 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
10289 [cell->numTxAntPorts]][cfiIdx];
10290 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
10291 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
10292 [cell->numTxAntPorts]][cfiIdx];
10295 /* Initializing the values of CFI parameters */
10296 if(cell->dynCfiCb.isDynCfiEnb)
10298 /* If DCFI is enabled, current CFI value will start from 1 */
10299 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
10303 /* If DCFI is disabled, current CFI value is set as default max allowed CFI value */
10304 cellSch->dl.currCfi = RGSCH_MIN(cell->dynCfiCb.maxCfi, cellSch->cfiCfg.cfi);
10305 cellSch->dl.newCfi = cellSch->dl.currCfi;
10308 /* Include CRS REs while calculating Efficiency
10309 * The number of Resource Elements occupied by CRS depends on Number of
10310 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
10311 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
10312 * details of the same. Please note that PDCCH overlap symbols would not
10313 * considered in CRS REs deduction */
10314 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
10316 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
10317 - numPdcchSym) *RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
10320 /* DwPTS Scheduling Changes Start */
10321 antPortIdx = (cell->numTxAntPorts == 1)? 0:
10322 ((cell->numTxAntPorts == 2)? 1: 2);
10324 if (cp == RG_SCH_CMN_NOR_CP)
10326 splSfIdx = (splSubfrmIdx == 4)? 1: 0;
10330 splSfIdx = (splSubfrmIdx == 3)? 1: 0;
10333 numCrs = rgSchCmnDwptsCrs[splSfIdx][antPortIdx];
10335 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI-1; cfi++)
10337 /* If CFI is 2 and Ant Port is 4, don't consider the sym 1 CRS REs */
10338 if (antPortIdx == 2 && cfi == 2)
10342 cellSch->dl.numReDwPts[cfi] = ((cell->splSubfrmCfg.dwPts - cfi)*
10343 RB_SCH_CMN_NUM_SCS_PER_RB) - numCrs;
10345 /* DwPTS Scheduling Changes End */
/* Per-UE DL bandwidth caps: 0 from config means "use default". */
10347 if (cfg->maxDlBwPerUe == 0)
10349 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
10353 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
10355 if (cfg->maxDlRetxBw == 0)
10357 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
10361 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
10363 /* Fix: MUE_PERTTI_DL*/
10364 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10365 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10366 if (cfg->maxUePerDlSf == 0)
10368 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10370 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
10371 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
10372 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
10374 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10375 "Invalid configuration !: "
10376 "maxCcchPerDlSf %u > maxUePerDlSf %u",
10377 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
10381 else if (!cfg->maxCcchPerDlSf)
10383 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
10384 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
10385 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
10386 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
10387 * FLE crash in PHY as PHY has limit of 16 max*/
10388 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
10392 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
10394 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
10399 /*ccpu00118273 - ADD - start */
10400 cmLListInit(&cellSch->dl.msg4RetxLst);
10402 cmLListInit(&cellSch->dl.ccchSduRetxLst);
10405 #ifdef RG_PHASE2_SCHED
10406 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
10408 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
10410 if (cfg->dlfsCfg.isDlFreqSel)
10412 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
10418 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
10421 /* Power related configuration */
10422 ret = rgSCHPwrCellCfg(cell, cfg);
10428 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
10429 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
10430 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
10431 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
10432 cellSch->dl.msg4pAVal = cfg->msg4pAVal;
10435 #else /* LTE_TDD */
10437 * @brief This function handles the configuration of cell for the first
10438 * time by the scheduler.
10442 * Function: rgSCHCmnDlRgrCellCfg
10443 * Purpose: Configuration received is stored into the data structures
10444 * Also, update the scheduler with the number of frames of
10445 * RACH preamble transmission.
10447 * Invoked by: BO and Scheduler
10449 * @param[in] RgSchCellCb* cell
10450 * @param[in] RgrCellCfg* cfg
10451 * @param[in] RgSchErrInfo* err
/* Non-TDD (FDD) variant of rgSCHCmnDlRgrCellCfg — see the TDD
 * variant under the matching #if. Same responsibilities, minus
 * special-subframe/DwPTS handling; HARQ RTT uses a fixed index. */
10456 PRIVATE S16 rgSCHCmnDlRgrCellCfg
10463 PRIVATE S16 rgSCHCmnDlRgrCellCfg(cell, cfg, err)
10470 RgSchCmnCell *cellSch;
10478 cellSch = RG_SCH_CMN_GET_CELL(cell);
10480 /* Initialize the parameters with the ones received in the */
10481 /* configuration. */
10483 /* Added matrix 'rgRaPrmblToRaFrmTbl' for computation of RA
10484 * sub-frames from preamble format */
10485 cellSch->dl.numRaSubFrms = rgRaPrmblToRaFrmTbl[cell->rachCfg.preambleFormat];
10487 /*[ccpu00138532]-ADD-fill the Msg4 Harq data */
10488 cell->dlHqCfg.maxMsg4HqTx = cfg->dlHqCfg.maxMsg4HqTx;
10490 /* Msg4 Tx Delay = (HARQ_RTT * MAX_MSG4_HARQ_RETX) +
10491 3 TTI (MAX L1+L2 processing delay at the UE) */
10492 cellSch->dl.msg4TxDelay = (cfg->dlHqCfg.maxMsg4HqTx-1) *
10493 rgSchCmnHarqRtt[7] + 3;
10495 if (cell->bwCfg.dlTotalBw <= 10)
10506 if (cell->isCpDlExtend == TRUE)
10508 cp = RG_SCH_CMN_EXT_CP;
10513 cp = RG_SCH_CMN_NOR_CP;
10517 /* Initializing the cqiToEffTbl and cqiToTbsTbl for every CFI value */
10518 for(cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, cfiIdx++)
10520 cellSch->dl.cqiToTbsTbl[0][cfi] = rgSchCmnCqiToTbs[0][cp][cfiIdx];
10522 cellSch->dl.emtcCqiToTbsTbl[0][cfi] = rgSchEmtcCmnCqiToTbs[0][cp][cfiIdx];
10524 cellSch->dl.cqiToEffTbl[0][cfi] = rgSchCmnEffTbl[0][cp][rgSchCmnAntIdx\
10525 [cell->numTxAntPorts]][cfiIdx];
10526 cellSch->dl.cqiToTbsTbl[1][cfi] = rgSchCmnCqiToTbs[1][cp][cfiIdx];
10528 cellSch->dl.emtcCqiToTbsTbl[1][cfi] = rgSchEmtcCmnCqiToTbs[1][cp][cfiIdx];
10530 cellSch->dl.cqiToEffTbl[1][cfi] = rgSchCmnEffTbl[1][cp][rgSchCmnAntIdx\
10531 [cell->numTxAntPorts]][cfiIdx];
10534 /* Initializing the values of CFI parameters */
10535 if(cell->dynCfiCb.isDynCfiEnb)
10537 /* If DCFI is enabled, current CFI value will start from 1 */
10538 cellSch->dl.currCfi = cellSch->dl.newCfi = 1;
10542 /* If DCFI is disabled, current CFI value is set as default CFI value */
10543 cellSch->dl.currCfi = cellSch->cfiCfg.cfi;
10544 cellSch->dl.newCfi = cellSch->dl.currCfi;
10547 /* Include CRS REs while calculating Efficiency
10548 * The number of Resource Elements occupied by CRS depends on Number of
10549 * Antenna Ports. Please refer to Section 6.10.1 of 3GPP TS 36.211 V8.8.0.
10550 * Also, please refer to Figures 6.10.1.2-1 and 6.10.1.2-2 for diagrammatic
10551 * details of the same. Please note that PDCCH overlap symbols would not
10552 * considered in CRS REs deduction */
10553 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++, numPdcchSym++)
10555 cellSch->dl.noResPerRb[cfi] = (((noSymPerSlot * RG_SCH_CMN_NUM_SLOTS_PER_SF)
10556 - numPdcchSym) * RB_SCH_CMN_NUM_SCS_PER_RB) - rgSchCmnNumResForCrs[cell->numTxAntPorts];
/* Per-UE DL bandwidth caps: 0 from config means "use default". */
10559 if (cfg->maxDlBwPerUe == 0)
10561 cellSch->dl.maxDlBwPerUe = RG_SCH_CMN_MAX_DL_BW_PERUE;
10565 cellSch->dl.maxDlBwPerUe = cfg->maxDlBwPerUe;
10567 if (cfg->maxDlRetxBw == 0)
10569 cellSch->dl.maxDlRetxBw = RG_SCH_CMN_MAX_DL_RETX_BW;
10573 cellSch->dl.maxDlRetxBw = cfg->maxDlRetxBw;
10576 /* Fix: MUE_PERTTI_DL*/
10577 cellSch->dl.maxUePerDlSf = cfg->maxUePerDlSf;
10578 cellSch->dl.maxUeNewTxPerTti = cfg->maxDlUeNewTxPerTti;
10579 if (cfg->maxUePerDlSf == 0)
10581 cellSch->dl.maxUePerDlSf = RG_SCH_CMN_MAX_UE_PER_DL_SF;
10583 /* Fix: MUE_PERTTI_DL syed validating Cell Configuration */
10584 if (cellSch->dl.maxUePerDlSf < cellSch->dl.maxUeNewTxPerTti)
10586 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
10587 "FAILED MaxUePerDlSf(%u) < MaxDlUeNewTxPerTti(%u)",
10588 cellSch->dl.maxUePerDlSf,
10589 cellSch->dl.maxUeNewTxPerTti);
10592 /*[ccpu00138609]-ADD- Configure the Max CCCH Counter */
10593 if (cfg->maxCcchPerDlSf > cfg->maxUePerDlSf)
10595 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid configuration !: "
10596 "maxCcchPerDlSf %u > maxUePerDlSf %u",
10597 cfg->maxCcchPerDlSf, cfg->maxUePerDlSf );
10601 else if (!cfg->maxCcchPerDlSf)
10603 /* ccpu00143032: maxCcchPerDlSf 0 means not configured by application
10604 * hence setting to maxUePerDlSf. If maxCcchPerDlSf is 0 then scheduler
10605 * does't consider CCCH allocation in MaxUePerTti cap. Hence more than
10606 * 4UEs getting schduled & SCH expects >16 Hq PDUs in a TTI which causes
10607 * FLE crash in PHY as PHY has limit of 16 max*/
10608 cellSch->dl.maxCcchPerDlSf = cfg->maxUePerDlSf;
10612 cellSch->dl.maxCcchPerDlSf = cfg->maxCcchPerDlSf;
10616 if (rgSCHCmnDlCnsdrCmnRt(cell, &cfg->dlCmnCodeRate) != ROK)
10620 cmLListInit(&cellSch->dl.msg4RetxLst);
10622 cmLListInit(&cellSch->dl.ccchSduRetxLst);
10625 #ifdef RG_PHASE2_SCHED
10626 if (cellSch->apisDlfs == NULLP) /* DFLS specific initialization */
10628 cellSch->apisDlfs = &rgSchDlfsSchdTbl[cfg->dlfsSchdType];
10630 if (cfg->dlfsCfg.isDlFreqSel)
10632 ret = cellSch->apisDlfs->rgSCHDlfsCellCfg(cell, cfg, err);
10638 cellSch->dl.isDlFreqSel = cfg->dlfsCfg.isDlFreqSel;
10641 /* Power related configuration */
10642 ret = rgSCHPwrCellCfg(cell, cfg);
10648 cellSch->dl.bcchTxPwrOffset = cfg->bcchTxPwrOffset;
10649 cellSch->dl.pcchTxPwrOffset = cfg->pcchTxPwrOffset;
10650 cellSch->dl.rarTxPwrOffset = cfg->rarTxPwrOffset;
10651 cellSch->dl.phichTxPwrOffset = cfg->phichTxPwrOffset;
10652 RG_SCH_RESET_HCSG_DL_PRB_CNTR(&cellSch->dl);
10655 #endif /* LTE_TDD */
10657 /***********************************************************
10659 * Func : rgSCHCmnUlCalcReqRbCeil
10661 * Desc : Calculate RB required to satisfy 'bytes' for
10663 * Returns number of RBs such that requirement
10664 * is necessarily satisfied (does a 'ceiling'
10667 * Ret : Required RBs (U8)
10673 **********************************************************/
/* bytes -> bits -> resource elements (via per-CQI spectral
 * efficiency, scaled by 1024) -> RBs, rounding up at each step so
 * the result always satisfies the requested payload. */
10675 U8 rgSCHCmnUlCalcReqRbCeil
10679 RgSchCmnUlCell *cellUl
10682 U8 rgSCHCmnUlCalcReqRbCeil(bytes, cqi, cellUl)
10685 RgSchCmnUlCell *cellUl;
10688 U32 numRe = RGSCH_CEIL((bytes * 8) * 1024, rgSchCmnUlCqiTbl[cqi].eff);
10689 return ((U8)RGSCH_CEIL(numRe, RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl)));
10692 /***********************************************************
10694 * Func : rgSCHCmnPrecompMsg3Vars
10696 * Desc : Precomputes the following for msg3 allocation:
10697 * 1. numSb and Imcs for msg size A
10698 * 2. numSb and Imcs otherwise
10702 * Notes: The corresponding vars in cellUl struct is filled
10707 **********************************************************/
/* Precomputes Msg3 grant parameters for RA preamble groups A and B
 * (TS 36.321 5.1.2): number of subbands (constrained to the 2^a*3^b*5^c
 * DFT sizes via rgSchCmnMult235Tbl) and IMCS (capped at 15 to fit
 * the 4-bit RAR field). Results are cached in cellUl->ra. */
10709 PRIVATE S16 rgSCHCmnPrecompMsg3Vars
10711 RgSchCmnUlCell *cellUl,
10718 PRIVATE S16 rgSCHCmnPrecompMsg3Vars(cellUl, ccchCqi, msgSzA, sbSize, isEcp)
10719 RgSchCmnUlCell *cellUl;
10731 U16 msg3GrntSz = 0;
/* Cap CCCH CQI so Msg3 never requires more than 16QAM capability. */
10734 if (ccchCqi > cellUl->max16qamCqi)
10736 ccchCqi = cellUl->max16qamCqi;
10738 /* #ifndef RG_SCH_CMN_EXP_CP_SUP For ECP Pick the index 1 */
10740 ccchTbs = rgSchCmnUlCqiToTbsTbl[(U8)isEcp][ccchCqi];
10741 ccchMcs = rgSCHCmnUlGetIMcsFrmITbs(ccchTbs, CM_LTE_UE_CAT_1);
10743 /* MCS should fit in 4 bits in RAR */
10749 /* Limit the ccchMcs to 15 as it
10750 * can be inferred from 36.213, section 6.2 that msg3 imcs
10752 * Since, UE doesn't exist right now, we use CAT_1 for ue
/* Step CQI down until the derived IMCS fits the RAR's 4-bit field. */
10754 while((ccchMcs = (rgSCHCmnUlGetIMcsFrmITbs(
10755 rgSchCmnUlCqiToTbsTbl[(U8)isEcp][ccchCqi],CM_LTE_UE_CAT_1))
10757 RG_SCH_CMN_MAX_MSG3_IMCS)
10762 iTbs = rgSchCmnUlCqiToTbsTbl[(U8)isEcp][ccchCqi];
10764 if (msgSzA < RGSCH_MIN_MSG3_GRNT_SZ)
/* Preamble group B: size the grant for msgSzA, then grow until the
 * TB size table covers the payload and numSb is a valid 2/3/5 mult. */
10768 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(msgSzA, ccchCqi, cellUl), sbSize);
10770 numRb = numSb * sbSize;
10771 msg3GrntSz = 8 * msgSzA;
10773 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
10776 numRb = numSb * sbSize;
10778 while (rgSchCmnMult235Tbl[numSb].match != numSb)
10782 /* Reversed(Corrected) the assignment for preamble-GrpA
10783 * Refer- TG36.321- section- 5.1.2*/
10784 cellUl->ra.prmblBNumSb = numSb;
10785 cellUl->ra.prmblBIMcs = ccchMcs;
/* Preamble group A: same derivation using the minimum Msg3 grant size. */
10786 numSb = RGSCH_CEIL(rgSCHCmnUlCalcReqRbCeil(RGSCH_MIN_MSG3_GRNT_SZ, \
10790 numRb = numSb * sbSize;
10791 msg3GrntSz = 8 * RGSCH_MIN_MSG3_GRNT_SZ;
10792 while( (rgTbSzTbl[0][iTbs][numRb - 1]) < msg3GrntSz)
10795 numRb = numSb * sbSize;
10797 while (rgSchCmnMult235Tbl[numSb].match != numSb)
10801 /* Reversed(Corrected) the assignment for preamble-GrpA
10802 * Refer- TG36.321- section- 5.1.2*/
10803 cellUl->ra.prmblANumSb = numSb;
10804 cellUl->ra.prmblAIMcs = ccchMcs;
/* Debug flag/counter gating PUCCH detail printing — presumably
 * toggled from a debugger; confirm usage sites before removal. */
10808 U32 gPrntPucchDet=0;
10811 /***********************************************************
10813 * Func : rgSCHCmnUlCalcAvailBw
10815 * Desc : Calculates bandwidth available for PUSCH scheduling.
10817 * Ret : S16 (ROK/RFAILED)
10823 **********************************************************/
/* TDD variant: derives the PUCCH region size (format 2 RBs, mixed
 * RB, format 1/1a/1b RBs per 36.211 5.4.3 using the worst-case CCE
 * count and the per-config M value from 36.213 Table 10.1-1), then
 * returns the PUSCH start RB and available bandwidth by reference. */
10825 PRIVATE S16 rgSCHCmnUlCalcAvailBw
10828 RgrCellCfg *cellCfg,
10834 PRIVATE S16 rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, rbStartRef, bwAvailRef)
10836 RgrCellCfg *cellCfg;
10843 U8 ulBw = cell->bwCfg.ulTotalBw;
10844 U8 n2Rb = cell->pucchCfg.resourceSize;
10845 U8 pucchDeltaShft = cell->pucchCfg.deltaShift;
10846 U16 n1Pucch = cell->pucchCfg.n1PucchAn;
10847 U8 n1Cs = cell->pucchCfg.cyclicShift;
10854 U8 exclRb; /* RBs to exclude */
10857 /* To avoid PUCCH and PUSCH collision issue */
10861 /* Maximum value of M as per Table 10.1-1 */
10862 U8 M[RGSCH_MAX_TDD_UL_DL_CFG] = {1, 2, 4, 3, 4, 9, 1};
10865 if (cell->isCpUlExtend)
10870 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
10872 /* Considering the max no. of CCEs for PUSCH BW calculation
10873 * based on min mi value */
10874 if (cell->ulDlCfgIdx == 0 || cell->ulDlCfgIdx == 6)
10883 totalCce = cell->dynCfiCb.cfi2NCceTbl[mi][cfi];
10885 P = rgSCHCmnGetPValFrmCCE(cell, totalCce-1);
10886 n1PlusOne = cell->rgSchTddNpValTbl[P + 1];
10887 n1Max = (M[cell->ulDlCfgIdx] - 1)*n1PlusOne + (totalCce-1) + n1Pucch;
10889 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
10891 n1RbPart = (c*n1Cs)/pucchDeltaShft;
10892 n1Rb = (n1Max - n1RbPart)/ n1PerRb;
10893 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
10895 /* get the total Number of RB's to be excluded for PUSCH */
10897 if(n1Pucch < n1RbPart)
10903 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
10905 puschRbStart = exclRb/2 + 1;
10907 /* Num of PUCCH RBs = puschRbStart*2 */
10908 if (puschRbStart * 2 >= ulBw)
10910 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
10914 *rbStartRef = puschRbStart;
10915 *bwAvailRef = ulBw - puschRbStart * 2;
/* If PUCCH would exceed its configured RB cap at this CFI, lower
 * the cell's max CFI so later calculations stay within the cap. */
10917 if(cell->pucchCfg.maxPucchRb !=0 &&
10918 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
10920 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
10927 /***********************************************************
10929 * Func : rgSCHCmnUlCalcAvailBw
10931 * Desc : Calculates bandwidth available for PUSCH scheduling.
10933 * Ret : S16 (ROK/RFAILED)
10939 **********************************************************/
/* Non-TDD variant: same PUCCH-region exclusion as the TDD version
 * (36.211 5.4.3) without the M/P association-set terms, plus an
 * optional PUCCH format 3 RB reservation scaled by maxUePerDlSf. */
10941 PRIVATE S16 rgSCHCmnUlCalcAvailBw
10944 RgrCellCfg *cellCfg,
10950 PRIVATE S16 rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, rbStartRef, bwAvailRef)
10952 RgrCellCfg *cellCfg;
10959 U8 ulBw = cell->bwCfg.ulTotalBw;
10960 U8 n2Rb = cell->pucchCfg.resourceSize;
10961 U8 pucchDeltaShft = cell->pucchCfg.deltaShift;
10962 U16 n1Pucch = cell->pucchCfg.n1PucchAn;
10963 U8 n1Cs = cell->pucchCfg.cyclicShift;
10969 U8 exclRb; /* RBs to exclude */
10973 U16 numOfN3PucchRb;
10974 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
10978 if (cell->isCpUlExtend)
10983 n1PerRb = c * 12 / pucchDeltaShft; /* 12/18/36 */
10985 totalCce = cell->dynCfiCb.cfi2NCceTbl[0][cfi];
10987 n1Max = n1Pucch + totalCce-1;
10989 /* ccpu00129978- MOD- excluding RBs based on formula in section 5.4.3 in
10991 n1RbPart = (c*n1Cs)/pucchDeltaShft;
10992 n1Rb = (U8)((n1Max - n1RbPart) / n1PerRb);
10993 mixedRb = RGSCH_CEIL(n1Cs, 8); /* same as 'mixedRb = n1Cs ? 1 : 0' */
10995 /* get the total Number of RB's to be excluded for PUSCH */
10997 if(n1Pucch < n1RbPart)
11003 exclRb = n2Rb + mixedRb + n1Rb; /* RBs to exclude */
11005 /*Support for PUCCH Format 3*/
11007 if (cell->isPucchFormat3Sptd)
11009 numOfN3PucchRb = RGSCH_CEIL(cellSch->dl.maxUePerDlSf,5);
11010 exclRb = exclRb + numOfN3PucchRb;
11013 puschRbStart = exclRb/2 + 1;
/* Debug trace only; %ld vs %d variant chosen by pointer width. */
11017 #ifndef ALIGN_64BIT
11018 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%ld:%d:%d:%d:%d:%d]\n",
11019 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
11021 printf("CA_DBG:: puschRbStart:n1Rb:mixedRb:n1PerRb:totalCce:n1Max:n1RbPart:n2Rb::[%d:%d] [%d:%d:%d:%d:%d:%d:%d:%d]\n",
11022 cell->crntTime.sfn, cell->crntTime.slot, puschRbStart, n1Rb, mixedRb,n1PerRb, totalCce, n1Max, n1RbPart, n2Rb);
11026 if (puschRbStart*2 >= ulBw)
11028 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"No bw available for PUSCH");
11032 *rbStartRef = puschRbStart;
11033 *bwAvailRef = ulBw - puschRbStart * 2;
/* If PUCCH would exceed its configured RB cap at this CFI, lower
 * the cell's max CFI so later calculations stay within the cap. */
11035 if(cell->pucchCfg.maxPucchRb !=0 &&
11036 (puschRbStart * 2 > cell->pucchCfg.maxPucchRb))
11038 cell->dynCfiCb.maxCfi = RGSCH_MIN(cfi-1, cell->dynCfiCb.maxCfi);
11047 /***********************************************************
11049 * Func : rgSCHCmnUlCellInit
11051 * Desc : Uplink scheduler initialisation for cell.
11059 **********************************************************/
/*
 * NOTE(review): this listing is sampled; hidden lines include #ifdef arms
 * (LTE_TDD vs FDD, EMTC), closing braces and RETVALUE statements. Comments
 * below describe only the visible code.
 */
11061 PRIVATE S16 rgSCHCmnUlCellInit
11064 RgrCellCfg *cellCfg
11067 PRIVATE S16 rgSCHCmnUlCellInit(cell, cellCfg)
11069 RgrCellCfg *cellCfg;
11073 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* Per-subframe UE / MSG3 caps from RGR cell configuration; 0 means
 * "use compile-time default" (handled below) */
11074 U8 maxUePerUlSf = cellCfg->maxUePerUlSf;
11076 /* Added configuration for maximum number of MSG3s */
11077 U8 maxMsg3PerUlSf = cellCfg->maxMsg3PerUlSf;
11079 U8 maxUlBwPerUe = cellCfg->maxUlBwPerUe;
11080 U8 sbSize = cellCfg->puschSubBand.size;
/* TDD-only locals (this #ifdef arm): UL subframe bookkeeping per UL/DL
 * configuration index */
11088 U16 ulDlCfgIdx = cell->ulDlCfgIdx;
11089 /* [ccpu00127294]-MOD-Change the max Ul subfrms size in TDD */
11090 U8 maxSubfrms = 2 * rgSchTddNumUlSf[ulDlCfgIdx];
11091 U8 ulToDlMap[12] = {0}; /* maximum 6 Subframes in UL * 2 */
11092 U8 maxUlsubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
11093 [RGSCH_NUM_SUB_FRAMES-1];
/* FDD arm: fixed number of UL subframes */
11097 U8 maxSubfrms = RG_SCH_CMN_UL_NUM_SF;
11103 #if (defined(LTE_L2_MEAS) )
11104 Inst inst = cell->instIdx;
11105 #endif /* #if (defined(LTE_L2_MEAS) || defined(DEBUGP) */
11106 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
11109 cellUl->maxUeNewTxPerTti = cellCfg->maxUlUeNewTxPerTti;
/* Apply defaults when caps are unconfigured (zero) */
11110 if (maxUePerUlSf == 0)
11112 maxUePerUlSf = RG_SCH_CMN_MAX_UE_PER_UL_SF;
11115 if (maxMsg3PerUlSf == 0)
11117 maxMsg3PerUlSf = RG_SCH_CMN_MAX_MSG3_PER_UL_SF;
11119 /* fixed the problem while sending raRsp
11120 * if maxMsg3PerUlSf is greater than
11121 * RGSCH_MAX_RNTI_PER_RARNTI
11123 if(maxMsg3PerUlSf > RGSCH_MAX_RNTI_PER_RARNTI)
11125 maxMsg3PerUlSf = RGSCH_MAX_RNTI_PER_RARNTI;
/* MSG3 cap may never exceed the overall per-subframe UE cap */
11128 if(maxMsg3PerUlSf > maxUePerUlSf)
11130 maxMsg3PerUlSf = maxUePerUlSf;
11133 /*cellUl->maxAllocPerUlSf = maxUePerUlSf + maxMsg3PerUlSf;*/
11134 /*Max MSG3 should be a subset of Max UEs*/
11135 cellUl->maxAllocPerUlSf = maxUePerUlSf;
11136 cellUl->maxMsg3PerUlSf = maxMsg3PerUlSf;
11138 cellUl->maxAllocPerUlSf = maxUePerUlSf;
11140 /* Fix: MUE_PERTTI_UL syed validating Cell Configuration */
/* Sanity: per-TTI new-TX UE count cannot exceed per-SF allocation cap */
11141 if (cellUl->maxAllocPerUlSf < cellUl->maxUeNewTxPerTti)
11143 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
11144 "FAILED: MaxUePerUlSf(%u) < MaxUlUeNewTxPerTti(%u)",
11145 cellUl->maxAllocPerUlSf,
11146 cellUl->maxUeNewTxPerTti);
/* Pre-allocate per-subframe UL allocation-info arrays (two loop bounds:
 * presumably TDD vs FDD #ifdef arms) */
11152 for(idx = 0; idx < RGSCH_SF_ALLOC_SIZE; idx++)
11154 for(idx = 0; idx < RGSCH_NUM_SUB_FRAMES; idx++)
11158 ret = rgSCHUtlAllocSBuf(inst, (Data **)&(cell->sfAllocArr[idx].
11159 ulUeInfo.ulAllocInfo), (cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc)));
11162 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"Memory allocation failed ");
11167 if (maxUlBwPerUe == 0)
11169 /* ccpu00139362- Setting to configured UL BW instead of MAX BW(100)*/
11170 maxUlBwPerUe = cell->bwCfg.ulTotalBw;
11172 cellUl->maxUlBwPerUe = maxUlBwPerUe;
11174 /* FOR RG_SCH_CMN_EXT_CP_SUP */
/* REs per RB: 12 subcarriers x (symbols per subframe minus DMRS/SRS
 * symbols); 14 symbols normal CP, 12 extended CP */
11175 if (!cellCfg->isCpUlExtend)
11177 cellUl->ulNumRePerRb = 12 * (14 - RGSCH_UL_SYM_DMRS_SRS);
11181 cellUl->ulNumRePerRb = 12 * (12 - RGSCH_UL_SYM_DMRS_SRS);
/* PUSCH subband size must be a product of 2/3/5 (DFT-friendly) */
11184 if (sbSize != rgSchCmnMult235Tbl[sbSize].match)
11186 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"Invalid subband size %d", sbSize);
11189 //Setting the subband size to 4 which is size of VRBG in 5GTF
11191 sbSize = MAX_5GTF_VRBG_SIZE;
11194 maxSbPerUe = maxUlBwPerUe / sbSize;
11195 if (maxSbPerUe == 0)
11197 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnUlCellInit(): "
11198 "maxUlBwPerUe/sbSize is zero");
/* Round down to the nearest 2/3/5-multiple of subbands per UE */
11201 cellUl->maxSbPerUe = rgSchCmnMult235Tbl[maxSbPerUe].prvMatch;
11203 /* CQI related updations */
11204 if ((!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->ulCmnCodeRate.ccchCqi))
11205 || (!RG_SCH_CMN_UL_IS_CQI_VALID(cellCfg->trgUlCqi.trgCqi)))
11207 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnUlCellInit(): "
11211 cellUl->dfltUlCqi = cellCfg->ulCmnCodeRate.ccchCqi;
11213 /* Changed the logic to determine maxUlCqi.
11214 * For a 16qam UE, maxUlCqi is the CQI Index at which
11215 * efficiency is as close as possible to RG_SCH_MAX_CODE_RATE_16QAM
11216 * Refer to 36.213-8.6.1 */
11217 for (i = RG_SCH_CMN_UL_NUM_CQI - 1;i > 0; --i)
11219 RLOG_ARG2(L_INFO,DBG_CELLID,cell->cellId,
11222 rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i]);
11223 #ifdef MAC_SCH_STATS
11224 /* ccpu00128489 ADD Update mcs in hqFailStats here instead of at CRC
11225 * since CQI to MCS mapping does not change. The only exception is for
11226 * ITBS = 19 where the MCS can be 20 or 21 based on the UE cat. We
11227 * choose 20, instead of 21, ie UE_CAT_3 */
11228 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
11229 RG_SCH_CMN_UL_TBS_TO_MCS(iTbs, hqFailStats.ulCqiStat[i - 1].mcs);
/* Scan CQIs downward for the highest index whose ITBS still fits 16QAM */
11232 for (i = RG_SCH_CMN_UL_NUM_CQI - 1; i != 0; --i)
11234 /* Fix for ccpu00123912*/
11235 iTbs = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][i];
11236 if (iTbs <= RGSCH_UL_16QAM_MAX_ITBS) /* corresponds to 16QAM */
11238 RLOG_ARG1(L_INFO,DBG_CELLID,cell->cellId,
11239 "16 QAM CQI %u", i);
11240 cellUl->max16qamCqi = i;
/* Precompute MSG3 allocation variables (EMTC and legacy variants;
 * presumably selected by an #ifdef not visible here) */
11246 /* Precompute useful values for RA msg3 */
11247 ret = rgSCHCmnPrecompEmtcMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
11248 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
11255 /* Precompute useful values for RA msg3 */
11256 ret = rgSCHCmnPrecompMsg3Vars(cellUl, cellCfg->ulCmnCodeRate.ccchCqi,
11257 cell->rachCfg.msgSizeGrpA, sbSize, cell->isCpUlExtend);
11263 cellUl->sbSize = sbSize;
11266 cellUl->numUlSubfrms = maxSubfrms;
11268 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->ulSfArr,
11269 cellUl->numUlSubfrms * sizeof(RgSchUlSf));
/* On allocation failure, record zero UL subframes before bailing out */
11273 cellUl->numUlSubfrms = 0;
11277 /* store the DL subframe corresponding to the PUSCH offset
11278 * in their respective UL subframe */
11279 for(i=0; i < RGSCH_NUM_SUB_FRAMES; i++)
11281 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][i] != 0)
11283 subfrm = (i + rgSchTddPuschTxKTbl[ulDlCfgIdx][i]) % \
11284 RGSCH_NUM_SUB_FRAMES;
11285 subfrm = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][subfrm]-1;
11286 dlIdx = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][i]-1;
11287 RGSCH_ARRAY_BOUND_CHECK( cell->instIdx, ulToDlMap, subfrm);
11288 ulToDlMap[subfrm] = dlIdx;
11291 /* Copy the information in the remaining UL subframes based
11292 * on number of HARQ processes */
11293 for(i=maxUlsubfrms; i < maxSubfrms; i++)
11295 subfrm = i-maxUlsubfrms;
11296 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, i);
11297 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, ulToDlMap, subfrm)
11298 ulToDlMap[i] = ulToDlMap[subfrm];
/* Per-CFI PUSCH start RB and subband count, via rgSCHCmnUlCalcAvailBw */
11302 for (cfi = 1; cfi < RG_SCH_CMN_MAX_CFI; cfi++)
11305 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
11307 ret = rgSCHCmnUlCalcAvailBw(cell, cellCfg, cfi, &rbStart, &bwAvail);
11316 cell->ulAvailBw = bwAvail;
11319 numSb = bwAvail/sbSize;
11321 cell->dynCfiCb.bwInfo[cfi].startRb = rbStart;
11322 cell->dynCfiCb.bwInfo[cfi].numSb = numSb;
/* maxCfi == 0 here means every CFI was rejected above: bad configuration */
11325 if(0 == cell->dynCfiCb.maxCfi)
11327 RLOG_ARG3(L_ERROR,DBG_CELLID,cell->cellId,
11328 "Incorrect Default CFI(%u), maxCfi(%u), maxPucchRb(%d)",
11329 cellSch->cfiCfg.cfi, cell->dynCfiCb.maxCfi,
11330 cell->pucchCfg.maxPucchRb);
/* DMRS configuration per subband, sized for CFI=1 (largest BW) */
11336 cellUl->dmrsArrSize = cell->dynCfiCb.bwInfo[1].numSb;
11337 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cellUl->dmrsArr,
11338 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11343 for (i = 0; i < cellUl->dmrsArrSize; ++i)
11345 cellUl->dmrsArr[i] = cellCfg->puschSubBand.dmrs[i];
11348 /* Init subframes */
11349 for (i = 0; i < maxSubfrms; ++i)
11351 ret = rgSCHUtlUlSfInit(cell, &cellUl->ulSfArr[i], i,
11352 cellUl->maxAllocPerUlSf);
/* Failure path: unwind subframes initialised so far, then free the
 * DMRS array and the UL subframe array */
11355 for (; i != 0; --i)
11357 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[i-1]);
11359 /* ccpu00117052 - MOD - Passing double pointer
11360 for proper NULLP assignment*/
11361 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)(&(cellUl->dmrsArr)),
11362 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11364 /* ccpu00117052 - MOD - Passing double pointer
11365 for proper NULLP assignment*/
11366 rgSCHUtlFreeSBuf(cell->instIdx,
11367 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
11372 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cellUl);
11377 * @brief Scheduler processing on cell configuration.
11381 * Function : rgSCHCmnRgrCellCfg
11383 * This function does requisite initialisation
11384 * and setup for scheduler1 when a cell is
11387 * @param[in] RgSchCellCb *cell
11388 * @param[in] RgrCellCfg *cellCfg
11389 * @param[out] RgSchErrInfo *err
/*
 * NOTE(review): sampled listing — #ifdef arms, failure returns and closing
 * braces between the visible lines are not shown.
 */
11395 S16 rgSCHCmnRgrCellCfg
11398 RgrCellCfg *cellCfg,
11402 S16 rgSCHCmnRgrCellCfg(cell, cellCfg, err)
11404 RgrCellCfg *cellCfg;
11409 RgSchCmnCell *cellSch;
11411 /* As part of RGR cell configuration, validate the CRGCellCfg
11412 * There is no trigger for crgCellCfg from SC1 */
11413 /* Removed failure check for Extended CP */
/* Allocate the scheduler-private cell control block (RgSchCmnCell) and
 * anchor it in cell->sc.sch */
11415 if (((ret = rgSCHUtlAllocSBuf(cell->instIdx,
11416 (Data**)&(cell->sc.sch), (sizeof(RgSchCmnCell)))) != ROK))
11418 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
11419 "Memory allocation FAILED");
11420 err->errCause = RGSCHERR_SCH_CFG;
11423 cellSch = (RgSchCmnCell *)(cell->sc.sch);
11424 cellSch->cfiCfg = cellCfg->cfiCfg;
11425 cellSch->trgUlCqi.trgCqi = cellCfg->trgUlCqi.trgCqi;
11426 /* Initialize the scheduler refresh timer queues */
11427 cellSch->tmrTqCp.nxtEnt = 0;
11428 cellSch->tmrTqCp.tmrLen = RG_SCH_CMN_NUM_REFRESH_Q;
11430 /* RACHO Intialize the RACH ded Preamble Information */
11431 rgSCHCmnCfgRachDedPrm(cell);
11433 /* Initialize 'Np' value for each 'p' used for
11434 * HARQ ACK/NACK reception */
/* Duplicate rgSCHCmnDlNpValInit calls: presumably two #ifdef arms
 * (e.g. TDD vs FDD) in the full source — TODO confirm */
11435 rgSCHCmnDlNpValInit(cell);
11438 /* Initialize 'Np' value for each 'p' used for
11439 * HARQ ACK/NACK reception */
11441 rgSCHCmnDlNpValInit(cell);
11444 /* Now perform uplink related initializations */
11445 ret = rgSCHCmnUlCellInit(cell, cellCfg);
11448 /* There is no downlink deinit to be performed */
11449 err->errCause = RGSCHERR_SCH_CFG;
11452 ret = rgSCHCmnDlRgrCellCfg(cell, cellCfg, err);
11455 err->errCause = RGSCHERR_SCH_CFG;
11458 /* DL scheduler has no initializations to make */
11459 /* As of now DL scheduler always returns ROK */
/* Precompute DCI format sizes and CQI->aggregation-level mappings
 * (EMTC variants guarded by EMTC_ENABLE) */
11461 rgSCHCmnGetDciFrmtSizes(cell);
11462 rgSCHCmnGetCqiDciFrmt2AggrLvl(cell);
11464 rgSCHCmnGetEmtcDciFrmtSizes(cell);
11465 rgSCHCmnGetCqiEmtcDciFrmt2AggrLvl(cell);
11466 #endif /* EMTC_ENABLE */
/* Select UL scheduler API table: EMTC table when EMTC is enabled,
 * otherwise the table indexed by the configured UL scheduler type */
11469 if(TRUE == cellCfg->emtcEnable)
11471 cellSch->apisEmtcUl = &rgSchEmtcUlSchdTbl[0];
11472 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
11479 cellSch->apisUl = &rgSchUlSchdTbl[RG_SCH_CMN_GET_UL_SCHED_TYPE(cell)];
11480 ret = cellSch->apisUl->rgSCHRgrUlCellCfg(cell, cellCfg, err);
/* Same selection for the DL scheduler API table */
11486 if(TRUE == cellCfg->emtcEnable)
11488 cellSch->apisEmtcDl = &rgSchEmtcDlSchdTbl[0];
11489 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
11496 cellSch->apisDl = &rgSchDlSchdTbl[RG_SCH_CMN_GET_DL_SCHED_TYPE(cell)];
11498 /* Perform SPS specific initialization for the cell */
11499 ret = rgSCHCmnSpsCellCfg(cell, cellCfg, err);
11505 ret = cellSch->apisDl->rgSCHRgrDlCellCfg(cell, cellCfg, err);
11510 rgSCHCmnInitVars(cell);
11513 } /* rgSCHCmnRgrCellCfg*/
11517 * @brief This function handles the reconfiguration of cell.
11521 * Function: rgSCHCmnRgrCellRecfg
11522 * Purpose: Update the reconfiguration parameters.
11524 * Invoked by: Scheduler
11526 * @param[in] RgSchCellCb* cell
/*
 * NOTE(review): sampled listing — error returns/braces between the visible
 * lines are not shown. Each recfgTypes bit is handled independently.
 */
11531 S16 rgSCHCmnRgrCellRecfg
11534 RgrCellRecfg *recfg,
11538 S16 rgSCHCmnRgrCellRecfg(cell, recfg, err)
11540 RgrCellRecfg *recfg;
11545 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
11546 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
/* UL common code-rate reconfiguration: validate the new CCCH CQI, then
 * recompute the MSG3 precomputed variables; on failure restore the old
 * CQI and recompute with it (rollback) */
11549 if (recfg->recfgTypes & RGR_CELL_UL_CMNRATE_RECFG)
11551 U8 oldCqi = cellUl->dfltUlCqi;
11552 if (!RG_SCH_CMN_UL_IS_CQI_VALID(recfg->ulCmnCodeRate.ccchCqi))
11554 err->errCause = RGSCHERR_SCH_CFG;
11555 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnRgrCellRecfg(): "
11559 cellUl->dfltUlCqi = recfg->ulCmnCodeRate.ccchCqi;
11560 ret = rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
11561 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
11564 cellUl->dfltUlCqi = oldCqi;
11565 rgSCHCmnPrecompMsg3Vars(cellUl, recfg->ulCmnCodeRate.ccchCqi,
11566 cell->rachCfg.msgSizeGrpA, cellUl->sbSize, cell->isCpUlExtend);
/* DL common code-rate reconfiguration */
11571 if (recfg->recfgTypes & RGR_CELL_DL_CMNRATE_RECFG)
11573 if (rgSCHCmnDlCnsdrCmnRt(cell, &recfg->dlCmnCodeRate) != ROK)
11575 err->errCause = RGSCHERR_SCH_CFG;
/* Delegate to EMTC or legacy UL/DL scheduler recfg hooks depending on
 * cell->emtcEnable */
11581 if(TRUE == cell->emtcEnable)
11583 /* Invoke UL sched for cell Recfg */
11584 ret = cellSch->apisEmtcUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
11590 /* Invoke DL sched for cell Recfg */
11591 ret = cellSch->apisEmtcDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
11600 /* Invoke UL sched for cell Recfg */
11601 ret = cellSch->apisUl->rgSCHRgrUlCellRecfg(cell, recfg, err);
11607 /* Invoke DL sched for cell Recfg */
11608 ret = cellSch->apisDl->rgSCHRgrDlCellRecfg(cell, recfg, err);
/* DL frequency-selective scheduling reconfiguration */
11615 if (recfg->recfgTypes & RGR_CELL_DLFS_RECFG)
11617 ret = cellSch->apisDlfs->rgSCHDlfsCellRecfg(cell, recfg, err);
11622 cellSch->dl.isDlFreqSel = recfg->dlfsRecfg.isDlFreqSel;
/* Power control reconfiguration */
11625 if (recfg->recfgTypes & RGR_CELL_PWR_RECFG)
11627 ret = rgSCHPwrCellRecfg(cell, recfg);
11637 /***********************************************************
11639 * Func : rgSCHCmnUlCellDeinit
11641 * Desc : Uplink scheduler de-initialisation for cell.
11649 **********************************************************/
/*
 * Frees everything rgSCHCmnUlCellInit allocated: per-subframe UL alloc-info
 * arrays, L2-measurement control blocks, the DMRS array and the UL subframe
 * array. NOTE(review): sampled listing; the duplicated loop headers are
 * presumably alternative #ifdef arms — TODO confirm against full source.
 */
11651 PRIVATE Void rgSCHCmnUlCellDeinit
11656 PRIVATE Void rgSCHCmnUlCellDeinit(cell)
11660 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
11663 U8 maxSubfrms = cellUl->numUlSubfrms;
11666 CmLList *lnk = NULLP;
11667 RgSchL2MeasCb *measCb;
/* Release per-subframe UL allocation-info arrays (two loop bounds:
 * presumably TDD vs FDD variants) */
11671 for(ulSfIdx = 0; ulSfIdx < RGSCH_SF_ALLOC_SIZE; ulSfIdx++)
11673 for(ulSfIdx = 0; ulSfIdx < RGSCH_NUM_SUB_FRAMES; ulSfIdx++)
11676 if(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo != NULLP)
11678 /* ccpu00117052 - MOD - Passing double pointer
11679 for proper NULLP assignment*/
11680 rgSCHUtlFreeSBuf(cell->instIdx,
11681 (Data **)(&(cell->sfAllocArr[ulSfIdx].ulUeInfo.ulAllocInfo)),
11682 cellUl->maxAllocPerUlSf * sizeof(RgInfUeUlAlloc));
11684 /* ccpu00117052 - DEL - removed explicit NULLP assignment
11685 as it is done in above utility function */
11688 /* Free the memory allocated to measCb */
/* Drain the cell's L2-measurement list, freeing each control block */
11689 lnk = cell->l2mList.first;
11690 while(lnk != NULLP)
11692 measCb = (RgSchL2MeasCb *)lnk->node;
11693 cmLListDelFrm(&cell->l2mList, lnk);
11695 /* ccpu00117052 - MOD - Passing double pointer
11696 for proper NULLP assignment*/
11697 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&measCb,\
11698 sizeof(RgSchL2MeasCb));
11701 if (cellUl->dmrsArr != NULLP)
11703 /* ccpu00117052 - MOD - Passing double pointer
11704 for proper NULLP assignment*/
11705 rgSCHUtlFreeSBuf(cell->instIdx,(Data **)(&(cellUl->dmrsArr)),
11706 cellUl->dmrsArrSize * sizeof(*cellUl->dmrsArr));
11708 /* De-init subframes */
11710 for (ulSfIdx = 0; ulSfIdx < maxSubfrms; ++ulSfIdx)
11712 for (ulSfIdx = 0; ulSfIdx < RG_SCH_CMN_UL_NUM_SF; ++ulSfIdx)
11715 rgSCHUtlUlSfDeinit(cell, &cellUl->ulSfArr[ulSfIdx]);
11719 if (cellUl->ulSfArr != NULLP)
11721 /* ccpu00117052 - MOD - Passing double pointer
11722 for proper NULLP assignment*/
11723 rgSCHUtlFreeSBuf(cell->instIdx,
11724 (Data **)(&(cellUl->ulSfArr)), maxSubfrms * sizeof(RgSchUlSf));
11732 * @brief Scheduler processing for cell delete.
11736 * Function : rgSCHCmnCellDel
11738 * This functions de-initialises and frees memory
11739 * taken up by scheduler1 for the entire cell.
11741 * @param[in] RgSchCellCb *cell
/*
 * Tear-down order: UL deinit, UL scheduler (EMTC or legacy), DL scheduler,
 * DLFS, power module, SPS, then the RgSchCmnCell block itself.
 */
11745 Void rgSCHCmnCellDel
11750 Void rgSCHCmnCellDel(cell)
11754 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Nothing to free if the scheduler cell block was never allocated */
11759 if (cellSch == NULLP)
11763 /* Perform the deinit for the UL scheduler */
11764 rgSCHCmnUlCellDeinit(cell);
11766 if(TRUE == cell->emtcEnable)
11768 if (cellSch->apisEmtcUl)
11770 cellSch->apisEmtcUl->rgSCHFreeUlCell(cell);
11774 if (cellSch->apisUl)
11776 /* api pointer checks added (here and below in
11777 * this function). pl check. - antriksh */
11778 cellSch->apisUl->rgSCHFreeUlCell(cell);
11781 /* Perform the deinit for the DL scheduler */
11782 cmLListInit(&cellSch->dl.taLst);
11783 if (cellSch->apisDl)
11785 cellSch->apisDl->rgSCHFreeDlCell(cell);
11788 if (cellSch->apisEmtcDl)
11790 rgSCHEmtcInitTaLst(&cellSch->dl);
11792 cellSch->apisEmtcDl->rgSCHFreeDlCell(cell);
11796 /* DLFS de-initialization */
11797 if (cellSch->dl.isDlFreqSel && cellSch->apisDlfs)
11799 cellSch->apisDlfs->rgSCHDlfsCellDel(cell);
11802 rgSCHPwrCellDel(cell);
11804 rgSCHCmnSpsCellDel(cell);
11807 /* ccpu00117052 - MOD - Passing double pointer
11808 for proper NULLP assignment*/
11809 rgSCHUtlFreeSBuf(cell->instIdx,
11810 (Data**)(&(cell->sc.sch)), (sizeof(RgSchCmnCell)));
11812 } /* rgSCHCmnCellDel */
11816 * @brief This function validates QOS parameters for DL.
11820 * Function: rgSCHCmnValidateDlQos
11821 * Purpose: This function validates QOS parameters for DL.
11823 * Invoked by: Scheduler
11825 * @param[in] CrgLchQosCfg *dlQos
/*
 * Checks: QCI must lie in [RG_SCH_CMN_MIN_QCI, RG_SCH_CMN_MAX_QCI]; for GBR
 * QCIs, MBR must be non-zero and >= GBR. Return statements are not visible
 * in this sampled listing.
 */
11830 PRIVATE S16 rgSCHCmnValidateDlQos
11832 RgrLchQosCfg *dlQos
11835 PRIVATE S16 rgSCHCmnValidateDlQos(dlQos)
11836 RgrLchQosCfg *dlQos;
11839 U8 qci = dlQos->qci;
11842 if ( qci < RG_SCH_CMN_MIN_QCI || qci > RG_SCH_CMN_MAX_QCI )
/* GBR-range QCIs additionally require a consistent MBR/GBR pair */
11847 if ((qci >= RG_SCH_CMN_GBR_QCI_START) &&
11848 (qci <= RG_SCH_CMN_GBR_QCI_END))
11850 if ((dlQos->mbr == 0) || (dlQos->mbr < dlQos->gbr))
11859 * @brief Scheduler invocation on logical channel addition.
11863 * Function : rgSCHCmnRgrLchCfg
11865 * This functions does required processing when a new
11866 * (dedicated) logical channel is added. Assumes lcg
11867 * pointer in ulLc is set.
11869 * @param[in] RgSchCellCb *cell
11870 * @param[in] RgSchUeCb *ue
11871 * @param[in] RgSchDlLcCb *dlLc
11872 * @param[int] RgrLchCfg *lcCfg
11873 * @param[out] RgSchErrInfo *err
/*
 * NOTE(review): sampled listing — failure returns/braces between the
 * visible lines are not shown.
 */
11879 S16 rgSCHCmnRgrLchCfg
11888 S16 rgSCHCmnRgrLchCfg(cell, ue, dlLc, lcCfg, err)
11898 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Allocate the per-LC scheduler control block (RgSchCmnDlSvc) */
11901 ret = rgSCHUtlAllocSBuf(cell->instIdx,
11902 (Data**)&((dlLc)->sch), (sizeof(RgSchCmnDlSvc)));
11905 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRgrLchCfg(): "
11906 "SCH struct alloc failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11907 err->errCause = RGSCHERR_SCH_CFG;
/* For non-DCCH channels, validate QoS and derive per-service parameters;
 * gbr/mbr are scaled to the scheduler refresh period (per-100ms units) */
11910 if(lcCfg->lcType != CM_LTE_LCH_DCCH)
11912 ret = rgSCHCmnValidateDlQos(&lcCfg->dlInfo.dlQos);
11915 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"rgSchCmnCrgLcCfg(): "
11916 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11917 err->errCause = RGSCHERR_SCH_CFG;
11920 /* Perform DL service activation in the scheduler */
11921 ((RgSchCmnDlSvc *)(dlLc->sch))->qci = lcCfg->dlInfo.dlQos.qci;
11922 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = rgSchCmnDlQciPrio[lcCfg->dlInfo.dlQos.qci - 1];
11923 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcCfg->dlInfo.dlQos.gbr * \
11924 RG_SCH_CMN_REFRESH_TIME)/100;
11925 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcCfg->dlInfo.dlQos.mbr * \
11926 RG_SCH_CMN_REFRESH_TIME)/100;
11930 /*assigning highest priority to DCCH */
11931 ((RgSchCmnDlSvc *)(dlLc->sch))->prio=RG_SCH_CMN_DCCH_PRIO;
11934 dlLc->lcType=lcCfg->lcType;
/* Delegate DL LC configuration to the EMTC or legacy DL scheduler */
11937 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
11939 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcCfg(cell, ue,dlLc ,lcCfg, err);
11948 ret = cellSch->apisDl->rgSCHRgrDlLcCfg(cell, ue, dlLc, lcCfg, err);
/* Delegate UL LC configuration likewise */
11956 if(TRUE == ue->isEmtcUe)
11958 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
11967 ret = cellSch->apisUl->rgSCHRgrUlLcCfg(cell, ue, lcCfg, err);
/* Secondary-cell DL LC bookkeeping */
11977 rgSCHSCellDlLcCfg(cell, ue, dlLc);
/* SPS hook for services configured with DL SPS */
11983 if(lcCfg->dlInfo.dlSpsCfg.isSpsEnabled)
11985 /* Invoke SPS module if SPS is enabled for the service */
11986 ret = rgSCHCmnSpsDlLcCfg(cell, ue, dlLc, lcCfg, err);
11989 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "rgSchCmnRgrLchCfg(): "
11990 "SPS configuration failed for DL LC for CRNTI:%d LCID:%d",ue->ueId,lcCfg->lcId);
11991 err->errCause = RGSCHERR_SCH_CFG;
12001 * @brief Scheduler invocation on logical channel addition.
12005 * Function : rgSCHCmnRgrLchRecfg
12007 * This functions does required processing when an existing
12008 * (dedicated) logical channel is reconfigured. Assumes lcg
12009 * pointer in ulLc is set to the old value.
12010 * Independent of whether new LCG is meant to be configured,
12011 * the new LCG scheduler information is accessed and possibly modified.
12013 * @param[in] RgSchCellCb *cell
12014 * @param[in] RgSchUeCb *ue
12015 * @param[in] RgSchDlLcCb *dlLc
12016 * @param[int] RgrLchRecfg *lcRecfg
12017 * @param[out] RgSchErrInfo *err
/*
 * NOTE(review): sampled listing — failure returns/braces between the
 * visible lines are not shown. A QCI change is rejected (priority change
 * not supported); gbr/mbr are rescaled to the refresh period.
 */
12023 S16 rgSCHCmnRgrLchRecfg
12028 RgrLchRecfg *lcRecfg,
12032 S16 rgSCHCmnRgrLchRecfg(cell, ue, dlLc, lcRecfg, err)
12036 RgrLchRecfg *lcRecfg;
12041 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12044 if(dlLc->lcType != CM_LTE_LCH_DCCH)
12046 ret = rgSCHCmnValidateDlQos(&lcRecfg->dlRecfg.dlQos);
12050 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
12051 "DlQos validation failed for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12052 err->errCause = RGSCHERR_SCH_CFG;
/* QCI (and hence priority) is immutable across reconfiguration */
12055 if (((RgSchCmnDlSvc *)(dlLc->sch))->qci != lcRecfg->dlRecfg.dlQos.qci)
12057 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Qci, hence lc Priority change "
12058 "not supported for CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12059 err->errCause = RGSCHERR_SCH_CFG;
12062 ((RgSchCmnDlSvc *)(dlLc->sch))->gbr = (lcRecfg->dlRecfg.dlQos.gbr * \
12063 RG_SCH_CMN_REFRESH_TIME)/100;
12064 ((RgSchCmnDlSvc *)(dlLc->sch))->mbr = (lcRecfg->dlRecfg.dlQos.mbr * \
12065 RG_SCH_CMN_REFRESH_TIME)/100;
12069 /*assigning highest priority to DCCH */
12070 ((RgSchCmnDlSvc *)(dlLc->sch))->prio = RG_SCH_CMN_DCCH_PRIO;
/* Delegate to EMTC or legacy DL/UL scheduler recfg hooks */
12074 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
12076 ret = cellSch->apisEmtcDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12081 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
12090 ret = cellSch->apisDl->rgSCHRgrDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12095 ret = cellSch->apisUl->rgSCHRgrUlLcRecfg(cell, ue, lcRecfg, err);
/* SPS reconfiguration: logged as unsupported and ignored */
12103 if (lcRecfg->recfgTypes & RGR_DL_LC_SPS_RECFG)
12105 /* Invoke SPS module if SPS is enabled for the service */
12106 if(lcRecfg->dlRecfg.dlSpsRecfg.isSpsEnabled)
12108 ret = rgSCHCmnSpsDlLcRecfg(cell, ue, dlLc, lcRecfg, err);
12111 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"SPS re-configuration not "
12112 "supported for dlLC Ignore this CRNTI:%d LCID:%d",ue->ueId,lcRecfg->lcId);
12123 * @brief Scheduler invocation on logical channel addition.
12127 * Function : rgSCHCmnRgrLcgCfg
12129 * This functions does required processing when a new
12130 * (dedicated) logical channel is added. Assumes lcg
12131 * pointer in ulLc is set.
12133 * @param[in] RgSchCellCb *cell,
12134 * @param[in] RgSchUeCb *ue,
12135 * @param[in] RgSchLcgCb *lcg,
12136 * @param[in] RgrLcgCfg *lcgCfg,
12137 * @param[out] RgSchErrInfo *err
/*
 * Configures a UL logical-channel group: records GBR/MBR scaled to the
 * scheduler refresh period, delegates to the EMTC or legacy UL scheduler,
 * and registers GBR LCGs with MAC. Sampled listing — some lines elided.
 */
12143 S16 rgSCHCmnRgrLcgCfg
12152 S16 rgSCHCmnRgrLcgCfg(cell, ue, lcg, lcgCfg, err)
12161 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12162 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCfg->ulInfo.lcgId].sch));
/* Scale configured rates to the refresh period (per-100ms units);
 * deltaMbr tracks the MBR headroom above GBR */
12165 ulLcg->cfgdGbr = (lcgCfg->ulInfo.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
12166 ulLcg->effGbr = ulLcg->cfgdGbr;
12167 ulLcg->deltaMbr = ((lcgCfg->ulInfo.mbr - lcgCfg->ulInfo.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
12168 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
12171 if(TRUE == ue->isEmtcUe)
12173 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
12182 ret = cellSch->apisUl->rgSCHRgrUlLcgCfg(cell, ue, lcg, lcgCfg, err);
12188 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
12190 /* Indicate MAC that this LCG is GBR LCG */
12191 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcgCfg->ulInfo.lcgId, TRUE);
12197 * @brief Scheduler invocation on logical channel addition.
12201 * Function : rgSCHCmnRgrLcgRecfg
12203 * This functions does required processing when a new
12204 * (dedicated) logical channel is added. Assumes lcg
12205 * pointer in ulLc is set.
12207 * @param[in] RgSchCellCb *cell,
12208 * @param[in] RgSchUeCb *ue,
12209 * @param[in] RgSchLcgCb *lcg,
12210 * @param[in] RgrLcgRecfg *reCfg,
12211 * @param[out] RgSchErrInfo *err
/*
 * Reconfigures a UL LCG: same rate-scaling as rgSCHCmnRgrLcgCfg, then
 * (re)registers the LCG with MAC as GBR or non-GBR depending on the new
 * configured GBR. Sampled listing — some lines elided.
 */
12217 S16 rgSCHCmnRgrLcgRecfg
12222 RgrLcgRecfg *reCfg,
12226 S16 rgSCHCmnRgrLcgRecfg(cell, ue, lcg, reCfg, err)
12230 RgrLcgRecfg *reCfg;
12235 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12236 RgSchCmnLcg *ulLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[reCfg->ulRecfg.lcgId].sch));
/* Rescale rates to the refresh period, mirroring rgSCHCmnRgrLcgCfg */
12239 ulLcg->cfgdGbr = (reCfg->ulRecfg.gbr * RG_SCH_CMN_REFRESH_TIME)/100;
12240 ulLcg->effGbr = ulLcg->cfgdGbr;
12241 ulLcg->deltaMbr = ((reCfg->ulRecfg.mbr - reCfg->ulRecfg.gbr) * RG_SCH_CMN_REFRESH_TIME)/100;
12242 ulLcg->effDeltaMbr = ulLcg->deltaMbr;
12245 if(TRUE == ue->isEmtcUe)
12247 ret = cellSch->apisEmtcUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
12256 ret = cellSch->apisUl->rgSCHRgrUlLcgRecfg(cell, ue, lcg, reCfg, err);
12262 if (RGSCH_IS_GBR_BEARER(ulLcg->cfgdGbr))
12264 /* Indicate MAC that this LCG is GBR LCG */
12265 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, TRUE);
12269 /* In case of RAB modification */
/* LCG no longer GBR: deregister (FALSE) with MAC */
12270 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, reCfg->ulRecfg.lcgId, FALSE);
12275 /***********************************************************
12277 * Func : rgSCHCmnRgrLchDel
12279 * Desc : Scheduler handling for a (dedicated)
12280 * uplink logical channel being deleted.
12287 **********************************************************/
/*
 * Thin dispatcher: forwards the UL LC deletion to the EMTC or legacy UL
 * scheduler depending on the UE type.
 */
12289 S16 rgSCHCmnRgrLchDel
12297 S16 rgSCHCmnRgrLchDel(cell, ue, lcId, lcgId)
12304 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12306 if(TRUE == ue->isEmtcUe)
12308 cellSch->apisEmtcUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
12313 cellSch->apisUl->rgSCHRgrUlLchDel(cell, ue, lcId, lcgId);
12318 /***********************************************************
12320 * Func : rgSCHCmnLcgDel
12322 * Desc : Scheduler handling for a (dedicated)
12323 * uplink logical channel being deleted.
12331 **********************************************************/
/*
 * Deletes a UL LCG's scheduler state: deregisters GBR LCGs from MAC,
 * notifies the SPS module when UL SPS is enabled, zeroes the rate/BS
 * fields, and hands off to the EMTC or legacy UL scheduler. The control
 * block itself is freed at UE deletion, not here.
 */
12333 Void rgSCHCmnLcgDel
12340 Void rgSCHCmnLcgDel(cell, ue, lcg)
12346 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
12347 RgSchCmnLcg *lcgCmn = RG_SCH_CMN_GET_UL_LCG(lcg);
12349 if (lcgCmn == NULLP)
12354 if (RGSCH_IS_GBR_BEARER(lcgCmn->cfgdGbr))
12356 /* Indicate MAC that this LCG is GBR LCG */
12357 rgSCHUtlBuildNSendLcgReg(cell, ue->ueId, lcg->lcgId, FALSE);
12361 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
12363 rgSCHCmnSpsUlLcgDel(cell, ue, lcg);
12365 #endif /* LTEMAC_SPS */
12367 lcgCmn->effGbr = 0;
12368 lcgCmn->reportedBs = 0;
12369 lcgCmn->cfgdGbr = 0;
12370 /* set lcg bs to 0. Deletion of control block happens
12371 * at the time of UE deletion. */
12374 if(TRUE == ue->isEmtcUe)
12376 cellSch->apisEmtcUl->rgSCHFreeUlLcg(cell, ue, lcg);
12381 cellSch->apisUl->rgSCHFreeUlLcg(cell, ue, lcg);
12388 * @brief This function deletes a service from scheduler.
12392 * Function: rgSCHCmnFreeDlLc
12393 * Purpose: This function is made available through a FP for
12394 * making scheduler aware of a service being deleted from UE.
12396 * Invoked by: BO and Scheduler
12398 * @param[in] RgSchCellCb* cell
12399 * @param[in] RgSchUeCb* ue
12400 * @param[in] RgSchDlLcCb* svc
/*
 * Releases a DL logical channel: delegates to the EMTC or legacy DL
 * scheduler, detaches secondary-cell state, invokes the SPS module for
 * SPS-enabled services, and frees the per-LC RgSchCmnDlSvc block.
 */
12405 Void rgSCHCmnFreeDlLc
12412 Void rgSCHCmnFreeDlLc(cell, ue, svc)
12418 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Nothing to do if the scheduler never allocated per-LC state */
12419 if (svc->sch == NULLP)
12424 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
12426 cellSch->apisEmtcDl->rgSCHFreeDlLc(cell, ue, svc);
12431 cellSch->apisDl->rgSCHFreeDlLc(cell, ue, svc);
12437 rgSCHSCellDlLcDel(cell, ue, svc);
12442 /* If SPS service, invoke SPS module */
12443 if (svc->dlLcSpsCfg.isSpsEnabled)
12445 rgSCHCmnSpsDlLcDel(cell, ue, svc);
12449 /* ccpu00117052 - MOD - Passing double pointer
12450 for proper NULLP assignment*/
12451 rgSCHUtlFreeSBuf(cell->instIdx,
12452 (Data**)(&(svc->sch)), (sizeof(RgSchCmnDlSvc)));
12455 rgSCHLaaDeInitDlLchCb(cell, svc);
12464 * @brief This function Processes the Final Allocations
12465 * made by the RB Allocator against the requested
12466 * CCCH SDURetx Allocations.
12470 * Function: rgSCHCmnDlCcchSduRetxFnlz
12471 * Purpose: This function Processes the Final Allocations
12472 * made by the RB Allocator against the requested
12473 * CCCH Retx Allocations.
12474 * Scans through the scheduled list of ccchSdu retrans
12475 * fills the corresponding pdcch, adds the hqProc to
12476 * the corresponding SubFrm and removes the hqP from
12479 * Invoked by: Common Scheduler
12481 * @param[in] RgSchCellCb *cell
12482 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * NOTE(review): sampled listing — the loop braces, 'ue' derivation and
 * list advancement between the visible lines are not shown.
 */
12487 PRIVATE Void rgSCHCmnDlCcchSduRetxFnlz
12490 RgSchCmnDlRbAllocInfo *allocInfo
12493 PRIVATE Void rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo)
12495 RgSchCmnDlRbAllocInfo *allocInfo;
12499 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12500 RgSchDlRbAlloc *rbAllocInfo;
12501 RgSchDlHqProcCb *hqP;
12504 /* Traverse through the Scheduled Retx List */
12505 node = allocInfo->ccchSduAlloc.schdCcchSduRetxLst.first;
12508 hqP = (RgSchDlHqProcCb *)(node->node);
12510 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, cell);
/* Commit PDCCH and HARQ process info for this scheduled retransmission */
12512 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12514 /* Remove the HqP from cell's ccchSduRetxLst */
12515 cmLListDelFrm(&cmnCellDl->ccchSduRetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
12516 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
12518 /* Fix: syed dlAllocCb reset should be performed.
12519 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12520 rgSCHCmnDlUeResetTemp(ue, hqP);
12522 /* Fix: syed dlAllocCb reset should be performed.
12523 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled retx entries: only clear their temporary allocation state */
12524 node = allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst.first;
12527 hqP = (RgSchDlHqProcCb *)(node->node);
12530 /* reset the UE allocation Information */
12531 rgSCHCmnDlUeResetTemp(ue, hqP);
12537 * @brief This function Processes the Final Allocations
12538 * made by the RB Allocator against the requested
12539 * CCCH Retx Allocations.
12543 * Function: rgSCHCmnDlCcchRetxFnlz
12544 * Purpose: This function Processes the Final Allocations
12545 * made by the RB Allocator against the requested
12546 * CCCH Retx Allocations.
12547 * Scans through the scheduled list of msg4 retrans
12548 * fills the corresponding pdcch, adds the hqProc to
12549 * the corresponding SubFrm and removes the hqP from
12552 * Invoked by: Common Scheduler
12554 * @param[in] RgSchCellCb *cell
12555 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/*
 * NOTE(review): sampled listing — loop braces and list advancement between
 * the visible lines are not shown. Mirrors rgSCHCmnDlCcchSduRetxFnlz but
 * operates on MSG4 retransmissions keyed off the RA control block.
 */
12560 PRIVATE Void rgSCHCmnDlCcchRetxFnlz
12563 RgSchCmnDlRbAllocInfo *allocInfo
12566 PRIVATE Void rgSCHCmnDlCcchRetxFnlz(cell, allocInfo)
12568 RgSchCmnDlRbAllocInfo *allocInfo;
12572 RgSchCmnDlCell *cmnCellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12573 RgSchDlRbAlloc *rbAllocInfo;
12574 RgSchDlHqProcCb *hqP;
12577 /* Traverse through the Scheduled Retx List */
12578 node = allocInfo->msg4Alloc.schdMsg4RetxLst.first;
12581 hqP = (RgSchDlHqProcCb *)(node->node);
/* MSG4 allocation info lives in the RA control block, not the UE CB */
12582 raCb = hqP->hqE->raCb;
12583 rbAllocInfo = &raCb->rbAllocInfo;
12585 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12587 /* Remove the HqP from cell's msg4RetxLst */
12588 cmLListDelFrm(&cmnCellDl->msg4RetxLst, &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
12589 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
12590 /* Fix: syed dlAllocCb reset should be performed.
12591 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12592 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
12593 rgSCHCmnDlHqPResetTemp(hqP);
12595 /* Fix: syed dlAllocCb reset should be performed.
12596 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled MSG4 retx entries: clear allocation info and HARQ temp state */
12597 node = allocInfo->msg4Alloc.nonSchdMsg4RetxLst.first;
12600 hqP = (RgSchDlHqProcCb *)(node->node);
12601 raCb = hqP->hqE->raCb;
12603 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
12604 rgSCHCmnDlHqPResetTemp(hqP);
12611 * @brief This function Processes the Final Allocations
12612 * made by the RB Allocator against the requested
12613 * CCCH SDU tx Allocations.
12617 * Function: rgSCHCmnDlCcchSduTxFnlz
12618 * Purpose: This function Processes the Final Allocations
12619 * made by the RB Allocator against the requested
12620 * CCCH tx Allocations.
12621 * Scans through the scheduled list of CCCH SDU trans
12622 * fills the corresponding pdcch, adds the hqProc to
12623 * the corresponding SubFrm and removes the hqP from
12626 * Invoked by: Common Scheduler
12628 * @param[in] RgSchCellCb *cell
12629 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): finalizes RB-allocator results for first-time CCCH SDU
 * transmissions: scheduled UEs get PDCCH/HARQ filled, are removed from the
 * cell's ccchSduUeLst and their CCCH LC data is indicated to DHM; UEs that
 * could not be scheduled have their HARQ TB released and per-TTI state reset.
 * NOTE(review): excerpt has extraction gaps (original line numbers jump) —
 * loop headers/braces are missing; verify against upstream before edits. */
12634 PRIVATE Void rgSCHCmnDlCcchSduTxFnlz
12637 RgSchCmnDlRbAllocInfo *allocInfo
12640 PRIVATE Void rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo)
12642 RgSchCmnDlRbAllocInfo *allocInfo;
12647 RgSchDlRbAlloc *rbAllocInfo;
12648 RgSchDlHqProcCb *hqP;
12649 RgSchLchAllocInfo lchSchdData;
12651 /* Traverse through the Scheduled Retx List */
12652 node = allocInfo->ccchSduAlloc.schdCcchSduTxLst.first;
12655 hqP = (RgSchDlHqProcCb *)(node->node);
12656 ueCb = hqP->hqE->ue;
12658 rbAllocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb, cell);
12660 /* fill the pdcch and HqProc */
12661 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12663 /* Remove the raCb from cell's toBeSchdLst */
12664 cmLListDelFrm(&cell->ccchSduUeLst, &ueCb->ccchSduLnk);
12665 ueCb->ccchSduLnk.node = (PTR)NULLP;
12667 /* Fix : Resetting this required to avoid complication
12668 * in reestablishment case */
12669 ueCb->dlCcchInfo.bo = 0;
12671 /* Indicate DHM of the CCCH LC scheduling */
12672 hqP->tbInfo[0].contResCe = NOTPRSNT;
12673 lchSchdData.lcId = 0;
/* Scheduled bytes exclude the Msg4 MAC header overhead */
12674 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
12675 (RGSCH_MSG4_HDRSIZE);
12676 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
12678 /* Fix: syed dlAllocCb reset should be performed.
12679 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12680 rgSCHCmnDlUeResetTemp(ueCb, hqP);
12682 /* Fix: syed dlAllocCb reset should be performed.
12683 * zombie info in dlAllocCb leading to crash rbNum wraparound */
/* Non-scheduled CCCH SDU transmissions: release HARQ TB0 and reset state */
12684 node = allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst.first;
12687 hqP = (RgSchDlHqProcCb *)(node->node);
12688 ueCb = hqP->hqE->ue;
12690 /* Release HqProc */
12691 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
12692 /*Fix: Removing releasing of TB1 as it will not exist for CCCH SDU and hence caused a crash*/
12693 /*rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
12694 /* reset the UE allocation Information */
12695 rgSCHCmnDlUeResetTemp(ueCb, hqP);
12702 * @brief This function Processes the Final Allocations
12703 * made by the RB Allocator against the requested
12704 * CCCH tx Allocations.
12708 * Function: rgSCHCmnDlCcchTxFnlz
12709 * Purpose: This function Processes the Final Allocations
12710 * made by the RB Allocator against the requested
12711 * CCCH tx Allocations.
12712 * Scans through the scheduled list of msg4 trans
12713 * fills the corresponding pdcch, adds the hqProc to
12714 * the corresponding SubFrm and removes the hqP from
12717 * Invoked by: Common Scheduler
12719 * @param[in] RgSchCellCb *cell
12720 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): finalizes RB-allocator results for first-time Msg4 (CCCH)
 * transmissions: fills PDCCH/HARQ for scheduled RA control blocks, marks the
 * contention-resolution CE as present, and indicates the CCCH LC to DHM only
 * when there is actual CCCH payload; non-scheduled Msg4s get their HARQ TB0
 * released and per-TTI state reset.
 * NOTE(review): extraction gaps — loop headers/braces missing in this excerpt;
 * verify against upstream before editing logic. */
12725 PRIVATE Void rgSCHCmnDlCcchTxFnlz
12728 RgSchCmnDlRbAllocInfo *allocInfo
12731 PRIVATE Void rgSCHCmnDlCcchTxFnlz(cell, allocInfo)
12733 RgSchCmnDlRbAllocInfo *allocInfo;
12738 RgSchDlRbAlloc *rbAllocInfo;
12739 RgSchDlHqProcCb *hqP;
12740 RgSchLchAllocInfo lchSchdData;
12742 /* Traverse through the Scheduled Retx List */
12743 node = allocInfo->msg4Alloc.schdMsg4TxLst.first;
12746 hqP = (RgSchDlHqProcCb *)(node->node);
12747 raCb = hqP->hqE->raCb;
12749 rbAllocInfo = &raCb->rbAllocInfo;
12751 /* fill the pdcch and HqProc */
12752 rgSCHCmnFillHqPPdcch(cell, rbAllocInfo, hqP);
12753 /* MSG4 Fix Start */
12755 rgSCHRamRmvFrmRaInfoSchdLst(cell, raCb);
12758 /* Indicate DHM of the CCCH LC scheduling */
12759 lchSchdData.lcId = 0;
/* Scheduled bytes exclude Msg4 header and contention-resolution CE sizes */
12760 lchSchdData.schdData = hqP->tbInfo[0].ccchSchdInfo.totBytes -
12761 (RGSCH_MSG4_HDRSIZE + RGSCH_CONT_RESID_SIZE);
12762 /* Transmitting presence of cont Res CE across MAC-SCH interface to
12763 * identify CCCH SDU transmissions which need to be done
12765 * contention resolution CE*/
12766 hqP->tbInfo[0].contResCe = PRSNT_NODEF;
12767 /*Dont add lc if only cont res CE is being transmitted*/
12768 if(raCb->dlCcchInfo.bo)
12770 rgSCHDhmAddLcData(cell->instIdx, &lchSchdData, &hqP->tbInfo[0]);
12775 /* Fix: syed dlAllocCb reset should be performed.
12776 * zombie info in dlAllocCb leading to crash rbNum wraparound */
12777 memset(&raCb->rbAllocInfo, 0, sizeof(raCb->rbAllocInfo));
12778 rgSCHCmnDlHqPResetTemp(hqP);
/* Non-scheduled Msg4 transmissions: release HARQ TB0 and reset state */
12780 node = allocInfo->msg4Alloc.nonSchdMsg4TxLst.first;
12783 hqP = (RgSchDlHqProcCb *)(node->node);
12784 raCb = hqP->hqE->raCb;
12786 rbAllocInfo = &raCb->rbAllocInfo;
12787 /* Release HqProc */
12788 rgSCHDhmRlsHqpTb(hqP, 0, FALSE);
12789 /*Fix: Removing releasing of TB1 as it will not exist for MSG4 and hence caused a crash*/
12790 /* rgSCHDhmRlsHqpTb(hqP, 1, FALSE);*/
12791 /* reset the UE allocation Information */
12792 memset(rbAllocInfo, 0, sizeof(*rbAllocInfo));
12793 rgSCHCmnDlHqPResetTemp(hqP);
12800 * @brief This function calculates the BI Index to be sent in the Bi header
12804 * Function: rgSCHCmnGetBiIndex
12805 * Purpose: This function Processes utilizes the previous BI time value
12806 * calculated and the difference last BI sent time and current time. To
12807 * calculate the latest BI Index. It also considers the how many UE's
12808 * Unserved in this subframe.
12810 * Invoked by: Common Scheduler
12812 * @param[in] RgSchCellCb *cell
12813 * @param[in] U32 ueCount
/* NOTE(review): computes the Backoff Indicator index for the RAR. It derives a
 * new BI value from the previously sent BI (aged by the elapsed subframe
 * difference) and the count of unserved UEs, then maps that value to an index
 * in rgSchCmnBiTbl (see 36.321 backoff parameter table).
 * NOTE(review): extraction gaps — braces/else lines missing in this excerpt;
 * verify against upstream before editing logic. */
12818 U8 rgSCHCmnGetBiIndex
12824 U8 rgSCHCmnGetBiIndex(cell, ueCount)
12829 S16 prevVal = 0; /* To Store Intermediate Value */
12830 U16 newBiVal = 0; /* To store Bi Value in millisecond */
/* Age the previously advertised BI by the time elapsed since it was sent */
12835 if (cell->biInfo.prevBiTime != 0)
12838 if(cell->emtcEnable == TRUE)
12840 timeDiff =(RGSCH_CALC_SF_DIFF_EMTC(cell->crntTime, cell->biInfo.biTime));
12845 timeDiff =(RGSCH_CALC_SF_DIFF(cell->crntTime, cell->biInfo.biTime));
12848 prevVal = cell->biInfo.prevBiTime - timeDiff;
12854 newBiVal = RG_SCH_CMN_GET_BI_VAL(prevVal,ueCount);
12855 /* To be used next time when BI is calculated */
12857 if(cell->emtcEnable == TRUE)
12859 RGSCHCPYTIMEINFO_EMTC(cell->crntTime, cell->biInfo.biTime)
12864 RGSCHCPYTIMEINFO(cell->crntTime, cell->biInfo.biTime)
12867 /* Search the actual BI Index from table Backoff Parameters Value and
12868 * return that Index */
12871 if (rgSchCmnBiTbl[idx] > newBiVal)
12876 }while(idx < RG_SCH_CMN_NUM_BI_VAL-1);
12877 cell->biInfo.prevBiTime = rgSchCmnBiTbl[idx];
12878 /* For 16 Entries in Table 7.2.1 36.321.880 - 3 reserved so total 13 Entries */
12879 return (idx); /* Returning reserved value from table UE treats it has 960 ms */
12880 } /* rgSCHCmnGetBiIndex */
12884 * @brief This function Processes the Final Allocations
12885 * made by the RB Allocator against the requested
12886 * RAR allocations. Assumption: The requested
12887 * allocations are always satisfied completely.
12888 * Hence no roll back.
12892 * Function: rgSCHCmnDlRaRspFnlz
12893 * Purpose: This function Processes the Final Allocations
12894 * made by the RB Allocator against the requested.
12895 * Takes care of PDCCH filling.
12897 * Invoked by: Common Scheduler
12899 * @param[in] RgSchCellCb *cell
12900 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): finalizes RAR (Random Access Response) allocations made by the
 * RB allocator: for each common-PDCCH RAR slot it walks the pending RA
 * requests, creates RA control blocks, requests Msg3 UL grants, queues RARs on
 * the DL subframe, fills PDCCH, and handles backoff-indicator / overload
 * cases. Unusable PDCCHs (no RA request survived) are returned to the pool.
 * NOTE(review): extraction gaps — loop bodies, braces and else branches are
 * missing from this excerpt; verify against upstream before editing logic. */
12905 PRIVATE Void rgSCHCmnDlRaRspFnlz
12908 RgSchCmnDlRbAllocInfo *allocInfo
12911 PRIVATE Void rgSCHCmnDlRaRspFnlz(cell, allocInfo)
12913 RgSchCmnDlRbAllocInfo *allocInfo;
12917 RgSchDlRbAlloc *raRspAlloc;
12918 RgSchDlSf *subFrm = NULLP;
12922 RgSchRaReqInfo *raReq;
12924 RgSchUlAlloc *ulAllocRef=NULLP;
12925 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
12926 U8 allocRapidCnt = 0;
12928 U32 msg3SchdIdx = 0;
12929 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
12934 for (rarCnt=0; rarCnt<RG_SCH_CMN_MAX_CMN_PDCCH; rarCnt++)
12936 raRspAlloc = &allocInfo->raRspAlloc[rarCnt];
12937 /* Having likely condition first for optimization */
12938 if (!raRspAlloc->pdcch)
12944 subFrm = raRspAlloc->dlSf;
12945 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
12946 /* Corrected RACH handling for multiple RAPIDs per RARNTI */
12947 allocRapidCnt = raRspAlloc->numRapids;
12948 while (allocRapidCnt)
12950 raReq = (RgSchRaReqInfo *)(reqLst->first->node);
12951 /* RACHO: If dedicated preamble, then allocate UL Grant
12952 * (consequence of handover/pdcchOrder) and continue */
12953 if (RGSCH_IS_DEDPRM(cell, raReq->raReq.rapId))
12955 rgSCHCmnHdlHoPo(cell, &subFrm->raRsp[rarCnt].contFreeUeLst,
12957 cmLListDelFrm(reqLst, reqLst->first);
12959 /* ccpu00117052 - MOD - Passing double pointer
12960 for proper NULLP assignment*/
12961 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
12962 sizeof(RgSchRaReqInfo));
12966 if(cell->overLoadBackOffEnab)
12967 {/* RACH overload control is triggered; skipping this RACH request */
12968 cmLListDelFrm(reqLst, reqLst->first);
12970 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
12971 sizeof(RgSchRaReqInfo));
12974 /* Attempt to include each RA request into the RSP */
12975 /* Any failure in the procedure is considered to */
12976 /* affect further allocations in the same TTI. When */
12977 /* a failure happens, we break out and complete */
12978 /* the processing for random access */
12979 if (rgSCHRamCreateRaCb(cell, &raCb, &err) != ROK)
12983 /* Msg3 allocation request to USM */
12984 if (raReq->raReq.rapId < cell->rachCfg.sizeRaPreambleGrpA)
12988 /*ccpu00128820 - MOD - Msg3 alloc double delete issue*/
12989 rgSCHCmnMsg3GrntReq(cell, raCb->tmpCrnti, preamGrpA, \
12990 &(raCb->msg3HqProc), &ulAllocRef, &raCb->msg3HqProcId);
12991 if (ulAllocRef == NULLP)
12993 rgSCHRamDelRaCb(cell, raCb, TRUE);
12996 if (raReq->raReq.cqiPres)
12998 raCb->ccchCqi = raReq->raReq.cqiIdx;
13002 raCb->ccchCqi = cellDl->ccchCqi;
13004 raCb->rapId = raReq->raReq.rapId;
13005 raCb->ta.pres = TRUE;
13006 raCb->ta.val = raReq->raReq.ta;
13007 raCb->msg3Grnt = ulAllocRef->grnt;
13008 /* Populating the tpc value received */
13009 raCb->msg3Grnt.tpc = raReq->raReq.tpc;
13010 /* PHR handling for MSG3 */
13011 ulAllocRef->raCb = raCb;
13013 /* To the crntTime, add the MIN time at which UE will
13014 * actually send MSG3 i.e DL_DELTA+6 */
13015 raCb->msg3AllocTime = cell->crntTime;
13016 RGSCH_INCR_SUB_FRAME(raCb->msg3AllocTime, RG_SCH_CMN_MIN_MSG3_RECP_INTRVL);
13018 msg3SchdIdx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) %
13019 RGSCH_NUM_SUB_FRAMES;
13020 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
13021 special subframe */
/* TDD: if the target SF is not an UL subframe, derive Msg3 time from
 * the TDD Msg3 subframe table instead */
13022 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][msg3SchdIdx] !=
13023 RG_SCH_TDD_UL_SUBFRAME)
13025 RGSCHCMNADDTOCRNTTIME(cell->crntTime,raCb->msg3AllocTime,
13026 RG_SCH_CMN_DL_DELTA)
13027 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][
13028 raCb->msg3AllocTime.slot];
13029 RGSCHCMNADDTOCRNTTIME(raCb->msg3AllocTime, raCb->msg3AllocTime,
13033 cmLListAdd2Tail(&subFrm->raRsp[rarCnt].raRspLst, &raCb->rspLnk);
13034 raCb->rspLnk.node = (PTR)raCb;
13035 cmLListDelFrm(reqLst, reqLst->first);
13037 /* ccpu00117052 - MOD - Passing double pointer
13038 for proper NULLP assignment*/
13039 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&raReq,
13040 sizeof(RgSchRaReqInfo));
13042 /* SR_RACH_STATS : RAR scheduled */
13047 /* Fill subframe data members */
13048 subFrm->raRsp[rarCnt].raRnti = raRspAlloc->rnti;
13049 subFrm->raRsp[rarCnt].pdcch = raRspAlloc->pdcch;
13050 subFrm->raRsp[rarCnt].tbSz = raRspAlloc->tbInfo[0].bytesAlloc;
13051 /* Fill PDCCH data members */
13052 rgSCHCmnFillPdcch(cell, subFrm->raRsp[rarCnt].pdcch, raRspAlloc);
13055 if(cell->overLoadBackOffEnab)
13056 {/* RACH overload control is triggered; advertise the configured backoff */
13057 subFrm->raRsp[rarCnt].backOffInd.pres = PRSNT_NODEF;
13058 subFrm->raRsp[rarCnt].backOffInd.val = cell->overLoadBackOffval;
13063 subFrm->raRsp[rarCnt].backOffInd.pres = NOTPRSNT;
13066 /*[ccpu00125212] Avoiding sending of empty RAR in case of RAR window
13067 is short and UE is sending unauthorised preamble.*/
13068 reqLst = &cell->raInfo.raReqLst[raRspAlloc->raIndex];
13069 if ((raRspAlloc->biEstmt) && (reqLst->count))
13071 subFrm->raRsp[0].backOffInd.pres = PRSNT_NODEF;
13072 /* Added as part of Upgrade */
13073 subFrm->raRsp[0].backOffInd.val =
13074 rgSCHCmnGetBiIndex(cell, reqLst->count);
13076 /* SR_RACH_STATS : Back Off Inds */
/* No RAR and no contention-free UE ended up on this slot: give back the
 * PDCCH that was grabbed for it */
13080 else if ((subFrm->raRsp[rarCnt].raRspLst.first == NULLP) &&
13081 (subFrm->raRsp[rarCnt].contFreeUeLst.first == NULLP))
13083 /* Return the grabbed PDCCH */
13084 rgSCHUtlPdcchPut(cell, &subFrm->pdcchInfo, raRspAlloc->pdcch);
13085 subFrm->raRsp[rarCnt].pdcch = NULLP;
13086 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnRaRspAlloc(): "
13087 "Not even one RaReq.");
13091 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId,
13092 "RNTI:%d Scheduled RAR @ (%u,%u) ",
13094 cell->crntTime.sfn,
13095 cell->crntTime.slot);
13101 * @brief This function computes rv.
13105 * Function: rgSCHCmnDlCalcRvForBcch
13106 * Purpose: This function computes rv.
13108 * Invoked by: Common Scheduler
13110 * @param[in] RgSchCellCb *cell
13111 * @param[in] Bool si
/* NOTE(review): computes the redundancy version for BCCH-on-DLSCH from the
 * transmission timing (SFN) — rv = ceil(3*k/2) mod 4 with k = (sfn/2) mod 4,
 * matching the SIB RV formula of 36.213. The `si`/`i` parameters are declared
 * in lines dropped from this excerpt (original numbers jump 13124 -> 13131);
 * verify against upstream before editing. */
13117 PRIVATE U8 rgSCHCmnDlCalcRvForBcch
13124 PRIVATE U8 rgSCHCmnDlCalcRvForBcch(cell, si, i)
13131 CmLteTimingInfo frm;
/* Evaluate RV at the actual transmission time (current time + DL delta) */
13133 frm = cell->crntTime;
13134 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
13142 k = (frm.sfn/2) % 4;
13144 rv = RGSCH_CEIL(3*k, 2) % 4;
13149 * @brief This function Processes the Final Allocations
13150 * made by the RB Allocator against the requested
13151 * BCCH/PCCH allocations. Assumption: The requested
13152 * allocations are always satisfied completely.
13153 * Hence no roll back.
13157 * Function: rgSCHCmnDlBcchPcchFnlz
13158 * Purpose: This function Processes the Final Allocations
13159 * made by the RB Allocator against the requested.
13160 * Takes care of PDCCH filling.
13162 * Invoked by: Common Scheduler
13164 * @param[in] RgSchCellCb *cell
13165 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
/* NOTE(review): finalizes BCCH/PCCH allocations made by the RB allocator:
 * fills subframe and PDCCH data for paging (PCCH) and system information
 * (BCCH: SIB1 or SI/warning messages), copies SI PDUs into the MAC-SCH
 * interface buffer, computes the BCCH RV, updates TB counters and tx-power
 * offsets. Assumes requested allocations were fully satisfied (no rollback).
 * NOTE(review): extraction gaps — #else lines, braces and some declarations
 * are missing from this excerpt (e.g. three alternative nextSfIdx initializers
 * appear consecutively at 13185/13188/13190, originally separated by
 * preprocessor conditionals); verify against upstream before editing logic. */
13170 PRIVATE Void rgSCHCmnDlBcchPcchFnlz
13173 RgSchCmnDlRbAllocInfo *allocInfo
13176 PRIVATE Void rgSCHCmnDlBcchPcchFnlz(cell, allocInfo)
13178 RgSchCmnDlRbAllocInfo *allocInfo;
13181 RgSchDlRbAlloc *rbAllocInfo;
13185 U8 nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
13187 #ifdef LTEMAC_HDFDD
13188 U8 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13190 U8 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13194 /* Moving variables to available scope for optimization */
13195 RgSchClcDlLcCb *pcch;
13198 RgSchClcDlLcCb *bcch;
13201 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* ---- PCCH (paging) finalization ---- */
13205 rbAllocInfo = &allocInfo->pcchAlloc;
13206 if (rbAllocInfo->pdcch)
13208 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
13210 /* Added sfIdx calculation for TDD as well */
13212 #ifdef LTEMAC_HDFDD
13213 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13215 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13218 subFrm = rbAllocInfo->dlSf;
13219 pcch = rgSCHDbmGetPcch(cell);
13222 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnDlBcchPcchFnlz( ): "
13223 "No Pcch Present");
13227 /* Added Dl TB count for paging message transmission*/
13229 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Consume and free the pending PCCH buffer-occupancy report */
13231 bo = (RgSchClcBoRpt *)pcch->boLst.first->node;
13232 cmLListDelFrm(&pcch->boLst, &bo->boLstEnt);
13233 /* ccpu00117052 - MOD - Passing double pointer
13234 for proper NULLP assignment*/
13235 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
13236 /* Fill subframe data members */
13237 subFrm->pcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
13238 subFrm->pcch.pdcch = rbAllocInfo->pdcch;
13239 /* Fill PDCCH data members */
13240 rgSCHCmnFillPdcch(cell, subFrm->pcch.pdcch, rbAllocInfo);
13241 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, pcch->lcId, TRUE);
13242 /* ccpu00132314-ADD-Update the tx power allocation info
13243 TODO-Need to add a check for max tx power per symbol */
13244 subfrmAlloc->cmnLcInfo.pcchInfo.txPwrOffset = cellDl->pcchTxPwrOffset;
/* ---- BCCH (system information) finalization ---- */
13248 rbAllocInfo = &allocInfo->bcchAlloc;
13249 if (rbAllocInfo->pdcch)
13251 RgInfSfAlloc *subfrmAlloc = &(cell->sfAllocArr[nextSfIdx]);
13253 #ifdef LTEMAC_HDFDD
13254 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
13256 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
13259 subFrm = rbAllocInfo->dlSf;
13261 /* Fill subframe data members */
13262 subFrm->bcch.tbSize = rbAllocInfo->tbInfo[0].bytesAlloc;
13263 subFrm->bcch.pdcch = rbAllocInfo->pdcch;
13264 /* Fill PDCCH data members */
13265 rgSCHCmnFillPdcch(cell, subFrm->bcch.pdcch, rbAllocInfo);
/* schdFirst distinguishes SIB1 (first BCCH-on-DLSCH) from later SI msgs */
13267 if(rbAllocInfo->schdFirst)
13270 bcch = rgSCHDbmGetFirstBcchOnDlsch(cell);
13271 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
13273 /*Copy the SIB1 msg buff into interface buffer */
13274 SCpyMsgMsg(cell->siCb.crntSiInfo.sib1Info.sib1,
13275 rgSchCb[cell->instIdx].rgSchInit.region,
13276 rgSchCb[cell->instIdx].rgSchInit.pool,
13277 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
13278 #endif/*RGR_SI_SCH*/
13279 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
13280 rgSCHCmnDlCalcRvForBcch(cell, FALSE, 0);
/* Non-SIB1 SI message (or warning SI) path */
13288 i = cell->siCb.siCtx.i;
13289 /*Decrement the retransmission count */
13290 cell->siCb.siCtx.retxCntRem--;
13292 /*Copy the SI msg buff into interface buffer */
13293 if(cell->siCb.siCtx.warningSiFlag == FALSE)
13295 SCpyMsgMsg(cell->siCb.siArray[cell->siCb.siCtx.siId-1].si,
13296 rgSchCb[cell->instIdx].rgSchInit.region,
13297 rgSchCb[cell->instIdx].rgSchInit.pool,
13298 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
13302 pdu = rgSCHUtlGetWarningSiPdu(cell);
13303 RGSCH_NULL_CHECK(cell->instIdx, pdu);
13305 rgSchCb[cell->instIdx].rgSchInit.region,
13306 rgSchCb[cell->instIdx].rgSchInit.pool,
13307 &subfrmAlloc->cmnLcInfo.bcchInfo.pdu);
/* Last retransmission of a warning SI: release the PDU and clear the flag */
13308 if(cell->siCb.siCtx.retxCntRem == 0)
13310 rgSCHUtlFreeWarningSiPdu(cell);
13311 cell->siCb.siCtx.warningSiFlag = FALSE;
13316 bcch = rgSCHDbmGetSecondBcchOnDlsch(cell);
13317 bo = (RgSchClcBoRpt *)bcch->boLst.first->node;
13319 if(bo->retxCnt != cell->siCfg.retxCnt-1)
13324 #endif/*RGR_SI_SCH*/
13325 subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv =
13326 rgSCHCmnDlCalcRvForBcch(cell, TRUE, i);
13329 /* Added Dl TB count for SIB1 and SI messages transmission.
13330 * This counter will be incremented only for the first transmission
13331 * (with RV 0) of these messages*/
13333 if(subFrm->bcch.pdcch->dci.u.format1aInfo.t.pdschInfo.allocInfo.rv == 0)
13335 cell->dlUlTbCnt.tbTransDlTotalCnt++;
/* Free the BO report once all (re)transmissions are exhausted */
13339 if(bo->retxCnt == 0)
13341 cmLListDelFrm(&bcch->boLst, &bo->boLstEnt);
13342 /* ccpu00117052 - MOD - Passing double pointer
13343 for proper NULLP assignment*/
13344 rgSCHUtlFreeSBuf(cell->instIdx, (Data **)&bo, sizeof(RgSchClcBoRpt));
13346 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, bcch->lcId, sendInd);
13348 /*Fill the interface info */
13349 rgSCHUtlFillRgInfCmnLcInfo(subFrm, subfrmAlloc, NULLD, NULLD);
13351 /* ccpu00132314-ADD-Update the tx power allocation info
13352 TODO-Need to add a check for max tx power per symbol */
13353 subfrmAlloc->cmnLcInfo.bcchInfo.txPwrOffset = cellDl->bcchTxPwrOffset;
13355 /*mBuf has been already copied above */
13356 #endif/*RGR_SI_SCH*/
13369 * Function: rgSCHCmnUlSetAllUnSched
13372 * Invoked by: Common Scheduler
13374 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
/* NOTE(review): moves every UE from the UL allocation request lists
 * (contention-resolution, retransmission, and new-transmission lists) to the
 * corresponding non-scheduled lists — i.e. marks all pending UL requests as
 * unscheduled for this TTI. Each pair of lines below is the body of a while
 * loop whose header was dropped by the extraction (original line numbers jump:
 * 13391 -> 13394 etc.); verify against upstream before editing. */
13379 PRIVATE Void rgSCHCmnUlSetAllUnSched
13381 RgSchCmnUlRbAllocInfo *allocInfo
13384 PRIVATE Void rgSCHCmnUlSetAllUnSched(allocInfo)
13385 RgSchCmnUlRbAllocInfo *allocInfo;
13391 node = allocInfo->contResLst.first;
13394 rgSCHCmnUlMov2NonSchdCntResLst(allocInfo, (RgSchUeCb *)node->node);
13395 node = allocInfo->contResLst.first;
13398 node = allocInfo->retxUeLst.first;
13401 rgSCHCmnUlMov2NonSchdRetxUeLst(allocInfo, (RgSchUeCb *)node->node);
13402 node = allocInfo->retxUeLst.first;
13405 node = allocInfo->ueLst.first;
13408 rgSCHCmnUlMov2NonSchdUeLst(allocInfo, (RgSchUeCb *)node->node);
13409 node = allocInfo->ueLst.first;
13421 * Function: rgSCHCmnUlAdd2CntResLst
13424 * Invoked by: Common Scheduler
13426 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13427 * @param[in] RgSchUeCb *ue
/* NOTE(review): appends the UE's UL allocation request link to the
 * contention-resolution request list of the common UL allocation info and
 * anchors the link back to the UE. Unlike rgSCHCmnUlAdd2UeLst below, no
 * duplicate-membership check is visible here — presumably callers guarantee
 * the UE is not already queued (TODO confirm against upstream). */
13432 Void rgSCHCmnUlAdd2CntResLst
13434 RgSchCmnUlRbAllocInfo *allocInfo,
13438 Void rgSCHCmnUlAdd2CntResLst(allocInfo, ue)
13439 RgSchCmnUlRbAllocInfo *allocInfo;
13443 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,ue->cell))->alloc);
13444 cmLListAdd2Tail(&allocInfo->contResLst, &ulAllocInfo->reqLnk);
13445 ulAllocInfo->reqLnk.node = (PTR)ue;
13454 * Function: rgSCHCmnUlAdd2UeLst
13457 * Invoked by: Common Scheduler
13459 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
13460 * @param[in] RgSchUeCb *ue
/* NOTE(review): appends the UE's UL allocation request link to the
 * new-transmission UE list, guarded so a UE already linked (reqLnk.node set)
 * is not queued twice. */
13465 Void rgSCHCmnUlAdd2UeLst
13468 RgSchCmnUlRbAllocInfo *allocInfo,
13472 Void rgSCHCmnUlAdd2UeLst(cell, allocInfo, ue)
13474 RgSchCmnUlRbAllocInfo *allocInfo;
13478 RgSchCmnUeUlAlloc *ulAllocInfo = &((RG_SCH_CMN_GET_UL_UE(ue,cell))->alloc);
/* Only enqueue if not already a member of some request list */
13479 if (ulAllocInfo->reqLnk.node == NULLP)
13481 cmLListAdd2Tail(&allocInfo->ueLst, &ulAllocInfo->reqLnk);
13482 ulAllocInfo->reqLnk.node = (PTR)ue;
13492 * Function: rgSCHCmnAllocUlRb
13493 * Purpose: To do RB allocations for uplink
13495 * Invoked by: Common Scheduler
13497 * @param[in] RgSchCellCb *cell
13498 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
/* NOTE(review): entry point for UL RB allocation — delegates new-transmission
 * scheduling for the whole pending UE list to rgSCHCmnUlRbAllocForLst,
 * splitting it into scheduled and non-scheduled lists for the subframe held
 * in allocInfo->sf. */
13502 Void rgSCHCmnAllocUlRb
13505 RgSchCmnUlRbAllocInfo *allocInfo
13508 Void rgSCHCmnAllocUlRb(cell, allocInfo)
13510 RgSchCmnUlRbAllocInfo *allocInfo;
13513 RgSchUlSf *sf = allocInfo->sf;
13515 /* Schedule for new transmissions */
13516 rgSCHCmnUlRbAllocForLst(cell, sf, allocInfo->ueLst.count,
13517 &allocInfo->ueLst, &allocInfo->schdUeLst,
13518 &allocInfo->nonSchdUeLst, (Bool)TRUE);
13522 /***********************************************************
13524 * Func : rgSCHCmnUlRbAllocForLst
13526 * Desc : Allocate for a list in cmn rb alloc information passed
13535 **********************************************************/
/* NOTE(review): allocates UL RBs for each UE on the request list: initializes
 * the output scheduled/non-scheduled lists, records per-TTI UL UE info and
 * PUSCH timing (TDD k-table or FDD delta), then tries per-UE allocation via
 * rgSCHCmnUlRbAllocForUe; successes go to schdLst (and fill alloc info),
 * failures and all remaining UEs (once holes run out) go to nonSchdLst.
 * NOTE(review): extraction gaps — #ifdef/#else branches, braces and loop
 * internals are missing from this excerpt; verify against upstream before
 * editing logic. */
13537 PRIVATE Void rgSCHCmnUlRbAllocForLst
13543 CmLListCp *schdLst,
13544 CmLListCp *nonSchdLst,
13548 PRIVATE Void rgSCHCmnUlRbAllocForLst(cell, sf, count, reqLst, schdLst,
13549 nonSchdLst, isNewTx)
13554 CmLListCp *schdLst;
13555 CmLListCp *nonSchdLst;
13564 CmLteTimingInfo timeInfo;
13568 if(schdLst->count == 0)
13570 cmLListInit(schdLst);
13573 cmLListInit(nonSchdLst);
13575 if(isNewTx == TRUE)
13577 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.numUes = (U8) count;
/* TDD: PUSCH timing = control time + k from the TDD PUSCH tx k-table */
13579 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime, timeInfo, TFU_ULCNTRL_DLDELTA);
13580 k = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][timeInfo.subframe];
13581 RG_SCH_ADD_TO_CRNT_TIME(timeInfo,
13582 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo, k);
13584 RG_SCH_ADD_TO_CRNT_TIME(cell->crntTime,cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.timingInfo,
13585 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
13590 for (lnk = reqLst->first; count; lnk = lnk->next, --count)
13592 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
13593 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
/* No free UL hole left: remaining UEs fall through to nonSchdLst below */
13598 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
13603 ueUl->subbandShare = ueUl->subbandRequired;
13604 if(isNewTx == TRUE)
13606 maxRb = RGSCH_MIN((ueUl->subbandRequired * MAX_5GTF_VRBG_SIZE), ue->ue5gtfCb.maxPrb);
13608 ret = rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole);
13611 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, schdLst);
13612 rgSCHCmnUlUeFillAllocInfo(cell, ue);
13616 gUl5gtfRbAllocFail++;
13617 #if defined (TENB_STATS) && defined (RG_5GTF)
13618 cell->tenbStats->sch.ul5gtfRbAllocFail++;
13620 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
13621 ue->isMsg4PdcchWithCrnti = FALSE;
13622 ue->isSrGrant = FALSE;
13625 if(isNewTx == TRUE)
13627 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
13628 ulAllocInfo[count - 1].rnti = ue->ueId;
13629 cell->sfAllocArr[cell->crntSfIdx].ulUeInfo.
13630 ulAllocInfo[count - 1].numPrb = ue->ul.nPrb;
13633 ueUl->subbandShare = 0; /* This reset will take care of
13634 * all scheduler types */
/* Drain any UEs left over after holes were exhausted */
13636 for (; count; lnk = lnk->next, --count)
13638 RgSchUeCb *ue = (RgSchUeCb *)lnk->node;
13639 rgSCHCmnUlRbAllocAddUeToLst(cell, ue, nonSchdLst);
13640 ue->isMsg4PdcchWithCrnti = FALSE;
13647 /***********************************************************
13649 * Func : rgSCHCmnUlMdfyGrntForCqi
13651 * Desc : Modify UL Grant to consider presence of
13652 * CQI along with PUSCH Data.
13657 * - Scale down iTbs based on betaOffset and
13658 * size of Acqi Size.
13659 * - Optionally attempt to increase numSb by 1
13660 * if input payload size does not fit in due
13661 * to reduced tbSz as a result of iTbsNew.
13665 **********************************************************/
/* NOTE(review): adjusts an UL grant to account for aperiodic CQI/RI and HARQ
 * feedback multiplexed on PUSCH: estimates the REs consumed by CQI/RI and
 * HARQ (scaled by beta offsets, in parts-per-1000), derives the resulting
 * PUSCH data efficiency, and iteratively alternates between adding a subband
 * and stepping iTbs down until the efficiency target (effTgt) is met. Returns
 * failure when nPrb has hit maxRb and iTbs is already <= 10.
 * NOTE(review): extraction gaps — do/else/brace lines are missing from this
 * excerpt (the closing `}while (1);` survives, the `do` does not); verify
 * against upstream before editing logic. */
13667 PRIVATE S16 rgSCHCmnUlMdfyGrntForCqi
13679 PRIVATE S16 rgSCHCmnUlMdfyGrntForCqi(cell, ue, maxRb, numSb, iTbs, hqSz, stepDownItbs, effTgt)
13690 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(ue->cell);
13695 U32 remREsForPusch;
13698 U32 betaOffVal = ue->ul.betaOffstVal;
13699 U32 cqiRiRptSz = ue->ul.cqiRiSz;
13700 U32 betaOffHqVal = rgSchCmnBetaHqOffstTbl[ue->ul.betaHqOffst];
13701 U32 resNumSb = *numSb;
13702 U32 puschEff = 1000;
13705 Bool mdfyiTbsFlg = FALSE;
13706 U8 resiTbs = *iTbs;
/* Derive modulation order from the candidate iTbs; cap by UE category */
13712 iMcs = rgSCHCmnUlGetIMcsFrmITbs(resiTbs, RG_SCH_CMN_GET_UE_CTGY(ue));
13713 RG_SCH_UL_MCS_TO_MODODR(iMcs, modOdr);
13714 if (RG_SCH_CMN_GET_UE_CTGY(ue) != CM_LTE_UE_CAT_5)
13716 modOdr = RGSCH_MIN(RGSCH_QM_QPSK, modOdr);
13720 modOdr = RGSCH_MIN(RGSCH_QM_64QAM, modOdr);
13722 nPrb = resNumSb * cellUl->sbSize;
13723 /* Restricting the minumum iTbs requried to modify to 10 */
13724 if ((nPrb >= maxRb) && (resiTbs <= 10))
13726 /* Could not accomodate ACQI */
13729 totREs = nPrb * RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
13730 tbSz = rgTbSzTbl[0][resiTbs][nPrb-1];
13731 /* totalREs/tbSz = num of bits perRE. */
13732 cqiRiREs = (totREs * betaOffVal * cqiRiRptSz)/(1000 * tbSz); /* betaOffVal is represented
13733 as parts per 1000 */
13734 hqREs = (totREs * betaOffHqVal * hqSz)/(1000 * tbSz);
13735 if ((cqiRiREs + hqREs) < totREs)
13737 remREsForPusch = totREs - cqiRiREs - hqREs;
13738 bitsPerRe = (tbSz * 1000)/remREsForPusch; /* Multiplying by 1000 for integer arithmetic */
13739 puschEff = bitsPerRe/modOdr;
13741 if (puschEff < effTgt)
13743 /* ensure resultant efficiency for PUSCH Data is within 0.93*/
13748 /* Alternate between increasing SB or decreasing iTbs until eff is met */
13749 if (mdfyiTbsFlg == FALSE)
13753 resNumSb = resNumSb + 1;
13755 mdfyiTbsFlg = TRUE;
13761 resiTbs-= stepDownItbs;
13763 mdfyiTbsFlg = FALSE;
13766 }while (1); /* Loop breaks if efficency is met
13767 or returns RFAILED if not able to meet the efficiency */
13776 /***********************************************************
13778 * Func : rgSCHCmnUlRbAllocForUe
13780 * Desc : Do uplink RB allocation for an UE.
13784 * Notes: Note that as of now, for retx, maxRb
13785 * is not considered. Alternatives, such
13786 * as dropping retx if it crosses maxRb
13787 * could be considered.
13791 **********************************************************/
/* NOTE(review): performs UL RB allocation for a single UE (5GTF path):
 * obtains an available UL HARQ process and a PDCCH, picks the DCI format from
 * the UE's rank, carves VRBGs out of the subframe beam info via
 * rgSCHCmnUlSbAlloc (releasing the PDCCH on failure), then fills the grant
 * (RIV, MCS, mod order, HARQ proc id, data size from the 5GTF TB-size table
 * scaled by rank) and records the allocation in the UE's UL alloc context.
 * Note: per the header comment (outside this excerpt), maxRb is not enforced
 * for retx. The `hole` parameter is unused in the visible lines — presumably
 * consumed in dropped code; TODO confirm against upstream.
 * NOTE(review): extraction gaps — if/else branches, braces and several
 * declarations are missing from this excerpt; verify before editing logic. */
13793 PRIVATE S16 rgSCHCmnUlRbAllocForUe
13802 PRIVATE S16 rgSCHCmnUlRbAllocForUe(cell, sf, ue, maxRb, hole)
13810 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
13811 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue, cell);
13812 RgSchUlAlloc *alloc = NULLP;
13818 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->schdHqProcIdx];
13820 RgSchUlHqProcCb *proc = NULLP;
13826 TfuDciFormat dciFrmt;
13831 rgSCHUhmGetAvlHqProc(cell, ue, &proc);
13834 //printf("UE [%d] HQ Proc unavailable\n", ue->ueId);
/* Rank-2 UEs use DCI format A2, otherwise A1 */
13839 if (ue->ue5gtfCb.rank == 2)
13841 dciFrmt = TFU_DCI_FORMAT_A2;
13846 dciFrmt = TFU_DCI_FORMAT_A1;
13849 /* 5gtf TODO : To pass dci frmt to this function */
13850 pdcch = rgSCHCmnPdcchAllocCrntSf(cell, ue);
13853 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
13854 "rgSCHCmnUlRbAllocForUe(): Could not get PDCCH for CRNTI:%d",ue->ueId);
13857 gUl5gtfPdcchSchd++;
13858 #if defined (TENB_STATS) && defined (RG_5GTF)
13859 cell->tenbStats->sch.ul5gtfPdcchSchd++;
13862 //TODO_SID using configured prb as of now
13863 nPrb = ue->ue5gtfCb.maxPrb;
13864 reqVrbg = nPrb/MAX_5GTF_VRBG_SIZE;
13865 iMcs = ue->ue5gtfCb.mcs; //gSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Sanity check on per-beam VRBG bookkeeping */
13869 if((sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart > MAX_5GTF_VRBG)
13870 || (sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated > MAX_5GTF_VRBG))
13872 printf("5GTF_ERROR vrbg > 25 valstart = %d valalloc %d\n", sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart
13873 , sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated);
13878 /*TODO_SID: Workaround for alloc. Currently alloc is ulsf based. To handle multiple beams, we need a different
13879 design. Now alloc are formed based on MAX_5GTF_UE_SCH macro. */
13880 numVrbgTemp = MAX_5GTF_VRBG/MAX_5GTF_UE_SCH;
13883 alloc = rgSCHCmnUlSbAlloc(sf, numVrbgTemp,\
13886 if (alloc == NULLP)
13888 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
13889 "rgSCHCmnUlRbAllocForUe(): Could not get UlAlloc %d CRNTI:%d",numVrbg,ue->ueId);
/* Allocation failed: return the PDCCH grabbed earlier */
13890 rgSCHCmnPdcchRlsCrntSf(cell, pdcch);
13893 gUl5gtfAllocAllocated++;
13894 #if defined (TENB_STATS) && defined (RG_5GTF)
13895 cell->tenbStats->sch.ul5gtfAllocAllocated++;
13897 alloc->grnt.vrbgStart = sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart;
13898 alloc->grnt.numVrbg = numVrbg;
13899 alloc->grnt.numLyr = numLyr;
13900 alloc->grnt.dciFrmt = dciFrmt;
13902 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].vrbgStart += numVrbg;
13903 sf->sfBeamInfo[ue->ue5gtfCb.BeamId].totVrbgAllocated += numVrbg;
13905 //rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
13907 sf->totPrb += alloc->grnt.numRb;
13908 ue->ul.nPrb = alloc->grnt.numRb;
13910 if (ue->csgMmbrSta != TRUE)
13912 cellUl->ncsgPrbCnt += alloc->grnt.numRb;
13914 cellUl->totPrbCnt += (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
13915 alloc->pdcch = pdcch;
13916 alloc->grnt.iMcs = iMcs;
13917 alloc->grnt.iMcsCrnt = iMcsCrnt;
13918 alloc->grnt.hop = 0;
13919 /* Initial Num RBs support for UCI on PUSCH */
13921 ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
13923 alloc->forMsg3 = FALSE;
13924 //RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTb5gtfSzTbl[0], (iTbs));
13926 //ueUl->alloc.allocdBytes = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
13927 /* TODO_SID Allocating based on configured MCS as of now.
13928 Currently for format A2. When doing multi grp per tti, need to update this. */
13929 ueUl->alloc.allocdBytes = (rgSch5gtfTbSzTbl[iMcs]/8) * ue->ue5gtfCb.rank;
13931 alloc->grnt.datSz = ueUl->alloc.allocdBytes;
13932 //TODO_SID Need to check mod order.
13933 RG_SCH_CMN_TBS_TO_MODODR(iMcs, alloc->grnt.modOdr);
13934 //alloc->grnt.modOdr = 6;
13935 alloc->grnt.isRtx = FALSE;
13937 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
13938 alloc->grnt.SCID = 0;
13939 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
13940 alloc->grnt.PMI = 0;
13941 alloc->grnt.uciOnxPUSCH = 0;
13942 alloc->grnt.hqProcId = proc->procId;
13944 alloc->hqProc = proc;
13945 alloc->hqProc->ulSfIdx = cellUl->schdIdx;
13947 /*commenting to retain the rnti used for transmission SPS/c-rnti */
13948 alloc->rnti = ue->ueId;
13949 ueUl->alloc.alloc = alloc;
13950 /*rntiwari-Adding the debug for generating the graph.*/
13951 /* No grant attr recorded now */
13955 /***********************************************************
13957 * Func : rgSCHCmnUlRbAllocAddUeToLst
13959 * Desc : Add UE to list (scheduled/non-scheduled list)
13960 * for UL RB allocation information.
13968 **********************************************************/
/* NOTE(review): appends the UE's UL scheduling link to the given list
 * (scheduled or non-scheduled) and anchors the link back to the UE; also bumps
 * the global / per-cell 5GTF RB-allocation-done counters. */
13970 Void rgSCHCmnUlRbAllocAddUeToLst
13977 Void rgSCHCmnUlRbAllocAddUeToLst(cell, ue, lst)
13983 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
13986 gUl5gtfUeRbAllocDone++;
13987 #if defined (TENB_STATS) && defined (RG_5GTF)
13988 cell->tenbStats->sch.ul5gtfUeRbAllocDone++;
13990 cmLListAdd2Tail(lst, &ueUl->alloc.schdLstLnk);
13991 ueUl->alloc.schdLstLnk.node = (PTR)ue;
13996 * @brief This function Processes the Final Allocations
13997 * made by the RB Allocator against the requested.
14001 * Function: rgSCHCmnUlAllocFnlz
14002 * Purpose: This function Processes the Final Allocations
14003 * made by the RB Allocator against the requested.
14005 * Invoked by: Common Scheduler
14007 * @param[in] RgSchCellCb *cell
14008 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
/* NOTE(review): thin dispatcher — finalizes UL allocations by delegating to
 * the scheduler-specific UL finalization hook registered in the common cell
 * control block (cellSch->apisUl). */
14013 PRIVATE Void rgSCHCmnUlAllocFnlz
14016 RgSchCmnUlRbAllocInfo *allocInfo
14019 PRIVATE Void rgSCHCmnUlAllocFnlz(cell, allocInfo)
14021 RgSchCmnUlRbAllocInfo *allocInfo;
14024 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14026 /* call scheduler specific Finalization */
14027 cellSch->apisUl->rgSCHUlAllocFnlz(cell, allocInfo);
14033 * @brief This function Processes the Final Allocations
14034 * made by the RB Allocator against the requested.
14038 * Function: rgSCHCmnDlAllocFnlz
14039 * Purpose: This function Processes the Final Allocations
14040 * made by the RB Allocator against the requested.
14042 * Invoked by: Common Scheduler
14044 * @param[in] RgSchCellCb *cell
14049 Void rgSCHCmnDlAllocFnlz
14054 Void rgSCHCmnDlAllocFnlz(cell)
14058 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14059 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
/* Finalize common-channel allocations first: CCCH (conn setup)
 * retransmissions before new transmissions. */
14062 rgSCHCmnDlCcchRetxFnlz(cell, allocInfo);
14063 rgSCHCmnDlCcchTxFnlz(cell, allocInfo);
14065 /* Added below functions for handling CCCH SDU transmission received
14067 * * guard timer expiry*/
14068 rgSCHCmnDlCcchSduRetxFnlz(cell, allocInfo);
14069 rgSCHCmnDlCcchSduTxFnlz(cell, allocInfo);
/* RA Response allocations, then hand off dedicated-channel
 * finalization to the specific DL scheduler via its API table. */
14071 rgSCHCmnDlRaRspFnlz(cell, allocInfo);
14072 /* call scheduler specific Finalization */
14073 cellSch->apisDl->rgSCHDlAllocFnlz(cell, allocInfo);
14075 /* Stack Crash problem for TRACE5 Changes. Added the return below */
14082 * @brief Update an uplink subframe.
14086 * Function : rgSCHCmnUlUpdSf
14088 * For each allocation
14089 * - if no more tx needed
14090 * - Release allocation
14092 * - Perform retransmission
14094 * @param[in] RgSchUlSf *sf
14098 PRIVATE Void rgSCHCmnUlUpdSf
14101 RgSchCmnUlRbAllocInfo *allocInfo,
14105 PRIVATE Void rgSCHCmnUlUpdSf(cell, allocInfo, sf)
14107 RgSchCmnUlRbAllocInfo *allocInfo;
/* Drain the subframe's allocation list; each iteration either queues
 * the alloc for retransmission or releases it, so the list head
 * advances and the loop terminates. */
14113 while ((lnk = sf->allocs.first))
14115 RgSchUlAlloc *alloc = (RgSchUlAlloc *)lnk->node;
/* CRC was received OK, or no retransmission attempts remain:
 * nothing more to send for this HARQ process. */
14118 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
14123 /* If need to handle all retx together, run another loop separately */
14124 rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc);
14126 rgSCHCmnUlRlsUlAlloc(cell, sf, alloc);
14129 /* By this time, all allocs would have been cleared and
14130 * SF is reset to be made ready for new allocations. */
14131 rgSCHCmnUlSfReset(cell, sf);
14132 /* In case there are timing problems due to msg3
14133 * allocations being done in advance, (which will
14134 * probably happen with the current FDD code that
14135 * handles 8 subframes) one solution
14136 * could be to hold the (recent) msg3 allocs in a separate
14137 * list, and then possibly add that to the actual
14138 * list later. So at this time while allocations are
14139 * traversed, the recent msg3 ones are not seen. Anytime after
14140 * this (a good time is when the usual allocations
14141 * are made), msg3 allocations could be transferred to the
14142 * normal list. Not doing this now as it is assumed
14143 * that incorporation of TDD shall take care of this.
14151 * @brief Handle uplink allocation for retransmission.
14155 * Function : rgSCHCmnUlHndlAllocRetx
14157 * Processing Steps:
14158 * - Add to queue for retx.
14159 * - Do not release here, release happens as part
14160 * of the loop that calls this function.
14162 * @param[in] RgSchCellCb *cell
14163 * @param[in] RgSchCmnUlRbAllocInfo *allocInfo
14164 * @param[in] RgSchUlSf *sf
14165 * @param[in] RgSchUlAlloc *alloc
14169 PRIVATE Void rgSCHCmnUlHndlAllocRetx
14172 RgSchCmnUlRbAllocInfo *allocInfo,
14174 RgSchUlAlloc *alloc
14177 PRIVATE Void rgSCHCmnUlHndlAllocRetx(cell, allocInfo, sf, alloc)
14179 RgSchCmnUlRbAllocInfo *allocInfo;
14181 RgSchUlAlloc *alloc;
14185 RgSchCmnUlUe *ueUl;
/* Recompute the TB size in bytes from the (iTBS, numRb) entry of the
 * transport-block size table; /8 converts bits to bytes. */
14187 rgTbSzTbl[0][rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs)]\
14188 [alloc->grnt.numRb-1]/8;
/* Dedicated (non-msg3) allocation: record the retx byte requirement
 * and queue the UE for adaptive retransmission scheduling. */
14189 if (!alloc->forMsg3)
14191 ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue);
14192 ueUl->alloc.reqBytes = bytes;
14193 rgSCHUhmRetx(alloc->hqProc);
14194 rgSCHCmnUlAdd2RetxUeLst(allocInfo, alloc->ue);
14198 /* RACHO msg3 retx handling. Part of RACH procedure changes. */
/* Msg3 retx is non-adaptive: grab a fresh allocation of the same
 * number of subbands in the same subframe. */
14199 retxAlloc = rgSCHCmnUlGetUlAlloc(cell, sf, alloc->numSb);
14200 if (retxAlloc == NULLP)
14202 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
14203 "rgSCHCmnUlRbAllocForUe():Could not get UlAlloc for msg3Retx RNTI:%d",
/* Clone the grant; iMcsCrnt is derived from the HARQ redundancy
 * version so the UE combines the retransmission correctly. */
14207 retxAlloc->grnt.iMcs = alloc->grnt.iMcs;
14208 retxAlloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl\
14209 [alloc->hqProc->rvIdx];
14210 retxAlloc->grnt.nDmrs = 0;
14211 retxAlloc->grnt.hop = 0;
14212 retxAlloc->grnt.delayBit = 0;
14213 retxAlloc->rnti = alloc->rnti;
14214 retxAlloc->ue = NULLP;
14215 retxAlloc->pdcch = FALSE;
14216 retxAlloc->forMsg3 = TRUE;
14217 retxAlloc->raCb = alloc->raCb;
14218 retxAlloc->hqProc = alloc->hqProc;
14219 rgSCHUhmRetx(retxAlloc->hqProc);
14226 * @brief Uplink Scheduling Handler.
14230 * Function: rgSCHCmnUlAlloc
14231 * Purpose: This function Handles Uplink Scheduling.
14233 * Invoked by: Common Scheduler
14235 * @param[in] RgSchCellCb *cell
14238 /* ccpu00132653- The definition of this function made common for TDD and FDD*/
14240 PRIVATE Void rgSCHCmnUlAlloc
14245 PRIVATE Void rgSCHCmnUlAlloc(cell)
14249 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14250 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
14251 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
14252 RgSchCmnUlRbAllocInfo allocInfo;
14253 RgSchCmnUlRbAllocInfo *allocInfoRef = &allocInfo;
14260 /* Initializing RgSchCmnUlRbAllocInfo structure */
14261 rgSCHCmnInitUlRbAllocInfo(allocInfoRef);
/* Pick the UL subframe currently being scheduled (schdIdx is
 * maintained by the per-TTI UL timing machinery). */
14263 /* Get Uplink Subframe */
14264 allocInfoRef->sf = &cellUl->ulSfArr[cellUl->schdIdx];
14266 /* initializing the UL PRB count */
14267 allocInfoRef->sf->totPrb = 0;
14271 rgSCHCmnSpsUlTti(cell, allocInfoRef);
/* Subframe has no allocations yet: refresh its hole database and
 * subband count against the current CFI before scheduling. */
14274 if(*allocInfoRef->sf->allocCountRef == 0)
14278 if ((hole = rgSCHUtlUlHoleFirst(allocInfoRef->sf)) != NULLP)
14280 /* Sanity check of holeDb */
14281 if (allocInfoRef->sf->holeDb->count == 1 && hole->start == 0)
14283 hole->num = cell->dynCfiCb.bwInfo[cellDl->currCfi].numSb;
14284 /* Re-Initialize available subbands because of CFI change*/
14285 allocInfoRef->sf->availSubbands = cell->dynCfiCb.\
14286 bwInfo[cellDl->currCfi].numSb;
14287 /*Currently initializing 5gtf ulsf specific initialization here.
14288 need to do at proper place */
14290 allocInfoRef->sf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
14291 allocInfoRef->sf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
14292 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
14294 allocInfoRef->sf->sfBeamInfo[idx].totVrbgAllocated = 0;
14295 allocInfoRef->sf->sfBeamInfo[idx].totVrbgRequired = 0;
14296 allocInfoRef->sf->sfBeamInfo[idx].vrbgStart = 0;
14302 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
14303 "Error! holeDb sanity check failed");
14308 /* Fix: Adaptive re-transmissions prioritised over other transmissions */
14309 /* perform adaptive retransmissions */
14310 rgSCHCmnUlSfReTxAllocs(cell, allocInfoRef->sf);
14314 /* Fix: syed Adaptive Msg3 Retx crash. Release all
14315 Harq processes for which adap Retx failed, to avoid
14316 blocking. This step should be done before New TX
14317 scheduling to make hqProc available. Right now we
14318 dont check if proc is in adap Retx list for considering
14319 it to be available. But now with this release that
14320 functionality would be correct. */
14322 rgSCHCmnUlSfRlsRetxProcs(cell, allocInfoRef->sf);
14325 /* Specific UL scheduler to perform UE scheduling */
14326 cellSch->apisUl->rgSCHUlSched(cell, allocInfoRef);
14328 /* Call UL RB allocator module */
14329 rgSCHCmnAllocUlRb(cell, allocInfoRef);
14331 /* Do group power control for PUSCH */
14332 rgSCHCmnGrpPwrCntrlPusch(cell, allocInfoRef->sf);
14334 cell->sc.apis->rgSCHDrxStrtInActvTmrInUl(cell);
14336 rgSCHCmnUlAllocFnlz(cell, allocInfoRef);
/* 5GTF debug: reset the per-run scheduling counters after 5000 TTIs. */
14337 if(5000 == g5gtfTtiCnt)
14339 ul5gtfsidDlAlreadyMarkUl = 0;
14340 ul5gtfsidDlSchdPass = 0;
14341 ul5gtfsidUlMarkUl = 0;
14342 ul5gtfTotSchdCnt = 0;
14350 * @brief send Subframe Allocations.
14354 * Function: rgSCHCmnSndCnsldtInfo
14355 * Purpose: Send the scheduled
14356 * allocations to MAC for StaInd generation to Higher layers and
14357 * for MUXing. PST's RgInfSfAlloc to MAC instance.
14359 * Invoked by: Common Scheduler
14361 * @param[in] RgSchCellCb *cell
14365 Void rgSCHCmnSndCnsldtInfo
14370 Void rgSCHCmnSndCnsldtInfo(cell)
14374 RgInfSfAlloc *subfrmAlloc;
14376 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Slot of the circular per-subframe allocation array to be shipped. */
14379 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
14381 /* Send the allocations to MAC for MUXing */
14382 rgSCHUtlGetPstToLyr(&pst, &rgSchCb[cell->instIdx], cell->macInst);
14383 subfrmAlloc->cellId = cell->cellId;
14384 /* Populate the List of UEs needing PDB-based Flow control */
14385 cellSch->apisDl->rgSCHDlFillFlwCtrlInfo(cell, subfrmAlloc);
/* Post to MAC only if there is anything to convey; the two similar
 * condition lists below are alternate #ifdef (EMTC) branches whose
 * directives are elided in this extract. */
14387 if((subfrmAlloc->rarInfo.numRaRntis) ||
14389 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
14390 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
14391 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
14393 (subfrmAlloc->ueInfo.numUes) ||
14394 (subfrmAlloc->cmnLcInfo.bitMask) ||
14395 (subfrmAlloc->ulUeInfo.numUes) ||
14396 (subfrmAlloc->flowCntrlInfo.numUes))
14398 if((subfrmAlloc->rarInfo.numRaRntis) ||
14400 (subfrmAlloc->emtcInfo.rarInfo.numRaRntis) ||
14401 (subfrmAlloc->emtcInfo.cmnLcInfo.bitMask) ||
14402 (subfrmAlloc->emtcInfo.ueInfo.numUes) ||
14404 (subfrmAlloc->ueInfo.numUes) ||
14405 (subfrmAlloc->cmnLcInfo.bitMask) ||
14406 (subfrmAlloc->flowCntrlInfo.numUes))
14409 RgSchMacSfAlloc(&pst, subfrmAlloc);
/* Advance the circular index; the two modulus choices are alternate
 * compile-time branches (elided directives). */
14412 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_NUM_SUB_FRAMES;
14414 cell->crntSfIdx = (cell->crntSfIdx + 1) % RGSCH_SF_ALLOC_SIZE;
14420 * @brief Consolidate Subframe Allocations.
14424 * Function: rgSCHCmnCnsldtSfAlloc
14425 * Purpose: Consolidate Subframe Allocations.
14427 * Invoked by: Common Scheduler
14429 * @param[in] RgSchCellCb *cell
14433 Void rgSCHCmnCnsldtSfAlloc
14438 Void rgSCHCmnCnsldtSfAlloc(cell)
14442 RgInfSfAlloc *subfrmAlloc;
14443 CmLteTimingInfo frm;
14445 CmLListCp dlDrxInactvTmrLst;
14446 CmLListCp dlInActvLst;
14447 CmLListCp ulInActvLst;
14448 RgSchCmnCell *cellSch = NULLP;
/* Working lists collected while filling UE info: UEs whose DRX
 * inactivity timer must (re)start, and UEs to mark inactive. */
14451 cmLListInit(&dlDrxInactvTmrLst);
14452 cmLListInit(&dlInActvLst);
14453 cmLListInit(&ulInActvLst);
14455 subfrmAlloc = &(cell->sfAllocArr[cell->crntSfIdx]);
/* Resolve the DL subframe that is RG_SCH_CMN_DL_DELTA ahead of the
 * current time — the one whose allocations are being consolidated. */
14457 /* Get Downlink Subframe */
14458 frm = cell->crntTime;
14459 RGSCH_INCR_SUB_FRAME(frm, RG_SCH_CMN_DL_DELTA);
14460 dlSf = rgSCHUtlSubFrmGet(cell, frm);
14462 /* Fill the allocation Info */
14463 rgSCHUtlFillRgInfRarInfo(dlSf, subfrmAlloc, cell);
14466 rgSCHUtlFillRgInfUeInfo(dlSf, cell, &dlDrxInactvTmrLst,
14467 &dlInActvLst, &ulInActvLst);
14468 #ifdef RG_PFS_STATS
14469 cell->totalPrb += dlSf->bwAssigned;
14471 /* Mark the following Ues inactive for UL*/
14472 cellSch = RG_SCH_CMN_GET_CELL(cell);
14474 /* Calling Scheduler specific function with DRX inactive UE list*/
14475 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInActvLst);
14476 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInActvLst);
14479 /*re/start DRX inactivity timer for the UEs*/
14480 (Void)rgSCHDrxStrtInActvTmr(cell,&dlDrxInactvTmrLst,RG_SCH_DRX_DL);
14486 * @brief Initialize the DL Allocation Information Structure.
14490 * Function: rgSCHCmnInitDlRbAllocInfo
14491 * Purpose: Initialize the DL Allocation Information Structure.
14493 * Invoked by: Common Scheduler
14495 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
14499 PRIVATE Void rgSCHCmnInitDlRbAllocInfo
14501 RgSchCmnDlRbAllocInfo *allocInfo
14504 PRIVATE Void rgSCHCmnInitDlRbAllocInfo(allocInfo)
14505 RgSchCmnDlRbAllocInfo *allocInfo;
/* Zero the common-channel allocation records (PCCH, BCCH, RA resp). */
14508 memset(&allocInfo->pcchAlloc, 0, sizeof(RgSchDlRbAlloc));
14509 memset(&allocInfo->bcchAlloc, 0, sizeof(RgSchDlRbAlloc));
14510 memset(allocInfo->raRspAlloc, 0,
14511 RG_SCH_CMN_MAX_CMN_PDCCH*sizeof(RgSchDlRbAlloc));
/* Msg4: pending tx/retx inputs plus scheduled / non-scheduled outputs. */
14513 allocInfo->msg4Alloc.msg4DlSf = NULLP;
14514 cmLListInit(&allocInfo->msg4Alloc.msg4TxLst);
14515 cmLListInit(&allocInfo->msg4Alloc.msg4RetxLst);
14516 cmLListInit(&allocInfo->msg4Alloc.schdMsg4TxLst);
14517 cmLListInit(&allocInfo->msg4Alloc.schdMsg4RetxLst);
14518 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4TxLst);
14519 cmLListInit(&allocInfo->msg4Alloc.nonSchdMsg4RetxLst);
/* CCCH SDU lists follow the same tx/retx + schd/nonSchd pattern. */
14521 allocInfo->ccchSduAlloc.ccchSduDlSf = NULLP;
14522 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduTxLst);
14523 cmLListInit(&allocInfo->ccchSduAlloc.ccchSduRetxLst);
14524 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduTxLst);
14525 cmLListInit(&allocInfo->ccchSduAlloc.schdCcchSduRetxLst);
14526 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduTxLst);
14527 cmLListInit(&allocInfo->ccchSduAlloc.nonSchdCcchSduRetxLst);
/* Dedicated-channel HARQ process lists (incl. SPS variants below). */
14530 allocInfo->dedAlloc.dedDlSf = NULLP;
14531 cmLListInit(&allocInfo->dedAlloc.txHqPLst);
14532 cmLListInit(&allocInfo->dedAlloc.retxHqPLst);
14533 cmLListInit(&allocInfo->dedAlloc.schdTxHqPLst);
14534 cmLListInit(&allocInfo->dedAlloc.schdRetxHqPLst);
14535 cmLListInit(&allocInfo->dedAlloc.nonSchdTxHqPLst);
14536 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxHqPLst);
14538 cmLListInit(&allocInfo->dedAlloc.txRetxHqPLst);
14539 cmLListInit(&allocInfo->dedAlloc.schdTxRetxHqPLst);
14540 cmLListInit(&allocInfo->dedAlloc.nonSchdTxRetxHqPLst);
14542 cmLListInit(&allocInfo->dedAlloc.txSpsHqPLst);
14543 cmLListInit(&allocInfo->dedAlloc.retxSpsHqPLst);
14544 cmLListInit(&allocInfo->dedAlloc.schdTxSpsHqPLst);
14545 cmLListInit(&allocInfo->dedAlloc.schdRetxSpsHqPLst);
14546 cmLListInit(&allocInfo->dedAlloc.nonSchdTxSpsHqPLst);
14547 cmLListInit(&allocInfo->dedAlloc.nonSchdRetxSpsHqPLst);
/* LAA- and error-indication-specific lists (conditionally compiled;
 * directives elided in this extract). */
14551 rgSCHLaaCmnInitDlRbAllocInfo (allocInfo);
14554 cmLListInit(&allocInfo->dedAlloc.errIndTxHqPLst);
14555 cmLListInit(&allocInfo->dedAlloc.schdErrIndTxHqPLst);
14556 cmLListInit(&allocInfo->dedAlloc.nonSchdErrIndTxHqPLst);
14561 * @brief Initialize the UL Allocation Information Structure.
14565 * Function: rgSCHCmnInitUlRbAllocInfo
14566 * Purpose: Initialize the UL Allocation Information Structure.
14568 * Invoked by: Common Scheduler
14570 * @param[out] RgSchCmnUlRbAllocInfo *allocInfo
14574 Void rgSCHCmnInitUlRbAllocInfo
14576 RgSchCmnUlRbAllocInfo *allocInfo
14579 Void rgSCHCmnInitUlRbAllocInfo(allocInfo)
14580 RgSchCmnUlRbAllocInfo *allocInfo;
/* Clear the subframe reference and empty every per-TTI UE list:
 * contention-resolution UEs and ordinary UEs, each with scheduled /
 * non-scheduled output lists. */
14583 allocInfo->sf = NULLP;
14584 cmLListInit(&allocInfo->contResLst);
14585 cmLListInit(&allocInfo->schdContResLst);
14586 cmLListInit(&allocInfo->nonSchdContResLst);
14587 cmLListInit(&allocInfo->ueLst);
14588 cmLListInit(&allocInfo->schdUeLst);
14589 cmLListInit(&allocInfo->nonSchdUeLst);
14595 * @brief Scheduling for PUCCH group power control.
14599 * Function: rgSCHCmnGrpPwrCntrlPucch
14600 * Purpose: This function does group power control for PUCCH
14601 * corresponding to the subframe for which DL UE allocations
14604 * Invoked by: Common Scheduler
14606 * @param[in] RgSchCellCb *cell
14610 PRIVATE Void rgSCHCmnGrpPwrCntrlPucch
14616 PRIVATE Void rgSCHCmnGrpPwrCntrlPucch(cell, dlSf)
/* Thin wrapper: delegate to the power-control module for this DL SF. */
14622 rgSCHPwrGrpCntrlPucch(cell, dlSf);
14628 * @brief Scheduling for PUSCH group power control.
14632 * Function: rgSCHCmnGrpPwrCntrlPusch
14633 * Purpose: This function does group power control, for
14634 * the subframe for which UL allocation has (just) happened.
14636 * Invoked by: Common Scheduler
14638 * @param[in] RgSchCellCb *cell
14639 * @param[in] RgSchUlSf *ulSf
14643 PRIVATE Void rgSCHCmnGrpPwrCntrlPusch
14649 PRIVATE Void rgSCHCmnGrpPwrCntrlPusch(cell, ulSf)
14654 /*removed unused variable *cellSch*/
14655 CmLteTimingInfo frm;
14659 /* Got to pass DL SF corresponding to UL SF, so get that first.
14660 * There is no easy way of getting dlSf by having the RgSchUlSf*,
14661 * so use the UL delta from current time to get the DL SF. */
14662 frm = cell->crntTime;
/* eMTC cells use a different subframe-increment macro (alternate
 * compile-time branch; directives elided in this extract). */
14665 if(cell->emtcEnable == TRUE)
14667 RGSCH_INCR_SUB_FRAME_EMTC(frm, TFU_DLCNTRL_DLDELTA);
14672 RGSCH_INCR_SUB_FRAME(frm, TFU_DLCNTRL_DLDELTA);
14674 /* Del filling of dl.time */
14675 dlSf = rgSCHUtlSubFrmGet(cell, frm);
14677 rgSCHPwrGrpCntrlPusch(cell, dlSf, ulSf);
14682 /* Fix: syed align multiple UEs to refresh at same time */
14683 /***********************************************************
14685 * Func : rgSCHCmnApplyUeRefresh
14687 * Desc : Apply UE refresh in CMN and Specific
14688 * schedulers. Data rates and corresponding
14689 * scratchpad variables are updated.
14697 **********************************************************/
14699 PRIVATE S16 rgSCHCmnApplyUeRefresh
14705 PRIVATE S16 rgSCHCmnApplyUeRefresh(cell, ue)
14710 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
14712 U32 effNonGbrBsr = 0;
/* Refill the UE's effective AMBR budget for the new refresh cycle. */
14716 /* Reset the refresh cycle variables */
14717 ue->ul.effAmbr = ue->ul.cfgdAmbr;
/* Walk LCGs 1..N (LCG 0 is handled separately below): refill GBR
 * budgets and re-derive buffer status per LCG. */
14719 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
14721 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
14723 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
14725 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
14727 cmnLcg->effGbr = cmnLcg->cfgdGbr;
14728 cmnLcg->effDeltaMbr = cmnLcg->deltaMbr;
/* BS is capped at the refreshed GBR+MBR budget. */
14729 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
14730 /* Considering GBR LCG will be prioritised by UE */
14731 effGbrBsr += cmnLcg->bs;
14732 }/* Else no remaining BS so nonLcg0 will be updated when BSR will be received */
/* Non-GBR LCG: BS is capped at the UE's effective AMBR. */
14735 effNonGbrBsr += cmnLcg->reportedBs;
14736 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
14740 effNonGbrBsr = RGSCH_MIN(effNonGbrBsr,ue->ul.effAmbr);
14741 ue->ul.nonGbrLcgBs = effNonGbrBsr;
/* Effective BSR = non-LCG0 total plus LCG0's own buffer status. */
14743 ue->ul.nonLcg0Bs = effGbrBsr + effNonGbrBsr;
14744 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
14745 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
14748 /* call scheduler specific event handlers
14749 * for refresh timer expiry */
14750 cellSch->apisUl->rgSCHUlUeRefresh(cell, ue);
14751 cellSch->apisDl->rgSCHDlUeRefresh(cell, ue);
14756 /***********************************************************
14758 * Func : rgSCHCmnTmrExpiry
14760 * Desc : Adds an UE to refresh queue, so that the UE is
14761 * periodically triggered to refresh it's GBR and
14770 **********************************************************/
14772 PRIVATE S16 rgSCHCmnTmrExpiry
14774 PTR cb, /* Pointer to timer control block */
14775 S16 tmrEvnt /* Timer Event */
14778 PRIVATE S16 rgSCHCmnTmrExpiry(cb, tmrEvnt)
14779 PTR cb; /* Pointer to timer control block */
14780 S16 tmrEvnt; /* Timer Event */
14783 RgSchUeCb *ue = (RgSchUeCb *)cb;
14784 RgSchCellCb *cell = ue->cell;
14785 #if (ERRCLASS & ERRCLS_DEBUG)
/* Debug-only guard: the refresh timer is the only event expected. */
14789 #if (ERRCLASS & ERRCLS_DEBUG)
14790 if (tmrEvnt != RG_SCH_CMN_EVNT_UE_REFRESH)
14792 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnTmrExpiry(): Invalid "
14793 "timer event CRNTI:%d",ue->ueId);
/* Apply the refresh now, then re-arm the UE on the refresh queue so
 * the cycle repeats every RG_SCH_CMN_REFRESH_TIME. */
14800 rgSCHCmnApplyUeRefresh(cell, ue);
14802 rgSCHCmnAddUeToRefreshQ(cell, ue, RG_SCH_CMN_REFRESH_TIME);
14807 /***********************************************************
14809 * Func : rgSCHCmnTmrProc
14811 * Desc : Timer entry point per cell. Timer
14812 * processing is triggered at every frame boundary
14821 **********************************************************/
14823 PRIVATE S16 rgSCHCmnTmrProc
14828 PRIVATE S16 rgSCHCmnTmrProc(cell)
14832 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
14833 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
14834 /* Moving the assignment of scheduler pointer
14835 to available scope for optimization */
/* Run once per radio frame (slot 0 of each frame). */
14837 if ((cell->crntTime.slot % RGSCH_NUM_SUB_FRAMES_5G) == 0)
14839 /* Reset the counters periodically */
14840 if ((cell->crntTime.sfn % RG_SCH_CMN_CSG_REFRESH_TIME) == 0)
14842 RG_SCH_RESET_HCSG_DL_PRB_CNTR(cmnDlCell);
14843 RG_SCH_RESET_HCSG_UL_PRB_CNTR(cmnUlCell);
14845 if ((cell->crntTime.sfn % RG_SCH_CMN_OVRLDCTRL_REFRESH_TIME) == 0)
/* Exponential moving average of cell throughput: 95% previous,
 * 5% bytes observed this window. */
14848 cell->measurements.ulTpt = ((cell->measurements.ulTpt * 95) + ( cell->measurements.ulBytesCnt * 5))/100;
14849 cell->measurements.dlTpt = ((cell->measurements.dlTpt * 95) + ( cell->measurements.dlBytesCnt * 5))/100;
14851 rgSCHUtlCpuOvrLdAdjItbsCap(cell);
14852 /* reset cell level tpt measurements for next cycle */
14853 cell->measurements.ulBytesCnt = 0;
14854 cell->measurements.dlBytesCnt = 0;
14856 /* Comparing with Zero instead of % is being done for efficiency.
14857 * If Timer resolution changes then accordingly update the
14858 * macro RG_SCH_CMN_REFRESH_TIMERES */
/* Drive the common timer queue; expiries land in rgSCHCmnTmrExpiry. */
14859 RgSchCmnCell *sched = RG_SCH_CMN_GET_CELL(cell);
14860 cmPrcTmr(&sched->tmrTqCp, sched->tmrTq, (PFV)rgSCHCmnTmrExpiry);
14867 /***********************************************************
14869 * Func : rgSchCmnUpdCfiVal
14871 * Desc : Update the CFI value if CFI switch was done
14879 **********************************************************/
14881 PRIVATE Void rgSchCmnUpdCfiVal
14887 PRIVATE Void rgSchCmnUpdCfiVal(cell, delta)
14893 CmLteTimingInfo pdsch;
14894 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Resolve the DL subframe 'delta' subframes ahead of current time. */
14904 pdsch = cell->crntTime;
14905 RGSCH_INCR_SUB_FRAME(pdsch, delta);
14906 dlSf = rgSCHUtlSubFrmGet(cell, pdsch);
14907 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
14908 *change happens in that SF then UL PDCCH allocation happens with old CFI
14909 *but CFI in control Req goes updated one since it was stored in the CELL
14911 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
/* A CFI switch is pending (pdcchSfIdx armed by rgSchCmnUpdtPdcchSfIdx). */
14912 if(cell->dynCfiCb.pdcchSfIdx != 0xFF)
/* The two index computations are alternate TDD/FDD compile-time
 * branches (directives elided in this extract). */
14915 dlIdx = rgSCHUtlGetDlSfIdx(cell, &pdsch);
14917 dlIdx = (((pdsch.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (pdsch.slot % RGSCH_NUM_SUB_FRAMES));
14918 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
14920 /* If current downlink subframe index is same as pdcch SF index,
14921 * perform the switching of CFI in this subframe */
14922 if(cell->dynCfiCb.pdcchSfIdx == dlIdx)
14924 cellCmnDl->currCfi = cellCmnDl->newCfi;
14925 cell->dynCfiCb.pdcchSfIdx = 0xFF;
14927 /* Updating the nCce value based on the new CFI */
14929 splSfCfi = cellCmnDl->newCfi;
/* TDD: recompute nCce for every DL subframe, with the special
 * subframe using its own CFI derived from the DL bandwidth. */
14930 for(idx = 0; idx < cell->numDlSubfrms; idx++)
14932 tddSf = cell->subFrms[idx];
14934 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][tddSf->sfNum];
14936 if(tddSf->sfType == RG_SCH_SPL_SF_DATA)
14938 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, splSfCfi);
14940 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][splSfCfi];
14944 tddSf->nCce = cell->dynCfiCb.cfi2NCceTbl[mPhich][cellCmnDl->currCfi];
14947 /* Setting the switch over window length based on config index.
14948 * During switch over period all the UL transmissions are Acked
14950 cell->dynCfiCb.switchOvrWinLen =
14951 rgSchCfiSwitchOvrWinLen[cell->ulDlCfgIdx];
/* FDD branch: single nCce update and fixed switch-over window. */
14953 cell->nCce = cell->dynCfiCb.cfi2NCceTbl[0][cellCmnDl->currCfi];
14954 /* Fix for DCFI FLE issue: when DL delta is 1 and UL delta is 0 and CFI
14955 *change happens in that SF then UL PDCCH allocation happens with old CFI
14956 *but CFI in control Req goes updated one since it was stored in the CELL
14958 dlSf->pdcchInfo.currCfi = cellCmnDl->currCfi;
14959 cell->dynCfiCb.switchOvrWinLen = rgSchCfiSwitchOvrWinLen[7];
14967 /***********************************************************
14969 * Func : rgSchCmnUpdtPdcchSfIdx
14971 * Desc : Update the switch over window length
14979 **********************************************************/
/* NOTE(review): the two declaration sets below are the TDD (with sfNum)
 * and FDD variants of this function; the #ifdef lines separating them
 * are elided in this extract. */
14982 PRIVATE Void rgSchCmnUpdtPdcchSfIdx
14989 PRIVATE Void rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, sfNum)
14996 PRIVATE Void rgSchCmnUpdtPdcchSfIdx
15002 PRIVATE Void rgSchCmnUpdtPdcchSfIdx(cell, dlIdx)
15011 /* Resetting the parameters on CFI switching */
15012 cell->dynCfiCb.cceUsed = 0;
15013 cell->dynCfiCb.lowCceCnt = 0;
15015 cell->dynCfiCb.cceFailSum = 0;
15016 cell->dynCfiCb.cceFailCnt = 0;
15017 cell->dynCfiCb.prevCceFailIdx = 0;
/* Block further CFI adjustments until the switch-over completes. */
15019 cell->dynCfiCb.switchOvrInProgress = TRUE;
15021 for(idx = 0; idx < cell->dynCfiCb.numFailSamples; idx++)
15023 cell->dynCfiCb.cceFailSamples[idx] = 0;
15026 cell->dynCfiCb.ttiCnt = 0;
15028 cell->dynCfiCb.cfiSwitches++;
15029 cfiSwitchCnt = cell->dynCfiCb.cfiSwitches;
/* Arm the subframe index at which the new CFI takes effect;
 * TDD uses a config-dependent increment table, FDD a fixed delta. */
15032 cell->dynCfiCb.pdcchSfIdx = (dlIdx +
15033 rgSchTddPdcchSfIncTbl[cell->ulDlCfgIdx][sfNum]) % cell->numDlSubfrms;
15035 cell->dynCfiCb.pdcchSfIdx = (dlIdx + RG_SCH_CFI_APPLY_DELTA) % \
15036 RGSCH_NUM_DL_slotS;
15040 /***********************************************************
15042 * Func : rgSchCmnUpdCfiDb
15044 * Desc : Update the counters related to dynamic
15045 * CFI feature in cellCb.
15053 **********************************************************/
15055 Void rgSchCmnUpdCfiDb
15061 Void rgSchCmnUpdCfiDb(cell, delta)
15066 CmLteTimingInfo frm;
15072 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15073 U8 nCceLowerCfi = 0;
15081 /* Get Downlink Subframe */
15082 frm = cell->crntTime;
15083 RGSCH_INCR_SUB_FRAME(frm, delta);
/* TDD branch: table-based index and HI/DCI0 applicability lookup. */
15086 dlIdx = rgSCHUtlGetDlSfIdx(cell, &frm);
15087 dlSf = cell->subFrms[dlIdx];
15088 isHiDci0 = rgSchTddPuschTxKTbl[cell->ulDlCfgIdx][dlSf->sfNum];
15090 /* Changing the indexing
15091 so that proper subframe is selected */
15092 dlIdx = (((frm.sfn & 1) * RGSCH_NUM_SUB_FRAMES) + (frm.slot % RGSCH_NUM_SUB_FRAMES));
15093 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, cell->subFrms, dlIdx);
15094 dlSf = cell->subFrms[dlIdx];
15097 currCfi = cellSch->dl.currCfi;
/* Only adjust CFI when no earlier switch is still in progress. */
15099 if(!cell->dynCfiCb.switchOvrInProgress)
/* Dynamic CFI disabled: converge step-by-step to the configured CFI. */
15102 if(!cell->dynCfiCb.isDynCfiEnb)
15104 if(currCfi != cellSch->cfiCfg.cfi)
15106 if(currCfi < cellSch->cfiCfg.cfi)
15108 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
15109 cfiIncr = cell->dynCfiCb.cfiIncr;
15113 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
15114 cfiDecr = cell->dynCfiCb.cfiDecr;
15121 /* Setting ttiMod to 0 for ttiCnt > 1000 in case if this
15122 * function was not called in UL subframe*/
15123 if(cell->dynCfiCb.ttiCnt > RGSCH_CFI_TTI_MON_INTRVL)
15130 ttiMod = cell->dynCfiCb.ttiCnt % RGSCH_CFI_TTI_MON_INTRVL;
15133 dlSf->dlUlBothCmplt++;
/* Evaluate step-up/step-down only once both DL and UL scheduling for
 * this SF are complete (or no HI/DCI0 is carried in it). */
15135 if((dlSf->dlUlBothCmplt == 2) || (!isHiDci0))
15137 if(dlSf->dlUlBothCmplt == 2)
15140 /********************STEP UP CRITERIA********************/
15141 /* Updating the CCE failure count parameter */
15142 cell->dynCfiCb.cceFailCnt += dlSf->isCceFailure;
15143 cell->dynCfiCb.cceFailSum += dlSf->isCceFailure;
15145 /* Check if cfi step up can be performed */
15146 if(currCfi < cell->dynCfiCb.maxCfi)
15148 if(cell->dynCfiCb.cceFailSum >= cell->dynCfiCb.cfiStepUpTtiCnt)
15150 RG_SCH_CFI_STEP_UP(cell, cellSch, currCfi)
15151 cfiIncr = cell->dynCfiCb.cfiIncr;
15156 /********************STEP DOWN CRITERIA********************/
15158 /* Updating the no. of CCE used in this dl subframe */
15159 cell->dynCfiCb.cceUsed += dlSf->cceCnt;
15161 if(currCfi > RGSCH_MIN_CFI_VAL)
15163 /* calculating the number of CCE for next lower CFI */
15165 mPhich = rgSchTddPhichMValTbl[cell->ulDlCfgIdx][dlSf->sfNum];
15166 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[mPhich][currCfi-1];
15168 nCceLowerCfi = cell->dynCfiCb.cfi2NCceTbl[0][currCfi-1];
15170 if(dlSf->cceCnt < nCceLowerCfi)
15172 /* Updating the count of TTIs in which no. of CCEs
15173 * used were less than the CCEs of next lower CFI */
15174 cell->dynCfiCb.lowCceCnt++;
/* Step down only if: no CCE failures in the window, enough
 * low-usage TTIs, and total CCE usage under the threshold
 * percentage of the lower CFI's capacity. */
15179 totalCce = (nCceLowerCfi * cell->dynCfiCb.cfiStepDownTtiCnt *
15180 RGSCH_CFI_CCE_PERCNTG)/100;
15182 if((!cell->dynCfiCb.cceFailSum) &&
15183 (cell->dynCfiCb.lowCceCnt >=
15184 cell->dynCfiCb.cfiStepDownTtiCnt) &&
15185 (cell->dynCfiCb.cceUsed < totalCce))
15187 RG_SCH_CFI_STEP_DOWN(cell, cellSch, currCfi)
15188 cfiDecr = cell->dynCfiCb.cfiDecr;
/* Sliding-window bookkeeping of CCE failures per sample period. */
15194 cceFailIdx = ttiMod/cell->dynCfiCb.failSamplePrd;
15196 if(cceFailIdx != cell->dynCfiCb.prevCceFailIdx)
15198 /* New sample period has started. Subtract the old count
15199 * from the new sample period */
15200 cell->dynCfiCb.cceFailSum -= cell->dynCfiCb.cceFailSamples[cceFailIdx];
15202 /* Store the previous sample period data */
15203 cell->dynCfiCb.cceFailSamples[cell->dynCfiCb.prevCceFailIdx]
15204 = cell->dynCfiCb.cceFailCnt;
15206 cell->dynCfiCb.prevCceFailIdx = cceFailIdx;
15208 /* Resetting the CCE failure count as zero for next sample period */
15209 cell->dynCfiCb.cceFailCnt = 0;
15214 /* Resetting the parameters after Monitoring Interval expired */
15215 cell->dynCfiCb.cceUsed = 0;
15216 cell->dynCfiCb.lowCceCnt = 0;
15217 cell->dynCfiCb.ttiCnt = 0;
15220 cell->dynCfiCb.ttiCnt++;
/* A CFI change was decided above: arm the switch-over state. */
15224 if(cellSch->dl.newCfi != cellSch->dl.currCfi)
15227 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx, dlSf->sfNum);
15229 rgSchCmnUpdtPdcchSfIdx(cell, dlIdx);
15236 * @brief Dl Scheduler for Broadcast and Common channel scheduling.
15240 * Function: rgSCHCmnDlCommonChSch
15241 * Purpose: This function schedules DL Common channels for LTE.
15242 * Invoked by TTI processing in TOM. Scheduling is done for
15243 * BCCH, PCCH, Msg4, CCCH SDU, RAR in that order
15245 * Invoked by: TOM (TTI processing)
15247 * @param[in] RgSchCellCb *cell
15251 Void rgSCHCmnDlCommonChSch
15256 Void rgSCHCmnDlCommonChSch(cell)
15260 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15263 cellSch->apisDl->rgSCHDlTickForPdbTrkng(cell);
/* Apply any pending CFI switch for the DL-delta subframe first, so
 * the common channels below are scheduled against the right nCce. */
15264 rgSchCmnUpdCfiVal(cell, RG_SCH_CMN_DL_DELTA);
15266 /* handle Inactive UEs for DL */
15267 rgSCHCmnHdlDlInactUes(cell);
15269 /* Send a Tick to Refresh Timer */
15270 rgSCHCmnTmrProc(cell);
15272 if (cell->isDlDataAllwd && (cell->stopSiSch == FALSE))
15274 rgSCHCmnInitRbAlloc(cell);
15275 /* Perform DL scheduling of BCCH, PCCH */
15276 rgSCHCmnDlBcchPcchAlloc(cell);
/* SI window countdown continues even when SI scheduling is stopped. */
15280 if(cell->siCb.inWindow != 0)
15282 cell->siCb.inWindow--;
15285 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
15287 rgSCHCmnDlCcchRarAlloc(cell);
15293 * @brief Scheduler invocation per TTI.
15297 * Function: rgSCHCmnUlSch
15298 * Purpose: This function implements UL scheduler alone. This is to
15299 * be able to perform scheduling with more flexibility.
15301 * Invoked by: TOM (TTI processing)
15303 * @param[in] RgSchCellCb *cell
15312 Void rgSCHCmnUlSch(cell)
15316 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* LAA SCells carry no UL scheduling here. */
15321 if(TRUE == rgSCHLaaSCellEnabled(cell))
/* Only schedule when there is a valid UL subframe index this TTI. */
15327 if(cellSch->ul.schdIdx != RGSCH_INVALID_INFO)
15329 rgSchCmnUpdCfiVal(cell, TFU_ULCNTRL_DLDELTA);
15331 /* Handle Inactive UEs for UL */
15332 rgSCHCmnHdlUlInactUes(cell);
15333 /* Perform UL Scheduling EVERY TTI */
15334 rgSCHCmnUlAlloc(cell);
15336 /* Calling function to update CFI parameters*/
15337 rgSchCmnUpdCfiDb(cell, TFU_ULCNTRL_DLDELTA);
15339 if(cell->dynCfiCb.switchOvrWinLen > 0)
15341 /* Decrementing the switchover window length */
15342 cell->dynCfiCb.switchOvrWinLen--;
/* Switch-over window just closed: apply any deferred dynamic-CFI
 * reconfiguration and clear the in-progress flag. */
15344 if(!cell->dynCfiCb.switchOvrWinLen)
15346 if(cell->dynCfiCb.dynCfiRecfgPend)
15348 /* Toggling the Dynamic CFI enabling */
15349 cell->dynCfiCb.isDynCfiEnb ^= 1;
15350 rgSCHDynCfiReCfg(cell, cell->dynCfiCb.isDynCfiEnb);
15351 cell->dynCfiCb.dynCfiRecfgPend = FALSE;
15353 cell->dynCfiCb.switchOvrInProgress = FALSE;
/* No UL SF this TTI: still give SPS its per-TTI tick. */
15361 rgSCHCmnSpsUlTti(cell, NULLP);
15371 * @brief This function updates the scheduler with service for an UE.
15375 * Function: rgSCHCmnDlDedBoUpd
15376 * Purpose: This function should be called whenever there is a
15377 * change BO for a service.
15379 * Invoked by: BO and Scheduler
15381 * @param[in] RgSchCellCb* cell
15382 * @param[in] RgSchUeCb* ue
15383 * @param[in] RgSchDlLcCb* svc
15388 Void rgSCHCmnDlDedBoUpd
15395 Void rgSCHCmnDlDedBoUpd(cell, ue, svc)
15401 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15403 /* RACHO : if UEs idle time exceeded and a BO update
15404 * is received, then add UE to the pdcch Order Q */
15405 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
15407 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue, cell);
15408 /* If PDCCH order is already triggered and we are waiting for
15409 * RACH from UE then do not add to PdcchOdrQ. */
15410 if (ueDl->rachInfo.rapIdLnk.node == NULLP)
15412 rgSCHCmnDlAdd2PdcchOdrQ(cell, ue);
15418 /* If SPS service, invoke SPS module */
15419 if (svc->dlLcSpsCfg.isSpsEnabled)
15421 rgSCHCmnSpsDlDedBoUpd(cell, ue, svc);
15422 /* Note: Return from here, no update needed in other schedulers */
/* eMTC UEs are routed to the eMTC-specific DL scheduler API table. */
15427 if((cell->emtcEnable)&&(TRUE == ue->isEmtcUe))
15429 cellSch->apisEmtcDl->rgSCHDlDedBoUpd(cell, ue, svc);
15430 //printf("rgSCHEMTCDlDedBoUpd\n");
/* Default path: specific DL scheduler, then SCell BO propagation. */
15435 cellSch->apisDl->rgSCHDlDedBoUpd(cell, ue, svc);
15440 rgSCHSCellDlDedBoUpd(cell, ue, svc);
15448 * @brief Removes an UE from Cell's TA List.
15452 * Function: rgSCHCmnRmvFrmTaLst
15453 * Purpose: Removes an UE from Cell's TA List.
15455 * Invoked by: Specific Scheduler
15457 * @param[in] RgSchCellCb* cell
15458 * @param[in] RgSchUeCb* ue
15463 Void rgSCHCmnRmvFrmTaLst
15469 Void rgSCHCmnRmvFrmTaLst(cell, ue)
15474 RgSchCmnDlCell *cellCmnDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* eMTC UEs live on a separate TA list handled by the eMTC helper. */
15477 if(cell->emtcEnable && ue->isEmtcUe)
15479 rgSCHEmtcRmvFrmTaLst(cellCmnDl,ue);
/* Unlink from the common TA list and clear the back-pointer so the
 * node can be safely re-queued later. */
15484 cmLListDelFrm(&cellCmnDl->taLst, &ue->dlTaLnk);
15485 ue->dlTaLnk.node = (PTR)NULLP;
15490 /* Fix: syed Remove the msg4Proc from cell
15491 * msg4Retx Queue. I have used CMN scheduler function
15492 * directly. Please define a new API and call this
15493 * function through that. */
15496 * @brief This function removes MSG4 HARQ process from cell RETX Queues.
15500 * Function: rgSCHCmnDlMsg4ProcRmvFrmRetx
15501 * Purpose: This function removes MSG4 HARQ process from cell RETX Queues.
15503 * Invoked by: UE/RACB deletion.
15505 * @param[in] RgSchCellCb* cell
15506 * @param[in] RgSchDlHqProc* hqP
/* Purpose: detach a MSG4 (or CCCH-SDU) HARQ process from the cell's
 * retransmission queues during UE/RACB deletion.
 * Membership is detected via tbInfo[0].ccchSchdInfo.retxLnk.node being
 * non-NULL; the process is removed from msg4RetxLst or ccchSduRetxLst
 * depending on which role (msg4Proc vs ccchSduProc) it plays in its HARQ
 * entity, and the link node is reset to NULLP. */
15511 Void rgSCHCmnDlMsg4ProcRmvFrmRetx
15514 RgSchDlHqProcCb *hqP
15517 Void rgSCHCmnDlMsg4ProcRmvFrmRetx(cell, hqP)
15519 RgSchDlHqProcCb *hqP;
15522 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15524 if (hqP->tbInfo[0].ccchSchdInfo.retxLnk.node)
15526 if (hqP->hqE->msg4Proc == hqP)
15528 cmLListDelFrm(&cellSch->dl.msg4RetxLst, \
15529 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15530 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
15533 else if(hqP->hqE->ccchSduProc == hqP)
15535 cmLListDelFrm(&cellSch->dl.ccchSduRetxLst,
15536 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15537 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)NULLP;
15546 * @brief This function adds a HARQ process for retx.
15550 * Function: rgSCHCmnDlProcAddToRetx
15551 * Purpose: This function adds a HARQ process to retransmission
15552 * queue. This may be performed when a HARQ ack is
15555 * Invoked by: HARQ feedback processing
15557 * @param[in] RgSchCellCb* cell
15558 * @param[in] RgSchDlHqProc* hqP
/* Purpose: queue a DL HARQ process for retransmission after a NACK/DTX.
 * MSG4 and CCCH-SDU processes go onto the cell-level msg4RetxLst /
 * ccchSduRetxLst (link node set to the hqP itself); SPS HARQ processes are
 * handed to the SPS module; all remaining (dedicated) processes go through
 * the EMTC- or common-DL scheduler's rgSCHDlProcAddToRetx hook. */
15563 Void rgSCHCmnDlProcAddToRetx
15566 RgSchDlHqProcCb *hqP
15569 Void rgSCHCmnDlProcAddToRetx(cell, hqP)
15571 RgSchDlHqProcCb *hqP;
15574 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
15576 if (hqP->hqE->msg4Proc == hqP) /* indicating msg4 transmission */
15578 cmLListAdd2Tail(&cellSch->dl.msg4RetxLst, \
15579 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15580 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
15583 else if(hqP->hqE->ccchSduProc == hqP)
15585 /*If CCCH SDU being transmitted without cont res CE*/
15586 cmLListAdd2Tail(&cellSch->dl.ccchSduRetxLst,
15587 &hqP->tbInfo[0].ccchSchdInfo.retxLnk);
15588 hqP->tbInfo[0].ccchSchdInfo.retxLnk.node = (PTR)hqP;
15594 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
15596 /* Invoke SPS module for SPS HARQ proc re-transmission handling */
15597 rgSCHCmnSpsDlProcAddToRetx(cell, hqP);
15600 #endif /* LTEMAC_SPS */
15602 if((TRUE == cell->emtcEnable)
15603 && (TRUE == hqP->hqE->ue->isEmtcUe))
15605 cellSch->apisEmtcDl->rgSCHDlProcAddToRetx(cell, hqP);
15610 cellSch->apisDl->rgSCHDlProcAddToRetx(cell, hqP);
15618 * @brief This function performs RI validation and
15619 * updates it to the ueCb.
15623 * Function: rgSCHCmnDlSetUeRi
15624 * Purpose: This function performs RI validation and
15625 * updates it to the ueCb.
15627 * Invoked by: rgSCHCmnDlCqiInd
15629 * @param[in] RgSchCellCb *cell
15630 * @param[in] RgSchUeCb *ue
15632 * @param[in] Bool isPeriodic
/* Purpose: validate a reported Rank Indicator (RI) and store it in the UE's
 * DL MIMO state.
 *  - Ignores RI while a Tx-mode transition is pending (txModeTransCmplt).
 *  - Clamps RI to the cell's antenna-port count and the UE category's max
 *    Tx layers.
 *  - For 2-port TM4, sanitizes a stale PMI when RI switches 1 -> 2.
 *  - Tracks periodic vs aperiodic RI consistency via cqiCb->perRiVal and
 *    invalidateCqi (aperiodic RI differing from the last periodic one
 *    invalidates CQI until the next periodic report).
 *  - Sets/clears the forced-transmit-diversity flag (TD_RI_1) based on
 *    RI and TM3. */
15637 PRIVATE Void rgSCHCmnDlSetUeRi
15645 PRIVATE Void rgSCHCmnDlSetUeRi(cell, ue, ri, isPer)
15652 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15653 RgSchCmnUeInfo *ueSchCmn = RG_SCH_CMN_GET_CMN_UE(ue);
15656 RgSchUePCqiCb *cqiCb = RG_SCH_GET_UE_CELL_CQI_CB(ue,cell);
15661 /* FIX for RRC Reconfiguration issue */
15662 /* ccpu00140894- During Tx Mode transition RI report will not be entertained for
15663 * the period during which SCH expects the UE to complete the TX mode transition*/
15664 if (ue->txModeTransCmplt == FALSE)
15669 /* Restrict the Number of TX layers to cell->numTxAntPorts.
15670 * Protection from invalid RI values. */
15671 ri = RGSCH_MIN(ri, cell->numTxAntPorts);
15673 /* Special case of converting PMI to sane value when
15674 * there is a switch in RI from 1 to 2 and PMI reported
15675 * for RI=1 is invalid for RI=2 */
15676 if ((cell->numTxAntPorts == 2) && (ue->mimoInfo.txMode == RGR_UE_TM_4))
15678 if ((ri == 2) && ( ueDl->mimoInfo.ri == 1))
15680 ueDl->mimoInfo.pmi = (ueDl->mimoInfo.pmi < 2)? 1:2;
15684 /* Restrict the Number of TX layers according to the UE Category */
15685 ueDl->mimoInfo.ri = RGSCH_MIN(ri, rgUeCatTbl[ueSchCmn->ueCat].maxTxLyrs);
/* Per-cell and aggregate RI counters for TeNB statistics (index ri-1). */
15687 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].riCnt[ueDl->mimoInfo.ri-1]++;
15688 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
15692 ue->tenbStats->stats.nonPersistent.sch[0].riCnt[ueDl->mimoInfo.ri-1]++;
15693 cell->tenbStats->sch.riCnt[ueDl->mimoInfo.ri-1]++;
15699 /* If RI is from Periodic CQI report */
15700 cqiCb->perRiVal = ueDl->mimoInfo.ri;
15701 /* Reset at every Periodic RI Reception */
15702 cqiCb->invalidateCqi = FALSE;
15706 /* If RI is from Aperiodic CQI report */
15707 if (cqiCb->perRiVal != ueDl->mimoInfo.ri)
15709 /* if this aperRI is different from last reported
15710 * perRI then invalidate all CQI reports till next
15712 cqiCb->invalidateCqi = TRUE;
15716 cqiCb->invalidateCqi = FALSE;
15721 if (ueDl->mimoInfo.ri > 1)
15723 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
15725 else if (ue->mimoInfo.txMode == RGR_UE_TM_3) /* ri == 1 */
15727 RG_SCH_CMN_SET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_RI_1);
15735 * @brief This function performs PMI validation and
15736 * updates it to the ueCb.
15740 * Function: rgSCHCmnDlSetUePmi
15741 * Purpose: This function performs PMI validation and
15742 * updates it to the ueCb.
15744 * Invoked by: rgSCHCmnDlCqiInd
15746 * @param[in] RgSchCellCb *cell
15747 * @param[in] RgSchUeCb *ue
15748 * @param[in] U8 pmi
/* Purpose: validate a reported PMI against the current antenna config and
 * rank, then store it in the UE's DL MIMO state.
 * For 2 Tx ports with RI=2, reported PMI 2/3 are invalid (2-layer SM) and
 * the stored value is offset by one; 4-port PMI is stored as-is. On
 * success the "no PMI" forced-TD flag is cleared. Returns S16 status —
 * callers (e.g. mode31 handling) treat non-ROK as "revert RI".
 * NOTE(review): failure-return lines are elided in this extract. */
15753 PRIVATE S16 rgSCHCmnDlSetUePmi
15760 PRIVATE S16 rgSCHCmnDlSetUePmi(cell, ue, pmi)
15766 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15768 if (ue->txModeTransCmplt == FALSE)
15773 if (cell->numTxAntPorts == 2)
15779 if (ueDl->mimoInfo.ri == 2)
15781 /*ccpu00118150 - MOD - changed pmi value validation from 0 to 2*/
15782 /* PMI 2 and 3 are invalid incase of 2 TxAnt and 2 Layered SM */
15783 if (pmi == 2 || pmi == 3)
15787 ueDl->mimoInfo.pmi = pmi+1;
15791 ueDl->mimoInfo.pmi = pmi;
15794 else if (cell->numTxAntPorts == 4)
15800 ueDl->mimoInfo.pmi = pmi;
15802 /* Reset the No PMI Flag in forceTD */
15803 RG_SCH_CMN_UNSET_FORCE_TD(ue, cell, RG_SCH_CMN_TD_NO_PMI);
15808 * @brief This function Updates the DL CQI on PUCCH for the UE.
15812 * Function: rgSCHCmnDlProcCqiMode10
15814 * This function updates the DL CQI on PUCCH for the UE.
15816 * Invoked by: rgSCHCmnDlCqiOnPucchInd
15818 * Processing Steps:
15820 * @param[in] RgSchCellCb *cell
15821 * @param[in] RgSchUeCb *ue
15822 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH mode 1-0 report (wideband CQI, no PMI).
 * A CQI in (0, RG_SCH_CMN_MAX_CQI) is copied to both codeword slots and
 * flags isCqiAvail (RGR_CQI_REPT builds only); an RI report is validated
 * and forwarded to rgSCHCmnDlSetUeRi, invalid RI is logged.
 * The two prototype sets below are the RGR_CQI_REPT / non-RGR_CQI_REPT
 * variants (4 vs 3 parameters). */
15827 #ifdef RGR_CQI_REPT
15829 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10
15833 TfuDlCqiPucch *pucchCqi,
15837 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail)
15840 TfuDlCqiPucch *pucchCqi;
15845 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10
15849 TfuDlCqiPucch *pucchCqi
15852 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi)
15855 TfuDlCqiPucch *pucchCqi;
15859 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15861 if (pucchCqi->u.mode10Info.type == TFU_RPT_CQI)
15863 /*ccpu00109787 - ADD - Check for non-zero CQI*/
15864 /* Checking whether the decoded CQI is a value between 1 and 15*/
15865 if((pucchCqi->u.mode10Info.u.cqi) && (pucchCqi->u.mode10Info.u.cqi
15866 < RG_SCH_CMN_MAX_CQI))
15868 ueDl->cqiFlag = TRUE;
15869 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode10Info.u.cqi;
15870 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
15871 /* ccpu00117452 - MOD - Changed macro name from
15872 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
15873 #ifdef RGR_CQI_REPT
15874 *isCqiAvail = TRUE;
15882 else if (pucchCqi->u.mode10Info.type == TFU_RPT_RI)
15884 if ( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode10Info.u.ri) )
15886 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode10Info.u.ri,
15891 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
15892 pucchCqi->u.mode10Info.u.ri,ue->ueId);
15899 * @brief This function Updates the DL CQI on PUCCH for the UE.
15903 * Function: rgSCHCmnDlProcCqiMode11
15905 * This function updates the DL CQI on PUCCH for the UE.
15907 * Invoked by: rgSCHCmnDlCqiOnPucchInd
15909 * Processing Steps:
15910 * Process CQI MODE 11
15911 * @param[in] RgSchCellCb *cell
15912 * @param[in] RgSchUeCb *ue
15913 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH mode 1-1 report (wideband CQI + PMI).
 * Invalidates stale PUSCH feedback, stores a valid CW0 CQI, derives CW1 CQI
 * from the wideband differential (RG_SCH_UPDT_CW2_CQI) when present —
 * also flagging is2ndCwCqiAvail — and forwards the PMI to
 * rgSCHCmnDlSetUePmi. RI reports are validated and forwarded to
 * rgSCHCmnDlSetUeRi; invalid RI is logged. */
15918 #ifdef RGR_CQI_REPT
15920 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11
15924 TfuDlCqiPucch *pucchCqi,
15926 Bool *is2ndCwCqiAvail
15929 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail, is2ndCwCqiAvail)
15932 TfuDlCqiPucch *pucchCqi;
15934 Bool *is2ndCwCqiAvail;
15938 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11
15942 TfuDlCqiPucch *pucchCqi
15945 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi)
15948 TfuDlCqiPucch *pucchCqi;
15952 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
15954 if (pucchCqi->u.mode11Info.type == TFU_RPT_CQI)
15956 ue->mimoInfo.puschFdbkVld = FALSE;
15957 /*ccpu00109787 - ADD - Check for non-zero CQI*/
15958 if((pucchCqi->u.mode11Info.u.cqi.cqi) &&
15959 (pucchCqi->u.mode11Info.u.cqi.cqi < RG_SCH_CMN_MAX_CQI))
15961 ueDl->cqiFlag = TRUE;
15962 /* ccpu00117452 - MOD - Changed macro name from
15963 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
15964 #ifdef RGR_CQI_REPT
15965 *isCqiAvail = TRUE;
15967 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode11Info.u.cqi.cqi;
15968 if (pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.pres)
15970 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
15971 ueDl->mimoInfo.cwInfo[1].cqi, \
15972 pucchCqi->u.mode11Info.u.cqi.wideDiffCqi.val);
15973 #ifdef RGR_CQI_REPT
15974 /* ccpu00117259 - ADD - Considering second codeword CQI info
15975 incase of MIMO for CQI Reporting */
15976 *is2ndCwCqiAvail = TRUE;
15984 rgSCHCmnDlSetUePmi(cell, ue, \
15985 pucchCqi->u.mode11Info.u.cqi.pmi);
15987 else if (pucchCqi->u.mode11Info.type == TFU_RPT_RI)
15989 if( RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode11Info.u.ri))
15991 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode11Info.u.ri,
15996 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
15997 pucchCqi->u.mode11Info.u.ri,ue->ueId);
16004 * @brief This function Updates the DL CQI on PUCCH for the UE.
16008 * Function: rgSCHCmnDlProcCqiMode20
16010 * This function updates the DL CQI on PUCCH for the UE.
16012 * Invoked by: rgSCHCmnDlCqiOnPucchInd
16014 * Processing Steps:
16015 * Process CQI MODE 20
16016 * @param[in] RgSchCellCb *cell
16017 * @param[in] RgSchUeCb *ue
16018 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH mode 2-0 report (UE-selected subband, no PMI).
 * Only the wideband part is consumed here: a valid wideband CQI is copied
 * to both codeword slots and isCqiAvail is flagged (RGR_CQI_REPT builds).
 * RI reports are validated and forwarded to rgSCHCmnDlSetUeRi; invalid RI
 * is logged. Subband-CQI handling is not visible in this extract. */
16023 #ifdef RGR_CQI_REPT
16025 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20
16029 TfuDlCqiPucch *pucchCqi,
16033 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail )
16036 TfuDlCqiPucch *pucchCqi;
16041 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20
16045 TfuDlCqiPucch *pucchCqi
16048 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi)
16051 TfuDlCqiPucch *pucchCqi;
16055 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16057 if (pucchCqi->u.mode20Info.type == TFU_RPT_CQI)
16059 if (pucchCqi->u.mode20Info.u.cqi.isWideband)
16061 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16062 if((pucchCqi->u.mode20Info.u.cqi.u.wideCqi) &&
16063 (pucchCqi->u.mode20Info.u.cqi.u.wideCqi < RG_SCH_CMN_MAX_CQI))
16065 ueDl->cqiFlag = TRUE;
16066 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode20Info.u.cqi.\
16068 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16069 /* ccpu00117452 - MOD - Changed macro name from
16070 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16071 #ifdef RGR_CQI_REPT
16072 *isCqiAvail = TRUE;
16081 else if (pucchCqi->u.mode20Info.type == TFU_RPT_RI)
16083 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode20Info.u.ri))
16085 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode20Info.u.ri,
16090 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
16091 pucchCqi->u.mode20Info.u.ri,ue->ueId);
16099 * @brief This function Updates the DL CQI on PUCCH for the UE.
16103 * Function: rgSCHCmnDlProcCqiMode21
16105 * This function updates the DL CQI on PUCCH for the UE.
16107 * Invoked by: rgSCHCmnDlCqiOnPucchInd
16109 * Processing Steps:
16110 * Process CQI MODE 21
16111 * @param[in] RgSchCellCb *cell
16112 * @param[in] RgSchUeCb *ue
16113 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: process a PUCCH mode 2-1 report (UE-selected subband, with PMI).
 * For the wideband part: invalidates stale PUSCH feedback, stores a valid
 * CW0 CQI, derives CW1 CQI from the differential when present (flagging
 * is2ndCwCqiAvail), flags isCqiAvail, and forwards the wideband PMI to
 * rgSCHCmnDlSetUePmi. RI reports are validated and forwarded to
 * rgSCHCmnDlSetUeRi; invalid RI is logged. */
16118 #ifdef RGR_CQI_REPT
16120 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21
16124 TfuDlCqiPucch *pucchCqi,
16126 Bool *is2ndCwCqiAvail
16129 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail, is2ndCwCqiAvail)
16132 TfuDlCqiPucch *pucchCqi;
16133 TfuDlCqiRpt *dlCqiRpt;
16135 Bool *is2ndCwCqiAvail;
16139 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21
16143 TfuDlCqiPucch *pucchCqi
16146 PRIVATE INLINE Void rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi)
16149 TfuDlCqiPucch *pucchCqi;
16153 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16155 if (pucchCqi->u.mode21Info.type == TFU_RPT_CQI)
16157 ue->mimoInfo.puschFdbkVld = FALSE;
16158 if (pucchCqi->u.mode21Info.u.cqi.isWideband)
16160 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16161 if((pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi) &&
16162 (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.cqi < RG_SCH_CMN_MAX_CQI))
16164 ueDl->cqiFlag = TRUE;
16165 ueDl->mimoInfo.cwInfo[0].cqi = pucchCqi->u.mode21Info.u.cqi.\
16167 if (pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.pres)
16169 RG_SCH_UPDT_CW2_CQI(ueDl->mimoInfo.cwInfo[0].cqi, \
16170 ueDl->mimoInfo.cwInfo[1].cqi, \
16171 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.diffCqi.val);
16172 #ifdef RGR_CQI_REPT
16173 /* ccpu00117259 - ADD - Considering second codeword CQI info
16174 incase of MIMO for CQI Reporting */
16175 *is2ndCwCqiAvail = TRUE;
16178 /* ccpu00117452 - MOD - Changed macro name from
16179 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16180 #ifdef RGR_CQI_REPT
16181 *isCqiAvail = TRUE;
16188 rgSCHCmnDlSetUePmi(cell, ue, \
16189 pucchCqi->u.mode21Info.u.cqi.u.wideCqi.pmi);
16192 else if (pucchCqi->u.mode21Info.type == TFU_RPT_RI)
16194 if(RG_SCH_CMN_IS_RI_VALID(pucchCqi->u.mode21Info.u.ri))
16196 rgSCHCmnDlSetUeRi(cell, ue, pucchCqi->u.mode21Info.u.ri,
16201 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Invalid RI value(%x) CRNTI:%d",
16202 pucchCqi->u.mode21Info.u.ri,ue->ueId);
16210 * @brief This function Updates the DL CQI on PUCCH for the UE.
16214 * Function: rgSCHCmnDlCqiOnPucchInd
16216 * This function updates the DL CQI on PUCCH for the UE.
16218 * Invoked by: rgSCHCmnDlCqiInd
16220 * Processing Steps:
16221 * - Depending on the reporting mode of the PUCCH, the CQI/PMI/RI values
16222 * are updated and stored for each UE
16224 * @param[in] RgSchCellCb *cell
16225 * @param[in] RgSchUeCb *ue
16226 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: dispatcher for DL CQI received on PUCCH. Records the reporting
 * mode into ueCqiRept (RGR_CQI_REPT builds), then switches on
 * pucchCqi->mode to the mode-specific handler (1-0/1-1/2-0/2-1), setting
 * ueDl->cqiFlag after each. Unknown modes are logged and isCqiAvail is
 * forced FALSE so no report is generated from garbage. */
16231 #ifdef RGR_CQI_REPT
16233 PRIVATE Void rgSCHCmnDlCqiOnPucchInd
16237 TfuDlCqiPucch *pucchCqi,
16238 RgrUeCqiRept *ueCqiRept,
16240 Bool *is2ndCwCqiAvail
16243 PRIVATE Void rgSCHCmnDlCqiOnPucchInd(cell, ue, pucchCqi, ueCqiRept, isCqiAvail, is2ndCwCqiAvail)
16246 TfuDlCqiPucch *pucchCqi;
16247 RgrUeCqiRept *ueCqiRept;
16249 Bool *is2ndCwCqiAvail;
16253 PRIVATE Void rgSCHCmnDlCqiOnPucchInd
16257 TfuDlCqiPucch *pucchCqi
16260 PRIVATE Void rgSCHCmnDlCqiOnPucchInd(cell, ue, pucchCqi)
16263 TfuDlCqiPucch *pucchCqi;
16267 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16269 /* ccpu00117452 - MOD - Changed
16270 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16271 #ifdef RGR_CQI_REPT
16272 /* Save CQI mode information in the report */
16273 ueCqiRept->cqiMode = pucchCqi->mode;
16276 switch(pucchCqi->mode)
16278 case TFU_PUCCH_CQI_MODE10:
16279 #ifdef RGR_CQI_REPT
16280 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi, isCqiAvail);
16282 rgSCHCmnDlProcCqiMode10(cell, ue, pucchCqi);
16284 ueDl->cqiFlag = TRUE;
16286 case TFU_PUCCH_CQI_MODE11:
16287 #ifdef RGR_CQI_REPT
16288 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi, isCqiAvail,
16291 rgSCHCmnDlProcCqiMode11(cell, ue, pucchCqi);
16293 ueDl->cqiFlag = TRUE;
16295 case TFU_PUCCH_CQI_MODE20:
16296 #ifdef RGR_CQI_REPT
16297 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi, isCqiAvail);
16299 rgSCHCmnDlProcCqiMode20(cell, ue, pucchCqi);
16301 ueDl->cqiFlag = TRUE;
16303 case TFU_PUCCH_CQI_MODE21:
16304 #ifdef RGR_CQI_REPT
16305 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi, isCqiAvail,
16308 rgSCHCmnDlProcCqiMode21(cell, ue, pucchCqi);
16310 ueDl->cqiFlag = TRUE;
16314 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Unknown CQI Mode %d",
16315 pucchCqi->mode,ue->ueId);
16316 /* ccpu00117452 - MOD - Changed macro name from
16317 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16318 #ifdef RGR_CQI_REPT
16319 *isCqiAvail = FALSE;
16326 } /* rgSCHCmnDlCqiOnPucchInd */
16330 * @brief This function Updates the DL CQI on PUSCH for the UE.
16334 * Function: rgSCHCmnDlCqiOnPuschInd
16336 * This function updates the DL CQI on PUSCH for the UE.
16338 * Invoked by: rgSCHCmnDlCqiInd
16340 * Processing Steps:
16341 * - Depending on the reporting mode of the PUSCH, the CQI/PMI/RI values
16342 * are updated and stored for each UE
16344 * @param[in] RgSchCellCb *cell
16345 * @param[in] RgSchUeCb *ue
16346 * @param[in] TfuDlCqiRpt *dlCqiRpt
/* Purpose: dispatcher for aperiodic DL CQI received on PUSCH.
 *  - If an RI is present it is validated and applied via rgSCHCmnDlSetUeRi;
 *    for TM4 the previous RI is remembered (prevRiVal) so it can be
 *    restored if a subsequent PMI update fails or decoding fails.
 *  - Then switches on puschCqi->mode (2-0, 3-0, 1-2, 2-2, 3-1): valid
 *    wideband / per-codeword CQIs update cwInfo[0]/cwInfo[1], set
 *    isCqiAvail / is2ndCwCqiAvail (RGR_CQI_REPT builds), and PMI-bearing
 *    modes record puschPmiInfo and mark puschFdbkVld.
 *  - Unknown modes log an error, restore prevRiVal (TM4) and clear both
 *    availability flags.
 * NOTE(review): prevRiVal's declaration is elided in this extract;
 * presumably a local of the same type as ueDl->mimoInfo.ri. */
16351 #ifdef RGR_CQI_REPT
16353 PRIVATE Void rgSCHCmnDlCqiOnPuschInd
16357 TfuDlCqiPusch *puschCqi,
16358 RgrUeCqiRept *ueCqiRept,
16360 Bool *is2ndCwCqiAvail
16363 PRIVATE Void rgSCHCmnDlCqiOnPuschInd(cell, ue, puschCqi, ueCqiRept, isCqiAvail, is2ndCwCqiAvail)
16366 TfuDlCqiPusch *puschCqi;
16367 RgrUeCqiRept *ueCqiRept;
16369 Bool *is2ndCwCqiAvail;
16373 PRIVATE Void rgSCHCmnDlCqiOnPuschInd
16377 TfuDlCqiPusch *puschCqi
16380 PRIVATE Void rgSCHCmnDlCqiOnPuschInd(cell, ue, puschCqi)
16383 TfuDlCqiPusch *puschCqi;
16387 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16389 if (puschCqi->ri.pres == PRSNT_NODEF)
16391 if (RG_SCH_CMN_IS_RI_VALID(puschCqi->ri.val))
16393 /* Saving the previous ri value to revert back
16394 in case PMI update failed */
16395 if (RGR_UE_TM_4 == ue->mimoInfo.txMode ) /* Checking for TM4. TM8 check later */
16397 prevRiVal = ueDl->mimoInfo.ri;
16399 rgSCHCmnDlSetUeRi(cell, ue, puschCqi->ri.val, FALSE);
16403 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,"Invalid RI value(%x) CRNTI:%d",
16404 puschCqi->ri.val,ue->ueId);
16408 ue->mimoInfo.puschFdbkVld = FALSE;
16409 /* ccpu00117452 - MOD - Changed macro name from
16410 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16411 #ifdef RGR_CQI_REPT
16412 /* Save CQI mode information in the report */
16413 ueCqiRept->cqiMode = puschCqi->mode;
16414 /* ccpu00117259 - DEL - removed default setting of isCqiAvail to TRUE */
16417 switch(puschCqi->mode)
16419 case TFU_PUSCH_CQI_MODE_20:
16420 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16421 /* Checking whether the decoded CQI is a value between 1 and 15*/
16422 if((puschCqi->u.mode20Info.wideBandCqi) &&
16423 (puschCqi->u.mode20Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
16425 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode20Info.wideBandCqi;
16426 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16427 /* ccpu00117452 - MOD - Changed macro name from
16428 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16429 #ifdef RGR_CQI_REPT
16430 *isCqiAvail = TRUE;
16438 case TFU_PUSCH_CQI_MODE_30:
16439 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16440 if((puschCqi->u.mode30Info.wideBandCqi) &&
16441 (puschCqi->u.mode30Info.wideBandCqi < RG_SCH_CMN_MAX_CQI))
16443 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode30Info.wideBandCqi;
16444 ueDl->mimoInfo.cwInfo[1].cqi = ueDl->mimoInfo.cwInfo[0].cqi;
16445 /* ccpu00117452 - MOD - Changed macro name from
16446 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16447 #ifdef RGR_CQI_REPT
16448 *isCqiAvail = TRUE;
16452 extern U32 gACqiRcvdCount;
16463 case TFU_PUSCH_CQI_MODE_12:
16464 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16465 if((puschCqi->u.mode12Info.cqiIdx[0]) &&
16466 (puschCqi->u.mode12Info.cqiIdx[0] < RG_SCH_CMN_MAX_CQI))
16468 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode12Info.cqiIdx[0];
16469 /* ccpu00117452 - MOD - Changed macro name from
16470 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16471 #ifdef RGR_CQI_REPT
16472 *isCqiAvail = TRUE;
16479 if((puschCqi->u.mode12Info.cqiIdx[1]) &&
16480 (puschCqi->u.mode12Info.cqiIdx[1] < RG_SCH_CMN_MAX_CQI))
16482 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode12Info.cqiIdx[1];
16483 /* ccpu00117452 - MOD - Changed macro name from
16484 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16485 #ifdef RGR_CQI_REPT
16486 /* ccpu00117259 - ADD - Considering second codeword CQI info
16487 incase of MIMO for CQI Reporting */
16488 *is2ndCwCqiAvail = TRUE;
16495 ue->mimoInfo.puschFdbkVld = TRUE;
16496 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_12;
16497 ue->mimoInfo.puschPmiInfo.u.mode12Info = puschCqi->u.mode12Info;
16498 /* : resetting this is time based. Make use of CQI reporting
16499 * periodicity, DELTA's in determining the exact time at which this
16500 * need to be reset. */
16502 case TFU_PUSCH_CQI_MODE_22:
16503 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16504 if((puschCqi->u.mode22Info.wideBandCqi[0]) &&
16505 (puschCqi->u.mode22Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
16507 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode22Info.wideBandCqi[0];
16508 /* ccpu00117452 - MOD - Changed macro name from
16509 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16510 #ifdef RGR_CQI_REPT
16511 *isCqiAvail = TRUE;
16518 if((puschCqi->u.mode22Info.wideBandCqi[1]) &&
16519 (puschCqi->u.mode22Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
16521 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode22Info.wideBandCqi[1];
16522 /* ccpu00117452 - MOD - Changed macro name from
16523 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16524 #ifdef RGR_CQI_REPT
16525 /* ccpu00117259 - ADD - Considering second codeword CQI info
16526 incase of MIMO for CQI Reporting */
16527 *is2ndCwCqiAvail = TRUE;
16534 rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode22Info.wideBandPmi);
16535 ue->mimoInfo.puschFdbkVld = TRUE;
16536 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_22;
16537 ue->mimoInfo.puschPmiInfo.u.mode22Info = puschCqi->u.mode22Info;
16539 case TFU_PUSCH_CQI_MODE_31:
16540 /*ccpu00109787 - ADD - Check for non-zero CQI*/
16541 if((puschCqi->u.mode31Info.wideBandCqi[0]) &&
16542 (puschCqi->u.mode31Info.wideBandCqi[0] < RG_SCH_CMN_MAX_CQI))
16544 ueDl->mimoInfo.cwInfo[0].cqi = puschCqi->u.mode31Info.wideBandCqi[0];
16545 /* ccpu00117452 - MOD - Changed macro name from
16546 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16547 #ifdef RGR_CQI_REPT
16548 *isCqiAvail = TRUE;
16551 if (ueDl->mimoInfo.ri > 1)
16553 if((puschCqi->u.mode31Info.wideBandCqi[1]) &&
16554 (puschCqi->u.mode31Info.wideBandCqi[1] < RG_SCH_CMN_MAX_CQI))
16556 ueDl->mimoInfo.cwInfo[1].cqi = puschCqi->u.mode31Info.wideBandCqi[1];
16557 /* ccpu00117452 - MOD - Changed macro name from
16558 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16559 #ifdef RGR_CQI_REPT
16560 /* ccpu00117259 - ADD - Considering second codeword CQI info
16561 incase of MIMO for CQI Reporting */
16562 *is2ndCwCqiAvail = TRUE;
16566 if (rgSCHCmnDlSetUePmi(cell, ue, puschCqi->u.mode31Info.pmi) != ROK)
16568 /* To avoid Rank and PMI inconsistency */
16569 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
16570 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
16572 ueDl->mimoInfo.ri = prevRiVal;
16575 ue->mimoInfo.puschPmiInfo.mode = TFU_PUSCH_CQI_MODE_31;
16576 ue->mimoInfo.puschPmiInfo.u.mode31Info = puschCqi->u.mode31Info;
16580 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId, "Unknown CQI Mode %d CRNTI:%d",
16581 puschCqi->mode,ue->ueId);
16582 /* CQI decoding failed revert the RI to previous value */
16583 if ((puschCqi->ri.pres == PRSNT_NODEF) &&
16584 (RGR_UE_TM_4 == ue->mimoInfo.txMode)) /* checking for TM4. TM8 check later */
16586 ueDl->mimoInfo.ri = prevRiVal;
16588 /* ccpu00117452 - MOD - Changed macro name from
16589 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16590 #ifdef RGR_CQI_REPT
16591 *isCqiAvail = FALSE;
16592 /* ccpu00117259 - ADD - Considering second codeword CQI info
16593 incase of MIMO for CQI Reporting */
16594 *is2ndCwCqiAvail = FALSE;
16601 } /* rgSCHCmnDlCqiOnPuschInd */
16605 * @brief This function Updates the DL CQI for the UE.
16609 * Function: rgSCHCmnDlCqiInd
16610 * Purpose: Updates the DL CQI for the UE
16614 * @param[in] RgSchCellCb *cell
16615 * @param[in] RgSchUeCb *ue
16616 * @param[in] TfuDlCqiRpt *dlCqi
/* Purpose: top-level DL CQI indication handler for a UE.
 * Routes the report to the PUCCH or PUSCH parser (isPucchInfo selects),
 * optionally applies the CQI_CONFBITMASK_DROP smoothing/clamping of CW0/CW1
 * CQI against ue->prevCqi, collects an RRM CQI report when PUSH-N CQI
 * reporting is enabled, refreshes the UE's DL allocation limits / Tx
 * scheme, and notifies the DLFS, SPS, EMTC/common schedulers. Also
 * accumulates PFS and TeNB CQI statistics. */
16621 Void rgSCHCmnDlCqiInd
16627 CmLteTimingInfo timingInfo
16630 Void rgSCHCmnDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo)
16635 CmLteTimingInfo timingInfo;
16638 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16639 /* ccpu00117452 - MOD - Changed macro name from
16640 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16641 #ifdef RGR_CQI_REPT
16642 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16643 RgrUeCqiRept ueCqiRept = {{0}};
16644 Bool isCqiAvail = FALSE;
16645 /* ccpu00117259 - ADD - Considering second codeword CQI info
16646 incase of MIMO for CQI Reporting */
16647 Bool is2ndCwCqiAvail = FALSE;
16651 #ifdef RGR_CQI_REPT
/* Parse the report: PUCCH (periodic) vs PUSCH (aperiodic) variants. */
16654 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
16658 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi, &ueCqiRept, &isCqiAvail, &is2ndCwCqiAvail);
16663 rgSCHCmnDlCqiOnPucchInd(cell, ue, (TfuDlCqiPucch *)dlCqi);
16667 rgSCHCmnDlCqiOnPuschInd(cell, ue, (TfuDlCqiPusch *)dlCqi);
16671 #ifdef CQI_CONFBITMASK_DROP
/* Optional confidence-based smoothing: out-of-range CQI falls back to the
 * previous value; downward jumps are limited to 3 steps unless that would
 * take the CQI below 6. */
16672 if(!ue->cqiConfBitMask)
16674 if (ueDl->mimoInfo.cwInfo[0].cqi >15)
16676 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
16677 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
16679 else if ( ueDl->mimoInfo.cwInfo[0].cqi >= ue->prevCqi)
16681 ue->prevCqi = ueDl->mimoInfo.cwInfo[0].cqi;
16685 U8 dlCqiDeltaPrev = 0;
16686 dlCqiDeltaPrev = ue->prevCqi - ueDl->mimoInfo.cwInfo[0].cqi;
16687 if (dlCqiDeltaPrev > 3)
16688 dlCqiDeltaPrev = 3;
16689 if ((ue->prevCqi - dlCqiDeltaPrev) < 6)
16695 ue->prevCqi = ue->prevCqi - dlCqiDeltaPrev;
16697 ueDl->mimoInfo.cwInfo[0].cqi = ue->prevCqi;
16698 ueDl->mimoInfo.cwInfo[1].cqi = ue->prevCqi;
16704 /* ccpu00117452 - MOD - Changed macro name from
16705 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
16706 #ifdef RGR_CQI_REPT
16707 /* ccpu00117259 - ADD - Considering second codeword CQI info
16708 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail\
16709 in 'if' condition*/
16710 if (RG_SCH_CQIR_IS_PUSHNCQI_ENBLE(ue) && (isCqiAvail || is2ndCwCqiAvail))
16712 ueCqiRept.cqi[0] = ueDl->mimoInfo.cwInfo[0].cqi;
16714 /* ccpu00117259 - ADD - Considering second codeword CQI info
16715 incase of MIMO for CQI Reporting - added is2ndCwCqiAvail
16716 in 'if' condition*/
16717 ueCqiRept.cqi[1] = 0;
16718 if(is2ndCwCqiAvail)
16720 ueCqiRept.cqi[1] = ueDl->mimoInfo.cwInfo[1].cqi;
16722 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, &ueCqiRept);
/* Recompute allocation limits / link adaptation with the new CQI. */
16727 rgSCHCmnDlSetUeAllocLmtLa(cell, ue);
16728 rgSCHCheckAndSetTxScheme(cell, ue);
16731 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), ue->isEmtcUe);
16733 rgSCHCmnDlSetUeAllocLmt(cell, RG_SCH_CMN_GET_DL_UE(ue,cell), FALSE);
16737 if (cellSch->dl.isDlFreqSel)
16739 cellSch->apisDlfs->rgSCHDlfsDlCqiInd(cell, ue, isPucchInfo, dlCqi, timingInfo);
16742 /* Call SPS module to update CQI indication */
16743 rgSCHCmnSpsDlCqiIndHndlr(cell, ue, timingInfo);
16745 /* Call Specific scheduler to process on dlCqiInd */
16747 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
16749 cellSch->apisEmtcDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
16754 cellSch->apisDl->rgSCHDlCqiInd(cell, ue, isPucchInfo, dlCqi);
16757 #ifdef RG_PFS_STATS
16758 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].avgCqi +=
16759 ueDl->mimoInfo.cwInfo[0].cqi;
16760 ue->pfsStats.cqiStats[(RG_SCH_GET_SCELL_INDEX(ue, cell))].totalCqiOcc++;
16764 ueDl->avgCqi += ueDl->mimoInfo.cwInfo[0].cqi;
16765 ueDl->numCqiOccns++;
16766 if (ueDl->mimoInfo.ri == 1)
16777 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
16778 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
16779 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw0Cqi ++;
16780 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].dlNumCw1Cqi ++;
16781 cell->tenbStats->sch.dlSumCw0Cqi += ueDl->mimoInfo.cwInfo[0].cqi;
16782 cell->tenbStats->sch.dlSumCw1Cqi += ueDl->mimoInfo.cwInfo[1].cqi;
16783 cell->tenbStats->sch.dlNumCw0Cqi ++;
16784 cell->tenbStats->sch.dlNumCw1Cqi ++;
16791 * @brief This function calculates the wideband CQI from SNR
16792 * reported for each RB.
16796 * Function: rgSCHCmnCalcWcqiFrmSnr
16797 * Purpose: Wideband CQI calculation from SNR
16799 * Invoked by: RG SCH
16801 * @param[in] RgSchCellCb *cell
16802 * @param[in] TfuSrsRpt *srsRpt,
16803 * @return Wideband CQI
/* Purpose: coarse mapping of the first SRS SNR sample (srsRpt->snr[0]) to a
 * wideband CQI value via fixed 50-unit SNR buckets. The assigned CQI values
 * for each bucket are elided in this extract; the mapping is acknowledged
 * in-code as primitive and a candidate for refinement.
 * @return wideband CQI (U8), default-initialized to 1. */
16807 PRIVATE U8 rgSCHCmnCalcWcqiFrmSnr
16813 PRIVATE U8 rgSCHCmnCalcWcqiFrmSnr(cell,srsRpt)
16818 U8 wideCqi=1; /*Calculated value from SNR*/
16819 /*Need to map a certain SNR with a WideCQI value.
16820 * The CQI calculation is still primitive. Further, need to
16821 * use an improvised method for calculating WideCQI from SNR*/
16822 if (srsRpt->snr[0] <=50)
16826 else if (srsRpt->snr[0]>=51 && srsRpt->snr[0] <=100)
16830 else if (srsRpt->snr[0]>=101 && srsRpt->snr[0] <=150)
16834 else if (srsRpt->snr[0]>=151 && srsRpt->snr[0] <=200)
16838 else if (srsRpt->snr[0]>=201 && srsRpt->snr[0] <=250)
16847 }/*rgSCHCmnCalcWcqiFrmSnr*/
16851 * @brief This function Updates the SRS for the UE.
16855 * Function: rgSCHCmnSrsInd
16856 * Purpose: Updates the UL SRS for the UE
16860 * @param[in] RgSchCellCb *cell
16861 * @param[in] RgSchUeCb *ue
16862 * @param[in] TfuSrsRpt *srsRpt,
/* Purpose: handle an SRS indication for a UE.
 * Derives the selected antenna from the TTI count modulo the SRS
 * periodicity (alternating 0/1), takes the wideband CQI directly from the
 * report when present or computes it from SNR otherwise, and feeds it to
 * the UL-CQI/Tx-antenna selection logic. */
16867 Void rgSCHCmnSrsInd
16872 CmLteTimingInfo timingInfo
16875 Void rgSCHCmnSrsInd(cell, ue, srsRpt, timingInfo)
16879 CmLteTimingInfo timingInfo;
16882 U8 wideCqi; /*Calculated value from SNR*/
16883 U32 recReqTime; /*Received Time in TTI*/
/* Absolute TTI index; alternate the probed antenna every SRS period. */
16885 recReqTime = (timingInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) + timingInfo.slot;
16886 ue->srsCb.selectedAnt = (recReqTime/ue->srsCb.peri)%2;
16887 if(srsRpt->wideCqiPres)
16889 wideCqi = srsRpt->wideCqi;
16893 wideCqi = rgSCHCmnCalcWcqiFrmSnr(cell, srsRpt);
16895 rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi);
16897 }/*rgSCHCmnSrsInd*/
16902 * @brief This function is a handler for TA report for an UE.
16906 * Function: rgSCHCmnDlTARpt
16907 * Purpose: Determine based on UE_IDLE_TIME threshold,
16908 * whether UE needs to be Linked to the scheduler's TA list OR
16909 * if it needs a PDCCH Order.
16914 * @param[in] RgSchCellCb *cell
16915 * @param[in] RgSchUeCb *ue
/* Purpose: handle a Timing-Advance report for a UE.
 * If the UE's idle time exceeds the threshold (and a TA timer is
 * configured) the UE is marked PDCCH-order-inactive in both DL and UL
 * masks and the specific schedulers are told to deactivate it (only when
 * the mask transitions from 0, to avoid duplicate indications). Otherwise
 * the UE is appended to the cell TA list — EMTC UEs via the EMTC helper —
 * guarded by dlTaLnk.node to prevent a duplicate entry creating a loop in
 * the linked list (fix ccpu00124009). */
16920 Void rgSCHCmnDlTARpt
16926 Void rgSCHCmnDlTARpt(cell, ue)
16931 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
16932 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
16933 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
16934 CmLListCp poInactvLst;
16937 /* RACHO: If UE idle time is more than threshold, then
16938 * set its poInactv pdcch order inactivity */
16939 /* Fix : syed Ignore if TaTmr is not configured */
16940 if ((ue->dl.taCb.cfgTaTmr) && (rgSCHCmnUeIdleExdThrsld(cell, ue) == ROK))
16942 U32 prevDlMsk = ue->dl.dlInactvMask;
16943 U32 prevUlMsk = ue->ul.ulInactvMask;
16944 ue->dl.dlInactvMask |= RG_PDCCHODR_INACTIVE;
16945 ue->ul.ulInactvMask |= RG_PDCCHODR_INACTIVE;
16946 /* Indicate Specific scheduler for this UEs inactivity */
16947 cmLListInit(&poInactvLst);
16948 cmLListAdd2Tail(&poInactvLst, &ueDl->rachInfo.inActUeLnk);
16949 ueDl->rachInfo.inActUeLnk.node = (PTR)ue;
16950 /* Send inactivate ind only if not already sent */
16951 if (prevDlMsk == 0)
16953 cellSch->apisDl->rgSCHDlInactvtUes(cell, &poInactvLst);
16955 if (prevUlMsk == 0)
16957 cellSch->apisUl->rgSCHUlInactvtUes(cell, &poInactvLst);
16962 /* Fix: ccpu00124009 Fix for loop in the linked list "cellDl->taLst" */
16963 if (!ue->dlTaLnk.node)
16966 if(cell->emtcEnable)
16970 rgSCHEmtcAddToTaLst(cellDl,ue);
16977 cmLListAdd2Tail(&cellDl->taLst, &ue->dlTaLnk);
16978 ue->dlTaLnk.node = (PTR)ue;
16983 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
16984 "<TA>TA duplicate entry attempt failed: UEID:%u",
16993 * @brief Indication of UL CQI.
16997 * Function : rgSCHCmnFindUlCqiUlTxAnt
16999 * - Finds the Best Tx Antenna amongst the CQIs received
17000 * from Two Tx Antennas.
17002 * @param[in] RgSchCellCb *cell
17003 * @param[in] RgSchUeCb *ue
17004 * @param[in] U8 wideCqi
/* NOTE(review): only one statement of the body is visible here; as shown it
 * unconditionally selects antenna 1 as the valid UL Tx antenna. The antenna
 * comparison logic implied by the header is not visible — confirm against
 * the full file before relying on this description. */
17008 PRIVATE Void rgSCHCmnFindUlCqiUlTxAnt
17015 PRIVATE Void rgSCHCmnFindUlCqiUlTxAnt(cell, ue, wideCqi)
17021 ue->validTxAnt = 1;
17023 } /* rgSCHCmnFindUlCqiUlTxAnt */
17027 * @brief Indication of UL CQI.
17031 * Function : rgSCHCmnUlCqiInd
17033 * - Updates uplink CQI information for the UE. Computes and
17034 * stores the lowest CQI of CQIs reported in all subbands.
17036 * @param[in] RgSchCellCb *cell
17037 * @param[in] RgSchUeCb *ue
17038 * @param[in] TfuUlCqiRpt *ulCqiInfo
/* Return: Void. Updates the UE's UL link-adaptation (LA) state from the
 * wideband CQI, feeds power control and (optionally) UL SPS, then hands the
 * report to the specific scheduler (EMTC or default). */
17042 Void rgSCHCmnUlCqiInd
17046 TfuUlCqiRpt *ulCqiInfo
17049 Void rgSCHCmnUlCqiInd(cell, ue, ulCqiInfo)
17052 TfuUlCqiRpt *ulCqiInfo;
17055 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17056 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17061 #if (defined(SCH_STATS) || defined(TENB_STATS))
17062 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
17065 /* consider inputs from SRS handlers about SRS occassions
17066 * in determining the UL TX Antenna selection */
17067 ueUl->crntUlCqi[0] = ulCqiInfo->wideCqi;
17069 ueUl->validUlCqi = ueUl->crntUlCqi[0];
17070 ue->validTxAnt = 0;
/* Map the reported CQI to an iTbs (table indexed by extended-CP flag), and
 * recover the currently tracked iTbs; cqiBasediTbs/deltaiTbs are kept
 * scaled by 100 for fixed-point averaging. */
17072 iTbsNew = rgSchCmnUlCqiToTbsTbl[cell->isCpUlExtend][ueUl->validUlCqi];
17073 previTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
/* Outlier rejection: a jump of more than 5 iTbs steps is ignored once; the
 * flag causes the LA state to be reset on the next acceptable report. */
17075 if (RG_ITBS_DIFF(iTbsNew, previTbs) > 5)
17077 /* Ignore this iTBS report and mark that last iTBS report was */
17078 /* ignored so that subsequently we reset the LA algorithm */
17079 ueUl->ulLaCb.lastiTbsIgnored = TRUE;
17083 if (ueUl->ulLaCb.lastiTbsIgnored != TRUE)
/* Normal path: 20/80 exponential moving average of the CQI-based iTbs. */
17085 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
17086 (80 * ueUl->ulLaCb.cqiBasediTbs))/100;
17090 /* Reset the LA as iTbs in use caught up with the value */
17091 /* reported by UE. */
17092 ueUl->ulLaCb.cqiBasediTbs = ((20 * iTbsNew * 100) +
17093 (80 * previTbs * 100))/100;
17094 ueUl->ulLaCb.deltaiTbs = 0;
17095 ueUl->ulLaCb.lastiTbsIgnored = FALSE;
/* Propagate the new CQI to UL power control and, when configured, UL SPS. */
17100 rgSCHPwrUlCqiInd(cell, ue);
17102 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
17104 rgSCHCmnSpsUlCqiInd(cell, ue);
17107 /* Applicable to only some schedulers */
17109 if((TRUE == cell->emtcEnable) && (TRUE == ue->isEmtcUe))
17111 cellSch->apisEmtcUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
17116 cellSch->apisUl->rgSCHUlCqiInd(cell, ue, ulCqiInfo);
/* Statistics bookkeeping (compiled in under SCH_STATS / TENB_STATS). */
17120 ueUl->numCqiOccns++;
17121 ueUl->avgCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17126 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17127 ue->tenbStats->stats.nonPersistent.sch[RG_SCH_CELLINDEX(cell)].ulNumCqi ++;
17128 cell->tenbStats->sch.ulSumCqi += rgSCHCmnUlGetCqi(cell, ue, ueCtg);
17129 cell->tenbStats->sch.ulNumCqi ++;
17134 } /* rgSCHCmnUlCqiInd */
17137 * @brief Returns HARQ proc for which data expected now.
17141 * Function: rgSCHCmnUlHqProcForUe
17142 * Purpose: This function returns the harq process for
17143 * which data is expected in the current subframe.
17144 * It does not validate that the HARQ process
17145 * has an allocation.
17149 * @param[in] RgSchCellCb *cell
17150 * @param[in] CmLteTimingInfo frm
17151 * @param[in] RgSchUeCb *ue
17152 * @param[out] RgSchUlHqProcCb **procRef
/* Return: Void; result is written through procRef. */
17156 Void rgSCHCmnUlHqProcForUe
17159 CmLteTimingInfo frm,
17161 RgSchUlHqProcCb **procRef
17164 Void rgSCHCmnUlHqProcForUe(cell, frm, ue, procRef)
17166 CmLteTimingInfo frm;
17168 RgSchUlHqProcCb **procRef;
/* NOTE(review): the two lookups below are presumably alternatives selected
 * by conditional compilation (by-proc-id vs by-time) — the surrounding
 * preprocessor lines are not visible here; confirm in the full file. */
17172 U8 procId = rgSCHCmnGetUlHqProcIdx(&frm, cell);
17175 *procRef = rgSCHUhmGetUlHqProc(cell, ue, procId);
17177 *procRef = rgSCHUhmGetUlProcByTime(cell, ue, frm);
17184 * @brief Update harq process for allocation.
17188 * Function : rgSCHCmnUpdUlHqProc
17190 * This function is invoked when harq process
17191 * control block is now in a new memory location
17192 * thus requiring a pointer/reference update.
17194 * @param[in] RgSchCellCb *cell
17195 * @param[in] RgSchUlHqProcCb *curProc
17196 * @param[in] RgSchUlHqProcCb *oldProc
/* Return: S16 (ROK on success; failure path not visible in this excerpt).
 * Re-points the allocation's back-reference at the relocated HARQ proc. */
17202 S16 rgSCHCmnUpdUlHqProc
17205 RgSchUlHqProcCb *curProc,
17206 RgSchUlHqProcCb *oldProc
17209 S16 rgSCHCmnUpdUlHqProc(cell, curProc, oldProc)
17211 RgSchUlHqProcCb *curProc;
17212 RgSchUlHqProcCb *oldProc;
/* Debug-build sanity check: a relocated proc must still own an allocation. */
17218 #if (ERRCLASS & ERRCLS_DEBUG)
17219 if (curProc->alloc == NULLP)
17224 curProc->alloc->hqProc = curProc;
17226 } /* rgSCHCmnUpdUlHqProc */
17229 /*MS_WORKAROUND for CR FIXME */
17231 * @brief Hsndles BSR timer expiry
17235 * Function : rgSCHCmnBsrTmrExpry
17237 * This function is invoked when periodic BSR timer expires for a UE.
17239 * @param[in] RgSchUeCb *ue
/* Return: S16. Treats periodic-BSR expiry like an SR: flags the UE as
 * SR-granted and asks the (EMTC or default) UL scheduler to schedule it. */
17245 S16 rgSCHCmnBsrTmrExpry
17250 S16 rgSCHCmnBsrTmrExpry(ueCb)
17254 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ueCb->cell);
17257 ueCb->isSrGrant = TRUE;
/* EMTC statistics counter (compiled in under an EMTC build flag). */
17260 emtcStatsUlBsrTmrTxp++;
17264 if(ueCb->cell->emtcEnable)
17268 cellSch->apisEmtcUl->rgSCHSrRcvd(ueCb->cell, ueCb);
17275 cellSch->apisUl->rgSCHSrRcvd(ueCb->cell, ueCb);
17282 * @brief Short BSR update.
17286 * Function : rgSCHCmnUpdBsrShort
17288 * This functions does requisite updates to handle short BSR reporting.
17290 * @param[in] RgSchCellCb *cell
17291 * @param[in] RgSchUeCb *ue
17292 * @param[in] RgSchLcgCb *ulLcg
17293 * @param[in] U8 bsr
17294 * @param[out] RgSchErrInfo *err
/* Return: S16 (RFAILED with err->errCause set when the LCG is not
 * configured; ROK path otherwise). A short BSR reports one LCG only, so
 * every other LCG's buffer status is zeroed, per-QCI active-UE counts are
 * maintained, and the scheduler(s) are notified. */
17300 S16 rgSCHCmnUpdBsrShort
17309 S16 rgSCHCmnUpdBsrShort(cell, ue, ulLcg, bsr, err)
17319 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17321 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17322 RgSchCmnLcg *cmnLcg = NULLP;
17328 if (!RGSCH_LCG_ISCFGD(ulLcg))
17330 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
17333 for (lcgCnt=0; lcgCnt<4; lcgCnt++)
17336 /* Set BS of all other LCGs to Zero.
17337 If Zero BSR is reported in Short BSR include this LCG too */
/* The reported LCG is also cleared when bsr==0 and no HARQ procs are busy
 * (nothing outstanding to account for). */
17338 if ((lcgCnt != ulLcg->lcgId) ||
17339 (!bsr && !ueUl->hqEnt.numBusyHqProcs))
17341 /* If old BO is zero do nothing */
17342 if(((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs != 0)
/* LCG transitions non-empty -> empty: decrement the per-QCI active UL UE
 * count for each of its LCs and clear the UE's active-LC bit. */
17344 for(idx = 0; idx < ue->ul.lcgArr[lcgCnt].numLch; idx++)
17346 if((ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount) &&
17347 (ue->ulActiveLCs & (1 <<
17348 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1))))
17351 ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->ulUeCount--;
17352 ue->ulActiveLCs &= ~(1 <<
17353 (ue->ul.lcgArr[lcgCnt].lcArray[idx]->qciCb->qci -1));
17359 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgCnt]))
17361 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->bs = 0;
17362 ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgCnt].sch))->reportedBs = 0;
/* Reported LCG transitions empty -> non-empty: mirror bookkeeping,
 * incrementing per-QCI counts (LCG0 excluded). */
17367 if(ulLcg->lcgId && bsr && (((RgSchCmnLcg *)(ulLcg->sch))->bs == 0))
17369 for(idx = 0; idx < ulLcg->numLch; idx++)
17372 if (!(ue->ulActiveLCs & (1 << (ulLcg->lcArray[idx]->qciCb->qci -1))))
17374 ulLcg->lcArray[idx]->qciCb->ulUeCount++;
17375 ue->ulActiveLCs |= (1 << (ulLcg->lcArray[idx]->qciCb->qci -1));
17380 /* Resetting the nonGbrLcgBs info here */
17381 ue->ul.nonGbrLcgBs = 0;
17382 ue->ul.nonLcg0Bs = 0;
17384 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Decode the 6-bit BSR index via the extended or legacy size table. */
17386 if (TRUE == ue->ul.useExtBSRSizes)
17388 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
17392 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
/* Cap the usable buffer status: GBR bearers by effGbr+effDeltaMbr, LCG0
 * uncapped, non-GBR LCGs by the UE's effective AMBR. */
17394 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17396 /* TBD check for effGbr != 0 */
17397 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17399 else if (0 == ulLcg->lcgId)
17401 /* This is added for handling LCG0 */
17402 cmnLcg->bs = cmnLcg->reportedBs;
17406 /* Update non GBR LCG's BS*/
17407 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
17408 cmnLcg->bs = ue->ul.nonGbrLcgBs;
17410 ue->ul.totalBsr = cmnLcg->bs;
/* A zero BSR means nothing pending: stop any running (periodic) BSR timer. */
17413 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (bsr == 0))
17415 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
17419 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
17421 rgSCHCmnSpsBsrRpt(cell, ue, ulLcg);
17424 rgSCHCmnUpdUlCompEffBsr(ue);
17427 if(cell->emtcEnable)
17431 cellSch->apisEmtcUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
17438 cellSch->apisUl->rgSCHUpdBsrShort(cell, ue, ulLcg, bsr);
/* UL carrier aggregation: fan the report out to each active SCell's
 * scheduler (activation check compiled out under PAL_ENABLE_UL_CA). */
17442 if (ue->ul.isUlCaEnabled && ue->numSCells)
17444 for(U8 sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
17446 #ifndef PAL_ENABLE_UL_CA
17447 if((ue->cellInfo[sCellIdx] != NULLP) &&
17448 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
17450 if(ue->cellInfo[sCellIdx] != NULLP)
17453 cellSch->apisUl->rgSCHUpdBsrShort(ue->cellInfo[sCellIdx]->cell,
17464 * @brief Truncated BSR update.
17468 * Function : rgSCHCmnUpdBsrTrunc
17470 * This functions does required updates to handle truncated BSR report.
17473 * @param[in] RgSchCellCb *cell
17474 * @param[in] RgSchUeCb *ue
17475 * @param[in] RgSchLcgCb *ulLcg
17476 * @param[in] U8 bsr
17477 * @param[out] RgSchErrInfo *err
/* Return: S16 (RFAILED with err->errCause when LCG unconfigured). A
 * truncated BSR reports the highest-priority LCG with data; LCGs of higher
 * priority (lower lcgId) are implied empty and are zeroed, while
 * lower-priority LCGs keep their (possibly stale) buffer status. */
17483 S16 rgSCHCmnUpdBsrTrunc
17492 S16 rgSCHCmnUpdBsrTrunc(cell, ue, ulLcg, bsr, err)
17500 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17501 RgSchCmnLcg *cmnLcg = NULLP;
17508 if (!RGSCH_LCG_ISCFGD(ulLcg))
17510 err->errCause = RGSCHERR_SCH_LCG_NOT_CFGD;
17513 /* set all higher prio lcgs bs to 0 and update this lcgs bs and
17514 total bsr= sumofall lcgs bs */
/* Downward loop requires a signed counter (cnt reaches -1 to stop). */
17517 for (cnt = ulLcg->lcgId-1; cnt >= 0; cnt--)
17520 /* If Existing BO is zero the don't do anything */
17521 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs != 0)
/* Cleared LCG: drop per-QCI active UL UE counts for its LCs. */
17523 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
17526 if((ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount) &&
17527 (ue->ulActiveLCs & (1 <<
17528 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
17530 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount--;
17531 ue->ulActiveLCs &= ~(1 <<
17532 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
17537 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs = 0;
17538 ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->reportedBs = 0;
/* Reported LCG and below: LCGs becoming non-empty gain per-QCI counts. */
17543 for (cnt = ulLcg->lcgId; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
17545 if (ulLcg->lcgId == 0)
17549 /* If Existing BO is zero the don't do anything */
17550 if(((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs == 0)
17552 for(idx = 0; idx < ue->ul.lcgArr[cnt].numLch; idx++)
17555 if (!(ue->ulActiveLCs & (1 <<
17556 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1))))
17558 ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->ulUeCount++;
17559 ue->ulActiveLCs |= (1 <<
17560 (ue->ul.lcgArr[cnt].lcArray[idx]->qciCb->qci -1));
17566 ue->ul.nonGbrLcgBs = 0;
17567 ue->ul.nonLcg0Bs = 0;
17568 cmnLcg = ((RgSchCmnLcg *)(ulLcg->sch));
/* Decode BSR index via extended or legacy table, then cap: GBR by
 * effGbr+effDeltaMbr, LCG0 uncapped, non-GBR by effective AMBR. */
17569 if (TRUE == ue->ul.useExtBSRSizes)
17571 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsr];
17575 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsr];
17577 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17579 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17581 else if(ulLcg->lcgId == 0)
17583 /* This is for handeling LCG0 */
17584 cmnLcg->bs = cmnLcg->reportedBs;
17588 ue->ul.nonGbrLcgBs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
17589 cmnLcg->bs = ue->ul.nonGbrLcgBs;
17591 ue->ul.totalBsr = cmnLcg->bs;
17593 for (cnt = ulLcg->lcgId+1; cnt < RGSCH_MAX_LCG_PER_UE; cnt++)
17595 /* TODO: The bs for the other LCGs may be stale because some or all of
17596 * the part of bs may have been already scheduled/data received. Please
17597 * consider this when truncated BSR is tested/implemented */
17598 ue->ul.totalBsr += ((RgSchCmnLcg *)(ue->ul.lcgArr[cnt].sch))->bs;
17601 rgSCHCmnUpdUlCompEffBsr(ue);
17604 if(cell->emtcEnable)
17608 cellSch->apisEmtcUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
17615 cellSch->apisUl->rgSCHUpdBsrTrunc(cell, ue, ulLcg, bsr);
/* UL CA: propagate to each active SCell's scheduler as well. */
17619 if (ue->ul.isUlCaEnabled && ue->numSCells)
17621 for(U8 sCellIdx = 1; sCellIdx <= RG_SCH_MAX_SCELL ; sCellIdx++)
17623 #ifndef PAL_ENABLE_UL_CA
17624 if((ue->cellInfo[sCellIdx] != NULLP) &&
17625 (ue->cellInfo[sCellIdx]->sCellState == RG_SCH_SCELL_ACTIVE))
17627 if(ue->cellInfo[sCellIdx] != NULLP)
17630 cellSch->apisUl->rgSCHUpdBsrTrunc(ue->cellInfo[sCellIdx]->cell, ue, ulLcg, bsr);
17640 * @brief Long BSR update.
17644 * Function : rgSCHCmnUpdBsrLong
17646 * - Update BSRs for all configured LCGs.
17647 * - Update priority of LCGs if needed.
17648 * - Update UE's position within/across uplink scheduling queues.
17651 * @param[in] RgSchCellCb *cell
17652 * @param[in] RgSchUeCb *ue
17653 * @param[in] U8 bsArr[]
17654 * @param[out] RgSchErrInfo *err
/* Return: S16. A long BSR carries buffer status for all four LCGs
 * (bsArr[0..3]); each configured LCG's bs/reportedBs is refreshed and the
 * aggregate totalBsr recomputed. */
17660 S16 rgSCHCmnUpdBsrLong
17668 S16 rgSCHCmnUpdBsrLong(cell, ue, bsArr, err)
17675 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17676 U32 tmpBsArr[4] = {0, 0, 0, 0};
/* LCGs (1..3) turning empty -> non-empty gain per-QCI active-UE counts. */
17686 for(idx1 = 1; idx1 < RGSCH_MAX_LCG_PER_UE; idx1++)
17688 /* If Old BO is non zero then do nothing */
17689 if ((((RgSchCmnLcg *)(ue->ul.lcgArr[idx1].sch))->bs == 0)
17692 for(idx2 = 0; idx2 < ue->ul.lcgArr[idx1].numLch; idx2++)
17695 if (!(ue->ulActiveLCs & (1 <<
17696 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1))))
17698 ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->ulUeCount++;
17699 ue->ulActiveLCs |= (1 <<
17700 (ue->ul.lcgArr[idx1].lcArray[idx2]->qciCb->qci -1));
17706 ue->ul.nonGbrLcgBs = 0;
17707 ue->ul.nonLcg0Bs = 0;
/* LCG0 is uncapped: bs taken straight from the (ext) BSR size table. */
17709 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[0]))
17711 if (TRUE == ue->ul.useExtBSRSizes)
17713 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnExtBsrTbl[bsArr[0]];
17714 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnExtBsrTbl[bsArr[0]];
17715 tmpBsArr[0] = rgSchCmnExtBsrTbl[bsArr[0]];
17719 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = rgSchCmnBsrTbl[bsArr[0]];
17720 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->reportedBs = rgSchCmnBsrTbl[bsArr[0]];
17721 tmpBsArr[0] = rgSchCmnBsrTbl[bsArr[0]];
/* LCGs 1..3: GBR capped by effGbr+effDeltaMbr; non-GBR accumulated and
 * capped collectively by the UE's effective AMBR. */
17724 for (lcgId = 1; lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
17726 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
17728 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
17730 if (TRUE == ue->ul.useExtBSRSizes)
17732 cmnLcg->reportedBs = rgSchCmnExtBsrTbl[bsArr[lcgId]];
17736 cmnLcg->reportedBs = rgSchCmnBsrTbl[bsArr[lcgId]];
17738 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
17740 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr + cmnLcg->effDeltaMbr);
17741 tmpBsArr[lcgId] = cmnLcg->bs;
17745 nonGbrBs += cmnLcg->reportedBs;
17746 tmpBsArr[lcgId] = cmnLcg->reportedBs;
17747 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs,ue->ul.effAmbr);
17751 ue->ul.nonGbrLcgBs = RGSCH_MIN(nonGbrBs,ue->ul.effAmbr);
17753 ue->ul.totalBsr = tmpBsArr[0] + tmpBsArr[1] + tmpBsArr[2] + tmpBsArr[3];
17755 if ((ue->bsrTmr.tmrEvnt != TMR_NONE) && (ue->ul.totalBsr == 0))
17757 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
17762 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE) /* SPS_FIX */
/* SPS is told only when every reported byte belongs to LCG1 (the SPS LCG):
 * totalBsr minus LCG1's contribution being zero implies that. */
17764 if(ue->ul.totalBsr - tmpBsArr[1] == 0)
17765 {/* Updaing the BSR to SPS only if LCG1 BS is present in sps active state */
17766 rgSCHCmnSpsBsrRpt(cell, ue, &ue->ul.lcgArr[1]);
17770 rgSCHCmnUpdUlCompEffBsr(ue);
17773 if(cell->emtcEnable)
17777 cellSch->apisEmtcUl->rgSCHUpdBsrLong(cell, ue, bsArr);
17784 cellSch->apisUl->rgSCHUpdBsrLong(cell, ue, bsArr);
/* UL CA: forward the long BSR to each active SCell's scheduler. */
17788 if (ue->ul.isUlCaEnabled && ue->numSCells)
17790 for(U8 idx = 1; idx <= RG_SCH_MAX_SCELL ; idx++)
17792 #ifndef PAL_ENABLE_UL_CA
17793 if((ue->cellInfo[idx] != NULLP) &&
17794 (ue->cellInfo[idx]->sCellState == RG_SCH_SCELL_ACTIVE))
17796 if(ue->cellInfo[idx] != NULLP)
17799 cellSch->apisUl->rgSCHUpdBsrLong(ue->cellInfo[idx]->cell, ue, bsArr);
17809 * @brief PHR update.
17813 * Function : rgSCHCmnUpdExtPhr
17815 * Updates extended power headroom information for an UE.
17817 * @param[in] RgSchCellCb *cell
17818 * @param[in] RgSchUeCb *ue
17819 * @param[in] U8 phr
17820 * @param[out] RgSchErrInfo *err
/* Return: S16. Walks the UE's recorded UL allocations (newest first) to
 * find the one whose timing matches the MAC CE report time, applies the
 * extended PHR against that grant, and kicks SPS PHR handling if UL SPS is
 * active. (Actual parameter is RgInfExtPhrCEInfo *extPhr, not U8 phr.) */
17826 S16 rgSCHCmnUpdExtPhr
17830 RgInfExtPhrCEInfo *extPhr,
17834 S16 rgSCHCmnUpdExtPhr(cell, ue, extPhr, err)
17837 RgInfExtPhrCEInfo *extPhr;
17841 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17842 RgSchCmnAllocRecord *allRcd;
17843 CmLList *node = ueUl->ulAllocLst.last;
17846 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
17853 allRcd = (RgSchCmnAllocRecord *)node->node;
/* PHR is meaningful only relative to the grant it was measured on: match
 * the CE report time against the recorded allocation time. */
17855 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
17857 rgSCHPwrUpdExtPhr(cell, ue, extPhr, allRcd);
17862 if(ulSpsUe->isUlSpsActv)
17864 rgSCHCmnSpsPhrInd(cell,ue);
17869 } /* rgSCHCmnUpdExtPhr */
17875 * @brief PHR update.
17879 * Function : rgSCHCmnUpdPhr
17881 * Updates power headroom information for an UE.
17883 * @param[in] RgSchCellCb *cell
17884 * @param[in] RgSchUeCb *ue
17885 * @param[in] U8 phr
17886 * @param[out] RgSchErrInfo *err
/* Return: S16. Legacy (non-extended) PHR variant of rgSCHCmnUpdExtPhr:
 * matches the CE report time against the newest recorded UL allocation,
 * updates power control with RG_SCH_CMN_PWR_USE_CFG_MAX_PWR policy, then
 * informs SPS if UL SPS is active. */
17900 S16 rgSCHCmnUpdPhr(cell, ue, phr, err)
17907 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
17908 RgSchCmnAllocRecord *allRcd;
17909 CmLList *node = ueUl->ulAllocLst.last;
17912 RgSchCmnUlUeSpsInfo *ulSpsUe = RG_SCH_CMN_GET_UL_SPS_UE(ue,cell);
17919 allRcd = (RgSchCmnAllocRecord *)node->node;
17921 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
17923 rgSCHPwrUpdPhr(cell, ue, phr, allRcd, RG_SCH_CMN_PWR_USE_CFG_MAX_PWR);
17928 if(ulSpsUe->isUlSpsActv)
17930 rgSCHCmnSpsPhrInd(cell,ue);
17935 } /* rgSCHCmnUpdPhr */
17938 * @brief UL grant for contention resolution.
17942 * Function : rgSCHCmnContResUlGrant
17944 * Add UE to another queue specifically for CRNTI based contention
17948 * @param[in] RgSchUeCb *ue
17949 * @param[out] RgSchErrInfo *err
/* Return: S16. Thin dispatcher: hands the UE to the EMTC UL scheduler when
 * the cell is EMTC-enabled, otherwise to the default UL scheduler. */
17955 S16 rgSCHCmnContResUlGrant
17962 S16 rgSCHCmnContResUlGrant(cell, ue, err)
17968 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
17971 if(cell->emtcEnable)
17975 cellSch->apisEmtcUl->rgSCHContResUlGrant(cell, ue);
17982 cellSch->apisUl->rgSCHContResUlGrant(cell, ue);
17988 * @brief SR reception handling.
17992 * Function : rgSCHCmnSrRcvd
17994 * - Update UE's position within/across uplink scheduling queues
17995 * - Update priority of LCGs if needed.
17997 * @param[in] RgSchCellCb *cell
17998 * @param[in] RgSchUeCb *ue
17999 * @param[in] CmLteTimingInfo frm
18000 * @param[out] RgSchErrInfo *err
/* Return: S16. Checks whether an allocation already exists for the TTI
 * after the SR; otherwise flags the UE as SR-granted and delegates to the
 * (EMTC or default) UL scheduler. */
18010 CmLteTimingInfo frm,
18014 S16 rgSCHCmnSrRcvd(cell, ue, frm, err)
18017 CmLteTimingInfo frm;
18021 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
18022 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18023 CmLList *node = ueUl->ulAllocLst.last;
18027 emtcStatsUlTomSrInd++;
18030 RGSCH_INCR_SUB_FRAME(frm, 1); /* 1 TTI after the time SR was sent */
/* If the newest recorded allocation already covers that TTI, the SR needs
 * no fresh grant (early-out path; body elided in this excerpt). */
18033 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)node->node;
18034 if (RGSCH_TIMEINFO_SAME(frm, allRcd->allocTime))
18040 //TODO_SID Need to check when it is getting triggered
18041 ue->isSrGrant = TRUE;
18043 if(cell->emtcEnable)
18047 cellSch->apisEmtcUl->rgSCHSrRcvd(cell, ue);
18054 cellSch->apisUl->rgSCHSrRcvd(cell, ue);
18060 * @brief Returns first uplink allocation to send reception
18065 * Function: rgSCHCmnFirstRcptnReq(cell)
18066 * Purpose: This function returns the first uplink allocation
18067 * (or NULLP if there is none) in the subframe
18068 * in which is expected to prepare and send reception
18073 * @param[in] RgSchCellCb *cell
18074 * @return RgSchUlAlloc*
/* Returns the first alloc of the reception-request subframe, skipping an
 * alloc without a HARQ proc. NOTE(review): this uses `if` where the HARQ
 * feedback counterparts use `while` — so only ONE hqProc==NULLP alloc is
 * skipped here; confirm against the full file whether that is intended. */
18077 RgSchUlAlloc *rgSCHCmnFirstRcptnReq
18082 RgSchUlAlloc *rgSCHCmnFirstRcptnReq(cell)
18086 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18088 RgSchUlAlloc* alloc = NULLP;
/* RGSCH_INVALID_INFO marks "no reception-request subframe pending". */
18091 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
18093 RgSchUlSf* sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18094 alloc = rgSCHUtlUlAllocFirst(sf);
18096 if (alloc && alloc->hqProc == NULLP)
18098 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18106 * @brief Returns first uplink allocation to send reception
18111 * Function: rgSCHCmnNextRcptnReq(cell)
18112 * Purpose: This function returns the next uplink allocation
18113 * (or NULLP if there is none) in the subframe
18114 * in which is expected to prepare and send reception
18119 * @param[in] RgSchCellCb *cell
18120 * @return RgSchUlAlloc*
/* Successor of rgSCHCmnFirstRcptnReq: advances past `alloc` in the same
 * reception-request subframe, skipping one hqProc==NULLP alloc (same
 * `if`-vs-`while` caveat as the First variant — verify intent). */
18123 RgSchUlAlloc *rgSCHCmnNextRcptnReq
18126 RgSchUlAlloc *alloc
18129 RgSchUlAlloc *rgSCHCmnNextRcptnReq(cell, alloc)
18131 RgSchUlAlloc *alloc;
18134 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18136 //RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18139 if (cellUl->rcpReqIdx != RGSCH_INVALID_INFO)
18141 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->rcpReqIdx];
18143 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18144 if (alloc && alloc->hqProc == NULLP)
18146 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18157 * @brief Collates DRX enabled UE's scheduled in this SF
18161 * Function: rgSCHCmnDrxStrtInActvTmrInUl(cell)
18162 * Purpose: This function collates the link
18163 * of UE's scheduled in this SF who
18164 * have drx enabled. It then calls
18165 * DRX specific function to start/restart
18166 * inactivity timer in Ul
18170 * @param[in] RgSchCellCb *cell
/* Return: Void. Walks the scheduled UL subframe's allocation list and
 * collects DRX-enabled UEs that got a NEW transmission (not a retx, not an
 * SR grant, not a UL SPS occasion), then starts/restarts their DRX
 * inactivity timers in one batch. */
18174 Void rgSCHCmnDrxStrtInActvTmrInUl
18179 Void rgSCHCmnDrxStrtInActvTmrInUl(cell)
18183 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18184 RgSchUlSf *sf = &(cellUl->ulSfArr[cellUl->schdIdx]);
18185 RgSchUlAlloc *alloc = rgSCHUtlUlAllocFirst(sf);
18190 cmLListInit(&ulUeLst);
18198 if (!(alloc->grnt.isRtx) && ueCb->isDrxEnabled && !(ueCb->isSrGrant)
18200 /* ccpu00139513- DRX inactivity timer should not be started for
18201 * UL SPS occasions */
18202 && (alloc->hqProc->isSpsOccnHqP == FALSE)
18206 cmLListAdd2Tail(&ulUeLst,&(ueCb->ulDrxInactvTmrLnk));
18207 ueCb->ulDrxInactvTmrLnk.node = (PTR)ueCb;
18211 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18214 (Void)rgSCHDrxStrtInActvTmr(cell,&ulUeLst,RG_SCH_DRX_UL);
18221 * @brief Returns first uplink allocation to send HARQ feedback
18226 * Function: rgSCHCmnFirstHqFdbkAlloc
18227 * Purpose: This function returns the first uplink allocation
18228 * (or NULLP if there is none) in the subframe
18229 * for which it is expected to prepare and send HARQ
18234 * @param[in] RgSchCellCb *cell
18235 * @param[in] U8 idx
18236 * @return RgSchUlAlloc*
/* First alloc of the HARQ-feedback subframe selected by hqFdbkIdx[idx];
 * unlike the RcptnReq variants, a `while` loop skips ALL allocs that carry
 * no HARQ process. */
18239 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc
18245 RgSchUlAlloc *rgSCHCmnFirstHqFdbkAlloc(cell, idx)
18250 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18252 RgSchUlAlloc *alloc = NULLP;
18255 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
18257 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
18258 alloc = rgSCHUtlUlAllocFirst(sf);
18260 while (alloc && (alloc->hqProc == NULLP))
18262 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18270 * @brief Returns next allocation to send HARQ feedback for.
18274 * Function: rgSCHCmnNextHqFdbkAlloc(cell)
18275 * Purpose: This function returns the next uplink allocation
18276 * (or NULLP if there is none) in the subframe
18277 * for which HARQ feedback needs to be sent.
18281 * @param[in] RgSchCellCb *cell
18282 * @return RgSchUlAlloc*
/* Successor of rgSCHCmnFirstHqFdbkAlloc: advances past `alloc` in the same
 * HARQ-feedback subframe, skipping all allocs without a HARQ process. */
18285 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc
18288 RgSchUlAlloc *alloc,
18292 RgSchUlAlloc *rgSCHCmnNextHqFdbkAlloc(cell, alloc, idx)
18294 RgSchUlAlloc *alloc;
18298 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18300 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
18302 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
18304 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18305 while (alloc && (alloc->hqProc == NULLP))
18307 alloc = rgSCHUtlUlAllocNxt(sf, alloc);
18317 /***********************************************************
18319 * Func : rgSCHCmnUlGetITbsFrmIMcs
18321 * Desc : Returns the Itbs that is mapped to an Imcs
18322 * for the case of uplink.
18330 **********************************************************/
/* Pure table lookup into rgUlIMcsTbl (36.213 Table 8.6.1-1 mapping);
 * caller must pass a valid iMcs index — no bounds check here. */
18332 U8 rgSCHCmnUlGetITbsFrmIMcs
18337 U8 rgSCHCmnUlGetITbsFrmIMcs(iMcs)
18342 return (rgUlIMcsTbl[iMcs].iTbs);
18345 /***********************************************************
18347 * Func : rgSCHCmnUlGetIMcsFrmITbs
18349 * Desc : Returns the Imcs that is mapped to an Itbs
18350 * for the case of uplink.
18354 * Notes: For iTbs 19, iMcs is dependant on modulation order.
18355 * Refer to 36.213, Table 8.6.1-1 and 36.306 Table 4.1-2
18356 * for UE capability information
18360 **********************************************************/
/* NOTE(review): the per-branch return statements are elided in this
 * excerpt; only the branch conditions are visible. Branches: iTbs < 19
 * maps directly; iTbs == 19 for a non-category-5 UE (no 64QAM) maps
 * differently; remaining path is capped (TENBPLUS-3898 TDD workaround). */
18362 U8 rgSCHCmnUlGetIMcsFrmITbs
18365 CmLteUeCategory ueCtg
18368 U8 rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg)
18370 CmLteUeCategory ueCtg;
18379 /*a higher layer can force a 64QAM UE to transmit at 16QAM.
18380 * We currently do not support this. Once the support for such
18381 * is added, ueCtg should be replaced by current transmit
18382 * modulation configuration.Refer to 36.213 -8.6.1
18384 else if ( iTbs < 19 )
18388 else if ((iTbs == 19) && (ueCtg != CM_LTE_UE_CAT_5))
18398 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
18399 was seen when IMCS exceeds 20 on T2k TDD*/
18409 /***********************************************************
18411 * Func : rgSCHCmnUlMinTbBitsForITbs
18413 * Desc : Returns the minimum number of bits that can
18414 * be given as grant for a specific CQI.
18422 **********************************************************/
/* Looks up the transport-block size (bits) for one subband allocation at
 * the given iTbs: rgTbSzTbl row 0, column (subband size - 1 RBs).
 * iTbs is bounds-checked via RGSCH_ARRAY_BOUND_CHECK. */
18424 U32 rgSCHCmnUlMinTbBitsForITbs
18426 RgSchCmnUlCell *cellUl,
18430 U32 rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs)
18431 RgSchCmnUlCell *cellUl;
18436 RGSCH_ARRAY_BOUND_CHECK(0, rgTbSzTbl[0], iTbs);
18438 return (rgTbSzTbl[0][iTbs][cellUl->sbSize-1]);
18441 /***********************************************************
18443 * Func : rgSCHCmnUlSbAlloc
18445 * Desc : Given a required 'number of subbands' and a hole,
18446 * returns a suitable alloc such that the subband
18447 * allocation size is valid
18451 * Notes: Does not assume either passed numSb or hole size
18452 * to be valid for allocation, and hence arrives at
18453 * an acceptable value.
18456 **********************************************************/
/* PUSCH RB counts must factor as 2^a*3^b*5^c (36.211); rgSchCmnMult235Tbl
 * gives, per size, the nearest valid size at-or-above (.match) and
 * at-or-below (.prvMatch). Allocation takes the whole hole when the
 * (rounded) request covers it, else carves a partial hole. */
18458 RgSchUlAlloc *rgSCHCmnUlSbAlloc
18465 RgSchUlAlloc *rgSCHCmnUlSbAlloc(sf, numSb, hole)
18471 U8 holeSz; /* valid hole size */
18472 RgSchUlAlloc *alloc;
/* Case 1: the hole size itself is already a valid 2^a*3^b*5^c count. */
18474 if ((holeSz = rgSchCmnMult235Tbl[hole->num].prvMatch) == hole->num)
18476 numSb = rgSchCmnMult235Tbl[numSb].match;
18477 if (numSb >= holeSz)
18479 alloc = rgSCHUtlUlAllocGetCompHole(sf, hole);
18483 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
/* Case 2: hole size invalid — round the request up when it fits below the
 * hole, otherwise round it down to the valid size under the hole. */
18488 if (numSb < holeSz)
18490 numSb = rgSchCmnMult235Tbl[numSb].match;
18494 numSb = rgSchCmnMult235Tbl[numSb].prvMatch;
18497 if ( numSb >= holeSz )
18501 alloc = rgSCHUtlUlAllocGetPartHole(sf, numSb, hole);
18507 * @brief To fill the RgSchCmnUeUlAlloc structure of UeCb.
18511 * Function: rgSCHCmnUlUeFillAllocInfo
18512 * Purpose: Specific scheduler to call this API to fill the alloc
18515 * Invoked by: Scheduler
18517 * @param[in] RgSchCellCb *cell
18518 * @param[out] RgSchUeCb *ue
/* Return: Void. Completes a granted UL allocation: TPC, n-DMRS, HARQ proc
 * linkage, PDCCH contents, records the allocation for BSR/PHR matching,
 * and (new transmissions only) debits the UE's outstanding buffer status. */
18522 Void rgSCHCmnUlUeFillAllocInfo
18528 Void rgSCHCmnUlUeFillAllocInfo(cell, ue)
18533 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18534 RgSchCmnUeUlAlloc *ulAllocInfo;
18535 RgSchCmnUlUe *ueUl;
18538 ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18539 ulAllocInfo = &ueUl->alloc;
18541 /* Fill alloc structure */
18542 rgSCHCmnUlAllocFillTpc(cell, ue, ulAllocInfo->alloc);
18543 rgSCHCmnUlAllocFillNdmrs(cellUl, ulAllocInfo->alloc);
18544 rgSCHCmnUlAllocLnkHqProc(ue, ulAllocInfo->alloc, ulAllocInfo->alloc->hqProc,
18545 ulAllocInfo->alloc->hqProc->isRetx);
18547 rgSCHCmnUlFillPdcchWithAlloc(ulAllocInfo->alloc->pdcch,
18548 ulAllocInfo->alloc, ue);
18549 /* Recording information about this allocation */
18550 rgSCHCmnUlRecordUeAlloc(cell, ue);
18552 /* Update the UE's outstanding allocation */
/* Retransmissions re-use bytes already debited; only new tx reduces BS. */
18553 if (!ulAllocInfo->alloc->hqProc->isRetx)
18555 rgSCHCmnUlUpdOutStndAlloc(cell, ue, ulAllocInfo->allocdBytes);
18562 * @brief Update the UEs outstanding alloc based on the BSR report's timing.
18567 * Function: rgSCHCmnUpdUlCompEffBsr
18568 * Purpose: Clear off all the allocations from outstanding allocation that
18569 * are later than or equal to BSR timing information (stored in UEs datIndTime).
18571 * Invoked by: Scheduler
18573 * @param[in] RgSchUeCb *ue
/* Return: Void. A BSR was computed by the UE before some of our recorded
 * grants took effect; sum those not-yet-reflected grants (newest-first walk
 * until the record matching macCeRptTime) and debit them: first from
 * LCG0's bs, the remainder from the aggregated non-LCG0 buffer status.
 * Produces ue->ul.effBsr = adjusted nonLcg0Bs + remaining LCG0 bs. */
18577 PRIVATE Void rgSCHCmnUpdUlCompEffBsr
18582 PRIVATE Void rgSCHCmnUpdUlCompEffBsr(ue)
18586 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,ue->cell);
18587 CmLList *node = ueUl->ulAllocLst.last;
18588 RgSchCmnAllocRecord *allRcd;
18589 U32 outStndAlloc=0;
18590 U32 nonLcg0OutStndAllocBs=0;
18593 RgSchCmnLcg *cmnLcg = NULLP;
/* Locate the allocation record that coincides with the BSR report time. */
18597 allRcd = (RgSchCmnAllocRecord *)node->node;
18598 if (RGSCH_TIMEINFO_SAME(ue->macCeRptTime, allRcd->allocTime))
/* Accumulate grants issued at/after the report time. */
18607 allRcd = (RgSchCmnAllocRecord *)node->node;
18609 outStndAlloc += allRcd->alloc;
18612 cmnLcg = (RgSchCmnLcg *)(ue->ul.lcgArr[0].sch);
18613 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
18614 if (cmnLcg->bs > outStndAlloc)
18616 cmnLcg->bs -= outStndAlloc;
18617 ue->ul.minReqBytes = cmnLcg->bs;
/* Outstanding exceeds LCG0: remainder is debited from non-LCG0 BS. */
18622 nonLcg0OutStndAllocBs = outStndAlloc - cmnLcg->bs;
/* Rebuild the non-LCG0 total: GBR LCGs contribute their capped bs; all
 * non-GBR LCGs contribute the shared AMBR-capped nonGbrLcgBs once. */
18626 for(lcgId = 1;lcgId < RGSCH_MAX_LCG_PER_UE; lcgId++)
18628 if(RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
18630 cmnLcg = ((RgSchCmnLcg *) (ue->ul.lcgArr[lcgId].sch));
18631 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
18633 nonLcg0Bsr += cmnLcg->bs;
18637 nonLcg0Bsr += ue->ul.nonGbrLcgBs;
18638 if (nonLcg0OutStndAllocBs > nonLcg0Bsr)
18644 nonLcg0Bsr -= nonLcg0OutStndAllocBs;
18646 ue->ul.nonLcg0Bs = nonLcg0Bsr;
18647 /* Cap effBsr with nonLcg0Bsr and append lcg0 bs.
18648 * nonLcg0Bsr limit applies only to lcg1,2,3 */
18649 /* better be handled in individual scheduler */
18650 ue->ul.effBsr = nonLcg0Bsr +\
18651 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
18656 * @brief Records information about the current allocation.
18660 * Function: rgSCHCmnUlRecordUeAlloc
18661 * Purpose: Records information about the curent allocation.
18662 * This includes the allocated bytes, as well
18663 * as some power information.
18665 * Invoked by: Scheduler
18667 * @param[in] RgSchCellCb *cell
18668 * @param[in] RgSchUeCb *ue
/* Return: Void. The UE keeps a fixed-size ring of allocation records:
 * recycle the OLDEST record (list head), stamp it with the time the UE can
 * first report against this grant, fill bytes/RBs/CQI/TPC, and re-append
 * at the tail (newest). Also feeds power control RB accounting. */
18672 Void rgSCHCmnUlRecordUeAlloc
18678 Void rgSCHCmnUlRecordUeAlloc(cell, ue)
18684 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
18686 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18687 CmLListCp *lst = &ueUl->ulAllocLst;
18688 CmLList *node = ueUl->ulAllocLst.first;
18689 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
18690 RgSchCmnUeUlAlloc *ulAllocInfo = &ueUl->alloc;
18691 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
18693 cmLListDelFrm(lst, &allRcd->lnk);
18695 /* To the crntTime, add the MIN time at which UE will
18696 * actually send the BSR i.e DELTA+4 */
18697 allRcd->allocTime = cell->crntTime;
18698 /*ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
18700 if(ue->isEmtcUe == TRUE)
18702 RGSCH_INCR_SUB_FRAME_EMTC(allRcd->allocTime,
18703 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
18708 RGSCH_INCR_SUB_FRAME(allRcd->allocTime,
18709 (TFU_ULCNTRL_DLDELTA + RGSCH_PDCCH_PUSCH_DELTA));
/* NOTE(review): this overwrite of allocTime with cellUl->schdTime is
 * presumably an alternate build path (#else of the delta computation
 * above) — the guards are elided in this excerpt; confirm in full file. */
18712 allRcd->allocTime = cellUl->schdTime;
18714 cmLListAdd2Tail(lst, &allRcd->lnk);
18716 /* Filling in the parameters to be recorded */
18717 allRcd->alloc = ulAllocInfo->allocdBytes;
18718 //allRcd->numRb = ulAllocInfo->alloc->grnt.numRb;
/* 5GTF build: RB count derived from VRB groups rather than grnt.numRb. */
18719 allRcd->numRb = (ulAllocInfo->alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
18720 /*Recording the UL CQI derived from the maxUlCqi */
18721 allRcd->cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
18722 allRcd->tpc = ulAllocInfo->alloc->grnt.tpc;
18724 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
18726 cell->measurements.ulBytesCnt += ulAllocInfo->allocdBytes;
18731 /** PHR handling for MSG3
18732 * @brief Records allocation information of msg3 in the the UE.
18736 * Function: rgSCHCmnUlRecMsg3Alloc
18737 * Purpose: Records information about msg3 allocation.
18738 * This includes the allocated bytes, as well
18739 * as some power information.
18741 * Invoked by: Scheduler
18743 * @param[in] RgSchCellCb *cell
18744 * @param[in] RgSchUeCb *ue
18745 * @param[in] RgSchRaCb *raCb
/* Return: Void. Msg3 counterpart of rgSCHCmnUlRecordUeAlloc: recycles the
 * oldest allocation record and fills it from the RA control block's msg3
 * grant (time, bytes, RBs, CCCH CQI, TPC), then updates power control. */
18749 Void rgSCHCmnUlRecMsg3Alloc
18756 Void rgSCHCmnUlRecMsg3Alloc(cell, ue, raCb)
18762 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18763 CmLListCp *lst = &ueUl->ulAllocLst;
18764 CmLList *node = ueUl->ulAllocLst.first;
18765 RgSchCmnAllocRecord *allRcd = (RgSchCmnAllocRecord *)(node->node);
18767 /* Stack Crash problem for TRACE5 changes */
18769 cmLListDelFrm(lst, node);
18770 allRcd->allocTime = raCb->msg3AllocTime;
18771 cmLListAdd2Tail(lst, node);
18773 /* Filling in the parameters to be recorded */
18774 allRcd->alloc = raCb->msg3Grnt.datSz;
18775 allRcd->numRb = raCb->msg3Grnt.numRb;
/* CQI comes from the CCCH (RACH) estimate — no UE report exists yet. */
18776 allRcd->cqi = raCb->ccchCqi;
18777 allRcd->tpc = raCb->msg3Grnt.tpc;
18779 rgSCHPwrRecordRbAlloc(cell, ue, allRcd->numRb);
18784 * @brief Keeps track of the most recent RG_SCH_CMN_MAX_ALLOC_TRACK
18785 * allocations to track. Adds this allocation to the ueUl's ulAllocLst.
18790 * Function: rgSCHCmnUlUpdOutStndAlloc
18791 * Purpose: Recent Allocation shall be at First Pos'n.
18792 * Remove the last node, update the fields
18793 * with the new allocation and add at front.
18795 * Invoked by: Scheduler
18797 * @param[in] RgSchCellCb *cell
18798 * @param[in] RgSchUeCb *ue
18799 * @param[in] U32 alloc
/* Purpose: debit a fresh UL allocation ('alloc' bytes) against the UE's
 * outstanding buffer status: LCG0 BS is consumed first, any remainder is
 * taken from the non-LCG0 BS, and effBsr is recomputed. When no data is
 * pending, the BSR timer is stopped and (for non-SR grants) the periodic
 * BSR timer is restarted.
 * NOTE(review): this listing is elided (original line numbers are
 * non-contiguous); verify against the full source before editing. */
18803 Void rgSCHCmnUlUpdOutStndAlloc
18810 Void rgSCHCmnUlUpdOutStndAlloc(cell, ue, alloc)
18816 U32 nonLcg0Alloc=0;
18818 /* Update UEs LCG0's bs according to the total outstanding BSR allocation.*/
18819 if (((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs > alloc)
/* LCG0 alone absorbs the entire allocation. */
18821 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs -= alloc;
/* Otherwise LCG0 is emptied and the remainder debits the other LCGs. */
18825 nonLcg0Alloc = alloc - ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
18826 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs = 0;
18829 if (nonLcg0Alloc >= ue->ul.nonLcg0Bs)
18831 ue->ul.nonLcg0Bs = 0;
18835 ue->ul.nonLcg0Bs -= nonLcg0Alloc;
18837 /* Cap effBsr with effAmbr and append lcg0 bs.
18838 * effAmbr limit applies only to lcg1,2,3 non GBR LCG's*/
18839 /* better be handled in individual scheduler */
18840 ue->ul.effBsr = ue->ul.nonLcg0Bs +\
18841 ((RgSchCmnLcg *)(ue->ul.lcgArr[0].sch))->bs;
/* Nothing left to schedule: manage the BSR timers. */
18843 if (ue->ul.effBsr == 0)
18845 if (ue->bsrTmr.tmrEvnt != TMR_NONE)
18847 rgSCHTmrStopTmr(cell, ue->bsrTmr.tmrEvnt, ue);
/* Periodic BSR timer is restarted only for BSR-triggered (non-SR) grants. */
18850 if (FALSE == ue->isSrGrant)
18852 if (ue->ul.bsrTmrCfg.isPrdBsrTmrPres)
18855 rgSCHTmrStartTmr(cell, ue, RG_SCH_TMR_BSR,
18856 ue->ul.bsrTmrCfg.prdBsrTmr);
18862 /* Resetting UEs lower Cap */
18863 ue->ul.minReqBytes = 0;
18870 * @brief Returns the "Itbs" for a given UE.
18874 * Function: rgSCHCmnUlGetITbs
18875 * Purpose: This function returns the "Itbs" for a given UE.
18877 * Invoked by: Scheduler
18879 * @param[in] RgSchUeCb *ue
/* Purpose: return the UL iTbs for a UE, derived from its CQI via
 * rgSchCmnUlCqiToTbsTbl (index 0/1 selects normal/extended CP). For
 * non-category-5 UEs the CQI is capped at maxUlCqi; one path also applies
 * link-adaptation deltas and clamps against the cell's maxUlItbs.
 * NOTE(review): this listing is elided (original line numbers are
 * non-contiguous) - the branch structure between the LA-based and
 * CQI-table-based returns is not fully visible; verify in the full source. */
18883 U8 rgSCHCmnUlGetITbs
18890 U8 rgSCHCmnUlGetITbs(cell, ue, isEcp)
18896 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
18897 /* CQI will be capped to maxUlCqi for 16qam UEs */
18898 CmLteUeCategory ueCtgy = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* Upper bound for iTbs given the UE's maximum supported UL CQI. */
18902 U8 maxiTbs = rgSchCmnUlCqiToTbsTbl[(U8)isEcp][ueUl->maxUlCqi];
18906 /* #ifdef RG_SCH_CMN_EXT_CP_SUP For ECP pick index 1 */
18908 if ( (ueCtgy != CM_LTE_UE_CAT_5) &&
18909 (ueUl->validUlCqi > ueUl->maxUlCqi)
18912 cqi = ueUl->maxUlCqi;
18916 cqi = ueUl->validUlCqi;
/* Link-adaptation path: cqiBasediTbs/deltaiTbs are scaled by 100. */
18920 iTbs = (ueUl->ulLaCb.cqiBasediTbs + ueUl->ulLaCb.deltaiTbs)/100;
18922 RG_SCH_CHK_ITBS_RANGE(iTbs, maxiTbs);
18924 iTbs = RGSCH_MIN(iTbs, ue->cell->thresholds.maxUlItbs);
18927 /* This is a Temp fix, done for TENBPLUS-3898, ULSCH SDU corruption
18928 was seen when IMCS exceeds 20 on T2k TDD */
18937 if ( (ueCtgy != CM_LTE_UE_CAT_5) && (ueUl->crntUlCqi[0] > ueUl->maxUlCqi ))
18939 cqi = ueUl->maxUlCqi;
18943 cqi = ueUl->crntUlCqi[0];
18946 return (rgSchCmnUlCqiToTbsTbl[(U8)isEcp][cqi]);
18950 * @brief This function adds the UE to DLRbAllocInfo TX lst.
18954 * Function: rgSCHCmnDlRbInfoAddUeTx
18955 * Purpose: This function adds the UE to DLRbAllocInfo TX lst.
18957 * Invoked by: Common Scheduler
18959 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
18960 * @param[in] RgSchUeCb *ue
18961 * @param[in] RgSchDlHqProcCb *hqP
/* Purpose: append a HARQ process to the DL RB-allocation TX list.
 * If the cell runs DL frequency-selective scheduling, delegate the add to
 * the DLFS API; otherwise link it into dedAlloc.txHqPLst directly. The
 * reqLnk.node NULL check prevents double insertion.
 * NOTE(review): elided listing; verify against the full source. */
18966 PRIVATE Void rgSCHCmnDlRbInfoAddUeTx
18969 RgSchCmnDlRbAllocInfo *allocInfo,
18971 RgSchDlHqProcCb *hqP
18974 PRIVATE Void rgSCHCmnDlRbInfoAddUeTx(cell, allocInfo, ue, hqP)
18976 RgSchCmnDlRbAllocInfo *allocInfo;
18978 RgSchDlHqProcCb *hqP;
18981 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Only add if not already queued (reqLnk.node is set on insertion). */
18984 if (hqP->reqLnk.node == NULLP)
18986 if (cellSch->dl.isDlFreqSel)
18988 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
18989 &allocInfo->dedAlloc.txHqPLst, hqP);
18994 cmLListAdd2Tail(&allocInfo->dedAlloc.txHqPLst, &hqP->reqLnk);
18996 hqP->reqLnk.node = (PTR)hqP;
19003 * @brief This function adds the UE to DLRbAllocInfo RETX lst.
19007 * Function: rgSCHCmnDlRbInfoAddUeRetx
19008 * Purpose: This function adds the UE to DLRbAllocInfo RETX lst.
19010 * Invoked by: Common Scheduler
19012 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19013 * @param[in] RgSchUeCb *ue
19014 * @param[in] RgSchDlHqProcCb *hqP
/* Purpose: append a HARQ process to the DL RB-allocation RETX list.
 * Same pattern as the TX variant, but targets dedAlloc.retxHqPLst and
 * (per the original comment) skips the duplicate-presence check.
 * NOTE(review): elided listing; verify against the full source. */
19019 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetx
19022 RgSchCmnDlRbAllocInfo *allocInfo,
19024 RgSchDlHqProcCb *hqP
19027 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetx(cell, allocInfo, ue, hqP)
19029 RgSchCmnDlRbAllocInfo *allocInfo;
19031 RgSchDlHqProcCb *hqP;
/* NOTE(review): cell scheduler fetched via ue->cell here, unlike the TX
 * variant which uses the cell parameter directly. */
19034 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
19037 if (cellSch->dl.isDlFreqSel)
19039 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
19040 &allocInfo->dedAlloc.retxHqPLst, hqP);
19044 /* checking UE's presence in this lst is unnecessary */
19045 cmLListAdd2Tail(&allocInfo->dedAlloc.retxHqPLst, &hqP->reqLnk);
19046 hqP->reqLnk.node = (PTR)hqP;
19052 * @brief This function adds the UE to DLRbAllocInfo TX-RETX lst.
19056 * Function: rgSCHCmnDlRbInfoAddUeRetxTx
19057 * Purpose: This adds the UE to DLRbAllocInfo TX-RETX lst.
19059 * Invoked by: Common Scheduler
19061 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19062 * @param[in] RgSchUeCb *ue
19063 * @param[in] RgSchDlHqProcCb *hqP
/* Purpose: append a HARQ process to the DL RB-allocation TX-RETX list
 * (dedAlloc.txRetxHqPLst), via the DLFS API when frequency-selective
 * scheduling is on, else by direct linked-list append.
 * NOTE(review): elided listing; the K&R parameter list shown omits 'cell'
 * although the body uses it - confirm against the full source. */
19068 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetxTx
19071 RgSchCmnDlRbAllocInfo *allocInfo,
19073 RgSchDlHqProcCb *hqP
19076 PRIVATE Void rgSCHCmnDlRbInfoAddUeRetxTx(allocInfo, ue, hqP)
19078 RgSchCmnDlRbAllocInfo *allocInfo;
19080 RgSchDlHqProcCb *hqP;
19083 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(ue->cell);
19086 if (cellSch->dl.isDlFreqSel)
19088 cellSch->apisDlfs->rgSCHDlfsAddUeToLst(cell,
19089 &allocInfo->dedAlloc.txRetxHqPLst, hqP);
19093 cmLListAdd2Tail(&allocInfo->dedAlloc.txRetxHqPLst, &hqP->reqLnk);
19094 hqP->reqLnk.node = (PTR)hqP;
19100 * @brief This function adds the UE to DLRbAllocInfo NonSchdRetxLst.
19104 * Function: rgSCHCmnDlAdd2NonSchdRetxLst
19105 * Purpose: During RB estimation for RETX, if allocation fails
19106 * then appending it to NonSchdRetxLst, the further
19107 * action is taken as part of Finalization in
19108 * respective schedulers.
19110 * Invoked by: Common Scheduler
19112 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19113 * @param[in] RgSchUeCb *ue
19114 * @param[in] RgSchDlHqProcCb *hqP
/* Purpose: when RB estimation for a retransmission fails, queue the HARQ
 * process on dedAlloc.nonSchdRetxHqPLst so the specific scheduler can
 * handle it during finalization. SPS HARQ processes are filtered out
 * (the elided branch presumably returns early - confirm in full source).
 * NOTE(review): elided listing; verify against the full source. */
19119 PRIVATE Void rgSCHCmnDlAdd2NonSchdRetxLst
19121 RgSchCmnDlRbAllocInfo *allocInfo,
19123 RgSchDlHqProcCb *hqP
19126 PRIVATE Void rgSCHCmnDlAdd2NonSchdRetxLst(allocInfo, ue, hqP)
19127 RgSchCmnDlRbAllocInfo *allocInfo;
19129 RgSchDlHqProcCb *hqP;
19132 CmLList *schdLnkNode;
/* Skip SPS downlink HARQ processes. */
19136 if ( (hqP->sch != (RgSchCmnDlHqProc *)NULLP) &&
19137 (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP)))
/* Initialize the schedule link and append to the non-scheduled RETX list. */
19143 schdLnkNode = &hqP->schdLstLnk;
19144 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
19145 cmLListAdd2Tail(&allocInfo->dedAlloc.nonSchdRetxHqPLst, schdLnkNode);
19153 * @brief This function adds the UE to DLRbAllocInfo NonSchdTxRetxLst.
19157 * Function: rgSCHCmnDlAdd2NonSchdTxRetxLst
19158 * Purpose: During RB estimation for TXRETX, if allocation fails
19159 * then appending it to NonSchdTxRetxLst, the further
19160 * action is taken as part of Finalization in
19161 * respective schedulers.
19163 * Invoked by: Common Scheduler
19165 * @param[out] RgSchCmnDlRbAllocInfo *allocInfo
19166 * @param[in] RgSchUeCb *ue
19167 * @param[in] RgSchDlHqProcCb *hqP
19173 * @brief This function handles the initialisation of DL HARQ/ACK feedback
19174 * timing information for each DL subframe.
19178 * Function: rgSCHCmnDlANFdbkInit
19179 * Purpose: Each DL subframe stores the sfn and subframe
19180 * information of UL subframe in which it expects
19181 * HARQ ACK/NACK feedback for this subframe.It
19182 * generates the information based on Downlink
19183 * Association Set Index table.
19185 * Invoked by: Scheduler
19187 * @param[in] RgSchCellCb* cell
/* Purpose: for each DL subframe of a TDD cell, precompute where its HARQ
 * ACK/NACK feedback arrives (UL subframe number, SFN offset, and bundling
 * index m), using the Downlink Association Set Index table for the cell's
 * UL/DL configuration. The first radio frame is computed explicitly; later
 * frames copy from the corresponding subframe of the previous pattern.
 * NOTE(review): elided listing (non-contiguous original line numbers);
 * loop braces and some declarations are missing from this view. */
19192 PRIVATE S16 rgSCHCmnDlANFdbkInit
19197 PRIVATE S16 rgSCHCmnDlANFdbkInit(cell)
19202 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
19203 U8 maxDlSubfrms = cell->numDlSubfrms;
19210 RgSchTddSubfrmInfo ulSubfrmInfo;
19214 ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19215 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19217 /* Generate HARQ ACK/NACK feedback information for each DL sf in a radio frame
19218 * Calculate this information based on DL Association set Index table */
19219 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance sfNum to the next UL subframe in the UL/DL pattern. */
19221 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19222 RG_SCH_TDD_UL_SUBFRAME)
19224 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Walk every k in the association set of this UL subframe. */
19228 for(idx=0; idx < rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
19229 numFdbkSubfrms; idx++)
19231 calcSfNum = sfNum - rgSchTddDlAscSetIdxKTbl[ulDlCfgIdx][sfNum].\
19235 calcSfnOffset = RGSCH_CEIL(-calcSfNum, RGSCH_NUM_SUB_FRAMES);
/* Wrap the (possibly negative) subframe number into [0, 10). */
19242 calcSfNum = ((RGSCH_NUM_SUB_FRAMES * calcSfnOffset) + calcSfNum)\
19243 % RGSCH_NUM_SUB_FRAMES;
/* Map the absolute subframe number to the DL-subframe array index,
 * accounting for UL subframes in one or both half-frames. */
19245 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19249 else if((ulSubfrmInfo.switchPoints == 2) && (calcSfNum <= \
19250 RG_SCH_CMN_SPL_SUBFRM_6))
19252 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19256 dlIdx = calcSfNum - maxUlSubfrms;
19259 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = sfNum;
19260 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = calcSfnOffset;
19261 cell->subFrms[dlIdx]->dlFdbkInfo.m = idx;
19263 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19266 /* DL subframes in the subsequent radio frames are initialized
19267 * with the previous radio frames */
19268 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;\
19271 sfNum = dlIdx - rgSchTddNumDlSubfrmTbl[ulDlCfgIdx]\
19272 [RGSCH_NUM_SUB_FRAMES-1];
19273 cell->subFrms[dlIdx]->dlFdbkInfo.subframe = \
19274 cell->subFrms[sfNum]->dlFdbkInfo.subframe;
19275 cell->subFrms[dlIdx]->dlFdbkInfo.sfnOffset = \
19276 cell->subFrms[sfNum]->dlFdbkInfo.sfnOffset;
19277 cell->subFrms[dlIdx]->dlFdbkInfo.m = cell->subFrms[sfNum]->dlFdbkInfo.m;
19283 * @brief This function handles the initialization of uplink association
19284 * set information for each DL subframe.
19289 * Function: rgSCHCmnDlKdashUlAscInit
19290 * Purpose: Each DL sf stores the sfn and sf information of UL sf
19291 * in which it expects HQ ACK/NACK trans. It generates the information
19292 * based on k` in UL association set index table.
19294 * Invoked by: Scheduler
19296 * @param[in] RgSchCellCb* cell
/* Purpose: initialize per-DL-subframe UL association info (ulAscInfo):
 * for each UL subframe, compute via k' (UL Association Set Index table)
 * the DL subframe/SFN offset where HARQ ACK/NACK transmission occurs.
 * DL indices not covered get RGSCH_INVALID_INFO; subsequent radio frames
 * copy from the first pattern. The dlPres bitmask tracks which DL indices
 * were populated.
 * NOTE(review): elided listing; ulDlCfgIdx-1 indexing into
 * rgSchTddUlAscIdxKDashTbl suggests config 0 never reaches here - confirm. */
19301 PRIVATE S16 rgSCHCmnDlKdashUlAscInit
19306 PRIVATE S16 rgSCHCmnDlKdashUlAscInit(cell)
19311 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
19312 U8 maxDlSubfrms = cell->numDlSubfrms;
19318 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19319 U8 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19320 [RGSCH_NUM_SUB_FRAMES-1];
19324 /* Generate ACK/NACK offset information for each DL subframe in a radio frame
19325 * Calculate this information based on K` in UL Association Set table */
19326 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance sfNum to the next UL subframe. */
19328 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19329 RG_SCH_TDD_UL_SUBFRAME)
19331 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* Subtract k' and wrap into the radio frame; track SFN rollover. */
19335 calcSfNum = (sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum] + \
19336 RGSCH_NUM_SUB_FRAMES) % RGSCH_NUM_SUB_FRAMES;
19337 calcSfnOffset = sfNum - rgSchTddUlAscIdxKDashTbl[ulDlCfgIdx-1][sfNum];
19338 if(calcSfnOffset < 0)
19340 calcSfnOffset = RGSCH_CEIL(-calcSfnOffset, RGSCH_NUM_SUB_FRAMES);
/* Map the subframe number onto the DL-subframe array index. */
19347 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19351 else if((ulSubfrmInfo.switchPoints == 2) &&
19352 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
19354 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19358 dlIdx = calcSfNum - maxUlSubfrms;
19361 cell->subFrms[dlIdx]->ulAscInfo.subframe = sfNum;
19362 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset = calcSfnOffset;
19364 /* set dlIdx for which ulAscInfo is updated */
19365 dlPres = dlPres | (1 << dlIdx);
19366 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19369 /* Set Invalid information for which ulAscInfo is not present */
19371 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19374 /* If dlPres is 0, ulAscInfo is not present in that DL index */
19375 if(! ((dlPres >> sfCount)&0x01))
19377 cell->subFrms[sfCount]->ulAscInfo.sfnOffset =
19378 RGSCH_INVALID_INFO;
19379 cell->subFrms[sfCount]->ulAscInfo.subframe =
19380 RGSCH_INVALID_INFO;
19384 /* DL subframes in the subsequent radio frames are initialized
19385 * with the previous radio frames */
19386 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms; dlIdx < maxDlSubfrms;
19390 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19391 cell->subFrms[dlIdx]->ulAscInfo.subframe =
19392 cell->subFrms[sfNum]->ulAscInfo.subframe;
19393 cell->subFrms[dlIdx]->ulAscInfo.sfnOffset =
19394 cell->subFrms[sfNum]->ulAscInfo.sfnOffset;
19401 * @brief This function initialises the 'Np' value for 'p'
19405 * Function: rgSCHCmnDlNpValInit
19406 * Purpose: To initialise the 'Np' value for each 'p'. It is used
19407 * to find the mapping between nCCE and 'p' and used in
19408 * HARQ ACK/NACK reception.
19410 * Invoked by: Scheduler
19412 * @param[in] RgSchCellCb* cell
/* Purpose: precompute the 'Np' table used to map nCCE to 'p' for TDD
 * HARQ ACK/NACK reception: Np(0)=0 and Np(p) = floor(dlTotalBw *
 * (p*NUM_SUBCAR - 4) / 36) for p >= 1.
 * NOTE(review): elided listing; verify against the full source. */
19417 PRIVATE S16 rgSCHCmnDlNpValInit
19422 PRIVATE S16 rgSCHCmnDlNpValInit(cell)
19429 /* Always Np is 0 for p=0 */
19430 cell->rgSchTddNpValTbl[0] = 0;
19432 for(idx=1; idx < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; idx++)
19434 np = cell->bwCfg.dlTotalBw * (idx * RG_SCH_CMN_NUM_SUBCAR - 4);
19435 cell->rgSchTddNpValTbl[idx] = (U8) (np/36);
19442 * @brief This function handles the creation of RACH preamble
19443 * list to queue the preambles and process at the scheduled
19448 * Function: rgSCHCmnDlCreateRachPrmLst
19449 * Purpose: To create RACH preamble list based on RA window size.
19450 * It is used to queue the preambles and process it at the
19453 * Invoked by: Scheduler
19455 * @param[in] RgSchCellCb* cell
/* Purpose: allocate the cell's RACH preamble request list, sized from the
 * RA-response window (raArrSz) x max RA-RNTIs per subframe x subframes.
 * Preambles are queued here and processed at the scheduled time.
 * NOTE(review): elided listing; the failure branch after the allocation
 * is not visible - confirm error handling in the full source. */
19460 PRIVATE S16 rgSCHCmnDlCreateRachPrmLst
19465 PRIVATE S16 rgSCHCmnDlCreateRachPrmLst(cell)
19474 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
19476 lstSize = raArrSz * RGSCH_MAX_RA_RNTI_PER_SUBFRM * RGSCH_NUM_SUB_FRAMES;
19478 cell->raInfo.maxRaSize = raArrSz;
/* One CmLListCp per (sfn-slot, ra-rnti, subframe) combination. */
19479 ret = rgSCHUtlAllocSBuf(cell->instIdx,
19480 (Data **)(&cell->raInfo.raReqLst), (Size)(lstSize * sizeof(CmLListCp)));
19486 cell->raInfo.lstSize = lstSize;
19493 * @brief This function handles the initialization of RACH Response
19494 * information at each DL subframe.
19498 * Function: rgSCHCmnDlRachInfoInit
19499 * Purpose: Each DL subframe stores the sfn and subframe information of
19500 * possible RACH response allowed for UL subframes. It generates
19501 * the information based on PRACH configuration.
19503 * Invoked by: Scheduler
19505 * @param[in] RgSchCellCb* cell
/* Purpose: build, per UL (and special) subframe, the set of DL subframes
 * within the RA response window where a RACH response may be transmitted
 * (rachRspLst), plus per-subframe deletion info (delInfo) marking when a
 * pending RACH request expires. The temporary table is finally copied into
 * the cell via rgSCHCmnDlCpyRachInfo.
 * NOTE(review): heavily elided listing (non-contiguous original line
 * numbers); window-boundary arithmetic below should be verified against
 * the full source before any change. */
19510 PRIVATE S16 rgSCHCmnDlRachInfoInit
19515 PRIVATE S16 rgSCHCmnDlRachInfoInit(cell)
19520 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
19523 U8 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19524 [RGSCH_NUM_SUB_FRAMES-1];
/* Scratch table: [sfn within RA window][subframe]. */
19526 RgSchTddRachRspLst rachRspLst[3][RGSCH_NUM_SUB_FRAMES];
19534 RgSchTddRachDelInfo *delInfo;
19539 memset(rachRspLst, 0, sizeof(rachRspLst));
19541 RG_SCH_CMN_CALC_RARSPLST_SIZE(cell, raArrSz);
19543 /* Include Special subframes */
19544 maxUlSubfrms = maxUlSubfrms + \
19545 rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx].switchPoints;
19546 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Skip pure DL subframes; process UL and special subframes. */
19548 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] ==
19549 RG_SCH_TDD_DL_SUBFRAME)
19551 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* RA window: starts after the RAR wait period + numRaSubFrms, spans
 * raWinSize subframes. */
19555 startWin = (sfNum + RG_SCH_CMN_RARSP_WAIT_PRD + \
19556 ((RgSchCmnCell *)cell->sc.sch)->dl.numRaSubFrms);
19557 endWin = (startWin + cell->rachCfg.raWinSize - 1);
19559 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][startWin%RGSCH_NUM_SUB_FRAMES];
19560 /* Find the next DL subframe starting from Subframe 0 */
19561 if((startSubfrmIdx % RGSCH_NUM_SUB_FRAMES) == 0)
19563 startWin = RGSCH_CEIL(startWin, RGSCH_NUM_SUB_FRAMES);
19564 startWin = startWin * RGSCH_NUM_SUB_FRAMES;
19568 rgSchTddLowDlSubfrmIdxTbl[ulDlCfgIdx][endWin%RGSCH_NUM_SUB_FRAMES];
19569 endWin = (endWin/RGSCH_NUM_SUB_FRAMES) * RGSCH_NUM_SUB_FRAMES \
/* Degenerate window (no DL subframe inside it): skip. */
19571 if(startWin > endWin)
19575 /* Find all the possible RACH Response transmission
19576 * time within the RA window size */
19577 startSubfrmIdx = startWin%RGSCH_NUM_SUB_FRAMES;
19578 for(sfnIdx = startWin/RGSCH_NUM_SUB_FRAMES;
19579 sfnIdx <= endWin/RGSCH_NUM_SUB_FRAMES; sfnIdx++)
19581 if(sfnIdx == endWin/RGSCH_NUM_SUB_FRAMES)
19583 endSubfrmIdx = endWin%RGSCH_NUM_SUB_FRAMES;
19587 endSubfrmIdx = RGSCH_NUM_SUB_FRAMES-1;
19590 /* Find all the possible RACH Response transmission
19591 * time within radio frame */
19592 for(subfrmIdx = startSubfrmIdx;
19593 subfrmIdx <= endSubfrmIdx; subfrmIdx++)
19595 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][subfrmIdx] ==
19596 RG_SCH_TDD_UL_SUBFRAME)
/* Jump from a UL subframe to the next DL subframe. */
19600 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
19601 /* Find the next DL subframe starting from Subframe 0 */
19602 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
19606 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx], subfrmIdx);
/* Record this DL subframe as a RACH-response opportunity for sfNum. */
19608 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
19609 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset = sfnIdx;
19610 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].subframe[numSubfrms]
19612 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms++;
19614 startSubfrmIdx = RG_SCH_CMN_SUBFRM_0;
19616 /* Update the subframes to be deleted at this subframe */
19617 /* Get the subframe after the end of RA window size */
19620 sfnOffset = endWin/RGSCH_NUM_SUB_FRAMES;
19623 sfnOffset += raArrSz;
19625 sfnIdx = (endWin/RGSCH_NUM_SUB_FRAMES) % raArrSz;
19627 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx],endSubfrmIdx-1);
19628 if((endSubfrmIdx == RGSCH_NUM_SUB_FRAMES) ||
19629 (rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx] ==
19630 RGSCH_NUM_SUB_FRAMES))
19633 rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][RG_SCH_CMN_SUBFRM_0];
19637 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][endSubfrmIdx];
/* Mark sfNum for deletion once its RA window has fully elapsed. */
19640 delInfo = &rachRspLst[sfnIdx][subfrmIdx].delInfo;
19641 delInfo->sfnOffset = sfnOffset;
19642 delInfo->subframe[delInfo->numSubfrms] = sfNum;
19643 delInfo->numSubfrms++;
19645 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19648 ret = rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz);
19658 * @brief This function handles the initialization of PHICH information
19659 * for each DL subframe based on PHICH table.
19663 * Function: rgSCHCmnDlPhichOffsetInit
19664 * Purpose: Each DL subf stores the sfn and subf information of UL subframe
19665 * for which it transmits PHICH in this subframe. It generates the information
19666 * based on PHICH table.
19668 * Invoked by: Scheduler
19670 * @param[in] RgSchCellCb* cell
/* Purpose: for each DL subframe, precompute which UL subframe's PHICH it
 * carries (subframe, sfnOffset, numSubfrms), from the TDD K-PHICH table.
 * Uncovered DL indices are marked RGSCH_INVALID_INFO; subsequent radio
 * frames copy the first frame's pattern. dlPres is the populated bitmask.
 * NOTE(review): elided listing (non-contiguous original line numbers);
 * verify loop bodies against the full source before editing. */
19675 PRIVATE S16 rgSCHCmnDlPhichOffsetInit
19680 PRIVATE S16 rgSCHCmnDlPhichOffsetInit(cell)
19685 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
19686 U8 maxDlSubfrms = cell->numDlSubfrms;
19693 RgSchTddSubfrmInfo ulSubfrmInfo = rgSchTddMaxUlSubfrmTbl[ulDlCfgIdx];
19694 U8 maxUlSubfrms = rgSchTddNumUlSubfrmTbl[ulDlCfgIdx]\
19695 [RGSCH_NUM_SUB_FRAMES-1];
19698 /* Generate PHICH offset information for each DL subframe in a radio frame
19699 * Calculate this information based on K in PHICH table */
19700 for (sfCount = 0, sfNum = 0; sfCount < maxUlSubfrms; sfCount++)
/* Advance sfNum to the next UL subframe. */
19702 while(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][sfNum] !=
19703 RG_SCH_TDD_UL_SUBFRAME)
19705 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
/* PHICH is sent k subframes after the PUSCH subframe. */
19709 calcSfNum = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) % \
19710 RGSCH_NUM_SUB_FRAMES;
19711 calcSfnOffset = (rgSchTddKPhichTbl[ulDlCfgIdx][sfNum] + sfNum) / \
19712 RGSCH_NUM_SUB_FRAMES;
/* Map the subframe number onto the DL-subframe array index. */
19714 if(calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_1)
19718 else if((ulSubfrmInfo.switchPoints == 2) &&
19719 (calcSfNum <= RG_SCH_CMN_SPL_SUBFRM_6))
19721 dlIdx = calcSfNum - ulSubfrmInfo.numFrmHf1;
19725 dlIdx = calcSfNum - maxUlSubfrms;
19728 cell->subFrms[dlIdx]->phichOffInfo.subframe = sfNum;
19729 cell->subFrms[dlIdx]->phichOffInfo.numSubfrms = 1;
19731 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset = calcSfnOffset;
19733 /* set dlIdx for which phich offset is updated */
19734 dlPres = dlPres | (1 << dlIdx);
19735 sfNum = (sfNum+1) % RGSCH_NUM_SUB_FRAMES;
19738 /* Set Invalid information for which phich offset is not present */
19740 sfCount < rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19743 /* If dlPres is 0, phich offset is not present in that DL index */
19744 if(! ((dlPres >> sfCount)&0x01))
19746 cell->subFrms[sfCount]->phichOffInfo.sfnOffset =
19747 RGSCH_INVALID_INFO;
19748 cell->subFrms[sfCount]->phichOffInfo.subframe =
19749 RGSCH_INVALID_INFO;
19750 cell->subFrms[sfCount]->phichOffInfo.numSubfrms = 0;
19754 /* DL subframes in the subsequent radio frames are
19755 * initialized with the previous radio frames */
19756 for(dlIdx = RGSCH_NUM_SUB_FRAMES - maxUlSubfrms;
19757 dlIdx < maxDlSubfrms; dlIdx++)
19760 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1];
19762 cell->subFrms[dlIdx]->phichOffInfo.subframe =
19763 cell->subFrms[sfNum]->phichOffInfo.subframe;
19765 cell->subFrms[dlIdx]->phichOffInfo.sfnOffset =
19766 cell->subFrms[sfNum]->phichOffInfo.sfnOffset;
19773 * @brief Update of scheduler variables per TTI.
19777 * Function: rgSCHCmnUpdVars
19778 * Purpose: Update of scheduler variables per TTI.
19780 * @param[in] RgSchCellCb *cell
/* Purpose: per-TTI refresh of the common UL scheduler's TDD indices:
 * the UL subframe/HARQ-process to schedule (via the PUSCH k table), the
 * PHICH feedback and retransmission indices (via the PHICH M table, with
 * a special path for UL/DL config 0), the reception-request index, the
 * msg3 scheduling index, and the SPS UL reservation index. All indices
 * are first reset by rgSCHCmnInitVars, then derived from crntTime plus
 * the relevant DL delta.
 * NOTE(review): elided listing (non-contiguous original line numbers);
 * several closing braces and declarations are outside this view. */
19785 Void rgSCHCmnUpdVars
19790 Void rgSCHCmnUpdVars(cell)
19794 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
19795 CmLteTimingInfo timeInfo;
19798 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
19802 /* ccpu00132654-ADD- Initializing all the indices in every subframe*/
19803 rgSCHCmnInitVars(cell);
/* DL subframe in which UL control (DCI-0) goes out. */
19805 idx = (cell->crntTime.slot + TFU_ULCNTRL_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
19806 /* Calculate the UL scheduling subframe idx based on the
19808 if(rgSchTddPuschTxKTbl[ulDlCfgIdx][idx] != 0)
19810 /* PUSCH transmission is based on offset from DL
19811 * PDCCH scheduling */
19812 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
19813 ulSubframe = rgSchTddPuschTxKTbl[ulDlCfgIdx][timeInfo.subframe];
19814 /* Add the DCI-0 to PUSCH time to get the time of UL subframe */
19815 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, ulSubframe);
19817 cellUl->schdTti = timeInfo.sfn * 10 + timeInfo.subframe;
19819 /* Fetch the corresponding UL subframe Idx in UL sf array */
19820 cellUl->schdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19821 /* Fetch the corresponding UL Harq Proc ID */
19822 cellUl->schdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
19823 cellUl->schdTime = timeInfo;
/* M value > 0 means PHICH (HI/DCI-0) is carried in this DL subframe. */
19825 Mval = rgSchTddPhichMValTbl[ulDlCfgIdx][idx];
19828 /* Fetch the tx time for DL HIDCI-0 */
19829 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo, TFU_ULCNTRL_DLDELTA);
19830 /* Fetch the corresponding n-k tx time of PUSCH */
19831 cellUl->hqFdbkIdx[0] = rgSCHCmnGetPhichUlSfIdx(&timeInfo, cell);
19832 /* Retx will happen according to the Pusch k table */
19833 cellUl->reTxIdx[0] = cellUl->schdIdx;
/* UL/DL config 0 needs a special retransmission-index derivation. */
19835 if(ulDlCfgIdx == 0)
19837 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[0] */
19838 cellUl->reTxIdx[0] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
19839 cellUl->hqFdbkIdx[0]);
19842 /* At Idx 1 store the UL SF adjacent(left) to the UL SF
19844 cellUl->hqFdbkIdx[1] = (cellUl->hqFdbkIdx[0]-1 +
19845 cellUl->numUlSubfrms) % cellUl->numUlSubfrms;
19846 /* Calculate the ReTxIdx corresponding to hqFdbkIdx[1] */
19847 cellUl->reTxIdx[1] = rgSchUtlCfg0ReTxIdx(cell,timeInfo,
19848 cellUl->hqFdbkIdx[1]);
/* Reception-request index for the upcoming UL subframe. */
19853 idx = (cell->crntTime.slot + TFU_RECPREQ_DLDELTA) % RGSCH_NUM_SUB_FRAMES;
19854 if (rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] == RG_SCH_TDD_UL_SUBFRAME)
19856 RGSCHCMNADDTOCRNTTIME(cell->crntTime, timeInfo, TFU_RECPREQ_DLDELTA)
19857 cellUl->rcpReqIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19859 idx = (cell->crntTime.slot+RG_SCH_CMN_DL_DELTA) % RGSCH_NUM_SUB_FRAMES;
19861 /*[ccpu00134666]-MOD-Modify the check to schedule the RAR in
19862 special subframe */
19863 if(rgSchTddUlDlSubfrmTbl[ulDlCfgIdx][idx] != RG_SCH_TDD_UL_SUBFRAME)
19865 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
19866 msg3Subfrm = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
19867 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, msg3Subfrm);
19868 cellUl->msg3SchdIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19869 cellUl->msg3SchdHqProcIdx = rgSCHCmnGetUlHqProcIdx(&timeInfo, cell);
/* SPS UL reservation index (invalid when the table entry is 0). */
19872 if(!rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][idx])
19874 cellUl->spsUlRsrvIdx = RGSCH_INVALID_INFO;
19878 /* introduce some reuse with above code? */
19880 RGSCHCMNADDTOCRNTTIME(cell->crntTime,timeInfo,RG_SCH_CMN_DL_DELTA)
19881 //offst = rgSchTddMsg3SubfrmTbl[ulDlCfgIdx][timeInfo.subframe];
19882 offst = rgSchTddSpsUlRsrvTbl[ulDlCfgIdx][timeInfo.subframe];
19883 RGSCHCMNADDTOCRNTTIME(timeInfo, timeInfo, offst);
19884 cellUl->spsUlRsrvIdx = rgSCHCmnGetUlSfIdx(&timeInfo, cell);
19885 /* The harq proc continues to be accessed and used the same delta before
19886 * actual data occurrence, and hence use the same idx */
19887 cellUl->spsUlRsrvHqProcIdx = cellUl->schdHqProcIdx;
19891 /* RACHO: update cmn sched specific RACH variables,
19892 * mainly the prachMaskIndex */
19893 rgSCHCmnUpdRachParam(cell);
19899 * @brief To get 'p' value from nCCE.
19903 * Function: rgSCHCmnGetPValFrmCCE
19904 * Purpose: Gets 'p' value for HARQ ACK/NACK reception from CCE.
19906 * @param[in] RgSchCellCb *cell
19907 * @param[in] U8 cce
/* Purpose: map a CCE index to its 'p' value for HARQ ACK/NACK reception
 * by scanning the precomputed Np table (see rgSCHCmnDlNpValInit); returns
 * the first p whose Np exceeds the CCE.
 * NOTE(review): elided listing - the return statements are outside this
 * view; verify against the full source. */
19912 U8 rgSCHCmnGetPValFrmCCE
19918 U8 rgSCHCmnGetPValFrmCCE(cell, cce)
19925 for(i=1; i < RGSCH_TDD_MAX_P_PLUS_ONE_VAL; i++)
19927 if(cce < cell->rgSchTddNpValTbl[i])
19936 /***********************************************************
19938 * Func : rgSCHCmnUlAdapRetx
19940 * Desc : Adaptive retransmission for an allocation.
19948 **********************************************************/
/* Purpose: perform adaptive UL retransmission for an allocation: update
 * the UL HARQ process via rgSCHUhmRetx, then pick the current iMcs -
 * the RV-specific MCS when rvIdx is non-zero, else the original iMcs.
 * NOTE(review): elided listing; verify against the full source. */
19950 PRIVATE Void rgSCHCmnUlAdapRetx
19952 RgSchUlAlloc *alloc,
19953 RgSchUlHqProcCb *proc
19956 PRIVATE Void rgSCHCmnUlAdapRetx(alloc, proc)
19957 RgSchUlAlloc *alloc;
19958 RgSchUlHqProcCb *proc;
19962 rgSCHUhmRetx(proc, alloc);
/* Non-zero redundancy version: signal MCS via the RV-to-iMCS table. */
19964 if (proc->rvIdx != 0)
19966 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[proc->rvIdx];
19971 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
19977 * @brief Scheduler invocation per TTI.
19981 * Function: rgSCHCmnHdlUlInactUes
19984 * Invoked by: Common Scheduler
19986 * @param[in] RgSchCellCb *cell
/* Purpose: collect UEs made UL-inactive by measurement gaps / ACK-NACK
 * repetition and hand the list to the specific UL scheduler so it can
 * mark them inactive. */
19990 PRIVATE Void rgSCHCmnHdlUlInactUes
19995 PRIVATE Void rgSCHCmnHdlUlInactUes(cell)
19999 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20000 CmLListCp ulInactvLst;
20001 /* Get a List of Inactv UEs for UL*/
20002 cmLListInit(&ulInactvLst);
20004 /* Trigger Spfc Schedulers with Inactive UEs */
20005 rgSCHMeasGapANRepGetUlInactvUe (cell, &ulInactvLst);
20006 /* take care of this in UL retransmission */
20007 cellSch->apisUl->rgSCHUlInactvtUes(cell, &ulInactvLst);
20013 * @brief Scheduler invocation per TTI.
20017 * Function: rgSCHCmnHdlDlInactUes
20020 * Invoked by: Common Scheduler
20022 * @param[in] RgSchCellCb *cell
/* Purpose: DL counterpart of rgSCHCmnHdlUlInactUes - collect UEs made
 * DL-inactive by measurement gaps / ACK-NACK repetition and pass the
 * list to the specific DL scheduler. */
20026 PRIVATE Void rgSCHCmnHdlDlInactUes
20031 PRIVATE Void rgSCHCmnHdlDlInactUes(cell)
20035 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20036 CmLListCp dlInactvLst;
20037 /* Get a List of Inactv UEs for DL */
20038 cmLListInit(&dlInactvLst);
20040 /* Trigger Spfc Schedulers with Inactive UEs */
20041 rgSCHMeasGapANRepGetDlInactvUe (cell, &dlInactvLst);
20043 cellSch->apisDl->rgSCHDlInactvtUes(cell, &dlInactvLst);
20047 /* RACHO: Rach handover functions start here */
20048 /***********************************************************
20050 * Func : rgSCHCmnUeIdleExdThrsld
20052 * Desc : RETURN ROK if UE has been idle more
20061 **********************************************************/
/* Purpose: report whether the UE has been idle (no UL transmission)
 * longer than its idle threshold, by comparing the subframe distance
 * between current time and the last UL transmission time.
 * NOTE(review): elided listing - the ROK/RFAILED returns are outside
 * this view; verify against the full source. */
20063 PRIVATE S16 rgSCHCmnUeIdleExdThrsld
20069 PRIVATE S16 rgSCHCmnUeIdleExdThrsld(cell, ue)
20074 /* Time difference in subframes */
20075 U32 sfDiff = RGSCH_CALC_SF_DIFF(cell->crntTime, ue->ul.ulTransTime);
20078 if (sfDiff > (U32)RG_SCH_CMN_UE_IDLE_THRSLD(ue))
20090 * @brief Scheduler processing for Ded Preambles on cell configuration.
20094 * Function : rgSCHCmnCfgRachDedPrm
20096 * This function does requisite initialisation
20097 * for RACH Ded Preambles.
20100 * @param[in] RgSchCellCb *cell
/* Purpose: one-time scheduler setup for dedicated RACH preambles (RACH
 * handover / PDCCH order): copies the configured preamble set, initializes
 * the handover / PDCCH-order lists and rapId-to-UE map, and seeds
 * 'applFrm' (the next applicable PRACH occasion) so that it lies at least
 * RG_SCH_CMN_MIN_PRACH_OPPR_GAP subframes ahead of the current time,
 * honoring the SFN parity (EVEN/ODD/ANY) of the RACH configuration.
 * No-op when no preamble set is configured.
 * NOTE(review): elided listing (non-contiguous original line numbers);
 * several braces/else-arms are outside this view - verify before editing. */
20104 PRIVATE Void rgSCHCmnCfgRachDedPrm
20109 PRIVATE Void rgSCHCmnCfgRachDedPrm(cell)
20113 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20114 U32 gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
/* Nothing to do without a configured dedicated-preamble set. */
20118 if (cell->macPreambleSet.pres == NOTPRSNT)
20122 cellSch->rachCfg.numDedPrm = cell->macPreambleSet.size;
20123 cellSch->rachCfg.dedPrmStart = cell->macPreambleSet.start;
20124 /* Initialize handover List */
20125 cmLListInit(&cellSch->rachCfg.hoUeLst);
20126 /* Initialize pdcch Order List */
20127 cmLListInit(&cellSch->rachCfg.pdcchOdrLst);
20129 /* Initialize the rapId to UE mapping structure */
20130 for (cnt = 0; cnt<cellSch->rachCfg.numDedPrm; cnt++)
20132 cellSch->rachCfg.rapIdMap[cnt].rapId = cellSch->rachCfg.dedPrmStart + \
20134 cmLListInit(&cellSch->rachCfg.rapIdMap[cnt].assgndUes);
20136 /* Perform Prach Mask Idx, remDedPrm, applFrm initializations */
20137 /* Set remDedPrm as numDedPrm */
20138 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
20139 /* Initialize applFrm */
20140 cellSch->rachCfg.prachMskIndx = 0;
/* Choose the first SFN matching the configured parity. */
20141 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_EVEN)
20143 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + \
20144 (cell->crntTime.sfn % 2)) % RGSCH_MAX_SFN;
20147 else if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ODD)
20149 if((cell->crntTime.sfn%2) == 0)
20151 cellSch->rachCfg.applFrm.sfn = (cell->crntTime.sfn + 1)\
20158 cellSch->rachCfg.applFrm.sfn = cell->crntTime.sfn;
20160 /* Initialize cellSch->rachCfg.applFrm as >= crntTime.
20161 * This is because of RGSCH_CALC_SF_DIFF logic */
20162 if (cellSch->rachCfg.applFrm.sfn == cell->crntTime.sfn)
/* Find the first PRACH occasion in this frame not yet passed. */
20164 while (cellSch->rachCfg.prachMskIndx < cell->rachCfg.raOccasion.size)
20166 if (cell->crntTime.slot <\
20167 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx])
20171 cellSch->rachCfg.prachMskIndx++;
/* All occasions in this frame already passed: move to the next
 * frame respecting SFN parity (ANY -> +1, EVEN/ODD -> +2). */
20173 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size)
20175 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
20177 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) %\
20182 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) %\
20185 cellSch->rachCfg.prachMskIndx = 0;
20187 cellSch->rachCfg.applFrm.slot = \
20188 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20192 cellSch->rachCfg.applFrm.slot = \
20193 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20196 /* Note first param to this macro should always be the latest in time */
20197 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
/* Keep advancing applFrm until it is at least 'gap' subframes ahead. */
20198 while (sfDiff <= gap)
20200 rgSCHCmnUpdNxtPrchMskIdx(cell);
20201 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, cell->crntTime);
20208 * @brief Updates the PRACH MASK INDEX.
20212 * Function: rgSCHCmnUpdNxtPrchMskIdx
20213 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
20214 * CFG is always >= "n"+"DELTA", where "n" is the crntTime
20215 * of the cell. If not, applFrm is updated to the next avl
20216 * PRACH opportunity as per the PRACH Cfg Index configuration.
20219 * Invoked by: Common Scheduler
20221 * @param[in] RgSchCellCb *cell
/* Purpose: advance applFrm/prachMskIndx to the next PRACH occasion:
 * if the last occasion of the current SFN was used, wrap the mask index
 * to 0 and step the SFN (+1 for ANY parity, +2 for EVEN/ODD); otherwise
 * just move to the next configured occasion within the same SFN.
 * NOTE(review): elided listing; verify against the full source. */
20225 PRIVATE Void rgSCHCmnUpdNxtPrchMskIdx
20230 PRIVATE Void rgSCHCmnUpdNxtPrchMskIdx(cell)
20234 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20236 /* Determine the next prach mask Index */
20237 if (cellSch->rachCfg.prachMskIndx == cell->rachCfg.raOccasion.size - 1)
20239 /* PRACH within applFrm.sfn are done, go to next AVL sfn */
20240 cellSch->rachCfg.prachMskIndx = 0;
20241 if (cell->rachCfg.raOccasion.sfnEnum == RGR_SFN_ANY)
20243 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+1) % \
20246 else/* RGR_SFN_EVEN or RGR_SFN_ODD */
20248 cellSch->rachCfg.applFrm.sfn = (cellSch->rachCfg.applFrm.sfn+2) % \
20251 cellSch->rachCfg.applFrm.slot = cell->rachCfg.raOccasion.\
20254 else /* applFrm.sfn is still valid */
20256 cellSch->rachCfg.prachMskIndx += 1;
/* Bounds guard before indexing subFrameNum[]. */
20257 if ( cellSch->rachCfg.prachMskIndx < RGR_MAX_SUBFRAME_NUM )
20259 cellSch->rachCfg.applFrm.slot = \
20260 cell->rachCfg.raOccasion.subFrameNum[cellSch->rachCfg.prachMskIndx];
20267 * @brief Updates the Ded preamble RACH parameters
20272 * Function: rgSCHCmnUpdRachParam
20273 * Purpose: Ensures the "applFrm" field of Cmn Sched RACH
20274 * CFG is always >= "n"+"6"+"DELTA", where "n" is the crntTime
20275 * of the cell. If not, applFrm is updated to the next avl
20276 * PRACH opportunity as per the PRACH Cfg Index configuration,
20277 * accordingly the "remDedPrm" is reset to "numDedPrm" and
20278 * "prachMskIdx" field is updated as per "applFrm".
20281 * Invoked by: Common Scheduler
20283 * @param[in] RgSchCellCb *cell
20287 PRIVATE Void rgSCHCmnUpdRachParam
20292 PRIVATE Void rgSCHCmnUpdRachParam(cell)
/* Per-TTI upkeep of dedicated-preamble RACH state: keeps applFrm ahead of
 * crntTime by at least RG_SCH_CMN_MIN_PRACH_OPPR_GAP and replenishes the
 * dedicated-preamble pool when the opportunity rolls over. */
20297 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20298 U32 gap = RG_SCH_CMN_MIN_PRACH_OPPR_GAP;
/* No dedicated preamble set configured by MAC => nothing to maintain. */
20301 if (cell->macPreambleSet.pres == NOTPRSNT)
20305 sfDiff = RGSCH_CALC_SF_DIFF(cellSch->rachCfg.applFrm, \
20309 /* applFrm is still a valid next Prach Oppurtunity */
/* applFrm fell within the guard gap: advance to the next PRACH occasion. */
20312 rgSCHCmnUpdNxtPrchMskIdx(cell);
20313 /* Reset remDedPrm as numDedPrm */
20314 cellSch->rachCfg.remDedPrm = cellSch->rachCfg.numDedPrm;
20320 * @brief Dedicated Preamble allocation function.
20324 * Function: rgSCHCmnAllocPOParam
20325 * Purpose: Allocate pdcch, rapId and PrachMskIdx.
20326 * Set mapping of UE with the allocated rapId.
20328 * Invoked by: Common Scheduler
20330 * @param[in] RgSchCellCb *cell
20331 * @param[in] RgSchDlSf *dlSf
20332 * @param[in] RgSchUeCb *ue
20333 * @param[out] RgSchPdcch **pdcch
20334 * @param[out] U8 *rapId
20335 * @param[out] U8 *prachMskIdx
20339 PRIVATE S16 rgSCHCmnAllocPOParam
20344 RgSchPdcch **pdcch,
20349 PRIVATE S16 rgSCHCmnAllocPOParam(cell, dlSf, ue, pdcch, rapId, prachMskIdx)
20353 RgSchPdcch **pdcch;
/* Allocates the resources for one PDCCH Order: a PDCCH, a dedicated rapId
 * (when configured) and the PRACH mask index; also maps the UE to the
 * allocated rapId so the later RA-Req can be matched back to it.
 * Returns ROK on success (failure paths are on lines not visible here). */
20359 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20360 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20363 if (cell->macPreambleSet.pres == PRSNT_NODEF)
/* All dedicated preambles for this opportunity already handed out. */
20365 if (cellSch->rachCfg.remDedPrm == 0)
20369 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
20370 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
20374 /* The stored prachMskIdx is the index of PRACH Oppurtunities in
20375 * raOccasions.subframes[].
20376 * Converting the same to the actual PRACHMskIdx to be transmitted. */
20377 *prachMskIdx = cellSch->rachCfg.prachMskIndx + 1;
20378 /* Distribution starts from dedPrmStart till dedPrmStart + numDedPrm */
20379 *rapId = cellSch->rachCfg.dedPrmStart +
20380 cellSch->rachCfg.numDedPrm - cellSch->rachCfg.remDedPrm;
20381 cellSch->rachCfg.remDedPrm--;
20382 /* Map UE with the allocated RapId */
/* Remember which PRACH opportunity this rapId was assigned for, so stale
 * RA-Reqs from older opportunities can be discarded (see rgSCHCmnGetPoUe). */
20383 ueDl->rachInfo.asgnOppr = cellSch->rachCfg.applFrm;
20384 RGSCH_ARRAY_BOUND_CHECK_WITH_POS_IDX(cell->instIdx, cellSch->rachCfg.rapIdMap, (*rapId - cellSch->rachCfg.dedPrmStart));
20385 cmLListAdd2Tail(&cellSch->rachCfg.rapIdMap[*rapId - cellSch->rachCfg.dedPrmStart].assgndUes,
20386 &ueDl->rachInfo.rapIdLnk);
20387 ueDl->rachInfo.rapIdLnk.node = (PTR)ue;
20388 ueDl->rachInfo.poRapId = *rapId;
20390 else /* if dedicated preambles not configured */
20392 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
/* No dedicated preambles: only a PDCCH is needed; UE will use
 * contention-based RACH (rapId/prachMskIdx left as caller-initialised). */
20393 if ((*pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE)) == NULLP)
20405 * @brief Downlink Scheduling Handler.
20409 * Function: rgSCHCmnGenPdcchOrder
20410 * Purpose: For each UE in PO Q, grab a PDCCH,
20411 * get an available ded RapId and fill PDCCH
20412 * with PO information.
20414 * Invoked by: Common Scheduler
20416 * @param[in] RgSchCellCb *cell
20417 * @param[in] RgSchDlSf *dlSf
20421 PRIVATE Void rgSCHCmnGenPdcchOrder
20427 PRIVATE Void rgSCHCmnGenPdcchOrder(cell, dlSf)
/* Walks the PDCCH-Order queue: for each eligible UE, grabs a PDCCH plus a
 * dedicated rapId/prachMskIdx, fills the PDCCH Order into dlSf and removes
 * the UE from the queue. */
20432 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20433 CmLList *node = cellSch->rachCfg.pdcchOdrLst.first;
20437 RgSchPdcch *pdcch = NULLP;
20442 ue = (RgSchUeCb *)node->node;
20444 /* Skip sending for this subframe is Measuring or inActive in UL due
20445 * to MeasGap or inactie due to DRX
20447 if ((ue->measGapCb.isMeasuring == TRUE) ||
20448 (ue->ul.ulInactvMask & RG_MEASGAP_INACTIVE) ||
20449 (ue->isDrxEnabled &&
20450 ue->dl.dlInactvMask & RG_DRX_INACTIVE)
20455 if (rgSCHCmnAllocPOParam(cell, dlSf, ue, &pdcch, &rapId,\
20456 &prachMskIdx) != ROK)
20458 /* No More rapIds left for the valid next avl Oppurtunity.
20459 * Unsatisfied UEs here would be given a chance, when the
20460 * prach Mask Index changes as per rachUpd every TTI */
20462 /* PDDCH can also be ordered with rapId=0, prachMskIdx=0
20463 * so that UE triggers a RACH procedure with non-dedicated preamble.
20464 * But the implementation here does not do this. Instead, the "break"
20465 * here implies, that PDCCH Odr always given with valid rapId!=0,
20466 * prachMskIdx!=0 if dedicated preambles are configured.
20467 * If not configured, then trigger a PO with rapId=0,prchMskIdx=0*/
20470 /* Fill pdcch with pdcch odr information */
20471 rgSCHCmnFillPdcchOdr2Sf(cell, ue, pdcch, rapId, prachMskIdx);
20472 /* Remove this UE from the PDCCH ORDER QUEUE */
20473 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
20474 /* Reset UE's power state */
20475 rgSCHPwrUeReset(cell, ue);
20482 * @brief This function add UE to PdcchOdr Q if not already present.
20486 * Function: rgSCHCmnDlAdd2PdcchOdrQ
20489 * Invoked by: CMN Scheduler
20491 * @param[in] RgSchCellCb* cell
20492 * @param[in] RgSchUeCb* ue
20497 PRIVATE Void rgSCHCmnDlAdd2PdcchOdrQ
20503 PRIVATE Void rgSCHCmnDlAdd2PdcchOdrQ(cell, ue)
/* Appends the UE to the cell's PDCCH-Order queue, guarded against
 * double-insertion by the poLnk.node NULL/non-NULL convention. */
20508 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20509 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* poLnk.node == NULLP means the UE is not already queued. */
20512 if (ueDl->rachInfo.poLnk.node == NULLP)
20514 cmLListAdd2Tail(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
20515 ueDl->rachInfo.poLnk.node = (PTR)ue;
20522 * @brief This function removes a UE from the PDCCH Order Q if present.
20526 * Function: rgSCHCmnDlRmvFrmPdcchOdrQ
20529 * Invoked by: CMN Scheduler
20531 * @param[in] RgSchCellCb* cell
20532 * @param[in] RgSchUeCb* ue
20537 PRIVATE Void rgSCHCmnDlRmvFrmPdcchOdrQ
20543 PRIVATE Void rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue)
/* Unlinks the UE from the PDCCH-Order queue and clears poLnk.node so a
 * later rgSCHCmnDlAdd2PdcchOdrQ sees the UE as not queued. */
20548 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20549 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20552 cmLListDelFrm(&cellSch->rachCfg.pdcchOdrLst, &ueDl->rachInfo.poLnk);
20553 ueDl->rachInfo.poLnk.node = NULLP;
20558 * @brief Fill pdcch with PDCCH order information.
20562 * Function: rgSCHCmnFillPdcchOdr2Sf
20563 * Purpose: Fill PDCCH with PDCCH order information,
20565 * Invoked by: Common Scheduler
20567 * @param[in] RgSchUeCb *ue
20568 * @param[in] RgSchPdcch *pdcch
20569 * @param[in] U8 rapId
20570 * @param[in] U8 prachMskIdx
20574 PRIVATE Void rgSCHCmnFillPdcchOdr2Sf
20583 PRIVATE Void rgSCHCmnFillPdcchOdr2Sf(ue, pdcch, rapId, prachMskIdx)
/* Populates a pre-allocated PDCCH with DCI format 1A "PDCCH Order" content
 * (preamble index + PRACH mask index) addressed to the UE's C-RNTI.
 * NOTE(review): RG_SCH_CMN_GET_ACQICB references 'cell', which is not in the
 * visible K&R parameter list — presumably declared on a line not shown here;
 * confirm against the full file. */
20591 RgSchUeACqiCb *acqiCb = RG_SCH_CMN_GET_ACQICB(ue,cell);
20594 pdcch->rnti = ue->ueId;
20595 pdcch->dci.dciFormat = TFU_DCI_FORMAT_1A;
20596 pdcch->dci.u.format1aInfo.isPdcchOrder = TRUE;
20597 pdcch->dci.u.format1aInfo.t.pdcchOrder.preambleIdx = rapId;
20598 pdcch->dci.u.format1aInfo.t.pdcchOrder.prachMaskIdx = prachMskIdx;
20600 /* Request for APer CQI immediately after PDCCH Order */
20601 /* CR ccpu00144525 */
20603 if(ue->dl.ueDlCqiCfg.aprdCqiCfg.pres)
20605 ue->dl.reqForCqi = RG_SCH_APCQI_SERVING_CC;
20606 acqiCb->aCqiTrigWt = 0;
20615 * @brief UE deletion for scheduler.
20619 * Function : rgSCHCmnDelRachInfo
20621 * This functions deletes all scheduler information
20622 * pertaining to an UE.
20624 * @param[in] RgSchCellCb *cell
20625 * @param[in] RgSchUeCb *ue
20629 PRIVATE Void rgSCHCmnDelRachInfo
20635 PRIVATE Void rgSCHCmnDelRachInfo(cell, ue)
/* Removes every scheduler RACH association the UE may hold: PDCCH-Order
 * queue membership, handover-UE list membership and rapId mapping. Safe to
 * call unconditionally — each unlink is guarded by its link-node check. */
20640 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
20641 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Queued for a PDCCH Order? */
20645 if (ueDl->rachInfo.poLnk.node)
20647 rgSCHCmnDlRmvFrmPdcchOdrQ(cell, ue);
/* On the handover UE list? */
20649 if (ueDl->rachInfo.hoLnk.node)
20651 cmLListDelFrm(&cellSch->rachCfg.hoUeLst, &ueDl->rachInfo.hoLnk);
20652 ueDl->rachInfo.hoLnk.node = NULLP;
/* Mapped to a dedicated rapId? */
20654 if (ueDl->rachInfo.rapIdLnk.node)
20656 rapIdIdx = ueDl->rachInfo.poRapId - cellSch->rachCfg.dedPrmStart;
20657 cmLListDelFrm(&cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes,
20658 &ueDl->rachInfo.rapIdLnk);
20659 ueDl->rachInfo.rapIdLnk.node = NULLP;
20665 * @brief This function retrieves the ue which has sent this raReq
20666 * and it allocates grant for UEs undergoing (for which RAR
20667 * is being generated) HandOver/PdcchOrder.
20672 * Function: rgSCHCmnHdlHoPo
20673 * Purpose: This function retrieves the ue which has sent this raReq
20674 * and it allocates grant for UEs undergoing (for which RAR
20675 * is being generated) HandOver/PdcchOrder.
20677 * Invoked by: Common Scheduler
20679 * @param[in] RgSchCellCb *cell
20680 * @param[out] CmLListCp *raRspLst
20681 * @param[in] RgSchRaReqInfo *raReq
20686 PRIVATE Void rgSCHCmnHdlHoPo
20689 CmLListCp *raRspLst,
20690 RgSchRaReqInfo *raReq
20693 PRIVATE Void rgSCHCmnHdlHoPo(cell, raRspLst, raReq)
20695 CmLListCp *raRspLst;
20696 RgSchRaReqInfo *raReq;
/* RAR-generation entry for a dedicated-preamble RA request: notifies DRX of
 * the dedicated RA (if enabled) and allocates the HO/PDCCH-Order grant. */
20699 RgSchUeCb *ue = raReq->ue;
20701 if ( ue->isDrxEnabled )
20703 rgSCHDrxDedRa(cell,ue);
20705 rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq);
20710 * @brief This function retrieves the UE which has sent this raReq
20711 * for handover case.
20716 * Function: rgSCHCmnGetHoUe
20717 * Purpose: This function retrieves the UE which has sent this raReq
20718 * for handover case.
20720 * Invoked by: Common Scheduler
20722 * @param[in] RgSchCellCb *cell
20723 * @param[in] RgSchRaReqInfo *raReq
20724 * @return RgSchUeCb*
20728 RgSchUeCb* rgSCHCmnGetHoUe
20734 RgSchUeCb* rgSCHCmnGetHoUe(cell, rapId)
/* Scans the cell's handover UE list for the UE assigned the given dedicated
 * rapId (the return/no-match handling is on lines not visible here). */
20739 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20743 RgSchCmnDlUe *ueDl;
20745 ueLst = &cellSch->rachCfg.hoUeLst;
20746 node = ueLst->first;
20749 ue = (RgSchUeCb *)node->node;
20751 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Match on the rapId reserved for this UE's handover RACH. */
20752 if (ueDl->rachInfo.hoRapId == rapId)
20761 PRIVATE Void rgSCHCmnDelDedPreamble
/* Removes from the handover UE list the UE that was assigned the given
 * dedicated preamble (rapId), clearing its hoLnk back-pointer.
 * FIX: the K&R-style definition below was missing the 'Void' return type
 * (implicit int — invalid since C99 and inconsistent with the ANSI
 * prototype above); 'Void' added. */
20767 PRIVATE Void rgSCHCmnDelDedPreamble(cell, preambleId)
20772 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20776 RgSchCmnDlUe *ueDl;
20778 ueLst = &cellSch->rachCfg.hoUeLst;
20779 node = ueLst->first;
20782 ue = (RgSchUeCb *)node->node;
20784 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Match on the dedicated preamble reserved for this UE's handover. */
20785 if (ueDl->rachInfo.hoRapId == preambleId)
20787 cmLListDelFrm(ueLst, &ueDl->rachInfo.hoLnk);
20788 ueDl->rachInfo.hoLnk.node = (PTR)NULLP;
20794 * @brief This function retrieves the UE which has sent this raReq
20795 * for PDCCh Order case.
20800 * Function: rgSCHCmnGetPoUe
20801 * Purpose: This function retrieves the UE which has sent this raReq
20802 * for PDCCH Order case.
20804 * Invoked by: Common Scheduler
20806 * @param[in] RgSchCellCb *cell
20807 * @param[in] RgSchRaReqInfo *raReq
20808 * @return RgSchUeCb*
20812 RgSchUeCb* rgSCHCmnGetPoUe
20816 CmLteTimingInfo timingInfo
20819 RgSchUeCb* rgSCHCmnGetPoUe(cell, rapId, timingInfo)
20822 CmLteTimingInfo timingInfo;
/* Finds the UE that a PDCCH-Order RA request (rapId @ timingInfo) belongs
 * to. All UEs mapped to this rapId are unlinked while scanning; only the
 * one whose assigned opportunity matches timingInfo is the match. */
20825 RgSchCmnCell *cellSch = (RgSchCmnCell *)(cell->sc.sch);
20829 RgSchCmnDlUe *ueDl;
20832 rapIdIdx = rapId -cellSch->rachCfg.dedPrmStart;
20833 ueLst = &cellSch->rachCfg.rapIdMap[rapIdIdx].assgndUes;
20834 node = ueLst->first;
20837 ue = (RgSchUeCb *)node->node;
20839 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
20840 /* Remove UEs irrespective.
20841 * Old UE associations are removed.*/
20842 cmLListDelFrm(ueLst, &ueDl->rachInfo.rapIdLnk);
20843 ueDl->rachInfo.rapIdLnk.node = (PTR)NULLP;
/* Match only if the RA-Req arrived in the PRACH opportunity this rapId
 * was assigned for (stale associations fall through and stay removed). */
20844 if (RGSCH_TIMEINFO_SAME(ueDl->rachInfo.asgnOppr, timingInfo))
20855 * @brief This function returns the valid UL cqi for a given UE.
20859 * Function: rgSCHCmnUlGetCqi
20860 * Purpose: This function returns the "valid UL cqi" for a given UE
20861 * based on UE category
20863 * Invoked by: Scheduler
20865 * @param[in] RgSchUeCb *ue
20866 * @param[in] U8 ueCtgy
20870 U8 rgSCHCmnUlGetCqi
20874 CmLteUeCategory ueCtgy
20877 U8 rgSCHCmnUlGetCqi(cell, ue, ueCtgy)
20880 CmLteUeCategory ueCtgy;
/* Returns the UL CQI to use for this UE: the reported CQI, capped at
 * ueUl->maxUlCqi for every category except CAT-5 (which may exceed it).
 * NOTE(review): the two if-blocks below (validUlCqi vs crntUlCqi[0]) are
 * presumably alternative #ifdef branches on lines not visible here —
 * confirm against the full file. */
20883 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
/* Start from the cap; overwrite with the reported value unless the cap
 * applies (non-CAT-5 UE reporting above its max). */
20887 cqi = ueUl->maxUlCqi;
20889 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
20890 (ueUl->validUlCqi > ueUl->maxUlCqi)))
20892 cqi = ueUl->validUlCqi;
20895 if (!((ueCtgy != CM_LTE_UE_CAT_5) &&
20896 (ueUl->crntUlCqi[0] > ueUl->maxUlCqi )))
20898 cqi = ueUl->crntUlCqi[0];
20902 }/* End of rgSCHCmnUlGetCqi */
20904 /***********************************************************
20906 * Func : rgSCHCmnUlRbAllocForPoHoUe
20908 * Desc : Do uplink RB allocation for a HO/PO UE.
20912 * Notes: Note that as of now, for retx, maxRb
20913 * is not considered. Alternatives, such
20914 * as dropping retx if it crosses maxRb
20915 * could be considered.
20919 **********************************************************/
20921 PRIVATE S16 rgSCHCmnUlRbAllocForPoHoUe
20929 PRIVATE S16 rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, maxRb)
/* UL RB allocation for a HO/PDCCH-Order UE's msg3-style grant:
 * derives iTbs/iMcs from the UE's UL CQI (clamped to the msg3 iMcs cap),
 * sizes the PRB/subband request from reqBytes and spectral efficiency,
 * grabs subbands from sf, and fills the resulting grant + HARQ context. */
20936 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
20937 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
20938 U8 sbSize = cellUl->sbSize;
20939 U32 maxBits = ue->ul.maxBytesPerUePerTti*8;
20941 RgSchUlAlloc *alloc;
/* HARQ proc reserved for msg3 scheduling in this UL subframe. */
20951 RgSchUlHqProcCb *proc = &ueUl->hqEnt.hqProcCb[cellUl->msg3SchdHqProcIdx];
20952 CmLteUeCategory ueCtg = (CmLteUeCategory)(RG_SCH_CMN_GET_UE_CTGY(ue));
/* No free hole in the subframe => nothing can be allocated. */
20954 if ((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
20958 /*MS_WORKAROUND for HO ccpu00121116*/
20959 cqi = rgSCHCmnUlGetCqi(cell, ue, ueCtg);
20960 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchCmnUlCqiToTbsTbl[(U8)cell->isCpUlExtend], cqi);
20961 iTbs = rgSchCmnUlCqiToTbsTbl[(U8)cell->isCpUlExtend][cqi];
20962 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs,ueCtg);
/* Step the CQI down until the MCS fits within the msg3 MCS ceiling. */
20963 while(iMcs > RG_SCH_CMN_MAX_MSG3_IMCS)
20966 iTbs = rgSchCmnUlCqiToTbsTbl[(U8)cell->isCpUlExtend][cqi];
20967 iMcs = rgSCHCmnUlGetIMcsFrmITbs(iTbs, ueCtg);
20969 /* Filling the modorder in the grant structure*/
20970 RG_SCH_UL_MCS_TO_MODODR(iMcs,modOdr);
/* Spectral-efficiency table depends on normal vs extended UL CP. */
20971 if (!cell->isCpUlExtend)
20973 eff = rgSchCmnNorUlEff[0][iTbs];
20977 eff = rgSchCmnExtUlEff[0][iTbs];
20980 bits = ueUl->alloc.reqBytes * 8;
20982 #if (ERRCLASS & ERRCLS_DEBUG)
/* Request below the minimum TB for this iTbs: allocate the floor size. */
20989 if (bits < rgSCHCmnUlMinTbBitsForITbs(cellUl, iTbs))
20992 nPrb = numSb * sbSize;
/* Request above the per-UE-per-TTI cap: size from maxBits instead. */
20996 if (bits > maxBits)
20999 nPrb = bits * 1024 / eff / RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl);
21004 numSb = nPrb / sbSize;
21008 /*ccpu00128775:MOD-Change to get upper threshold nPrb*/
21009 nPrb = RGSCH_CEIL((RGSCH_CEIL(bits * 1024, eff)),
21010 RG_SCH_CMN_UL_NUM_RE_PER_RB(cellUl));
21015 numSb = RGSCH_DIV_ROUND(nPrb, sbSize);
/* Subband allocation, bounded by the per-UE subband limit. */
21020 alloc = rgSCHCmnUlSbAlloc(sf, (U8)RGSCH_MIN(numSb, cellUl->maxSbPerUe),\
21022 if (alloc == NULLP)
21024 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
21025 "rgSCHCmnUlRbAllocForPoHoUe(): Could not get UlAlloc");
21028 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
21030 /* Filling the modorder in the grant structure start*/
21031 alloc->grnt.modOdr = (TfuModScheme) modOdr;
21032 alloc->grnt.iMcs = iMcs;
21033 alloc->grnt.iMcsCrnt = iMcsCrnt;
21034 alloc->grnt.hop = 0;
21035 /* Fix for ccpu00123915*/
21036 alloc->forMsg3 = TRUE;
21037 alloc->hqProc = proc;
21038 alloc->hqProc->ulSfIdx = cellUl->msg3SchdIdx;
21040 alloc->rnti = ue->ueId;
21041 /* updating initNumRbs in case of HO */
21043 ue->initNumRbs = alloc->grnt.numRb;
21045 ueUl->alloc.alloc = alloc;
/* TB size (bytes) from the transport-block size table for the final iTbs. */
21046 iTbs = rgSCHCmnUlGetITbsFrmIMcs(iMcs);
21047 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[0], iTbs);
21048 alloc->grnt.datSz = rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1] / 8;
21049 /* MS_WORKAROUND for HO ccpu00121124*/
21050 /*[Adi temp change] Need to fil modOdr */
21051 RG_SCH_UL_MCS_TO_MODODR(alloc->grnt.iMcsCrnt,alloc->grnt.modOdr);
/* Start a new HARQ transmission for this grant. */
21052 rgSCHUhmNewTx(proc, ueUl->hqEnt.maxHqRetx, alloc);
21053 /* No grant attr recorded now */
21058 * @brief This function allocates grant for UEs undergoing (for which RAR
21059 * is being generated) HandOver/PdcchOrder.
21064 * Function: rgSCHCmnAllocPoHoGrnt
21065 * Purpose: This function allocates grant for UEs undergoing (for which RAR
21066 * is being generated) HandOver/PdcchOrder.
21068 * Invoked by: Common Scheduler
21070 * @param[in] RgSchCellCb *cell
21071 * @param[out] CmLListCp *raRspLst,
21072 * @param[in] RgSchUeCb *ue
21073 * @param[in] RgSchRaReqInfo *raReq
21078 PRIVATE Void rgSCHCmnAllocPoHoGrnt
21081 CmLListCp *raRspLst,
21083 RgSchRaReqInfo *raReq
21086 PRIVATE Void rgSCHCmnAllocPoHoGrnt(cell, raRspLst, ue, raReq)
21088 CmLListCp *raRspLst;
21090 RgSchRaReqInfo *raReq;
/* Builds the RAR grant for a UE doing handover or responding to a PDCCH
 * Order: clears old allocations, does the UL RB allocation, copies the
 * grant fields into ue->ul.rarGrnt and queues the UE on raRspLst. */
21093 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21094 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(ue,cell);
21096 RgSchUlSf *sf = &cellUl->ulSfArr[cellUl->msg3SchdIdx];
21099 /* Clearing previous allocs if any*/
21100 rgSCHCmnUlUeDelAllocs(cell, ue);
21101 /* Fix : syed allocs are limited */
/* UL subframe already at its allocation cap — cannot grant. */
21102 if (*sf->allocCountRef >= cellUl->maxAllocPerUlSf)
21106 ueUl->alloc.reqBytes = RG_SCH_MIN_GRNT_HOPO;
21107 if (rgSCHCmnUlRbAllocForPoHoUe(cell, sf, ue, RGSCH_MAX_UL_RB) != ROK)
21112 /* Fill grant information */
21113 grnt = &ueUl->alloc.alloc->grnt;
21118 RLOG_ARG1(L_ERROR,DBG_INSTID,cell->instIdx, "Failed to get"
21119 "the grant for HO/PDCCH Order. CRNTI:%d",ue->ueId);
/* Copy the allocated grant into the RAR grant the UE will receive. */
21122 ue->ul.rarGrnt.rapId = raReq->raReq.rapId;
21123 ue->ul.rarGrnt.hop = grnt->hop;
21124 ue->ul.rarGrnt.rbStart = grnt->rbStart;
21125 ue->ul.rarGrnt.numRb = grnt->numRb;
21126 ue->ul.rarGrnt.tpc = grnt->tpc;
21127 ue->ul.rarGrnt.iMcsCrnt = grnt->iMcsCrnt;
21128 ue->ul.rarGrnt.ta.pres = TRUE;
21129 ue->ul.rarGrnt.ta.val = raReq->raReq.ta;
21130 ue->ul.rarGrnt.datSz = grnt->datSz;
/* Piggyback an aperiodic CQI request if one is pending and the subframe
 * has not exhausted its per-SF aperiodic-CQI budget. */
21131 if((sf->numACqiCount < RG_SCH_MAX_ACQI_PER_ULSF) && (RG_SCH_APCQI_NO != ue->dl.reqForCqi))
21135 /* Send two bits cqireq field if more than one cells are configured else one*/
21136 for (idx = 1;idx < CM_LTE_MAX_CELLS;idx++)
21138 if (ue->cellInfo[idx] != NULLP)
21140 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
21144 if (idx == CM_LTE_MAX_CELLS)
21147 ue->ul.rarGrnt.cqiReqBit = ue->dl.reqForCqi;
21149 ue->dl.reqForCqi = RG_SCH_APCQI_NO;
21150 sf->numACqiCount++;
21154 ue->ul.rarGrnt.cqiReqBit = 0;
21156 /* Attach Ho/Po allocation to RAR Rsp cont free Lst */
21157 cmLListAdd2Tail(raRspLst, &ue->ul.rarGrnt.raRspLnk);
21158 ue->ul.rarGrnt.raRspLnk.node = (PTR)ue;
21164 * @brief This is a utility function to set the fields in
21165 * an UL harq proc which is identified for non-adaptive retx
21169 * Function: rgSCHCmnUlNonadapRetx
21170 * Purpose: Sets the fields in UL Harq proc for non-adaptive retx
21172 * @param[in] RgSchCmnUlCell *cellUl
21173 * @param[out] RgSchUlAlloc *alloc
21174 * @param[in] U8 idx
21180 PRIVATE Void rgSCHCmnUlNonadapRetx
21182 RgSchCmnUlCell *cellUl,
21183 RgSchUlAlloc *alloc,
21187 PRIVATE Void rgSCHCmnUlNonadapRetx(cellUl, alloc, idx)
21188 RgSchCmnUlCell *cellUl;
21189 RgSchUlAlloc *alloc;
/* Marks an UL alloc for non-adaptive retransmission: same resources, no
 * PDCCH; only the HARQ state and current MCS (from the RV index) change. */
21193 rgSCHUhmRetx(alloc->hqProc, alloc);
21195 /* Update alloc to retx */
21196 alloc->hqProc->isRetx = TRUE;
21197 alloc->hqProc->ulSfIdx = cellUl->reTxIdx[idx];
/* For RV != 0 the iMcsCrnt is the RV-signalling MCS; for RV 0 reuse the
 * original iMcs. */
21199 if (alloc->hqProc->rvIdx != 0)
21201 alloc->grnt.iMcsCrnt = rgSchCmnUlRvIdxToIMcsTbl[alloc->hqProc->rvIdx];
21205 alloc->grnt.iMcsCrnt = alloc->grnt.iMcs;
21207 alloc->grnt.isRtx = TRUE;
/* Non-adaptive retx carries no PDCCH grant. */
21208 alloc->pdcch = NULLP;
21212 * @brief Check if 2 allocs overlap
21216 * Function : rgSCHCmnUlAllocsOvrLap
21218 * - Return TRUE if alloc1 and alloc2 overlap.
21220 * @param[in] RgSchUlAlloc *alloc1
21221 * @param[in] RgSchUlAlloc *alloc2
21225 PRIVATE Bool rgSCHCmnUlAllocsOvrLap
21227 RgSchUlAlloc *alloc1,
21228 RgSchUlAlloc *alloc2
21231 PRIVATE Bool rgSCHCmnUlAllocsOvrLap(alloc1, alloc2)
21232 RgSchUlAlloc *alloc1;
21233 RgSchUlAlloc *alloc2;
/* TRUE when the two subband ranges [sbStart, sbStart+numSb-1] intersect:
 * either range starting inside the other implies overlap. */
21238 if (((alloc1->sbStart >= alloc2->sbStart) &&
21239 (alloc1->sbStart <= alloc2->sbStart + alloc2->numSb-1)) ||
21240 ((alloc2->sbStart >= alloc1->sbStart) &&
21241 (alloc2->sbStart <= alloc1->sbStart + alloc1->numSb-1)))
21248 * @brief Copy allocation Info from src to dst.
21252 * Function : rgSCHCmnUlCpyAllocInfo
21254 * - Copy allocation Info from src to dst.
21256 * @param[in] RgSchUlAlloc *srcAlloc
21257 * @param[in] RgSchUlAlloc *dstAlloc
21261 PRIVATE Void rgSCHCmnUlCpyAllocInfo
21264 RgSchUlAlloc *srcAlloc,
21265 RgSchUlAlloc *dstAlloc
21268 PRIVATE Void rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc)
21270 RgSchUlAlloc *srcAlloc;
21271 RgSchUlAlloc *dstAlloc;
/* Copies one UL allocation's state into another (used while merging msg3
 * subframes) and repairs every back-pointer — HARQ proc, UE context and
 * UL-SPS current-alloc — so nothing still references srcAlloc. */
21274 RgSchCmnUlUe *ueUl;
21276 dstAlloc->grnt = srcAlloc->grnt;
21277 dstAlloc->hqProc = srcAlloc->hqProc;
21278 /* Fix : syed During UE context release, hqProc->alloc
21279 * was pointing to srcAlloc instead of dstAlloc and
21280 * freeing from incorrect sf->allocDb was
21281 * corrupting the list. */
21282 /* In case of SPS Occasion Allocation is done in advance and
21283 at a later time Hq Proc is linked. Hence HqProc
21284 pointer in alloc shall be NULL */
21286 if (dstAlloc->hqProc)
21289 dstAlloc->hqProc->alloc = dstAlloc;
21291 dstAlloc->ue = srcAlloc->ue;
21292 dstAlloc->rnti = srcAlloc->rnti;
21293 dstAlloc->forMsg3 = srcAlloc->forMsg3;
21294 dstAlloc->raCb = srcAlloc->raCb;
21295 dstAlloc->pdcch = srcAlloc->pdcch;
21296 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* UE context must now point at the destination alloc. */
21299 ueUl = RG_SCH_CMN_GET_UL_UE(dstAlloc->ue,cell);
21300 ueUl->alloc.alloc = dstAlloc;
/* If UL-SPS tracked srcAlloc as its current allocation, retarget it. */
21302 if (dstAlloc->ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
21304 if((dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc != NULLP)
21305 && (dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc == srcAlloc))
21307 dstAlloc->ue->ul.ulSpsInfo.ulSpsSchdInfo.crntAlloc = dstAlloc;
21316 * @brief Update TX and RETX subframe's allocation
21321 * Function : rgSCHCmnUlInsAllocFrmNewSf2OldSf
21323 * - Release all preassigned allocations of newSf and merge
21325 * - If alloc of newSf collide with one or more allocs of oldSf
21326 * - mark all such allocs of oldSf for Adaptive Retx.
21327 * - Swap the alloc and hole DB references of oldSf and newSf.
21329 * @param[in] RgSchCellCb *cell
21330 * @param[in] RgSchUlSf *newSf
21331 * @param[in] RgSchUlSf *oldSf
21332 * @param[in] RgSchUlAlloc *srcAlloc
21336 PRIVATE Void rgSCHCmnUlInsAllocFrmNewSf2OldSf
21341 RgSchUlAlloc *srcAlloc
21344 PRIVATE Void rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, srcAlloc)
21348 RgSchUlAlloc *srcAlloc;
/* Inserts one pre-assigned alloc of newSf into oldSf: first evicts every
 * oldSf alloc that collides with srcAlloc's subbands (marking them for
 * adaptive retx), then carves the identical subband span out of oldSf and
 * copies srcAlloc's state into it. */
21351 RgSchUlAlloc *alloc, *dstAlloc, *nxtAlloc;
21353 /* MS_WORKAROUND ccpu00120827 */
21354 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
/* Pass 1: release every colliding alloc from oldSf. */
21357 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
21361 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
21362 /* If there is an overlap between alloc and srcAlloc
21363 * then alloc is marked for Adaptive retx and it is released
21365 if (rgSCHCmnUlAllocsOvrLap(alloc, srcAlloc) == TRUE)
21367 rgSCHCmnUlUpdAllocRetx(cell, alloc);
21368 rgSCHUtlUlAllocRls(oldSf, alloc);
21370 /* No further allocs spanning the srcAlloc subbands */
21371 if (srcAlloc->sbStart + srcAlloc->numSb - 1 <= alloc->sbStart)
21375 } while ((alloc = nxtAlloc) != NULLP);
21378 /* After freeing all the colliding allocs, request for an allocation
21379 * specifying the start and numSb with in txSf. This function should
21380 * always return positively with a nonNULL dstAlloc */
21381 /* MS_WORKAROUND ccpu00120827 */
/* Pass 2: if oldSf is full, evict a retx alloc (new msg3 wins over retx). */
21382 remAllocs = schCmnCell->ul.maxAllocPerUlSf - *oldSf->allocCountRef;
21385 /* Fix : If oldSf already has max Allocs then release the
21386 * old RETX alloc to make space for new alloc of newSf.
21387 * newSf allocs(i.e new Msg3s) are given higher priority
21388 * over retx allocs. */
21389 if ((alloc = rgSCHUtlUlAllocFirst(oldSf)) != NULLP)
21393 nxtAlloc = rgSCHUtlUlAllocNxt(oldSf, alloc);
21394 if (!alloc->mrgdNewTxAlloc)
21396 /* If alloc is for RETX */
21397 /* TODO: Incase of this ad also in case of choosing
21398 * and alloc for ADAP RETX, we need to send ACK for
21399 * the corresponding alloc in PHICH */
21400 #ifndef EMTC_ENABLE
21401 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc);
21403 rgSCHCmnUlFreeAllocation(cell, oldSf, alloc,FALSE);
21407 }while((alloc = nxtAlloc) != NULLP);
/* Pass 3: take the exact subband span in oldSf and copy state into it. */
21410 dstAlloc = rgSCHUtlUlGetSpfcAlloc(oldSf, srcAlloc->sbStart, srcAlloc->numSb);
21412 /* This should never happen */
21413 if (dstAlloc == NULLP)
21415 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"CRNTI:%d "
21416 "rgSCHUtlUlGetSpfcAlloc failed in rgSCHCmnUlInsAllocFrmNewSf2OldSf",
21421 /* Copy the srcAlloc's state information in to dstAlloc */
21422 rgSCHCmnUlCpyAllocInfo(cell, srcAlloc, dstAlloc);
21423 /* Set new Tx merged Alloc Flag to TRUE, indicating that this
21424 * alloc shall not be processed for non-adaptive retransmission */
21425 dstAlloc->mrgdNewTxAlloc = TRUE;
21429 * @brief Merge all allocations of newSf to oldSf.
21433 * Function : rgSCHCmnUlMergeSfAllocs
21435 * - Merge all allocations of newSf to oldSf.
21436 * - If newSf's alloc collides with oldSf's alloc
21437 * then oldSf's alloc is marked for adaptive Retx
21438 * and is released from oldSf to create space for
21441 * @param[in] RgSchCellCb *cell
21442 * @param[in] RgSchUlSf *oldSf
21443 * @param[in] RgSchUlSf *newSf
21447 PRIVATE Void rgSCHCmnUlMergeSfAllocs
21454 PRIVATE Void rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf)
/* Moves every pre-assigned alloc of newSf into oldSf (colliding oldSf
 * allocs become adaptive-retx candidates inside the helper), releasing
 * each alloc from newSf once merged. */
21460 RgSchUlAlloc *alloc, *nxtAlloc;
21463 /* Merge each alloc of newSf in to oldSf
21464 * and release it from newSf */
21465 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
/* nxtAlloc is captured before the release invalidates 'alloc'. */
21469 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
21470 rgSCHCmnUlInsAllocFrmNewSf2OldSf(cell, newSf, oldSf, alloc);
21471 rgSCHUtlUlAllocRls(newSf, alloc);
21472 } while((alloc = nxtAlloc) != NULLP);
21477 * @brief Swap Hole/Alloc DB context of newSf and oldSf.
21481 * Function : rgSCHCmnUlSwapSfAllocs
21483 * - Swap Hole/Alloc DB context of newSf and oldSf.
21485 * @param[in] RgSchCellCb *cell
21486 * @param[in] RgSchUlSf *oldSf
21487 * @param[in] RgSchUlSf *newSf
21491 PRIVATE Void rgSCHCmnUlSwapSfAllocs
21498 PRIVATE Void rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf)
/* Swaps the allocation DB, hole DB and available-subband count between the
 * two UL subframes, then re-points each allocCountRef at its own (newly
 * swapped) allocDb count. */
21504 RgSchUlAllocDb *tempAllocDb = newSf->allocDb;
21505 RgSchUlHoleDb *tempHoleDb = newSf->holeDb;
21506 U8 tempAvailSbs = newSf->availSubbands;
21510 newSf->allocDb = oldSf->allocDb;
21511 newSf->holeDb = oldSf->holeDb;
21512 newSf->availSubbands = oldSf->availSubbands;
21514 oldSf->allocDb = tempAllocDb;
21515 oldSf->holeDb = tempHoleDb;
21516 oldSf->availSubbands = tempAvailSbs;
21518 /* Fix ccpu00120610*/
/* allocCountRef must follow the swapped allocDb, not the old pointer. */
21519 newSf->allocCountRef = &newSf->allocDb->count;
21520 oldSf->allocCountRef = &oldSf->allocDb->count;
21524 * @brief Perform non-adaptive RETX for non-colliding allocs.
21528 * Function : rgSCHCmnUlPrcNonAdptRetx
21530 * - Perform non-adaptive RETX for non-colliding allocs.
21532 * @param[in] RgSchCellCb *cell
21533 * @param[in] RgSchUlSf *newSf
21534 * @param[in] U8 idx
21538 PRIVATE Void rgSCHCmnUlPrcNonAdptRetx
21545 PRIVATE Void rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx)
/* After a subframe merge, walks newSf's allocs: freshly-merged new-TX
 * allocs are skipped (flag reset); everything else gets a non-adaptive
 * retransmission on the same resources. */
21551 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21552 RgSchUlAlloc *alloc, *nxtAlloc;
21554 /* perform non-adaptive retx allocation(adjustment) */
21555 if ((alloc = rgSCHUtlUlAllocFirst(newSf)) != NULLP)
21559 nxtAlloc = rgSCHUtlUlAllocNxt(newSf, alloc);
21560 /* A merged new TX alloc, reset the state and skip */
21561 if (alloc->mrgdNewTxAlloc)
21563 alloc->mrgdNewTxAlloc = FALSE;
21568 rgSCHCmnUlNonadapRetx(cellUl, alloc, idx);
21570 } while((alloc = nxtAlloc) != NULLP);
21576 * @brief Update TX and RETX subframe's allocation
21581 * Function : rgSCHCmnUlPrfmSfMerge
21583 * - Release all preassigned allocations of newSf and merge
21585 * - If alloc of newSf collide with one or more allocs of oldSf
21586 * - mark all such allocs of oldSf for Adaptive Retx.
21587 * - Swap the alloc and hole DB references of oldSf and newSf.
21588 * - The allocs which did not collide with pre-assigned msg3
21589 * allocs are marked for non-adaptive RETX.
21591 * @param[in] RgSchCellCb *cell
21592 * @param[in] RgSchUlSf *oldSf
21593 * @param[in] RgSchUlSf *newSf
21594 * @param[in] U8 idx
21598 PRIVATE Void rgSCHCmnUlPrfmSfMerge
21606 PRIVATE Void rgSCHCmnUlPrfmSfMerge(cell, oldSf, newSf, idx)
/* Orchestrates the msg3 subframe merge: merge newSf's pre-assigned allocs
 * into oldSf, swap the two subframes' DB contexts so newSf holds the merged
 * result, then do non-adaptive retx for the non-colliding survivors. */
21613 /* Preassigned resources for msg3 in newSf.
21614 * Hence do adaptive retx for all NACKED TXs */
21615 rgSCHCmnUlMergeSfAllocs(cell, oldSf, newSf);
21616 /* swap alloc and hole DBs of oldSf and newSf. */
21617 rgSCHCmnUlSwapSfAllocs(cell, oldSf, newSf);
21618 /* Here newSf has the resultant merged allocs context */
21619 /* Perform non-adaptive RETX for non-colliding allocs */
21620 rgSCHCmnUlPrcNonAdptRetx(cell, newSf, idx);
21626 * @brief Update TX and RETX subframe's allocation
21631 * Function : rgSCHCmnUlRmvCmpltdAllocs
21633 * - Free all Transmission which are ACKED
21634 * OR for which MAX retransmission have
21638 * @param[in] RgSchCellCb *cell,
21639 * @param[in] RgSchUlSf *sf
21643 PRIVATE Void rgSCHCmnUlRmvCmpltdAllocs
21649 PRIVATE Void rgSCHCmnUlRmvCmpltdAllocs(cell, sf)
/* Sweeps an UL subframe: frees allocs whose HARQ TX is complete (CRC OK or
 * max retx exhausted) and marks the remaining NACKed UE allocs (non-msg3)
 * for adaptive retransmission. */
21654 RgSchUlAlloc *alloc, *nxtAlloc;
21656 if ((alloc = rgSCHUtlUlAllocFirst(sf)) == NULLP)
21662 nxtAlloc = rgSCHUtlUlAllocNxt(sf, alloc);
21664 printf("rgSCHCmnUlRmvCmpltdAllocs:time(%d %d) alloc->hqProc->remTx %d hqProcId(%d) \n",cell->crntTime.sfn,cell->crntTime.slot,alloc->hqProc->remTx, alloc->grnt.hqProcId);
/* NOTE(review): this unconditional set forces the completion check below
 * to always take the free path; presumably it sits inside a debug/#ifdef
 * region on lines not visible here — confirm against the full file. */
21666 alloc->hqProc->rcvdCrcInd = TRUE;
21667 if ((alloc->hqProc->rcvdCrcInd) || (alloc->hqProc->remTx == 0))
21670 /* SR_RACH_STATS : MSG 3 MAX RETX FAIL*/
21671 if ((alloc->forMsg3 == TRUE) && (alloc->hqProc->remTx == 0))
21673 rgNumMsg3FailMaxRetx++;
21675 cell->tenbStats->sch.msg3Fail++;
21679 #ifdef MAC_SCH_STATS
21680 if(alloc->ue != NULLP)
21682 /* access from ulHarqProc*/
21683 RgSchUeCb *ueCb = alloc->ue;
21684 RgSchCmnUe *cmnUe = (RgSchCmnUe*)ueCb->sch;
21685 RgSchCmnUlUe *ulUe = &(cmnUe->ul);
21686 U8 cqi = ulUe->crntUlCqi[0];
21687 U16 numUlRetx = ueCb->ul.hqEnt.maxHqRetx - alloc->hqProc->remTx;
21689 hqRetxStats.ulCqiStat[(cqi - 1)].mcs = alloc->grnt.iMcs;
/* Bucket the completed TX by how many HARQ attempts it took. */
21694 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1++;
21697 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2++;
21700 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3++;
21703 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4++;
21706 hqRetxStats.ulCqiStat[(cqi - 1)].totalTx = \
21707 hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_1 + \
21708 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_2 * 2) + \
21709 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_3 * 3) + \
21710 (hqRetxStats.ulCqiStat[(cqi - 1)].numOfHQ_4 * 4);
21713 #endif /*MAC_SCH_STATS*/
21714 rgSCHCmnUlFreeAllocation(cell, sf, alloc);
21716 /*ccpu00106104 MOD added check for AckNackRep */
21717 /*added check for acknack so that adaptive retx considers ue
21718 inactivity due to ack nack repetition*/
21719 else if((alloc->ue != NULLP) && (TRUE != alloc->forMsg3))
21721 rgSCHCmnUlUpdAllocRetx(cell, alloc);
21722 rgSCHUtlUlAllocRls(sf, alloc);
21724 } while ((alloc = nxtAlloc) != NULLP);
21730 * @brief Update an uplink subframe.
21734 * Function : rgSCHCmnRlsUlSf
21736 * For each allocation
21737 * - if no more tx needed
21738 * - Release allocation
21740 * - Perform retransmission
21742 * @param[in] RgSchUlSf *sf
21743 * @param[in] U8 idx
21747 Void rgSCHCmnRlsUlSf
21753 Void rgSCHCmnRlsUlSf(cell, idx)
/* Releases the UL subframe whose HARQ feedback is due at index idx:
 * frees/retx-marks all completed allocations and resets the per-SF
 * aperiodic-CQI counter. No-op when no feedback index or no allocs. */
21759 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21761 if (cellUl->hqFdbkIdx[idx] != RGSCH_INVALID_INFO)
21763 RgSchUlSf *oldSf = &cellUl->ulSfArr[cellUl->hqFdbkIdx[idx]];
21765 /* Initialize the reTxLst of UL HqProcs for RETX subframe */
/* Empty subframe: nothing to release. */
21766 if (rgSCHUtlUlAllocFirst(oldSf) == NULLP)
21770 /* Release all completed TX allocs from sf */
21771 rgSCHCmnUlRmvCmpltdAllocs(cell, oldSf);
21773 oldSf->numACqiCount = 0;
21779 * @brief Handle uplink allocation for retransmission.
21783 * Function : rgSCHCmnUlUpdAllocRetx
21785 * - Perform adaptive retransmission
21787 * @param[in] RgSchUlSf *sf
21788 * @param[in] RgSchUlAlloc *alloc
21792 PRIVATE Void rgSCHCmnUlUpdAllocRetx
21795 RgSchUlAlloc *alloc
21798 PRIVATE Void rgSCHCmnUlUpdAllocRetx(cell, alloc)
21800 RgSchUlAlloc *alloc;
/* Snapshots the failed alloc's grant into hqProc->reTxAlloc and queues the
 * HARQ proc on the cell's adaptive-retx list; the alloc itself is released
 * by the caller afterwards. */
21803 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
/* Copy everything the adaptive retx will need to rebuild the grant. */
21806 alloc->hqProc->reTxAlloc.rnti = alloc->rnti;
21807 alloc->hqProc->reTxAlloc.numSb = alloc->numSb;
21808 alloc->hqProc->reTxAlloc.iMcs = alloc->grnt.iMcs;
21810 alloc->hqProc->reTxAlloc.dciFrmt = alloc->grnt.dciFrmt;
21811 alloc->hqProc->reTxAlloc.numLyr = alloc->grnt.numLyr;
21812 alloc->hqProc->reTxAlloc.vrbgStart = alloc->grnt.vrbgStart;
21813 alloc->hqProc->reTxAlloc.numVrbg = alloc->grnt.numVrbg;
21814 alloc->hqProc->reTxAlloc.modOdr = alloc->grnt.modOdr;
21816 //iTbs = rgSCHCmnUlGetITbsFrmIMcs(alloc->grnt.iMcs);
21817 //iTbs = alloc->grnt.iMcs;
21818 //RGSCH_ARRAY_BOUND_CHECK( 0, rgTbSzTbl[0], iTbs);
21819 alloc->hqProc->reTxAlloc.tbSz = alloc->grnt.datSz;
21820 //rgTbSzTbl[0][iTbs][alloc->grnt.numRb-1]/8;
21821 alloc->hqProc->reTxAlloc.ue = alloc->ue;
21822 alloc->hqProc->reTxAlloc.forMsg3 = alloc->forMsg3;
21823 alloc->hqProc->reTxAlloc.raCb = alloc->raCb;
21825 /* Set as retransmission is pending */
/* Detach the HARQ proc from the (about to be freed) alloc. */
21826 alloc->hqProc->isRetx = TRUE;
21827 alloc->hqProc->alloc = NULLP;
21828 alloc->hqProc->ulSfIdx = RGSCH_INVALID_INFO;
21830 printf("Adding Harq Proc Id in the retx list hqProcId %d \n",alloc->grnt.hqProcId);
21832 cmLListAdd2Tail(&cmnUlCell->reTxLst, &alloc->hqProc->reTxLnk);
21833 alloc->hqProc->reTxLnk.node = (PTR)alloc->hqProc;
21838 * @brief Attempts allocation for msg3s for which ADAP retransmissions
21843 * Function : rgSCHCmnUlAdapRetxAlloc
21845 * Attempts allocation for msg3s for which ADAP retransmissions
21848 * @param[in] RgSchCellCb *cell
21849 * @param[in] RgSchUlSf *sf
21850 * @param[in] RgSchUlHqProcCb *proc;
21851 * @param[in] RgSchUlHole *hole;
/* Attempts one adaptive UL retransmission allocation (UE data or msg3) from
 * the given hole: allocates a common PDCCH, carves numSb subbands out of the
 * hole, rebuilds the grant from the HARQ process's reTxAlloc snapshot and
 * fills the DCI format 0 fields. Returns TRUE on success (and also when only
 * the PDCCH could not be obtained for this UE), FALSE when the hole cannot
 * fit numSb. NOTE(review): partial listing — braces, #ifdef alternates and
 * the return statements fall in elided lines. */
21855 PRIVATE Bool rgSCHCmnUlAdapRetxAlloc
21859 RgSchUlHqProcCb *proc,
21863 PRIVATE Bool rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole)
21866 RgSchUlHqProcCb *proc;
21870 U8 numSb = proc->reTxAlloc.numSb;
21871 U8 iMcs = proc->reTxAlloc.iMcs;
21872 CmLteTimingInfo frm = cell->crntTime;
21873 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
21876 RgSchUlAlloc *alloc;
21878 /* Fetch PDCCH for msg3 */
21879 /* ccpu00116293 - Correcting relation between UL subframe and DL subframe based on RG_UL_DELTA*/
21880 /* Introduced timing delta for UL control */
/* The DCI goes out TFU_ULCNTRL_DLDELTA subframes ahead of the UL data. */
21881 RGSCH_INCR_SUB_FRAME(frm, TFU_ULCNTRL_DLDELTA);
21882 dlSf = rgSCHUtlSubFrmGet(cell, frm);
21883 pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
/* No PDCCH available: elided branch presumably bails out; TODO confirm. */
21884 if (pdcch == NULLP)
21889 /* Fetch UL Alloc for msg3 */
21890 if (numSb <= hole->num)
21892 alloc = rgSCHUtlUlAllocGetHole(sf, numSb, hole);
/* Allocation from the hole failed: return the PDCCH before bailing out. */
21897 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
21898 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
21899 "UL Alloc fail for msg3 retx for rnti: %d\n",
21900 proc->reTxAlloc.rnti);
21904 rgSCHCmnUlAllocFillRbInfo(cell, sf, alloc);
/* Rebuild the grant from the snapshot taken by rgSCHCmnUlUpdAllocRetx(). */
21905 alloc->grnt.iMcs = iMcs;
21906 alloc->grnt.datSz = proc->reTxAlloc.tbSz;
21909 //RG_SCH_UL_MCS_TO_MODODR(iMcs, alloc->grnt.modOdr);
21911 /* Fill UL Alloc for msg3 */
21912 /* RACHO : setting nDmrs to 0 and UlDelaybit to 0*/
21913 alloc->grnt.nDmrs = 0;
21914 alloc->grnt.hop = 0;
21915 alloc->grnt.delayBit = 0;
21916 alloc->grnt.isRtx = TRUE;
21917 proc->ulSfIdx = cellUl->schdIdx;
21919 proc->schdTime = cellUl->schdTime;
21920 alloc->grnt.hqProcId = proc->procId;
21921 alloc->grnt.dciFrmt = proc->reTxAlloc.dciFrmt;
21922 alloc->grnt.numLyr = proc->reTxAlloc.numLyr;
21923 alloc->grnt.vrbgStart = proc->reTxAlloc.vrbgStart;
21924 alloc->grnt.numVrbg = proc->reTxAlloc.numVrbg;
21925 alloc->grnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG, alloc->grnt.vrbgStart, alloc->grnt.numVrbg);
21926 alloc->grnt.modOdr = proc->reTxAlloc.modOdr;
21928 /* TODO : Hardcoding these as of now */
21929 alloc->grnt.hop = 0;
21930 alloc->grnt.SCID = 0;
21931 alloc->grnt.xPUSCHRange = MAX_5GTF_XPUSCH_RANGE;
21932 alloc->grnt.PMI = 0;
21933 alloc->grnt.uciOnxPUSCH = 0;
21935 alloc->rnti = proc->reTxAlloc.rnti;
21936 /* Fix : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
21937 alloc->ue = proc->reTxAlloc.ue;
21938 alloc->pdcch = pdcch;
21939 alloc->forMsg3 = proc->reTxAlloc.forMsg3;
21940 alloc->raCb = proc->reTxAlloc.raCb;
21941 alloc->hqProc = proc;
21942 alloc->isAdaptive = TRUE;
21944 sf->totPrb += alloc->grnt.numRb;
21946 /* FIX : syed HandIn Ue has forMsg3 and ue Set, but no RaCb */
/* msg3 path (raCb present): stamp the grant and its expected reception time
 * onto the RA control block. */
21949 alloc->raCb->msg3Grnt= alloc->grnt;
21951 /* To the crntTime, add the time at which UE will
21952 * actually send MSG3 */
21953 alloc->raCb->msg3AllocTime = cell->crntTime;
21954 RGSCH_INCR_SUB_FRAME(alloc->raCb->msg3AllocTime, RG_SCH_CMN_MIN_RETXMSG3_RECP_INTRVL);
21956 alloc->raCb->msg3AllocTime = cellUl->schdTime;
21958 rgSCHCmnUlAdapRetx(alloc, proc);
21959 /* Fill PDCCH with alloc info */
21960 pdcch->rnti = alloc->rnti;
21961 pdcch->dci.dciFormat = TFU_DCI_FORMAT_0;
21962 pdcch->dci.u.format0Info.hoppingEnbld = alloc->grnt.hop;
21963 pdcch->dci.u.format0Info.rbStart = alloc->grnt.rbStart;
21964 pdcch->dci.u.format0Info.numRb = alloc->grnt.numRb;
21965 pdcch->dci.u.format0Info.mcs = alloc->grnt.iMcsCrnt;
21966 pdcch->dci.u.format0Info.ndi = alloc->hqProc->ndi;
21967 pdcch->dci.u.format0Info.nDmrs = alloc->grnt.nDmrs;
21968 pdcch->dci.u.format0Info.tpcCmd = alloc->grnt.tpc;
21972 /* ulIdx setting for cfg 0 shall be appropriately fixed thru ccpu00109015 */
21973 pdcch->dci.u.format0Info.ulIdx = RG_SCH_ULIDX_MSB;
21974 pdcch->dci.u.format0Info.dai = RG_SCH_MAX_DAI_IDX;
21977 pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_0];
/* UE (non-msg3) path: record the allocation against the UE's UL context. */
21981 RgSchCmnUlUe *ueUl = RG_SCH_CMN_GET_UL_UE(alloc->ue,cell);
21983 alloc->ue->initNumRbs = (alloc->grnt.numVrbg * MAX_5GTF_VRBG_SIZE);
21986 ue->ul.nPrb = alloc->grnt.numRb;
21988 ueUl->alloc.alloc = alloc;
21989 /* FIx: Removed the call to rgSCHCmnUlAdapRetx */
21990 rgSCHCmnUlUeFillAllocInfo(cell, alloc->ue);
21991 /* Setting csireq as false for Adaptive Retx*/
21992 ueUl->alloc.alloc->pdcch->dci.u.format0Info.cqiReq = RG_SCH_APCQI_NO;
21993 pdcch->dciNumOfBits = alloc->ue->dciSize.cmnSize[TFU_DCI_FORMAT_0];
21995 /* Reset as retransmission is done */
21996 proc->isRetx = FALSE;
21998 else /* Intg fix */
/* Hole too small for numSb: release the PDCCH and report failure. */
22000 rgSCHUtlPdcchPut(cell, &dlSf->pdcchInfo, pdcch);
22001 RLOG_ARG1(L_DEBUG,DBG_CELLID,cell->cellId,
22002 "Num SB not suffiecient for adap retx for rnti: %d",
22003 proc->reTxAlloc.rnti);
22009 /* Fix: syed Adaptive Msg3 Retx crash. */
22011 * @brief Releases all Adaptive Retx HqProcs which failed for
22012 * allocations in this scheduling occasion.
22016 * Function : rgSCHCmnUlSfRlsRetxProcs
22019 * @param[in] RgSchCellCb *cell
22020 * @param[in] RgSchUlSf *sf
/* Walks the cell's adaptive-retx list and unlinks every HARQ process still
 * queued — used to drop retx candidates that could not be allocated in this
 * scheduling occasion. NOTE(review): partial listing — the loop construct
 * and braces fall in elided lines. */
22025 PRIVATE Void rgSCHCmnUlSfRlsRetxProcs
22031 PRIVATE Void rgSCHCmnUlSfRlsRetxProcs(cell, sf)
22038 RgSchUlHqProcCb *proc;
22039 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
22042 cp = &(cellUl->reTxLst);
22046 proc = (RgSchUlHqProcCb *)node->node;
22048 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
22049 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
/* Clear the link's back-pointer so a stale node cannot be traversed later
 * (guards against the adaptive msg3 retx crash noted elsewhere). */
22050 proc->reTxLnk.node = (PTR)NULLP;
22057 * @brief Attempts allocation for UEs for which retransmissions
22062 * Function : rgSCHCmnUlSfReTxAllocs
22064 * Attempts allocation for UEs for which retransmissions
22067 * @param[in] RgSchCellCb *cell
22068 * @param[in] RgSchUlSf *sf
/* Iterates the cell's adaptive-retx list and attempts an allocation for each
 * queued HARQ process via rgSCHCmnUlAdapRetxAlloc(); successfully allocated
 * processes are unlinked from the list. UEs in measurement gap or ACK/NACK
 * repetition are skipped. NOTE(review): partial listing — loop constructs,
 * braces and continue/break statements fall in elided lines. */
22072 PRIVATE Void rgSCHCmnUlSfReTxAllocs
22078 PRIVATE Void rgSCHCmnUlSfReTxAllocs(cell, sf)
22085 RgSchUlHqProcCb *proc;
22088 RgSchCmnCell *schCmnCell = (RgSchCmnCell *)(cell->sc.sch);
22089 RgSchCmnUlCell *cellUl = RG_SCH_CMN_GET_UL_CELL(cell);
22091 cp = &(cellUl->reTxLst);
22095 proc = (RgSchUlHqProcCb *)node->node;
/* ue may be NULLP for msg3 retx (no UE context yet). */
22096 ue = proc->reTxAlloc.ue;
22098 /*ccpu00106104 MOD added check for AckNackRep */
22099 /*added check for acknack so that adaptive retx considers ue
22100 inactivity due to ack nack repetition*/
22101 if((ue != NULLP) &&
22102 ((ue->measGapCb.isMeasuring == TRUE)||
22103 (ue->ackNakRepCb.isAckNakRep == TRUE)))
22107 /* Fix for ccpu00123917: Check if maximum allocs per UL sf have been exhausted */
22108 if (((hole = rgSCHUtlUlHoleFirst(sf)) == NULLP)
22109 || (sf->allocDb->count == schCmnCell->ul.maxAllocPerUlSf))
22111 /* No more UL BW then return */
22114 /* perform adaptive retx for UE's */
22115 if (rgSCHCmnUlAdapRetxAlloc(cell, sf, proc, hole) == FALSE)
22119 /* ccpu00137834 : Deleting reTxLnk from the respective reTxLst */
/* Success: unlink from the retx list and clear the node back-pointer. */
22120 cmLListDelFrm(&cellUl->reTxLst, &proc->reTxLnk);
22121 /* Fix: syed Adaptive Msg3 Retx crash. */
22122 proc->reTxLnk.node = (PTR)NULLP;
22128 * @brief Handles RB allocation for downlink.
22132 * Function : rgSCHCmnDlRbAlloc
22134 * Invoking Module Processing:
22135 * - This function is invoked for DL RB allocation
22137 * Processing Steps:
22138 * - If cell is frequency selecive,
22139 * - Call rgSCHDlfsAllocRb().
22141 * - Call rgSCHCmnNonDlfsRbAlloc().
22143 * @param[in] RgSchCellCb *cell
22144 * @param[in] RgSchDlRbAllocInfo *allocInfo
/* DL RB allocation dispatcher: delegates to the DLFS (frequency-selective)
 * scheduler callback when enabled on the cell, otherwise to the common
 * non-DLFS allocator. NOTE(review): partial listing — braces and the else
 * keyword fall in elided lines. */
22149 PRIVATE Void rgSCHCmnDlRbAlloc
22152 RgSchCmnDlRbAllocInfo *allocInfo
22155 PRIVATE Void rgSCHCmnDlRbAlloc(cell, allocInfo)
22157 RgSchCmnDlRbAllocInfo *allocInfo;
22160 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
22162 if (cellSch->dl.isDlFreqSel)
/* Debug trace left in by 5GTF bring-up; prints on every DLFS allocation. */
22164 printf("5GTF_ERROR DLFS SCH Enabled\n");
22165 cellSch->apisDlfs->rgSCHDlfsAllocRb(cell, allocInfo);
22169 rgSCHCmnNonDlfsRbAlloc(cell, allocInfo);
22177 * @brief Determines number of RBGs and RBG subset sizes for the given DL
22178 * bandwidth and rbgSize
22181 * Function : rgSCHCmnDlGetRbgInfo
22184 * Processing Steps:
22185 * - Fill-up rbgInfo data structure for given DL bandwidth and rbgSize
22187 * @param[in] U8 dlTotalBw
22188 * @param[in] U8 dlSubsetBw
22189 * @param[in] U8 maxRaType1SubsetBw
22190 * @param[in] U8 rbgSize
22191 * @param[out] RgSchBwRbgInfo *rbgInfo
/* Fills rbgInfo (RBG count, last-RBG size, covered RB count, RBG size) for
 * the given DL bandwidth; RA-type-1 subset sizing is compiled in only under
 * RGSCH_SPS_UNUSED. NOTE(review): partial listing — prototype alternates,
 * braces, #else/#endif lines are elided, so the #ifdef pairing below cannot
 * be fully seen here. */
22195 Void rgSCHCmnDlGetRbgInfo
22199 U8 maxRaType1SubsetBw,
22201 RgSchBwRbgInfo *rbgInfo
22204 Void rgSCHCmnDlGetRbgInfo(dlTotalBw, dlSubsetBw, maxRaType1SubsetBw,
22208 U8 maxRaType1SubsetBw;
22210 RgSchBwRbgInfo *rbgInfo;
22213 #ifdef RGSCH_SPS_UNUSED
/* Index of the (possibly short) last RBG across the total bandwidth. */
22215 U8 lastRbgIdx = ((dlTotalBw + rbgSize - 1)/rbgSize) - 1;
22216 U8 currRbgSize = rbgSize;
22217 U8 subsetSizeIdx = 0;
22218 U8 subsetSize[RG_SCH_NUM_RATYPE1_SUBSETS] = {0};
22219 U8 lastRbgSize = rbgSize - (dlTotalBw - ((dlTotalBw/rbgSize) * rbgSize));
22220 U8 numRaType1Rbgs = (maxRaType1SubsetBw + rbgSize - 1)/rbgSize;
22223 /* Compute maximum number of SPS RBGs for the cell */
22224 rbgInfo->numRbgs = ((dlSubsetBw + rbgSize - 1)/rbgSize);
22226 #ifdef RGSCH_SPS_UNUSED
22227 /* Distribute RBGs across subsets except last RBG */
22228 for (;idx < numRaType1Rbgs - 1; ++idx)
22230 subsetSize[subsetSizeIdx] += currRbgSize;
22231 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
22234 /* Computation for last RBG */
22235 if (idx == lastRbgIdx)
22237 currRbgSize = lastRbgSize;
22239 subsetSize[subsetSizeIdx] += currRbgSize;
22240 subsetSizeIdx = (subsetSizeIdx + 1) % rbgSize;
22243 /* Update the computed sizes */
22244 #ifdef RGSCH_SPS_UNUSED
22245 rbgInfo->lastRbgSize = currRbgSize;
/* Non-SPS build: last RBG size derived from the subset bandwidth remainder.
 * NOTE(review): when dlSubsetBw is an exact multiple of rbgSize this yields
 * rbgSize (full last RBG), matching the computation above. */
22247 rbgInfo->lastRbgSize = rbgSize -
22248 (dlSubsetBw - ((dlSubsetBw/rbgSize) * rbgSize));
22249 #ifdef RGSCH_SPS_UNUSED
22250 memcpy(rbgInfo->rbgSubsetSize, subsetSize, 4 * sizeof(U8));
/* Clamp the covered RB count to the total bandwidth. */
22252 rbgInfo->numRbs = (rbgInfo->numRbgs * rbgSize > dlTotalBw) ?
22253 dlTotalBw:(rbgInfo->numRbgs * rbgSize);
22254 rbgInfo->rbgSize = rbgSize;
22258 * @brief Handles RB allocation for Resource allocation type 0
22262 * Function : rgSCHCmnDlRaType0Alloc
22264 * Invoking Module Processing:
22265 * - This function is invoked for DL RB allocation for resource allocation
22268 * Processing Steps:
22269 * - Determine the available positions in the rbgMask.
22270 * - Allocate RBGs in the available positions.
22271 * - Update RA Type 0, RA Type 1 and RA type 2 masks.
22273 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22274 * @param[in] U8 rbsReq
22275 * @param[in] RgSchBwRbgInfo *rbgInfo
22276 * @param[out] U8 *numAllocRbs
22277 * @param[out] RgSchDlSfAllocInfo *resAllocInfo
22278 * @param[in] Bool isPartialAlloc
/* RA type 0 allocator: walks the RBG bitmask, grabs free RBGs until
 * bestNumAvailRbs RBs are covered, updating the RA type 0/2 (and, under
 * RGSCH_SPS_UNUSED, type 1) masks as it goes; the short last RBG is handled
 * separately. Returns the number of RBGs allocated; *numAllocRbs gets the RB
 * count. NOTE(review): partial listing — braces, early returns and some
 * loop bodies fall in elided lines. */
22284 U8 rgSCHCmnDlRaType0Alloc
22286 RgSchDlSfAllocInfo *allocedInfo,
22288 RgSchBwRbgInfo *rbgInfo,
22290 RgSchDlSfAllocInfo *resAllocInfo,
22291 Bool isPartialAlloc
22294 U8 rgSCHCmnDlRaType0Alloc(allocedInfo, rbsReq, rbgInfo,
22295 numAllocRbs, resAllocInfo, isPartialAlloc)
22296 RgSchDlSfAllocInfo *allocedInfo;
22298 RgSchBwRbgInfo *rbgInfo;
22300 RgSchDlSfAllocInfo *resAllocInfo;
22301 Bool isPartialAlloc;
22304 /* Note: This function attempts allocation only full allocation */
22305 U32 remNumRbs, rbgPosInRbgMask, ueRaType2Mask;
22306 U8 type2MaskIdx, cnt, rbIdx;
22308 U8 bestNumAvailRbs = 0;
22310 U8 numAllocRbgs = 0;
22311 U8 rbgSize = rbgInfo->rbgSize;
22312 U32 *rbgMask = &(resAllocInfo->raType0Mask);
22313 #ifdef RGSCH_SPS_UNUSED
22316 U32 *raType1Mask = resAllocInfo->raType1Mask;
22317 U32 *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22319 U32 *raType2Mask = resAllocInfo->raType2Mask;
22321 U32 allocedMask = allocedInfo->raType0Mask;
22323 maskSize = rbgInfo->numRbgs;
22326 RG_SCH_CMN_DL_COUNT_ONES(allocedMask, maskSize, &usedRbs);
22327 if (maskSize == usedRbs)
22329 /* All RBGs are allocated, including the last one */
/* Count full-size RBGs only; the (possibly short) last RBG is accounted for
 * separately below — hence the extra "- 1" here despite the stale note. */
22334 remNumRbs = (maskSize - usedRbs - 1) * rbgSize;
22336 /* If last RBG is available, add last RBG size */
22337 if (!(allocedMask & (1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(maskSize - 1))))
22339 remNumRbs += rbgInfo->lastRbgSize;
22343 /* If complete allocation is needed, check if total requested RBs are available else
22344 * check the best available RBs */
22345 if (!isPartialAlloc)
22347 if (remNumRbs >= rbsReq)
22349 bestNumAvailRbs = rbsReq;
22354 bestNumAvailRbs = remNumRbs > rbsReq ? rbsReq : remNumRbs;
22357 /* Allocate for bestNumAvailRbs */
22358 if (bestNumAvailRbs)
/* Scan all full-size RBGs (last RBG excluded, handled after the loop). */
22360 for (rbg = 0; rbg < maskSize - 1; ++rbg)
22362 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22363 if (!(allocedMask & rbgPosInRbgMask))
22365 /* Update RBG mask */
22366 *rbgMask |= rbgPosInRbgMask;
22368 /* Compute RB index of the first RB of the RBG allocated */
22369 rbIdx = rbg * rbgSize;
/* Mirror the allocation into the per-RB RA type 2 (and type 1) masks. */
22371 for (cnt = 0; cnt < rbgSize; ++cnt)
22373 #ifdef RGSCH_SPS_UNUSED
22374 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22376 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22377 #ifdef RGSCH_SPS_UNUSED
22378 /* Update RBG mask for RA type 1 */
22379 raType1Mask[rbgSubset] |= ueRaType1Mask;
22380 raType1UsedRbs[rbgSubset]++;
22382 /* Update RA type 2 mask */
22383 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22386 *numAllocRbs += rbgSize;
22387 remNumRbs -= rbgSize;
22389 if (*numAllocRbs >= bestNumAvailRbs)
22395 /* If last RBG available and allocation is not completed, allocate
22397 if (*numAllocRbs < bestNumAvailRbs)
22399 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22400 *rbgMask |= rbgPosInRbgMask;
22401 *numAllocRbs += rbgInfo->lastRbgSize;
22403 /* Compute RB index of the first RB of the last RBG */
22404 rbIdx = ((rbgInfo->numRbgs - 1 ) * rbgSize );
22406 for (cnt = 0; cnt < rbgInfo->lastRbgSize; ++cnt)
22408 #ifdef RGSCH_SPS_UNUSED
22409 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22411 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22412 #ifdef RGSCH_SPS_UNUSED
22413 /* Update RBG mask for RA type 1 */
22414 raType1Mask[rbgSubset] |= ueRaType1Mask;
22415 raType1UsedRbs[rbgSubset]++;
22417 /* Update RA type 2 mask */
22418 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22421 remNumRbs -= rbgInfo->lastRbgSize;
22424 /* Note: this should complete allocation, not checking for the
22428 return (numAllocRbgs);
22431 #ifdef RGSCH_SPS_UNUSED
22433 * @brief Handles RB allocation for Resource allocation type 1
22437 * Function : rgSCHCmnDlRaType1Alloc
22439 * Invoking Module Processing:
22440 * - This function is invoked for DL RB allocation for resource allocation
22443 * Processing Steps:
22444 * - Determine the available positions in the subsets.
22445 * - Allocate RB in the available subset.
22446 * - Update RA Type1, RA type 0 and RA type 2 masks.
22448 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22449 * @param[in] U8 rbsReq
22450 * @param[in] RgSchBwRbgInfo *rbgInfo
22451 * @param[in] U8 startRbgSubset
22452 * @param[in] U8 *allocRbgSubset
22453 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
22454 * @param[in] Bool isPartialAlloc
22457 * Number of allocated RBs
/* RA type 1 allocator (compiled only under RGSCH_SPS_UNUSED): picks the RBG
 * subset with enough free RBs (or, for partial allocation, the one with the
 * most), then marks RBs in that subset, mirroring each RB into the RA type 0
 * and type 2 masks. Returns the number of RBs allocated and reports the
 * chosen subset via *allocRbgSubset. NOTE(review): partial listing — braces,
 * break statements and some declarations (startIdx/startPos) are elided. */
22461 U8 rgSCHCmnDlRaType1Alloc
22463 RgSchDlSfAllocInfo *allocedInfo,
22465 RgSchBwRbgInfo *rbgInfo,
22467 U8 *allocRbgSubset,
22468 RgSchDlSfAllocInfo *resAllocInfo,
22469 Bool isPartialAlloc
22472 U8 rgSCHCmnDlRaType1Alloc(allocedInfo, rbsReq,rbgInfo,startRbgSubset,
22473 allocRbgSubset, resAllocInfo, isPartialAlloc)
22474 RgSchDlSfAllocInfo *allocedInfo;
22476 RgSchBwRbgInfo *rbgInfo;
22478 U8 *allocRbgSubset;
22479 RgSchDlSfAllocInfo *resAllocInfo;
22480 Bool isPartialAlloc;
22483 /* Note: This function attempts only full allocation */
22484 U8 *rbgSubsetSzArr;
22485 U8 type2MaskIdx, subsetIdx, rbIdx, rbInSubset, rbgInSubset;
22486 U8 offset, rbg, maskSize, bestSubsetIdx;
22488 U8 bestNumAvailRbs = 0;
22489 U8 numAllocRbs = 0;
22490 U32 ueRaType2Mask, ueRaType0Mask, rbPosInSubset;
22491 U32 remNumRbs, allocedMask;
22493 U8 rbgSize = rbgInfo->rbgSize;
22494 U8 rbgSubset = startRbgSubset;
22495 U32 *rbgMask = &resAllocInfo->raType0Mask;
22496 U32 *raType1Mask = resAllocInfo->raType1Mask;
22497 U32 *raType2Mask = resAllocInfo->raType2Mask;
22498 U32 *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22499 U32 *allocMask = allocedInfo->raType1Mask;
22501 /* Initialize the subset size Array */
22502 rbgSubsetSzArr = rbgInfo->rbgSubsetSize;
22504 /* Perform allocation for RA type 1 */
/* Round-robin over subsets starting from startRbgSubset. */
22505 for (subsetIdx = 0;subsetIdx < rbgSize; ++subsetIdx)
22507 allocedMask = allocMask[rbgSubset];
22508 maskSize = rbgSubsetSzArr[rbgSubset];
22510 /* Determine number of available RBs in the subset */
/* NOTE(review): usedRbs is read with subsetIdx while the mask/size use
 * rbgSubset — these differ when startRbgSubset != 0; looks suspicious but
 * left untouched (cannot verify against the elided lines). */
22511 usedRbs = allocedInfo->raType1UsedRbs[subsetIdx];
22512 remNumRbs = maskSize - usedRbs;
22514 if (remNumRbs >= rbsReq)
22516 bestNumAvailRbs = rbsReq;
22517 bestSubsetIdx = rbgSubset;
22520 else if (isPartialAlloc && (remNumRbs > bestNumAvailRbs))
22522 bestNumAvailRbs = remNumRbs;
22523 bestSubsetIdx = rbgSubset;
22526 rbgSubset = (rbgSubset + 1) % rbgSize;
22527 } /* End of for (each rbgsubset) */
22529 if (bestNumAvailRbs)
22531 /* Initialize alloced mask and subsetSize depending on the RBG
22532 * subset of allocation */
22534 maskSize = rbgSubsetSzArr[bestSubsetIdx];
22535 allocedMask = allocMask[bestSubsetIdx];
22536 RG_SCH_CMN_DL_GET_START_POS(allocedMask, maskSize,
22538 for (; startIdx < rbgSize; ++startIdx, ++startPos)
/* Stride by rbgSize inside the subset: positions startPos, startPos+rbgSize,
 * ... map back to distinct RBGs of this subset. */
22540 for (rbInSubset = startPos; rbInSubset < maskSize;
22541 rbInSubset = rbInSubset + rbgSize)
22543 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
22544 if (!(allocedMask & rbPosInSubset))
22546 raType1Mask[bestSubsetIdx] |= rbPosInSubset;
22547 raType1UsedRbs[bestSubsetIdx]++;
22549 /* Compute RB index value for the RB being allocated */
22550 rbgInSubset = rbInSubset /rbgSize;
22551 offset = rbInSubset % rbgSize;
22552 rbg = (rbgInSubset * rbgSize) + bestSubsetIdx;
22553 rbIdx = (rbg * rbgSize) + offset;
22555 /* Update RBG mask for RA type 0 allocation */
22556 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
22557 *rbgMask |= ueRaType0Mask;
22559 /* Update RA type 2 mask */
22560 ueRaType2Mask = rgSCHCmnGetRaType2Mask(rbIdx, &type2MaskIdx);
22561 raType2Mask[type2MaskIdx] |= ueRaType2Mask;
22563 /* Update the counters */
22566 if (numAllocRbs == bestNumAvailRbs)
22571 } /* End of for (each position in the subset mask) */
22572 if (numAllocRbs == bestNumAvailRbs)
22576 } /* End of for startIdx = 0 to rbgSize */
22578 *allocRbgSubset = bestSubsetIdx;
22579 } /* End of if (bestNumAvailRbs) */
22581 return (numAllocRbs);
22585 * @brief Handles RB allocation for Resource allocation type 2
22589 * Function : rgSCHCmnDlRaType2Alloc
22591 * Invoking Module Processing:
22592 * - This function is invoked for DL RB allocation for resource allocation
22595 * Processing Steps:
22596 * - Determine the available positions in the mask
22597 * - Allocate best fit cosecutive RBs.
22598 * - Update RA Type2, RA type 1 and RA type 0 masks.
22600 * @param[in] RgSchDlSfAllocInfo *allocedInfo
22601 * @param[in] U8 rbsReq
22602 * @param[in] RgSchBwRbgInfo *rbgInfo
22603 * @param[out] U8 *rbStart
22604 * @param[out] rgSchDlSfAllocInfo *resAllocInfo
22605 * @param[in] Bool isPartialAlloc
22608 * Number of allocated RBs
/* RA type 2 allocator: finds the best-fit contiguous hole via
 * rgSCHCmnDlGetBestFitHole(), then mirrors the resulting contiguous RB run
 * into the RA type 0 (and, under RGSCH_SPS_UNUSED, type 1) masks. Returns
 * the number of RBs allocated; *rbStart gets the first RB. NOTE(review):
 * partial listing — braces, the mirror-loop header and rbIdx bookkeeping
 * fall in elided lines. */
22612 U8 rgSCHCmnDlRaType2Alloc
22614 RgSchDlSfAllocInfo *allocedInfo,
22616 RgSchBwRbgInfo *rbgInfo,
22618 RgSchDlSfAllocInfo *resAllocInfo,
22619 Bool isPartialAlloc
22622 U8 rgSCHCmnDlRaType2Alloc(allocedInfo, rbsReq, rbgInfo, rbStart,
22623 resAllocInfo, isPartialAlloc)
22624 RgSchDlSfAllocInfo *allocedInfo;
22626 RgSchBwRbgInfo *rbgInfo;
22628 RgSchDlSfAllocInfo *resAllocInfo;
22629 Bool isPartialAlloc;
22632 U8 numAllocRbs = 0;
22634 U8 rbgSize = rbgInfo->rbgSize;
22635 U32 *rbgMask = &resAllocInfo->raType0Mask;
22636 #ifdef RGSCH_SPS_UNUSED
22637 U32 *raType1Mask = resAllocInfo->raType1Mask;
22639 U32 *raType2Mask = resAllocInfo->raType2Mask;
22640 #ifdef RGSCH_SPS_UNUSED
22641 U32 *raType1UsedRbs = resAllocInfo->raType1UsedRbs;
22643 U32 *allocedMask = allocedInfo->raType2Mask;
22645 /* Note: This function attempts only full allocation */
22646 rgSCHCmnDlGetBestFitHole(allocMask, rbgInfo->numRbs,
22647 raType2Mask, rbsReq, rbStart, &numAllocRbs, isPartialAlloc);
22650 /* Update the allocation in RA type 0 and RA type 1 masks */
22651 U8 rbCnt = numAllocRbs;
22652 #ifdef RGSCH_SPS_UNUSED
22661 /* Update RBG mask for RA type 0 allocation */
22662 ueRaType0Mask = rgSCHCmnGetRaType0Mask(rbIdx, rbgSize);
22663 *rbgMask |= ueRaType0Mask;
22665 #ifdef RGSCH_SPS_UNUSED
22666 /* Update RBG mask for RA type 1 */
22667 ueRaType1Mask = rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, &rbgSubset);
22668 raType1Mask[rbgSubset] |= ueRaType1Mask;
22669 raType1UsedRbs[rbgSubset]++;
22671 /* Update the counters */
22677 return (numAllocRbs);
22681 * @brief Determines RA type 0 mask from given RB index.
22685 * Function : rgSCHCmnGetRaType0Mask
22688 * Processing Steps:
22689 * - Determine RA Type 0 mask for given rbIdex and rbg size.
22691 * @param[in] U8 rbIdx
22692 * @param[in] U8 rbgSize
22693 * @return U32 RA type 0 mask
/* Maps an absolute RB index to its single-bit position in the RA type 0 RBG
 * bitmask (RBG index = rbIdx / rbgSize). */
22696 PRIVATE U32 rgSCHCmnGetRaType0Mask
22702 PRIVATE U32 rgSCHCmnGetRaType0Mask(rbIdx, rbgSize)
22708 U32 rbgPosInRbgMask = 0;
22710 rbg = rbIdx/rbgSize;
22711 rbgPosInRbgMask = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbg);
22713 return (rbgPosInRbgMask);
22716 #ifdef RGSCH_SPS_UNUSED
22718 * @brief Determines RA type 1 mask from given RB index.
22722 * Function : rgSCHCmnGetRaType1Mask
22725 * Processing Steps:
22726 * - Determine RA Type 1 mask for given rbIdex and rbg size.
22728 * @param[in] U8 rbIdx
22729 * @param[in] U8 rbgSize
22730 * @param[out] U8 *type1Subset
22731 * @return U32 RA type 1 mask
/* Maps an absolute RB index to its bit position within its RA type 1 RBG
 * subset; the subset number (rbg % rbgSize) is returned via *type1Subset.
 * Compiled only under RGSCH_SPS_UNUSED. */
22734 PRIVATE U32 rgSCHCmnGetRaType1Mask
22741 PRIVATE U32 rgSCHCmnGetRaType1Mask(rbIdx, rbgSize, type1Subset)
22747 U8 rbg, rbgSubset, rbgInSubset, offset, rbInSubset;
22750 rbg = rbIdx/rbgSize;
22751 rbgSubset = rbg % rbgSize;
22752 rbgInSubset = rbg/rbgSize;
22753 offset = rbIdx % rbgSize;
/* Position of the RB inside the subset's own linear numbering. */
22754 rbInSubset = rbgInSubset * rbgSize + offset;
22755 rbPosInSubset = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbInSubset);
22757 *type1Subset = rbgSubset;
22758 return (rbPosInSubset);
22760 #endif /* RGSCH_SPS_UNUSED */
22762 * @brief Determines RA type 2 mask from given RB index.
22766 * Function : rgSCHCmnGetRaType2Mask
22769 * Processing Steps:
22770 * - Determine RA Type 2 mask for given rbIdx and rbg size.
22772 * @param[in] U8 rbIdx
22773 * @param[out] U8 *maskIdx
22774 * @return U32 RA type 2 mask
/* Maps an absolute RB index to its bit in the 32-bit-word array of the RA
 * type 2 mask; the word index (rbIdx / 32) is returned via *maskIdx. */
22777 PRIVATE U32 rgSCHCmnGetRaType2Mask
22783 PRIVATE U32 rgSCHCmnGetRaType2Mask(rbIdx, maskIdx)
22790 *maskIdx = rbIdx / 32;
22791 rbPosInType2 = 1 << RG_SCH_CMN_DL_GET_POS_FRM_LSB(rbIdx % 32);
22793 return (rbPosInType2);
22797 * @brief Performs resource allocation for a non-SPS UE in SPS bandwidth
22801 * Function : rgSCHCmnAllocUeInSpsBw
22804 * Processing Steps:
22805 * - Determine allocation for the UE.
22806 * - Use resource allocation type 0, 1 and 2 for allocation
22807 * within maximum SPS bandwidth.
22809 * @param[in] RgSchDlSf *dlSf
22810 * @param[in] RgSchCellCb *cell
22811 * @param[in] RgSchUeCb *ue
22812 * @param[in] RgSchDlRbAlloc *rbAllocInfo
22813 * @param[in] Bool isPartialAlloc
/* Allocates DL resources for a non-SPS UE inside the cell's reserved SPS
 * bandwidth, using the UE's RA type (0/2, plus 1 under RGSCH_SPS_UNUSED).
 * On success updates the per-TB byte counts, the rbAllocInfo masks, the
 * subframe-wide allocation masks and dlSf->spsAllocdBw. Returns TRUE when a
 * PDCCH could not be obtained (it may still be available for another UE) or
 * allocation succeeded; FALSE paths fall in elided lines. NOTE(review):
 * partial listing — braces, early returns and #else lines are elided. */
22819 Bool rgSCHCmnAllocUeInSpsBw
22824 RgSchDlRbAlloc *rbAllocInfo,
22825 Bool isPartialAlloc
22828 Bool rgSCHCmnAllocUeInSpsBw(dlSf, cell, ue, rbAllocInfo, isPartialAlloc)
22832 RgSchDlRbAlloc *rbAllocInfo;
22833 Bool isPartialAlloc;
22836 U8 rbgSize = cell->rbgSize;
22837 U8 numAllocRbs = 0;
22838 U8 numAllocRbgs = 0;
22840 U8 idx, noLyr, iTbs;
22841 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
22842 RgSchDlSfAllocInfo *dlSfAlloc = &rbAllocInfo->dlSf->dlSfAllocInfo;
22843 RgSchBwRbgInfo *spsRbgInfo = &cell->spsBwRbgInfo;
22845 /* SPS_FIX : Check if this Hq proc is scheduled */
/* Nothing scheduled on either transport block — nothing to allocate. */
22846 if ((0 == rbAllocInfo->tbInfo[0].schdlngForTb) &&
22847 (0 == rbAllocInfo->tbInfo[1].schdlngForTb))
22852 /* Check if the requirement can be accomodated in SPS BW */
22853 if (dlSf->spsAllocdBw == spsRbgInfo->numRbs)
22855 /* SPS Bandwidth has been exhausted: no further allocations possible */
/* Full allocation requested: bail out if the request overshoots SPS BW. */
22858 if (!isPartialAlloc)
22860 if((dlSf->spsAllocdBw + rbAllocInfo->rbsReq) > spsRbgInfo->numRbs)
22866 /* Perform allocation for RA type 0 if rbsReq is multiple of RBG size (also
22867 * if RBG size = 1) */
22868 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
/* Round rbsReq up to a whole number of RBGs.
 * NOTE(review): when rbsReq is already a multiple of rbgSize this adds a
 * full extra RBG — looks off-by-rbgSize, but cannot be verified against the
 * elided surrounding lines; left untouched. */
22870 rbAllocInfo->rbsReq += (rbgSize - rbAllocInfo->rbsReq % rbgSize);
22871 numAllocRbgs = rgSCHCmnDlRaType0Alloc(dlSfAlloc,
22872 rbAllocInfo->rbsReq, spsRbgInfo, &numAllocRbs,
22873 &rbAllocInfo->resAllocInfo, isPartialAlloc);
22875 #ifdef RGSCH_SPS_UNUSED
22876 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
22878 /* If no RBS could be allocated, attempt RA TYPE 1 */
22880 numAllocRbs = rgSCHCmnDlRaType1Alloc(dlSfAlloc,
22881 rbAllocInfo->rbsReq, spsRbgInfo, (U8)dlSfAlloc->nxtRbgSubset,
22882 &rbAllocInfo->allocInfo.raType1.rbgSubset,
22883 &rbAllocInfo->resAllocInfo, isPartialAlloc);
/* Rotate the starting subset so successive UEs spread across subsets. */
22887 dlSfAlloc->nxtRbgSubset =
22888 (rbAllocInfo->allocInfo.raType1.rbgSubset + 1 ) % rbgSize;
22892 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22894 numAllocRbs = rgSCHCmnDlRaType2Alloc(dlSfAlloc,
22895 rbAllocInfo->rbsReq, spsRbgInfo,
22896 &rbStart, &rbAllocInfo->resAllocInfo, isPartialAlloc);
22903 if (!(rbAllocInfo->pdcch =
22904 rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi,\
22905 rbAllocInfo->dciFormat, FALSE)))
22907 /* Note: Returning TRUE since PDCCH might be available for another UE */
22911 /* Update Tb info for each scheduled TB */
/* Byte capacity from the 36.213 TB-size table: layers x iTbs x RBs. */
22912 iTbs = rbAllocInfo->tbInfo[0].iTbs;
22913 noLyr = rbAllocInfo->tbInfo[0].noLyr;
22914 rbAllocInfo->tbInfo[0].bytesAlloc =
22915 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;
22917 if (rbAllocInfo->tbInfo[1].schdlngForTb)
22919 iTbs = rbAllocInfo->tbInfo[1].iTbs;
22920 noLyr = rbAllocInfo->tbInfo[1].noLyr;
22921 rbAllocInfo->tbInfo[1].bytesAlloc =
22922 rgTbSzTbl[noLyr - 1][iTbs][numAllocRbs - 1]/8;;
22925 /* Update rbAllocInfo with the allocation information */
22926 if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE0)
22928 rbAllocInfo->allocInfo.raType0.dlAllocBitMask =
22929 rbAllocInfo->resAllocInfo.raType0Mask;
22930 rbAllocInfo->allocInfo.raType0.numDlAlloc = numAllocRbgs;
22932 #ifdef RGSCH_SPS_UNUSED
22933 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE1)
22935 rbAllocInfo->allocInfo.raType1.dlAllocBitMask =
22936 rbAllocInfo->resAllocInfo.raType1Mask[rbAllocInfo->allocInfo.raType1.rbgSubset];
22937 rbAllocInfo->allocInfo.raType1.numDlAlloc = numAllocRbs;
22938 rbAllocInfo->allocInfo.raType1.shift = 0;
22941 else if (rbAllocInfo->raType == RG_SCH_CMN_RA_TYPE2)
22943 rbAllocInfo->allocInfo.raType2.isLocal = TRUE;
22944 rbAllocInfo->allocInfo.raType2.rbStart = rbStart;
22945 rbAllocInfo->allocInfo.raType2.numRb = numAllocRbs;
22948 rbAllocInfo->rbsAlloc = numAllocRbs;
22949 rbAllocInfo->tbInfo[0].schdlngForTb = TRUE;
22951 /* Update allocation masks for RA types 0, 1 and 2 in DL SF */
22953 /* Update type 0 allocation mask */
22954 dlSfAlloc->raType0Mask |= rbAllocInfo->resAllocInfo.raType0Mask;
22955 #ifdef RGSCH_SPS_UNUSED
22956 /* Update type 1 allocation masks */
22957 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
22959 dlSfAlloc->raType1Mask[idx] |= rbAllocInfo->resAllocInfo.raType1Mask[idx];
22960 dlSfAlloc->raType1UsedRbs[idx] +=
22961 rbAllocInfo->resAllocInfo.raType1UsedRbs[idx];
22964 /* Update type 2 allocation masks */
22965 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
22967 dlSfAlloc->raType2Mask[idx] |= rbAllocInfo->resAllocInfo.raType2Mask[idx];
/* Track consumed SPS bandwidth for subsequent callers. */
22970 dlSf->spsAllocdBw += numAllocRbs;
22974 /***********************************************************
22976 * Func : rgSCHCmnDlGetBestFitHole
22979 * Desc : Converts the best fit hole into allocation and returns the
22980 * allocation information.
22990 **********************************************************/
/* Scans allocMask (MSB-first within each 32-bit word) for a run of free RBs:
 * on finding rbsReq contiguous free RBs it converts that hole into an
 * allocation in crntAllocMask and reports start/length; otherwise, when
 * isPartialAlloc, it falls back to the largest run seen. NOTE(review):
 * partial listing — braces, the run-reset path (a set bit terminating the
 * current run) and some counters fall in elided lines. */
22992 PRIVATE Void rgSCHCmnDlGetBestFitHole
22996 U32 *crntAllocMask,
23000 Bool isPartialAlloc
23003 PRIVATE Void rgSCHCmnDlGetBestFitHole (allocMask, numMaskRbs,
23004 crntAllocMask, rbsReq, allocStart, allocNumRbs, isPartialAlloc)
23007 U32 *crntAllocMask;
23011 Bool isPartialAlloc;
23014 U8 maskSz = (numMaskRbs + 31)/32;
23015 U8 maxMaskPos = (numMaskRbs % 32);
23016 U8 maskIdx, maskPos;
23017 U8 numAvailRbs = 0;
23018 U8 bestAvailNumRbs = 0;
23019 S8 bestStartPos = -1;
23021 U32 tmpMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
23022 U32 bestMask[RG_SCH_NUM_RATYPE2_32BIT_MASK] = {0};
23024 *allocNumRbs = numAvailRbs;
23027 for (maskIdx = 0; maskIdx < maskSz; ++maskIdx)
/* Last word may be partially populated; clamp the bit scan accordingly. */
23030 if (maskIdx == (maskSz - 1))
23032 if (numMaskRbs % 32)
23034 maxMaskPos = numMaskRbs % 32;
23037 for (maskPos = 0; maskPos < maxMaskPos; ++maskPos)
/* Bit clear => RB free: extend the current candidate hole. */
23039 if (!(allocMask[maskIdx] & (1 << (31 - maskPos))))
23041 tmpMask[maskIdx] |= (1 << (31 - maskPos));
23042 if (startPos == -1)
23044 startPos = maskIdx * 32 + maskPos;
23047 if (numAvailRbs == rbsReq)
23049 *allocStart = (U8)startPos;
23050 *allocNumRbs = rbsReq;
/* Remember the largest hole seen so far for the partial fallback. */
23056 if (numAvailRbs > bestAvailNumRbs)
23058 bestAvailNumRbs = numAvailRbs;
23059 bestStartPos = startPos;
23060 memcpy(bestMask, tmpMask, 4 * sizeof(U32));
23064 memset(tmpMask, 0, 4 * sizeof(U32));
23067 if (*allocNumRbs == rbsReq)
23073 if (*allocNumRbs == rbsReq)
23075 /* Convert the hole into allocation */
23076 memcpy(crntAllocMask, tmpMask, 4 * sizeof(U32));
23081 if (bestAvailNumRbs && isPartialAlloc)
23083 /* Partial allocation could have been done */
23084 *allocStart = (U8)bestStartPos;
23085 *allocNumRbs = bestAvailNumRbs;
23086 /* Convert the hole into allocation */
23087 memcpy(crntAllocMask, bestMask, 4 * sizeof(U32));
23093 #endif /* LTEMAC_SPS */
23095 /***************************************************************************
23097 * NON-DLFS Allocation functions
23099 * *************************************************************************/
23103 * @brief Function to find out code rate
23107 * Function : rgSCHCmnFindCodeRate
23109 * Processing Steps:
23111 * @param[in] RgSchCellCb *cell
23112 * @param[in] RgSchDlSf *dlSf
23113 * @param[in,out] RgSchDlRbAlloc *allocInfo
/* Determines the effective code rate / iMcs after RB adjustment.
 * NOTE(review): nearly the whole body is elided in this listing — only the
 * prototype fragments and the explanatory comment are visible. */
23118 PRIVATE Void rgSCHCmnFindCodeRate
23122 RgSchDlRbAlloc *allocInfo,
23126 PRIVATE Void rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,idx)
23129 RgSchDlRbAlloc *allocInfo;
23138 /* Adjust the Imcs and bytes allocated also with respect to the adjusted
23139 RBs - Here we will find out the Imcs by identifying first Highest
23140 number of bits compared to the original bytes allocated. */
23142 * @brief Adjust IMCS according to tbSize and ITBS
23146 * Function : rgSCHCmnNonDlfsPbchTbImcsAdj
23148 * Processing Steps:
23149 * - Adjust Imcs according to tbSize and ITBS.
23151 * @param[in,out] RgSchDlRbAlloc *allocInfo
23152 * @param[in] U8 *idx
/* After the RB count was adjusted (PBCH overlap handling), walks the
 * 36.213 TB-size table downward to find the largest iTbs whose TB size does
 * not exceed the originally requested bytes, then rewrites bytesReq/iTbs/
 * imcs in allocInfo->tbInfo[idx]. NOTE(review): partial listing — braces,
 * the tbs decrement inside the while loop and some declarations are elided. */
23156 PRIVATE Void rgSCHCmnNonDlfsPbchTbImcsAdj
23159 RgSchDlRbAlloc *allocInfo,
23164 PRIVATE Void rgSCHCmnNonDlfsPbchTbImcsAdj(cell,allocInfo, idx, rbsReq)
23166 RgSchDlRbAlloc *allocInfo;
23176 RgSchDlSf *dlSf = allocInfo->dlSf;
23178 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
23179 noLyrs = allocInfo->tbInfo[idx].noLyr;
23181 if((allocInfo->raType == RG_SCH_CMN_RA_TYPE0))
/* RA type 0: round up to whole RBGs, compensating the last-RBG deficit. */
23183 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + dlSf->lstRbgDfct), cell->rbgSize);
23184 noRbs = (noRbgs * cell->rbgSize) - dlSf->lstRbgDfct;
23188 noRbs = allocInfo->rbsReq;
23191 /* This line will help in case if tbs is zero and reduction in MCS is not possible */
23192 if (allocInfo->rbsReq == 0 )
23196 origBytesReq = rgTbSzTbl[noLyrs - 1][tbs][rbsReq - 1]/8;
23198 /* Find out the ITbs & Imcs by identifying first Highest
23199 number of bits compared to the original bytes allocated.*/
/* Only search downward if even iTbs=0 undershoots the original request. */
23202 if(((rgTbSzTbl[noLyrs - 1][0][noRbs - 1])/8) < origBytesReq)
23204 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyrs - 1], tbs);
23205 while(((rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1])/8) > origBytesReq)
23214 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][noRbs - 1]/8;
23215 allocInfo->tbInfo[idx].iTbs = tbs;
23216 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
23221 /* Added funcion to adjust TBSize*/
23223 * @brief Function to adjust the tbsize in case of subframe 0 & 5 when
23224 * we were not able to do RB alloc adjustment by adding extra required Rbs
23228 * Function : rgSCHCmnNonDlfsPbchTbSizeAdj
23230 * Processing Steps:
23232 * @param[in,out] RgSchDlRbAlloc *allocInfo
23233 * @param[in] U8 numOvrlapgPbchRb
23234 * @param[in] U8 idx
23235 * @param[in] U8 pbchSsRsSym
/* NOTE(review): elided extract -- lines are missing between the numbered
 * lines below.  Comments only.  Visible logic: computes a reduced target
 * TB size by subtracting the bits lost to PBCH/PSS/SSS in the overlapping
 * RBs (numOvrlapgPbchRb * pbchSsRsSym REs, 6 bits each per the visible
 * "* 6" factor -- TODO confirm units), then lowers iTbs via rgTbSzTbl
 * until the TB size fits, writing bytesReq/iTbs/imcs back. */
23239 PRIVATE Void rgSCHCmnNonDlfsPbchTbSizeAdj
23241 RgSchDlRbAlloc *allocInfo,
23242 U8 numOvrlapgPbchRb,
23248 PRIVATE Void rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,idx,bytesReq)
23249 RgSchDlRbAlloc *allocInfo;
23250 U8 numOvrlapgPbchRb;
23256 U32 reducedTbs = 0;
23260 noLyrs = allocInfo->tbInfo[idx].noLyr;
23262 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[idx].imcs, tbs);
23264 reducedTbs = bytesReq - (((U32)numOvrlapgPbchRb * (U32)pbchSsRsSym * 6)/8);
23266 /* find out the ITbs & Imcs by identifying first Highest
23267 number of bits compared with reduced bits considering the bits that are
23268 reserved for PBCH/PSS/SSS */
23269 if(((rgTbSzTbl[noLyrs - 1][0][allocInfo->rbsReq - 1])/8) < reducedTbs)
23271 while(((rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1])/8) > reducedTbs)
23280 allocInfo->tbInfo[idx].bytesReq = rgTbSzTbl[noLyrs - 1][tbs][allocInfo->rbsReq - 1]/8;
23281 allocInfo->tbInfo[idx].iTbs = tbs;
23282 RG_SCH_CMN_DL_TBS_TO_MCS(tbs,allocInfo->tbInfo[idx].imcs);
23287 /* Added this function to find num of additional RBs available in the BW */
23289 * @brief Function to find out how many additional rbs are available
23290 * in the entire bw which can be allocated to a UE
23293 * Function : rgSCHCmnFindNumAddtlRbsAvl
23295 * Processing Steps:
23296 * - Calculates number of additinal rbs available
23298 * @param[in] RgSchCellCb *cell
23299 * @param[in] RgSchDlSf *dlSf
23300 * @param[in,out] RgSchDlRbAlloc *allocInfo
23301 * @param[out] U8 addtlRbsAvl
/* NOTE(review): elided extract -- braces/lines missing between the numbered
 * lines.  Comments only.  Returns the RBs still free in the subframe beyond
 * allocInfo->rbsReq: for RA type 0 the free span is counted in RBGs
 * (type0End..type2End, corrected by lstRbgDfct); for RA type 2 it is
 * simply bw - bwAlloced. */
23305 PRIVATE U8 rgSCHCmnFindNumAddtlRbsAvl
23309 RgSchDlRbAlloc *allocInfo
23312 PRIVATE U8 rgSCHCmnFindNumAddtlRbsAvl(cell,dlSf,allocInfo)
23315 RgSchDlRbAlloc *allocInfo;
23318 U8 addtlRbsAvl = 0;
23321 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
23323 addtlRbsAvl = (((dlSf->type0End - dlSf->type2End + 1)*\
23324 cell->rbgSize) - dlSf->lstRbgDfct) - allocInfo->rbsReq;
23326 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
23328 addtlRbsAvl = (dlSf->bw - dlSf->bwAlloced) - allocInfo->rbsReq;
23331 return (addtlRbsAvl);
23334 /* Added this function to find num of ovrlapping PBCH rb*/
23336 * @brief Function to find out how many of the requested RBs are
23337 * falling in the center 6 RBs of the downlink bandwidth.
23340 * Function : rgSCHCmnFindNumPbchOvrlapRbs
23342 * Processing Steps:
23343 * - Calculates number of overlapping rbs
23345 * @param[in] RgSchCellCb *cell
23346 * @param[in] RgSchDlSf *dlSf
23347 * @param[in,out] RgSchDlRbAlloc *allocInfo
23348 * @param[out] U8* numOvrlapgPbchRb
/* NOTE(review): elided extract -- braces/else lines are missing between the
 * numbered lines.  Comments only.  The next allocation is the span
 * [bwAlloced, bwAlloced + rbsReq); this routine intersects that span with
 * the PBCH band [pbchRbStart, pbchRbEnd) (the centre 6 RBs, hence the
 * clamp to 6) and writes the overlap size to *numOvrlapgPbchRb. */
23352 PRIVATE Void rgSCHCmnFindNumPbchOvrlapRbs
23356 RgSchDlRbAlloc *allocInfo,
23357 U8 *numOvrlapgPbchRb
23360 PRIVATE Void rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,numOvrlapgPbchRb)
23363 RgSchDlRbAlloc *allocInfo;
23364 U8 *numOvrlapgPbchRb;
23367 *numOvrlapgPbchRb = 0;
23368 /*Find if we have already crossed the start boundary for PBCH 6 RBs,
23369 * if yes then lets find the number of RBs which are getting overlapped
23370 * with this allocation.*/
23371 if(dlSf->bwAlloced <= (cell->pbchRbStart))
23373 /*We have not crossed the start boundary of PBCH RBs. Now we need
23374 * to know that if take this allocation then how much PBCH RBs
23375 * are overlapping with this allocation.*/
23376 /* Find out the overlapping RBs in the centre 6 RBs */
23377 if((dlSf->bwAlloced + allocInfo->rbsReq) > cell->pbchRbStart)
23379 *numOvrlapgPbchRb = (dlSf->bwAlloced + allocInfo->rbsReq) - (cell->pbchRbStart);
23380 if(*numOvrlapgPbchRb > 6)
23381 *numOvrlapgPbchRb = 6;
23384 else if ((dlSf->bwAlloced > (cell->pbchRbStart)) &&
23385 (dlSf->bwAlloced < (cell->pbchRbEnd)))
23387 /*We have already crossed the start boundary of PBCH RBs.We need to
23388 * find that if we take this allocation then how much of the RBs for
23389 * this allocation will overlap with PBCH RBs.*/
23390 /* Find out the overlapping RBs in the centre 6 RBs */
23391 if(dlSf->bwAlloced + allocInfo->rbsReq < (cell->pbchRbEnd))
23393 /*If we take this allocation then also we are not crossing the
23394 * end boundary of PBCH 6 RBs.*/
23395 *numOvrlapgPbchRb = allocInfo->rbsReq;
23399 /*If we take this allocation then we are crossing the
23400 * end boundary of PBCH 6 RBs.*/
23401 *numOvrlapgPbchRb = (cell->pbchRbEnd) - dlSf->bwAlloced;
23408 * @brief Performs RB allocation adjustment if the requested RBs are
23409 * falling in the center 6 RBs of the downlink bandwidth.
23412 * Function : rgSCHCmnNonDlfsPbchRbAllocAdj
23414 * Processing Steps:
23415 * - Allocate consecutively available RBs.
23417 * @param[in] RgSchCellCb *cell
23418 * @param[in,out] RgSchDlRbAlloc *allocInfo
23419 * @param[in] U8 pbchSsRsSym
/* NOTE(review): elided extract -- many original lines (braces, some
 * declarations, the isBcchPcch parameter lines) are missing between the
 * numbered lines.  Comments only.  Overall visible flow:
 *  1) count RBs overlapping the PBCH band, 2) top up rbsReq with extra RBs
 *     to compensate for REs lost to CFICH/PBCH/PSS/SSS/CRS (ceil of
 *     overlap*pbchSsRsSym/totSym), 3) if the top-up itself overlaps the
 *     PBCH band, iterate once more, 4) if the top-up could not be granted
 *     (bandwidth full / partial), fall back to Imcs and/or TB-size
 *     adjustment via rgSCHCmnNonDlfsPbchTbImcsAdj / ...TbSizeAdj for TB0
 *     and, when scheduled, TB1. */
23423 PRIVATE Void rgSCHCmnNonDlfsPbchRbAllocAdj
23426 RgSchDlRbAlloc *allocInfo,
23431 PRIVATE Void rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo,pbchSsRsSym)
23433 RgSchDlRbAlloc *allocInfo;
23438 RgSchDlSf *dlSf = allocInfo->dlSf;
23439 U8 numOvrlapgPbchRb = 0;
23440 U8 numOvrlapgAdtlPbchRb = 0;
23442 U8 addtlRbsReq = 0;
23443 U8 moreAddtlRbsReq = 0;
23444 U8 addtlRbsAdd = 0;
23445 U8 moreAddtlRbsAdd = 0;
23455 origRbsReq = allocInfo->rbsReq;
23456 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23458 totSym = (cell->isCpDlExtend) ? RGSCH_TOT_NUM_SYM_EXTCP : RGSCH_TOT_NUM_SYM_NORCP;
23460 /* Additional RBs are allocated by considering the loss due to
23461 the reserved symbols for CFICH, PBCH, PSS, SSS and cell specific RS */
23463 divResult = (numOvrlapgPbchRb * pbchSsRsSym)/totSym;
23464 if((numOvrlapgPbchRb * pbchSsRsSym) % totSym)
23468 addtlRbsReq = divResult;
23470 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, addtlRbsReq, addtlRbsAdd)
23472 /*Now RBs requires is original requested RBs + these additional RBs to make
23473 * up for PSS/SSS/BCCH.*/
23474 allocInfo->rbsReq = allocInfo->rbsReq + addtlRbsAdd;
23476 /*Check if with these additional RBs we have taken up, these are also falling
23477 * under PBCH RBs range, if yes then we would need to account for
23478 * PSS/BSS/BCCH for these additional RBs too.*/
23479 if(addtlRbsAdd && ((dlSf->bwAlloced + allocInfo->rbsReq - addtlRbsAdd) < (cell->pbchRbEnd)))
23481 if((dlSf->bwAlloced + allocInfo->rbsReq) <= (cell->pbchRbEnd))
23483 /*With additional RBs taken into account, we are not crossing the
23484 * PBCH RB end boundary.Thus here we need to account just for
23485 * overlapping PBCH RBs for these additonal RBs.*/
23486 divResult = (addtlRbsAdd * pbchSsRsSym)/totSym;
23487 if((addtlRbsAdd * pbchSsRsSym) % totSym)
23492 moreAddtlRbsReq = divResult;
23494 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
23496 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
23501 /*Here we have crossed the PBCH RB end boundary, thus we need to take
23502 * into account the overlapping RBs for additional RBs which will be
23503 * subset of addtlRbs.*/
23504 numOvrlapgAdtlPbchRb = (cell->pbchRbEnd) - ((dlSf->bwAlloced + allocInfo->rbsReq) - addtlRbsAdd);
23506 divResult = (numOvrlapgAdtlPbchRb * pbchSsRsSym)/totSym;
23507 if((numOvrlapgAdtlPbchRb * pbchSsRsSym) % totSym)
23512 moreAddtlRbsReq = divResult;
23514 RG_SCH_CMN_UPD_RBS_TO_ADD(cell, dlSf, allocInfo, moreAddtlRbsReq, moreAddtlRbsAdd)
23516 allocInfo->rbsReq = allocInfo->rbsReq + moreAddtlRbsAdd;
23519 if (isBcchPcch == TRUE)
23524 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
23527 /* This case might be for Imcs value 6 and NPrb = 1 case - Not
23528 Adjusting either RBs or Imcs or Bytes Allocated */
23529 allocInfo->rbsReq = allocInfo->rbsReq - addtlRbsAdd - moreAddtlRbsAdd;
23531 else if(tbs && ((0 == addtlRbsAdd) && (moreAddtlRbsAdd == 0)))
23533 /*In case of a situation where we the entire bandwidth is already occupied
23534 * and we dont have room to add additional Rbs then in order to decrease the
23535 * code rate we reduce the tbsize such that we reduce the present calculated
23536 * tbsize by number of bytes that would be occupied by PBCH/PSS/SSS in overlapping
23537 * rbs and find the nearest tbsize which would be less than this deduced value*/
23539 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23541 noLyr = allocInfo->tbInfo[0].noLyr;
23542 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgTbSzTbl[noLyr - 1], tbs);
23543 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
23545 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,bytesReq);
23547 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23549 noLyr = allocInfo->tbInfo[1].noLyr;
23550 bytesReq = rgTbSzTbl[noLyr - 1][tbs][allocInfo->rbsReq - 1]/8;
23551 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,bytesReq);
23555 else if(tbs && ((addtlRbsAdd != addtlRbsReq) ||
23556 (addtlRbsAdd && (moreAddtlRbsReq != moreAddtlRbsAdd))))
23558 /*In case of a situation where we were not able to add required number of
23559 * additional RBs then we adjust the Imcs based on original RBs requested.
23560 * Doing this would comensate for the few extra Rbs we have added but inorder
23561 * to comensate for number of RBS we couldnt add we again do the TBSize adjustment*/
23563 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
23565 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23567 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
23570 rgSCHCmnFindNumPbchOvrlapRbs(cell,dlSf,allocInfo,&numOvrlapgPbchRb);
23571 numOvrlapgPbchRb = numOvrlapgPbchRb - (addtlRbsAdd + moreAddtlRbsAdd);
23573 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,0,allocInfo->tbInfo[0].bytesReq);
23575 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23577 rgSCHCmnNonDlfsPbchTbSizeAdj(allocInfo,numOvrlapgPbchRb,pbchSsRsSym,1,allocInfo->tbInfo[1].bytesReq);
23583 /*We hit this code when we were able to add the required additional RBS
23584 * hence we should adjust the IMcs based on orignals RBs requested*/
23586 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 0 , origRbsReq);
23588 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
23590 rgSCHCmnNonDlfsPbchTbImcsAdj(cell, allocInfo, 1 , origRbsReq);
23595 } /* end of rgSCHCmnNonDlfsPbchRbAllocAdj */
23599 * @brief Performs RB allocation for frequency non-selective cell.
23603 * Function : rgSCHCmnNonDlfsCmnRbAlloc
23605 * Processing Steps:
23606 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
23608 * @param[in] RgSchCellCb *cell
23609 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* NOTE(review): elided extract -- many lines (return statements, braces,
 * #ifdef guards, some declarations such as pbchFrame/rbStart/idx) are
 * missing between the numbered lines.  Comments only.  Visible flow:
 * clear the RA-type masks, bail out / shrink the request when the subframe
 * bandwidth is exhausted, optionally try an RA type 2 allocation inside the
 * SPS bandwidth, allocate a common-channel PDCCH (DCI 1A, RA type 2,
 * localized), then compensate for PSS/SSS(/PBCH) overhead in subframes 0
 * and 5 via rgSCHCmnNonDlfsPbchRbAllocAdj before committing the type 2
 * allocation and updating the subframe allocation masks. */
23615 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc
23618 RgSchDlRbAlloc *allocInfo
23621 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAlloc(cell, allocInfo)
23623 RgSchDlRbAlloc *allocInfo;
23629 U8 pbchSsRsSym = 0;
23632 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
23634 RgSchDlSf *dlSf = allocInfo->dlSf;
23637 U8 spsRbsAlloc = 0;
23638 RgSchDlSfAllocInfo *dlSfAlloc = &allocInfo->dlSf->dlSfAllocInfo;
23641 allocInfo->tbInfo[0].noLyr = 1;
23644 /* Note: Initialize the masks to 0, this might not be needed since alloInfo
23645 * is initialized to 0 at the beginning of allcoation */
23646 allocInfo->resAllocInfo.raType0Mask = 0;
23647 memset(allocInfo->resAllocInfo.raType1Mask, 0,
23648 RG_SCH_NUM_RATYPE1_32BIT_MASK * sizeof (U32));
23649 memset(allocInfo->resAllocInfo.raType2Mask, 0,
23650 RG_SCH_NUM_RATYPE2_32BIT_MASK * sizeof (U32));
23652 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
23653 (dlSf->bwAlloced == dlSf->bw))
23655 if(dlSf->bwAlloced == dlSf->bw)
23661 if (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced))
23664 if ((allocInfo->tbInfo[0].imcs < 29) && (dlSf->bwAlloced < dlSf->bw))
23666 if(allocInfo->tbInfo[0].imcs < 29)
23669 /* set the remaining RBs for the requested UE */
23670 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
23671 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
23672 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[0][tbs][allocInfo->rbsReq - 1]/8;
23677 /* Attempt RA Type 2 allocation in SPS Bandwidth */
23678 if (dlSf->spsAllocdBw < cell->spsBwRbgInfo.numRbs)
23681 rgSCHCmnDlRaType2Alloc(dlSfAlloc,
23682 allocInfo->rbsReq, &cell->spsBwRbgInfo, &rbStart,
23683 &allocInfo->resAllocInfo, FALSE);
23684 /* rbsAlloc assignment moved from line 16671 to here to avoid
23685 * compilation error. Recheck */
23686 dlSf->spsAllocdBw += spsRbsAlloc;
23689 #endif /* LTEMAC_SPS */
23697 /* Update allocation information */
23698 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
23699 if (allocInfo->pdcch == NULLP)
23703 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23704 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
23705 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23706 allocInfo->allocInfo.raType2.isLocal = TRUE;
23710 allocInfo->allocInfo.raType2.rbStart = rbStart;
23711 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23712 allocInfo->rbsAlloc = allocInfo->rbsReq;
23723 if(!(dlSf->sfNum == 5))
23725 /* case for subframes 1 to 9 except 5 */
23727 allocInfo->allocInfo.raType2.rbStart = rbStart;
23729 /*Fix for ccpu00123918*/
23730 allocInfo->allocInfo.raType2.rbStart = (U8)dlSf->type2Start;
23735 pbchFrame = 1; /* case for subframe 5 */
23736 /* In subframe 5, symbols are reserved for PSS and SSS and CFICH
23737 and Cell Specific Reference Signals */
23738 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PSS_SSS_SYM) *
23739 RGSCH_NUM_SC_IN_RB + cell->numCellRSPerSf);
23745 /* In subframe 0, symbols are reserved for PSS, SSS, PBCH, CFICH and
23746 and Cell Specific Reference signals */
23747 pbchSsRsSym = (((cellDl->currCfi) + RGSCH_NUM_PBCH_SYM +
23748 RGSCH_NUM_PSS_SSS_SYM) * RGSCH_NUM_SC_IN_RB +
23749 cell->numCellRSPerSf);
23750 } /* end of outer else */
23753 (((dlSf->bwAlloced + allocInfo->rbsReq) - cell->pbchRbStart) > 0)&&
23754 (dlSf->bwAlloced < cell->pbchRbEnd))
23756 if(allocInfo->tbInfo[0].imcs < 29)
23758 rgSCHCmnNonDlfsPbchRbAllocAdj(cell, allocInfo, pbchSsRsSym, TRUE);
23770 /*Fix for ccpu00123918*/
23771 allocInfo->allocInfo.raType2.rbStart = (U8)dlSf->type2Start;
23772 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23773 allocInfo->rbsAlloc = allocInfo->rbsReq;
23775 /* LTE_ADV_FLAG_REMOVED_START */
23777 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
23779 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
23780 allocInfo->allocInfo.raType2.rbStart, \
23781 allocInfo->allocInfo.raType2.numRb);
23786 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
23787 allocInfo->allocInfo.raType2.rbStart, \
23788 allocInfo->allocInfo.raType2.numRb);
23794 /* LTE_ADV_FLAG_REMOVED_END */
23795 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23802 /* Update type 0, 1 and 2 masks */
23803 dlSfAlloc->raType0Mask |= allocInfo->resAllocInfo.raType0Mask;
23804 #ifdef RGSCH_SPS_UNUSED
23805 for (idx = 0; idx < RG_SCH_NUM_RATYPE1_32BIT_MASK; ++idx)
23807 dlSfAlloc->raType1Mask[idx] |=
23808 allocInfo->resAllocInfo.raType1Mask[idx];
23809 dlSfAlloc->raType1UsedRbs[idx] +=
23810 allocInfo->resAllocInfo.raType1UsedRbs[idx];
23813 for (idx = 0; idx < RG_SCH_NUM_RATYPE2_32BIT_MASK; ++idx)
23815 dlSfAlloc->raType2Mask[idx] |=
23816 allocInfo->resAllocInfo.raType2Mask[idx];
23826 * @brief Performs RB allocation for frequency non-selective cell.
23830 * Function : rgSCHCmnNonDlfsCmnRbAllocRar
23832 * Processing Steps:
23833 * - Allocate consecutively available RBs for BCCH/PCCH/RAR.
23835 * @param[in] RgSchCellCb *cell
23836 * @param[in, out] RgSchDlRbAlloc *allocInfo
/* NOTE(review): elided extract -- lines (returns, braces, #ifdef guards)
 * are missing between the numbered lines.  FIX: the K&R-style definition
 * line below previously read rgSCHCmnNonDlfsCmnRbAlloc, which contradicts
 * the ANSI prototype above and collides with the sibling function of that
 * name; it is corrected to rgSCHCmnNonDlfsCmnRbAllocRar.  Legacy (1A) path
 * allocates a common PDCCH + RA type 2 grant; 5GTF path allocates VRBGs
 * from beam 0 and builds the RIV-based common grant. */
23842 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAllocRar
23845 RgSchDlRbAlloc *allocInfo
23848 PRIVATE S16 rgSCHCmnNonDlfsCmnRbAllocRar(cell, allocInfo)
23850 RgSchDlRbAlloc *allocInfo;
23853 RgSchDlSf *dlSf = allocInfo->dlSf;
23856 if(dlSf->bwAlloced == dlSf->bw)
23861 allocInfo->tbInfo[0].noLyr = 1;
23863 /* Update allocation information */
23864 allocInfo->pdcch = rgSCHCmnCmnPdcchAlloc(cell, dlSf);
23865 if (allocInfo->pdcch == NULLP)
23869 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
23870 allocInfo->pdcch->dciNumOfBits = cell->dciSize.size[TFU_DCI_FORMAT_1A];
23871 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
23872 allocInfo->allocInfo.raType2.isLocal = TRUE;
23874 /*Fix for ccpu00123918*/
23875 allocInfo->allocInfo.raType2.rbStart = (U8)dlSf->type2Start;
23876 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
23877 allocInfo->rbsAlloc = allocInfo->rbsReq;
23879 /* LTE_ADV_FLAG_REMOVED_END */
23880 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23883 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, NULLP, dlSf, 13, TFU_DCI_FORMAT_B1, FALSE);
23884 if (allocInfo->pdcch == NULLP)
23888 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
23889 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
23891 printf("5GTF_ERROR vrbg allocated > 25\n");
23895 allocInfo->tbInfo[0].cmnGrnt.vrbgStart = beamInfo->vrbgStart;
23896 allocInfo->tbInfo[0].cmnGrnt.numVrbg = allocInfo->vrbgReq;
23898 /* Update allocation information */
23899 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
23901 allocInfo->tbInfo[0].cmnGrnt.xPDSCHRange = 1;
23902 allocInfo->tbInfo[0].cmnGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
23903 allocInfo->tbInfo[0].cmnGrnt.vrbgStart, allocInfo->tbInfo[0].cmnGrnt.numVrbg);
23905 allocInfo->tbInfo[0].cmnGrnt.rbStrt = (allocInfo->tbInfo[0].cmnGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
23906 allocInfo->tbInfo[0].cmnGrnt.numRb = (allocInfo->tbInfo[0].cmnGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
23908 beamInfo->vrbgStart += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
23909 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].cmnGrnt.numVrbg;
23910 allocInfo->tbInfo[0].cmnGrnt.rv = 0;
23911 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
23914 printf("\n[%s],allocInfo->tbInfo[0].bytesAlloc:%u,vrbgReq:%u\n",
23915 __func__,allocInfo->tbInfo[0].bytesAlloc,allocInfo->vrbgReq);
23921 /* LTE_ADV_FLAG_REMOVED_START */
23924 * @brief To check if DL BW available for non-DLFS allocation.
23928 * Function : rgSCHCmnNonDlfsSFRBwAvlbl
23930 * Processing Steps:
23931 * - Determine availability based on RA Type.
23933 * @param[in] RgSchCellCb *cell
23934 * @param[in] RgSchDlSf *dlSf
23935 * @param[in] RgSchDlRbAlloc *allocInfo
/* NOTE(review): the "Function :" tag above previously named the non-SFR
 * sibling (rgSCHCmnNonDlfsBwAvlbl); corrected to match this function.
 * This chunk is an elided extract -- many lines (returns, braces, the
 * isUeCellEdge parameter declarations, bwAvlbl/l/l1/n/n1 declarations)
 * are missing between the numbered lines.  Comments only.  Visible flow:
 * SFR-aware bandwidth-availability check that walks the cell-centre (CC)
 * and cell-edge (CE) pool lists, honouring the rule that a CC UE may
 * spill into the CE pool but not vice versa, with special handling for
 * RETX (RB count must match the initial transmission) and for merging
 * a CE pool with its adjacent CC pool; on success *sfrpoolInfo is set. */
23943 PRIVATE Bool rgSCHCmnNonDlfsSFRBwAvlbl
23946 RgSchSFRPoolInfo **sfrpoolInfo,
23948 RgSchDlRbAlloc *allocInfo,
23952 PRIVATE Bool rgSCHCmnNonDlfsSFRBwAvlbl(cell, sfrpoolInfo, dlSf, allocInfo, isUeCellEdge)
23954 RgSchSFRPoolInfo **sfrpoolInfo;
23956 RgSchDlRbAlloc *allocInfo;
23964 RgSchSFRPoolInfo *sfrPool;
23965 RgSchSFRPoolInfo *sfrCEPool;
23969 RgSchSFRPoolInfo *poolWithMaxAvlblBw = NULLP;
23971 U32 addtnlPRBs = 0;
23973 if (dlSf->bw <= dlSf->bwAlloced)
23975 RLOG_ARG2(L_ERROR,DBG_CELLID,cell->cellId,
23976 "BW is fully allocated for subframe (%d) CRNTI:%d", dlSf->sfNum,allocInfo->rnti);
23980 if (dlSf->sfrTotalPoolInfo.ccBwFull == TRUE)
23982 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
23983 "BW is fully allocated for CC Pool CRNTI:%d",allocInfo->rnti);
23987 if ((dlSf->sfrTotalPoolInfo.ceBwFull == TRUE) && (isUeCellEdge))
23989 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,
23990 "BW is fully allocated for CE Pool CRNTI:%d",allocInfo->rnti);
23994 /* We first check if the ue scheduled is a cell edge or cell centre and accordingly check the avaialble
23995 memory in their pool. If the cell centre UE doesnt have Bw available in its pool, then it will check
23996 Bw availability in cell edge pool but the other way around is NOT possible. */
23999 l = &dlSf->sfrTotalPoolInfo.cePool;
24003 l = &dlSf->sfrTotalPoolInfo.ccPool;
24006 n = cmLListFirst(l);
24010 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
24012 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24014 /* MS_FIX for ccpu00123919 : Number of RBs in case of RETX should be same as that of initial transmission. */
24015 if(allocInfo->tbInfo[0].tbCb->txCntr)
24017 /* If RB assignment is being done for RETX. Then if reqRbs are a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
24018 * not a multiple of rbgSize then check if lsgRbgDfct exists */
24019 if (allocInfo->rbsReq % cell->rbgSize == 0)
24021 if ((sfrPool->type2End == dlSf->type2End) && dlSf->lstRbgDfct)
24023 /* In this scenario we are wasting the last RBG for this dlSf */
24024 sfrPool->type0End--;
24025 sfrPool->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24027 dlSf->lstRbgDfct = 0;
24029 /*ABHINAV To check if these variables need to be taken care of*/
24031 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24036 if (dlSf->lstRbgDfct)
24038 /* Check if type0 allocation can cater to this RETX requirement */
24039 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
24045 if (sfrPool->type2End != dlSf->type2End) /*Search again for some pool which has the END RBG of the BandWidth*/
24053 /* cannot allocate same number of required RBs */
24059 /*rg002.301 ccpu00120391 MOD condition is modified approprialtely to find if rbsReq is less than available RBS*/
24060 if(allocInfo->rbsReq <= (((sfrPool->type0End - sfrPool->type2End + 1)*\
24061 cell->rbgSize) - dlSf->lstRbgDfct))
24063 *sfrpoolInfo = sfrPool;
24068 if (sfrPool->bw <= sfrPool->bwAlloced + cell->rbgSize)
24070 n = cmLListNext(l);
24071 /* If the ue is cell centre then it will simply check the memory available in next pool.
24072 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
24074 if((!isUeCellEdge) && (!n->node))
24076 l = &dlSf->sfrTotalPoolInfo.cePool;
24077 n = cmLListFirst(l);
24083 /* MS_FIX: Number of RBs in case of RETX should be same as that of initial transmission */
24084 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24086 /*rg002.301 ccpu00120391 MOD setting the remaining RBs for the requested UE*/
24087 allocInfo->rbsReq = (((sfrPool->type0End - sfrPool->type2End + 1)*\
24088 cell->rbgSize) - dlSf->lstRbgDfct);
24089 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24090 noLyrs = allocInfo->tbInfo[0].noLyr;
24091 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24092 *sfrpoolInfo = sfrPool;
24097 n = cmLListNext(l);
24099 /* If the ue is cell centre then it will simply check the memory available in next pool.
24100 But if there are no more memory pools available, then cell centre Ue will try to look for memory in cell edge pool */
24101 if((!isUeCellEdge) && (!n->node))
24103 l = &dlSf->sfrTotalPoolInfo.cePool;
24104 n = cmLListFirst(l);
24113 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
24115 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24116 /* This is a Case where a UE was CC and had more RBs allocated than present in CE pool.
24117 In case this UE whn become CE with retx going on, then BW is not sufficient for Retx */
24118 if ((isUeCellEdge) &&
24119 (allocInfo->tbInfo[0].tbCb->txCntr != 0))
24121 if(allocInfo->rbsReq > (sfrPool->bw - sfrPool->bwAlloced))
24123 /* Adjust CE BW such that Retx alloc is successful */
24124 /* Check if merging CE with adjacent CC pool will be sufficient to process Retx */
24126 /* If no Type 0 allocations are made from this pool */
24127 if (sfrPool->type0End == (((sfrPool->poolendRB + 1) / cell->rbgSize) - 1))
24129 if (sfrPool->adjCCPool &&
24130 (sfrPool->adjCCPool->type2Start == sfrPool->poolendRB + 1) &&
24131 (allocInfo->rbsReq <= ((sfrPool->bw - sfrPool->bwAlloced) +
24132 ((sfrPool->adjCCPool->bw - sfrPool->adjCCPool->bwAlloced)))))
24134 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
24136 /* Adjusting CE Pool Info */
24137 sfrPool->bw += addtnlPRBs;
24138 sfrPool->type0End = ((sfrPool->poolendRB + addtnlPRBs + 1) /
24139 cell->rbgSize) - 1;
24141 /* Adjusting CC Pool Info */
24142 sfrPool->adjCCPool->type2Start += addtnlPRBs;
24143 sfrPool->adjCCPool->type2End = RGSCH_CEIL(sfrPool->adjCCPool->type2Start,
24145 sfrPool->adjCCPool->bw -= addtnlPRBs;
24146 *sfrpoolInfo = sfrPool;
24153 /* Check if CC pool is one of the following:
24154 * 1. |CE| + |CC "CCPool2Exists" = TRUE|
24155 * 2. |CC "CCPool2Exists" = FALSE| + |CE| + |CC "CCPool2Exists" = TRUE|
24157 if(TRUE == sfrPool->CCPool2Exists)
24159 l1 = &dlSf->sfrTotalPoolInfo.cePool;
24160 n1 = cmLListFirst(l1);
24161 sfrCEPool = (RgSchSFRPoolInfo*)(n1->node);
24162 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced))
24164 *sfrpoolInfo = sfrCEPool;
24167 else if(allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
24169 *sfrpoolInfo = sfrPool;
24172 /* Check if CE and CC boundary has unallocated prbs */
24173 else if ((sfrPool->poolstartRB == sfrPool->type2Start) &&
24174 (sfrCEPool->type0End == ((sfrCEPool->poolendRB + 1) / cell->rbgSize) - 1))
24176 if(allocInfo->rbsReq <= (sfrCEPool->bw - sfrCEPool->bwAlloced) +
24177 (sfrPool->bw - sfrPool->bwAlloced))
24179 /* Checking if BW can be allocated partly from CE pool and partly
24182 addtnlPRBs = allocInfo->rbsReq - (sfrPool->bw - sfrPool->bwAlloced);
24183 /* Updating CE and CC type2 parametrs based on the RBs allocated
24184 * from these pools*/
24185 sfrPool->type2Start -= addtnlPRBs;
24186 sfrPool->type2End = RGSCH_CEIL(sfrPool->type2Start, cell->rbgSize);
24187 sfrPool->bw += addtnlPRBs;
24188 if (addtnlPRBs == (sfrCEPool->bw - sfrCEPool->bwAlloced))
24190 sfrCEPool->bwAlloced = sfrCEPool->bw;
24191 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24195 sfrCEPool->bw -= addtnlPRBs;
24196 sfrCEPool->type0End = ((sfrCEPool->poolendRB + 1 - addtnlPRBs) / cell->rbgSize) - 1;
24198 *sfrpoolInfo = sfrPool;
24201 else if ( bwAvlbl <
24202 ((sfrCEPool->bw - sfrCEPool->bwAlloced) +
24203 (sfrPool->bw - sfrPool->bwAlloced)))
24205 /* All the Prbs from CE BW shall be allocated */
24206 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24208 sfrPool->type2Start = sfrCEPool->type2Start;
24209 sfrPool->bw += sfrCEPool->bw - sfrCEPool->bwAlloced;
24210 sfrCEPool->type2Start = sfrCEPool->poolendRB + 1;
24211 sfrCEPool->bwAlloced = sfrCEPool->bw;
24212 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24214 /* set the remaining RBs for the requested UE */
24215 allocInfo->rbsReq = (sfrPool->bw - sfrPool->bwAlloced);
24216 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24217 noLyrs = allocInfo->tbInfo[0].noLyr;
24218 allocInfo->tbInfo[0].bytesReq =
24219 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24220 *sfrpoolInfo = sfrPool;
24231 /* Checking if no. of RBs required can be allocated from
24233 * 1. If available return the SFR pool.
24234 * 2. Else update the RBs required parameter based on the
24235 * BW available in the pool
24236 * 3. Return FALSE if no B/W is available.
24238 if (allocInfo->rbsReq <= (sfrPool->bw - sfrPool->bwAlloced))
24240 *sfrpoolInfo = sfrPool;
24245 if(allocInfo->tbInfo[0].tbCb->txCntr == 0)
24247 if (bwAvlbl < sfrPool->bw - sfrPool->bwAlloced)
24251 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24253 bwAvlbl = sfrPool->bw - sfrPool->bwAlloced;
24254 poolWithMaxAvlblBw = sfrPool;
24256 n = cmLListNext(l);
24258 if ((isUeCellEdge == FALSE) && (n == NULLP))
24260 if(l != &dlSf->sfrTotalPoolInfo.cePool)
24262 l = &dlSf->sfrTotalPoolInfo.cePool;
24263 n = cmLListFirst(l);
24273 dlSf->sfrTotalPoolInfo.ceBwFull = TRUE;
24277 dlSf->sfrTotalPoolInfo.ccBwFull = TRUE;
24283 /* set the remaining RBs for the requested UE */
24284 allocInfo->rbsReq = poolWithMaxAvlblBw->bw -
24285 poolWithMaxAvlblBw->bwAlloced;
24286 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24287 noLyrs = allocInfo->tbInfo[0].noLyr;
24288 allocInfo->tbInfo[0].bytesReq =
24289 rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24290 *sfrpoolInfo = poolWithMaxAvlblBw;
24297 n = cmLListNext(l);
24299 if ((isUeCellEdge == FALSE) && (n == NULLP))
24301 if(l != &dlSf->sfrTotalPoolInfo.cePool)
24303 l = &dlSf->sfrTotalPoolInfo.cePool;
24304 n = cmLListFirst(l);
24320 #endif /* end of ifndef LTE_TDD*/
24321 /* LTE_ADV_FLAG_REMOVED_END */
24324 * @brief To check if DL BW available for non-DLFS allocation.
24328 * Function : rgSCHCmnNonDlfsBwAvlbl
24330 * Processing Steps:
24331 * - Determine availability based on RA Type.
24333 * @param[in] RgSchCellCb *cell
24334 * @param[in] RgSchDlSf *dlSf
24335 * @param[in] RgSchDlRbAlloc *allocInfo
/* NOTE(review): the "Function :" tag above previously read
 * rgSCHCmnNonDlfsUeRbAlloc; corrected to match this function.  This chunk
 * is an elided extract -- lines (returns, braces, tbs/noLyrs declarations,
 * some #ifdef guards) are missing between the numbered lines.  Comments
 * only.  Visible flow: non-SFR bandwidth availability check; for RA type 0
 * it works in whole RBGs (with RETX requiring the same RB count as the
 * first transmission, temporarily absorbing lstRbgDfct and rolling that
 * back on failure via ignoredDfctRbg); for RA type 2 it compares rbsReq
 * against bw - bwAlloced; for fresh (non-RETX, non-LAA) transmissions the
 * request may be shrunk to the remaining BW with bytesReq recomputed from
 * rgTbSzTbl (with a 3/4 scaling for DwPTS special subframes). */
24343 PRIVATE Bool rgSCHCmnNonDlfsBwAvlbl
24347 RgSchDlRbAlloc *allocInfo
24350 PRIVATE Bool rgSCHCmnNonDlfsBwAvlbl(cell, dlSf, allocInfo)
24353 RgSchDlRbAlloc *allocInfo;
24358 U8 ignoredDfctRbg = FALSE;
24360 if (dlSf->bw <= dlSf->bwAlloced)
24362 RLOG_ARG3(L_DEBUG,DBG_CELLID,cell->cellId, "(%d:%d)FAILED CRNTI:%d",
24363 dlSf->bw, dlSf->bwAlloced,allocInfo->rnti);
24366 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
24368 /* Fix for ccpu00123919 : Number of RBs in case of RETX should be same as
24369 * that of initial transmission. */
24370 if(allocInfo->tbInfo[0].tbCb->txCntr)
24372 /* If RB assignment is being done for RETX. Then if reqRbs are
24373 * a multiple of rbgSize then ignore lstRbgDfct. If reqRbs is
24374 * not a multiple of rbgSize then check if lsgRbgDfct exists */
24375 if (allocInfo->rbsReq % cell->rbgSize == 0)
24377 if (dlSf->lstRbgDfct)
24379 /* In this scenario we are wasting the last RBG for this dlSf */
24382 dlSf->bwAlloced += (cell->rbgSize - dlSf->lstRbgDfct);
24383 /* Fix: MUE_PERTTI_DL */
24384 dlSf->lstRbgDfct = 0;
24385 ignoredDfctRbg = TRUE;
24391 if (dlSf->lstRbgDfct)
24393 /* Check if type0 allocation can cater to this RETX requirement */
24394 if ((allocInfo->rbsReq % cell->rbgSize) != (cell->rbgSize - dlSf->lstRbgDfct))
24401 /* cannot allocate same number of required RBs */
24407 /* Condition is modified approprialtely to find
24408 * if rbsReq is less than available RBS*/
24409 if(allocInfo->rbsReq <= (((dlSf->type0End - dlSf->type2End + 1)*\
24410 cell->rbgSize) - dlSf->lstRbgDfct))
24414 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
24415 * allocation in TDD when requested RBs are more than available RBs*/
24418 /* MS_WORKAROUND for ccpu00122022 */
24419 if (dlSf->bw < dlSf->bwAlloced + cell->rbgSize)
24421 /* ccpu00132358- Re-assigning the values which were updated above
24422 * if it is RETX and Last RBG available*/
24423 if(ignoredDfctRbg == TRUE)
24426 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
24427 dlSf->lstRbgDfct = 1;
24433 /* Fix: Number of RBs in case of RETX should be same as
24434 * that of initial transmission. */
24435 if(allocInfo->tbInfo[0].tbCb->txCntr == 0
24437 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
24441 /* Setting the remaining RBs for the requested UE*/
24442 allocInfo->rbsReq = (((dlSf->type0End - dlSf->type2End + 1)*\
24443 cell->rbgSize) - dlSf->lstRbgDfct);
24444 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24445 noLyrs = allocInfo->tbInfo[0].noLyr;
24446 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24447 /* DwPts Scheduling Changes Start */
24449 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24451 allocInfo->tbInfo[0].bytesReq =
24452 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
24455 /* DwPts Scheduling Changes End */
24459 /* ccpu00132358- Re-assigning the values which were updated above
24460 * if it is RETX and Last RBG available*/
24461 if(ignoredDfctRbg == TRUE)
24464 dlSf->bwAlloced -= (cell->rbgSize - dlSf->lstRbgDfct);
24465 dlSf->lstRbgDfct = 1;
24468 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "FAILED for CRNTI:%d",
24470 printf ("RB Alloc failed for LAA TB type 0\n");
24476 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
24478 if (allocInfo->rbsReq <= (dlSf->bw - dlSf->bwAlloced))
24482 /* ccpu00132358:MOD- Removing "ifndef LTE_TDD" for unblocking the RB
24483 * allocation in TDD when requested RBs are more than available RBs*/
24486 /* Fix: Number of RBs in case of RETX should be same as
24487 * that of initial transmission. */
24488 if((allocInfo->tbInfo[0].tbCb->txCntr == 0)
24490 && (FALSE == rgSCHLaaIsLaaTB(allocInfo))
24494 /* set the remaining RBs for the requested UE */
24495 allocInfo->rbsReq = dlSf->bw - dlSf->bwAlloced;
24496 RG_SCH_CMN_DL_MCS_TO_TBS(allocInfo->tbInfo[0].imcs, tbs);
24497 noLyrs = allocInfo->tbInfo[0].noLyr;
24498 allocInfo->tbInfo[0].bytesReq = rgTbSzTbl[noLyrs-1][tbs][allocInfo->rbsReq - 1]/8;
24499 /* DwPts Scheduling Changes Start */
24501 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24503 allocInfo->tbInfo[0].bytesReq =
24504 rgTbSzTbl[noLyrs-1][tbs][RGSCH_MAX(allocInfo->rbsReq*3/4,1) - 1]/8;
24507 /* DwPts Scheduling Changes End */
24511 printf ("RB Alloc failed for LAA TB type 2\n");
24512 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
24515 /* Fix: Number of RBs in case of RETX should be same as
24516 * that of initial transmission. */
24520 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"FAILED for CRNTI:%d",allocInfo->rnti);
24524 /* LTE_ADV_FLAG_REMOVED_START */
24527 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24531 * Function : rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
24533 * Processing Steps:
24535 * @param[in] RgSchCellCb *cell
24536 * @param[in] RgSchDlSf *dlSf
24537 * @param[in] U8 rbStrt
24538 * @param[in] U8 numRb
/* NOTE(review): damaged listing - line-number prefixes are fused into the code
 * and brace/#ifdef/parameter-list lines were dropped (embedded numbering jumps);
 * only the visible statements are documented. Do not edit logic here without
 * recovering the original file. */
/* Purpose (from visible code): after a common-channel TYPE2 allocation, advance
 * the subframe-level pivots (type2Start/type2End/bwAlloced) and mirror the new
 * boundary into each SFR pool - cell-centre (ccPool) list first, then the
 * cell-edge (cePool) list. */
24543 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc
24551 Void rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, rbStrt, numRb)
24560 RgSchSFRPoolInfo *sfrPool;
/* Start the walk with the cell-centre pool list. */
24562 l = &dlSf->sfrTotalPoolInfo.ccPool;
/* Move the subframe TYPE2 pivots past the numRb freshly allocated RBs. */
24564 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24565 dlSf->bwAlloced += numRb;
24566 dlSf->type2Start += numRb;
24567 n = cmLListFirst(l);
24571 sfrPool = (RgSchSFRPoolInfo*)(n->node);
24572 n = cmLListNext(l);
24574 /* If the pool contains some RBs allocated in this allocation, e.g: Pool is [30.50]. Pool->type2Start is 40 , dlSf->type2Start is 45. then update the variables in pool */
24575 if((sfrPool->poolendRB >= dlSf->type2Start) && (sfrPool->type2Start < dlSf->type2Start))
/* Allocation ends inside this pool: clip the pool's window to the new pivot. */
24577 sfrPool->type2End = dlSf->type2End;
24578 sfrPool->bwAlloced = dlSf->type2Start - sfrPool->poolstartRB;
24579 sfrPool->type2Start = dlSf->type2Start;
24583 /* If the pool contains all RBs allocated in this allocation*/
24584 if(dlSf->type2Start > sfrPool->poolendRB)
/* Allocation has moved past this pool entirely: mark its whole BW consumed. */
24586 sfrPool->type2End = sfrPool->type0End + 1;
24587 sfrPool->bwAlloced = sfrPool->bw;
24588 sfrPool->type2Start = sfrPool->poolendRB + 1;
/* After exhausting the CC pool list, repeat the same walk over the CE pool. */
24593 if (l != &dlSf->sfrTotalPoolInfo.cePool)
24595 l = &dlSf->sfrTotalPoolInfo.cePool;
24596 n = cmLListFirst(l);
24606 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24610 * Function : rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
24612 * Processing Steps:
24614 * @param[in] RgSchCellCb *cell
24615 * @param[in] RgSchDlSf *dlSf
24616 * @param[in] U8 rbStrt
24617 * @param[in] U8 numRb
/* NOTE(review): damaged listing - braces/#ifdef/else lines dropped; visible
 * logic only. */
/* Purpose (from visible code): DSFR variant of the TYPE2 post-allocation
 * update. Reads the first two cell-centre pools, and if the allocated range
 * [type2Start, type2Start+numRb) falls inside either pool's high-power CC
 * range, marks the UE as high-power (isCCUePHigh) and sets the corresponding
 * bits in the subframe RNTP bitmap via rgSCHCmnBuildRntpInfo(). Finally
 * advances the subframe TYPE2 pivots. Returns S16 (presumably ROK/RFAILED -
 * return statements were dropped from this listing; TODO confirm). */
24623 PRIVATE S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc
24632 PRIVATE S16 rgSCHCmnNonDlfsUpdDSFRTyp2Alloc(cell, ue, dlSf, rbStrt, numRb)
24642 RgSchSFRPoolInfo *sfrCCPool1 = NULL;
24643 RgSchSFRPoolInfo *sfrCCPool2 = NULL;
24646 /* Move the type2End pivot forward */
24649 l = &dlSf->sfrTotalPoolInfo.ccPool;
24650 n = cmLListFirst(l);
24653 sfrCCPool1 = (RgSchSFRPoolInfo*)(n->node);
/* First CC pool is mandatory; bail out (error log) if absent. */
24655 if (sfrCCPool1 == NULLP)
24657 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24658 "sfrCCPool1 is NULL for CRNTI:%d",ue->ueId);
24661 n = cmLListNext(l);
/* Second CC pool is optional - branch below differs when only one exists. */
24664 sfrCCPool2 = (RgSchSFRPoolInfo*)(n->node);
24665 n = cmLListNext(l);
24667 if((sfrCCPool1) && (sfrCCPool2))
24669 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
24670 if(((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
24671 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb)) ||
24672 ((dlSf->type2Start >= sfrCCPool2->pwrHiCCRange.startRb) &&
24673 (dlSf->type2Start + numRb < sfrCCPool2->pwrHiCCRange.endRb)))
24675 ue->lteAdvUeCb.isCCUePHigh = TRUE;
24677 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
24678 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
24681 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24682 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Single-CC-pool case: same check/RNTP update against pool 1 only. */
24689 if((dlSf->type2Start >= sfrCCPool1->pwrHiCCRange.startRb) &&
24690 (dlSf->type2Start + numRb < sfrCCPool1->pwrHiCCRange.endRb))
24692 ue->lteAdvUeCb.isCCUePHigh = TRUE;
24694 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
24695 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, dlSf->type2Start, numRb, dlSf->bw);
24698 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdDSFRTyp2Alloc():"
24699 "rgSCHCmnBuildRntpInfo() function returned RFAILED CRNTI:%d",ue->ueId);
/* Advance the subframe-wide TYPE2 pivots past the allocated RBs. */
24705 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24707 dlSf->bwAlloced += numRb;
24708 /*MS_FIX for ccpu00123918*/
24709 dlSf->type2Start += numRb;
24715 #endif /* end of ifndef LTE_TDD*/
24716 /* LTE_ADV_FLAG_REMOVED_END */
24718 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
24722 * Function : rgSCHCmnNonDlfsUpdTyp2Alloc
24724 * Processing Steps:
24726 * @param[in] RgSchCellCb *cell
24727 * @param[in] RgSchDlSf *dlSf
24728 * @param[in] U8 rbStrt
24729 * @param[in] U8 numRb
/* NOTE(review): damaged listing - brace/#ifdef lines dropped; visible logic only. */
/* Purpose (from visible code): plain (non-SFR/DSFR) TYPE2 post-allocation
 * bookkeeping - advance the subframe's type2End/type2Start pivots and add
 * numRb to bwAlloced after a localized TYPE2 grant starting at rbStrt. */
24734 PRIVATE Void rgSCHCmnNonDlfsUpdTyp2Alloc
24742 PRIVATE Void rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, rbStrt, numRb)
24749 /* Move the type2End pivot forward */
24750 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
24751 //#ifndef LTEMAC_SPS
24752 dlSf->bwAlloced += numRb;
24753 /*Fix for ccpu00123918*/
24754 dlSf->type2Start += numRb;
24760 * @brief To do DL allocation using TYPE0 RA.
24764 * Function : rgSCHCmnNonDlfsType0Alloc
24766 * Processing Steps:
24767 * - Perform TYPE0 allocation using the RBGs between
24768 * type0End and type2End.
24769 * - Build the allocation mask as per RBG positioning.
24770 * - Update the allocation parameters.
24772 * @param[in] RgSchCellCb *cell
24773 * @param[in] RgSchDlSf *dlSf
24774 * @param[in] RgSchDlRbAlloc *allocInfo
/* NOTE(review): damaged listing - braces/#ifdef/else lines dropped; visible
 * logic only. */
/* Purpose (from visible code): perform a TYPE0 (RBG-granular) DL allocation
 * for a UE. Rounds rbsReq up to whole RBGs (accounting for the possibly
 * short last RBG via lstRbgDfct), clamps against remaining subframe BW and
 * UE-category limits (maxTbBits/maxTbSz/maxRb), builds the RBG bitmask from
 * the type0End pivot, then fills bytesAlloc for TB0/TB1 from rgTbSzTbl
 * (RETX TBs keep their original bytesReq). */
24780 PRIVATE Void rgSCHCmnNonDlfsType0Alloc
24784 RgSchDlRbAlloc *allocInfo,
24788 PRIVATE Void rgSCHCmnNonDlfsType0Alloc(cell, dlSf, allocInfo, dlUe)
24791 RgSchDlRbAlloc *allocInfo;
24795 U32 dlAllocMsk = 0;
/* rbgFiller: size deficit of the last (short) RBG, folded into the request. */
24796 U8 rbgFiller = dlSf->lstRbgDfct;
24797 U8 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
24798 //U8 noRbgs = (allocInfo->rbsReq + rbgFiller)/ cell->rbgSize;
24802 U32 tb1BytesAlloc = 0;
24803 U32 tb2BytesAlloc = 0;
24804 RgSchCmnDlUe *dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
24806 //if(noRbgs == 0) noRbgs = 1; /* Not required as ceilling is used above*/
24808 /* Fix for ccpu00123919*/
24809 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* If the ceiling overshot the remaining subframe BW, shrink (dropped lines
 * presumably decrement noRbgs here - TODO confirm) and recompute noRbs. */
24810 if (dlSf->bwAlloced + noRbs > dlSf->bw)
24816 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
24819 /* Fix for ccpu00138701: Ceilling is using to derive num of RBGs, Therefore,
24820 * after this operation,checking Max TB size and Max RBs are not crossed
24821 * if it is crossed then decrement num of RBGs. */
24822 //if((noRbs + rbgFiller) % cell->rbgSize)
24823 if((noRbs > allocInfo->rbsReq) &&
24824 (allocInfo->rbsReq + rbgFiller) % cell->rbgSize)
24825 {/* considering ue category limitation
24826 * due to ceiling */
/* LAA TBs are exempt from the UE-category clamp below. */
24829 if (rgSCHLaaIsLaaTB(allocInfo)== FALSE)
/* Probe the TB sizes the rounded-up allocation would produce (new TX only;
 * txCntr != 0 means RETX, whose size is already fixed). */
24832 if ((allocInfo->tbInfo[0].schdlngForTb) && (!allocInfo->tbInfo[0].tbCb->txCntr))
24834 iTbs = allocInfo->tbInfo[0].iTbs;
24835 noLyr = allocInfo->tbInfo[0].noLyr;
24836 tb1BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24839 if ((allocInfo->tbInfo[1].schdlngForTb) && (!allocInfo->tbInfo[1].tbCb->txCntr))
24841 iTbs = allocInfo->tbInfo[1].iTbs;
24842 noLyr = allocInfo->tbInfo[1].noLyr;
24843 tb2BytesAlloc = rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24847 /* Only Check for New Tx No need for Retx */
24848 if (tb1BytesAlloc || tb2BytesAlloc)
/* UE-category clamp: if any limit is hit, the dropped lines presumably
 * decrement noRbgs before noRbs is recomputed below - TODO confirm. */
24850 if (( ue->dl.aggTbBits >= dlUe->maxTbBits) ||
24851 (tb1BytesAlloc >= dlUe->maxTbSz/8) ||
24852 (tb2BytesAlloc >= dlUe->maxTbSz/8) ||
24853 (noRbs >= dlUe->maxRb))
24859 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
24863 /* type0End would have been initially (during subfrm Init) at the bit position
24864 * (cell->noOfRbgs - 1), 0 being the most significant.
24865 * Getting DlAllocMsk for noRbgs and at the appropriate position */
24866 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - dlSf->type0End));
24867 /* Move backwards the type0End pivot */
24868 dlSf->type0End -= noRbgs;
24869 /*Fix for ccpu00123919*/
24870 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
24871 /* Update the bwAlloced field accordingly */
24872 //#ifndef LTEMAC_SPS /* ccpu00129474*/
24873 dlSf->bwAlloced += noRbs;
24875 /* Update Type0 Alloc Info */
24876 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
24877 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
24878 allocInfo->rbsAlloc = noRbs;
24880 /* Update Tb info for each scheduled TB */
24881 iTbs = allocInfo->tbInfo[0].iTbs;
24882 noLyr = allocInfo->tbInfo[0].noLyr;
24883 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
24884 * RETX TB Size is same as Init TX TB Size */
24885 if (allocInfo->tbInfo[0].tbCb->txCntr)
24887 allocInfo->tbInfo[0].bytesAlloc =
24888 allocInfo->tbInfo[0].bytesReq;
24892 allocInfo->tbInfo[0].bytesAlloc =
24893 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
24894 /* DwPts Scheduling Changes Start */
/* Special subframe carrying data: only ~3/4 of the RBs' capacity is usable. */
24896 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24898 allocInfo->tbInfo[0].bytesAlloc =
24899 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
24902 /* DwPts Scheduling Changes End */
24905 if (allocInfo->tbInfo[1].schdlngForTb)
24907 iTbs = allocInfo->tbInfo[1].iTbs;
24908 noLyr = allocInfo->tbInfo[1].noLyr;
24909 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
24910 * RETX TB Size is same as Init TX TB Size */
24911 if (allocInfo->tbInfo[1].tbCb->txCntr)
24913 allocInfo->tbInfo[1].bytesAlloc =
24914 allocInfo->tbInfo[1].bytesReq;
24918 allocInfo->tbInfo[1].bytesAlloc =
/* NOTE(review): stray second ';' below - harmless empty statement. */
24919 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;;
24920 /* DwPts Scheduling Changes Start */
24922 if (dlSf->sfType == RG_SCH_SPL_SF_DATA)
24924 allocInfo->tbInfo[1].bytesAlloc =
24925 rgTbSzTbl[noLyr - 1][iTbs][RGSCH_MAX(noRbs*3/4,1) - 1]/8;
24928 /* DwPts Scheduling Changes End */
24932 /* The last RBG which can be smaller than the RBG size is consedered
24933 * only for the first time allocation of TYPE0 UE */
24934 dlSf->lstRbgDfct = 0;
24941 * @brief To prepare RNTP value from the PRB allocation (P-High -> 1 and P-Low -> 0)
24945 * Function : rgSCHCmnBuildRntpInfo
24947 * Processing Steps:
24949 * @param[in] U8 *rntpPtr
24950 * @param[in] U8 startRb
24951 * @param[in] U8 numRb
/* NOTE(review): damaged listing - braces and loop-tail lines dropped (the
 * increment of rbPtrStartIdx inside the while loop is among the missing
 * lines - TODO confirm against original source); visible logic only. */
/* Purpose (from visible code): set nmbRb consecutive bits starting at bit
 * position startRb in the byte-array RNTP bitmap rntpPtr (P-High -> 1).
 * Handles the two cases of the run lying within one byte or spanning
 * multiple bytes. */
24957 PRIVATE S16 rgSCHCmnBuildRntpInfo
24966 PRIVATE S16 rgSCHCmnBuildRntpInfo(cell, rntpPtr, startRb, nmbRb, bw)
24974 U16 rbPtrStartIdx; /* Start Index of Octete Buffer to be filled */
24975 U16 rbPtrEndIdx; /* End Index of Octete Buffer to be filled */
24976 U16 rbBitLoc; /* Bit Location to be set as 1 in the current Byte */
24977 U16 nmbRbPerByte; /* PRB's to be set in the current Byte (in case of multiple Bytes) */
24980 rbPtrStartIdx = (startRb)/8;
24981 rbPtrEndIdx = (startRb + nmbRb)/8;
24983 if (rntpPtr == NULLP)
24985 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId,
24986 "rgSCHCmnBuildRntpInfo():"
24987 "rntpPtr can't be NULLP (Memory Allocation Failed)");
24991 while(rbPtrStartIdx <= rbPtrEndIdx)
24993 rbBitLoc = (startRb)%8;
24995 /* case 1: startRb and endRb lies in same Byte */
24996 if (rbPtrStartIdx == rbPtrEndIdx)
24998 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
24999 | (((1<<nmbRb)-1)<<rbBitLoc);
25002 /* case 2: startRb and endRb lies in different Byte */
25003 if (rbPtrStartIdx != rbPtrEndIdx)
/* Fill the remainder of the current byte, then carry startRb/nmbRb forward. */
25005 nmbRbPerByte = 8 - rbBitLoc;
25006 nmbRb = nmbRb - nmbRbPerByte;
25007 rntpPtr[rbPtrStartIdx] = rntpPtr[rbPtrStartIdx]
25008 | (((1<<nmbRbPerByte)-1)<<rbBitLoc);
25009 startRb = startRb + nmbRbPerByte;
25015 /* dsfr_pal_fixes ** 21-March-2013 ** SKS ** Adding Debug logs */
25017 /* dsfr_pal_fixes ** 25-March-2013 ** SKS ** Adding Debug logs to print RNTP */
25023 * @brief To update non-DLFS alloc'n parameters after TYPE2 Allocation.
25027 * Function : rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
25029 * Processing Steps:
25031 * @param[in] RgSchCellCb *cell
25032 * @param[in] RgSchDlSf *dlSf
25033 * @param[in] U8 rbStrt
25034 * @param[in] U8 numRb
/* NOTE(review): damaged listing - braces/else lines dropped; visible logic only. */
/* Purpose (from visible code): SFR-pool variant of the TYPE2 post-allocation
 * update for a dedicated UE. Advances both the subframe and the owning pool's
 * type2 pivots/bwAlloced, and - when DSFR is enabled - marks a cell-centre UE
 * P-High and sets its RBs in the RNTP bitmap when the grant falls inside the
 * pool's high-power CC range (cell-edge UEs always get the RNTP update path). */
25039 PRIVATE S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc
25044 RgSchSFRPoolInfo *sfrPool,
25049 PRIVATE S16 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrPool, rbStrt, numRb)
25053 RgSchSFRPoolInfo *sfrPool;
/* Advance subframe-level pivots first. */
25062 dlSf->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
25063 sfrPool->type2End = RGSCH_CEIL((rbStrt+numRb), cell->rbgSize);
25066 dlSf->type2Start += numRb;
25067 dlSf->bwAlloced += numRb;
25069 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
25071 /* Based on RNTP info, the CC user is assigned high power per subframe basis */
25072 if(FALSE == ue->lteAdvUeCb.rgrLteAdvUeCfg.isUeCellEdge)
25074 if((sfrPool->type2Start >= sfrPool->pwrHiCCRange.startRb) &&
25075 (sfrPool->type2Start + numRb < sfrPool->pwrHiCCRange.endRb))
25077 ue->lteAdvUeCb.isCCUePHigh = TRUE;
25079 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
25080 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
25083 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId,"rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
25084 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Cell-edge UE path (dropped 'else' presumably precedes this - TODO confirm). */
25091 /* Calling rgSCHCmnBuildRntpInfo function to update RNTP BitMap */
25092 ret = rgSCHCmnBuildRntpInfo(cell, dlSf->rntpInfo.val, sfrPool->type2Start, numRb, dlSf->bw);
25095 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc():"
25096 "rgSCHCmnBuildRntpInfo() function returned RFAILED for CRNTI:%d",ue->ueId);
/* Finally advance the pool-local pivots. */
25101 sfrPool->type2Start += numRb;
25102 sfrPool->bwAlloced += numRb;
25109 * @brief To do DL allocation using TYPE0 RA.
25113 * Function : rgSCHCmnNonDlfsSFRPoolType0Alloc
25115 * Processing Steps:
25116 * - Perform TYPE0 allocation using the RBGs between type0End and type2End.
25117 * - Build the allocation mask as per RBG positioning.
25118 * - Update the allocation parameters.
25120 * @param[in] RgSchCellCb *cell
25121 * @param[in] RgSchDlSf *dlSf
25122 * @param[in] RgSchDlRbAlloc *allocInfo
/* NOTE(review): damaged listing - braces/#ifdef/else lines dropped; visible
 * logic only. */
/* Purpose (from visible code): TYPE0 (RBG-granular) DL allocation from a
 * specific SFR pool. Mirrors rgSCHCmnNonDlfsType0Alloc but uses the pool's
 * type0End pivot and updates both pool and subframe bwAlloced. The last-RBG
 * deficit (lstRbgDfct) is folded in only when this pool ends at the cell
 * bandwidth edge. */
25127 PRIVATE Void rgSCHCmnNonDlfsSFRPoolType0Alloc
25131 RgSchSFRPoolInfo *poolInfo,
25132 RgSchDlRbAlloc *allocInfo
25135 PRIVATE Void rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, poolInfo, allocInfo)
25138 RgSchSFRPoolInfo *poolInfo;
25139 RgSchDlRbAlloc *allocInfo;
25142 U32 dlAllocMsk = 0;
/* Only the pool that touches the top of the cell BW owns the short last RBG. */
25150 if (poolInfo->poolstartRB + poolInfo->bw == dlSf->bw)
25152 if (poolInfo->type0End == dlSf->bw/4)
25154 rbgFiller = dlSf->lstRbgDfct;
25155 /* The last RBG which can be smaller than the RBG size is consedered
25156 * only for the first time allocation of TYPE0 UE */
25157 dlSf->lstRbgDfct = 0;
25161 noRbgs = RGSCH_CEIL((allocInfo->rbsReq + rbgFiller), cell->rbgSize);
25163 /* Abhinav to-do start */
25164 /* MS_FIX for ccpu00123919*/
25165 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
/* Clamp against remaining subframe BW (dropped lines presumably decrement
 * noRbgs here - TODO confirm) and recompute noRbs. */
25166 if (dlSf->bwAlloced + noRbs > dlSf->bw)
25172 noRbs = (noRbgs * cell->rbgSize) - rbgFiller;
25174 /* Abhinav to-do end */
25178 /* type0End would have been initially (during subfrm Init) at the bit position
25179 * (cell->noOfRbgs - 1), 0 being the most significant.
25180 * Getting DlAllocMsk for noRbgs and at the appropriate position */
25181 dlAllocMsk |= (((1 << noRbgs) - 1) << (31 - poolInfo->type0End));
25182 /* Move backwards the type0End pivot */
25183 poolInfo->type0End -= noRbgs;
25184 /*MS_FIX for ccpu00123919*/
25185 /*noRbs = (noRbgs * cell->rbgSize) - rbgFiller;*/
25186 /* Update the bwAlloced field accordingly */
25187 poolInfo->bwAlloced += noRbs + dlSf->lstRbgDfct;
25188 dlSf->bwAlloced += noRbs + dlSf->lstRbgDfct;
25190 /* Update Type0 Alloc Info */
25191 allocInfo->allocInfo.raType0.numDlAlloc = noRbgs;
25192 allocInfo->allocInfo.raType0.dlAllocBitMask |= dlAllocMsk;
25193 allocInfo->rbsAlloc = noRbs;
25195 /* Update Tb info for each scheduled TB */
25196 iTbs = allocInfo->tbInfo[0].iTbs;
25197 noLyr = allocInfo->tbInfo[0].noLyr;
25198 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant.
25199 * RETX TB Size is same as Init TX TB Size */
25200 if (allocInfo->tbInfo[0].tbCb->txCntr)
25202 allocInfo->tbInfo[0].bytesAlloc =
25203 allocInfo->tbInfo[0].bytesReq;
25207 allocInfo->tbInfo[0].bytesAlloc =
25208 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;
25211 if (allocInfo->tbInfo[1].schdlngForTb)
25213 iTbs = allocInfo->tbInfo[1].iTbs;
25214 noLyr = allocInfo->tbInfo[1].noLyr;
25215 /* Fix for ccpu00123919: For a RETX TB the iTbs is irrelevant
25216 * RETX TB Size is same as Init TX TB Size */
25217 if (allocInfo->tbInfo[1].tbCb->txCntr)
25219 allocInfo->tbInfo[1].bytesAlloc =
25220 allocInfo->tbInfo[1].bytesReq;
25224 allocInfo->tbInfo[1].bytesAlloc =
/* NOTE(review): stray second ';' below - harmless empty statement. */
25225 rgTbSzTbl[noLyr - 1][iTbs][noRbs - 1]/8;;
25229 /* The last RBG which can be smaller than the RBG size is consedered
25230 * only for the first time allocation of TYPE0 UE */
25231 dlSf->lstRbgDfct = 0;
25236 * @brief Computes RNTP Info for a subframe.
25240 * Function : rgSCHCmnNonDlfsDsfrRntpComp
25242 * Processing Steps:
25243 * - Computes RNTP info from individual pools.
25245 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): damaged listing - braces and the 'samples = 0' reset lines
 * appear to have been dropped; visible logic only. */
/* Purpose (from visible code): per-TTI aggregation of the subframe RNTP
 * bitmap into the cell-level accumulator; every RG_SCH_MAX_RNTP_SAMPLES TTIs
 * the aggregate is packaged into an RgrLoadInfIndInfo and sent up via
 * rgSCHUtlRgrLoadInfInd() (load indication towards neighbour eNBs), then the
 * accumulator is cleared. */
25251 PRIVATE void rgSCHCmnNonDlfsDsfrRntpComp
25257 PRIVATE void rgSCHCmnNonDlfsDsfrRntpComp(cell, dlSf)
/* PRIVATE presumably expands to 'static': the sample counter persists
 * across invocations - TODO confirm macro definition. */
25262 PRIVATE U16 samples = 0;
25264 U16 bwBytes = (dlSf->bw-1)/8;
25265 RgrLoadInfIndInfo *rgrLoadInf;
/* len = number of bytes needed to hold one bit per RB of the DL bandwidth. */
25270 len = (dlSf->bw % 8 == 0) ? dlSf->bw/8 : dlSf->bw/8 + 1;
25272 /* RNTP info is ORed every TTI and the sample is stored in cell control block */
25273 for(i = 0; i <= bwBytes; i++)
25275 cell->rntpAggrInfo.val[i] |= dlSf->rntpInfo.val[i];
25277 samples = samples + 1;
25278 /* After every 1000 ms, the RNTP info will be sent to application to be further sent to all neighbouring eNB
25279 informing them about the load indication for cell edge users */
25280 if(RG_SCH_MAX_RNTP_SAMPLES == samples)
25283 ret = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&rgrLoadInf,
25284 sizeof(RgrLoadInfIndInfo));
/* Allocation failure is logged and (per dropped lines) presumably returns
 * without sending - TODO confirm. */
25287 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
25288 "allocate memory for sending LoadInfo");
25292 rgrLoadInf->u.rntpInfo.pres = cell->rntpAggrInfo.pres;
25293 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
25294 rgrLoadInf->u.rntpInfo.len = len;
25296 /* dsfr_pal_fixes ** 21-March-2013 ** SKS */
25297 rgrLoadInf->u.rntpInfo.val = cell->rntpAggrInfo.val;
25298 rgrLoadInf->cellId = cell->cellId;
25300 /* dsfr_pal_fixes ** 22-March-2013 ** SKS */
25301 rgrLoadInf->bw = dlSf->bw;
25302 rgrLoadInf->type = RGR_SFR;
25304 ret = rgSCHUtlRgrLoadInfInd(cell, rgrLoadInf);
25307 RLOG_ARG0(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHCmnNonDlfsDsfrRntpComp():"
25308 "rgSCHUtlRgrLoadInfInd() returned RFAILED");
/* Clear the aggregate for the next sampling window. */
25311 memset(cell->rntpAggrInfo.val,0,len);
25315 /* LTE_ADV_FLAG_REMOVED_END */
25317 /* LTE_ADV_FLAG_REMOVED_START */
25319 * @brief Performs RB allocation per UE from a pool.
25323 * Function : rgSCHCmnSFRNonDlfsUeRbAlloc
25325 * Processing Steps:
25326 * - Allocate consecutively available RBs.
25328 * @param[in] RgSchCellCb *cell
25329 * @param[in] RgSchUeCb *ue
25330 * @param[in] RgSchDlSf *dlSf
25331 * @param[out] U8 *isDlBwAvail
/* NOTE(review): damaged listing - braces/else/return lines dropped; visible
 * logic only. */
/* Purpose (from visible code): per-UE non-DLFS RB allocation under SFR.
 * Selects an SFR pool via rgSCHCmnNonDlfsSFRBwAvlbl() (cell-edge UEs may
 * fall back to the CC pool), allocates a PDCCH (DTX-aware), then performs a
 * TYPE2 or TYPE0 allocation from the chosen pool and derives the code rate
 * per scheduled TB. isDlBwAvail is the out-flag telling the caller whether
 * further DL allocations in this subframe are worthwhile. */
25339 PRIVATE S16 rgSCHCmnSFRNonDlfsUeRbAlloc
25347 PRIVATE S16 rgSCHCmnSFRNonDlfsUeRbAlloc(cell, ue, dlSf, isDlBwAvail)
25354 RgSchDlRbAlloc *allocInfo;
25355 RgSchCmnDlUe *dlUe;
25357 RgSchSFRPoolInfo *sfrpoolInfo = NULLP;
25360 isUECellEdge = RG_SCH_CMN_IS_UE_CELL_EDGE(ue);
25362 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
25363 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25364 *isDlBwAvail = TRUE;
25366 /*Find which pool is available for this UE*/
25367 if (rgSCHCmnNonDlfsSFRBwAvlbl(cell, &sfrpoolInfo, dlSf, allocInfo, isUECellEdge) != TRUE)
25369 /* SFR_FIX - If this is CE UE there may be BW available in CC Pool
25370 So CC UEs will be scheduled */
25373 *isDlBwAvail = TRUE;
25377 *isDlBwAvail = FALSE;
/* PDCCH allocation: pass TRUE when either TB reported DTX on its feedback. */
25382 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX || dlUe->proc->tbInfo[1].isAckNackDtx)
25384 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
25388 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
25391 if (!(allocInfo->pdcch))
25393 /* Returning ROK since PDCCH might be available for another UE and further allocations could be done */
25398 allocInfo->rnti = ue->ueId;
25401 if (allocInfo->raType == RG_SCH_CMN_RA_TYPE2)
/* Localized TYPE2 grant starting at the pool's current type2Start pivot. */
25403 allocInfo->allocInfo.raType2.isLocal = TRUE;
25404 /* rg004.201 patch - ccpu00109921 fix end */
25405 /* MS_FIX for ccpu00123918*/
25406 allocInfo->allocInfo.raType2.rbStart = (U8)sfrpoolInfo->type2Start;
25407 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25408 /* rg007.201 - Changes for MIMO feature addition */
25409 /* rg008.201 - Removed dependency on MIMO compile-time flag */
25410 rgSCHCmnNonDlfsUpdSFRPoolTyp2Alloc(cell, ue, dlSf, sfrpoolInfo, \
25411 allocInfo->allocInfo.raType2.rbStart, \
25412 allocInfo->allocInfo.raType2.numRb);
25413 allocInfo->rbsAlloc = allocInfo->rbsReq;
25414 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25416 else if (allocInfo->raType == RG_SCH_CMN_RA_TYPE0)
25418 rgSCHCmnNonDlfsSFRPoolType0Alloc(cell, dlSf, sfrpoolInfo, allocInfo);
/* Derive code rate for each scheduled TB of the finished allocation. */
25422 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,0);
25423 if(allocInfo->tbInfo[1].schdlngForTb == TRUE)
25425 rgSCHCmnFindCodeRate(cell,dlSf,allocInfo,1);
25430 #if defined(LTEMAC_SPS)
25431 /* Update the sub-frame with new allocation */
25432 dlSf->bwAlloced += allocInfo->rbsReq;
25438 /* LTE_ADV_FLAG_REMOVED_END */
25439 #endif /* LTE_TDD */
25442 * @brief Performs RB allocation per UE for frequency non-selective cell.
25446 * Function : rgSCHCmnNonDlfsUeRbAlloc
25448 * Processing Steps:
25449 * - Allocate consecutively available RBs.
25451 * @param[in] RgSchCellCb *cell
25452 * @param[in] RgSchUeCb *ue
25453 * @param[in] RgSchDlSf *dlSf
25454 * @param[out] U8 *isDlBwAvail
/* NOTE(review): damaged listing - braces/return lines dropped; visible logic
 * only. */
/* Purpose (from visible code): per-UE RB allocation for a frequency
 * non-selective cell on the 5GTF path - checks the UE's beam VRBG budget,
 * allocates a PDCCH (DTX-aware), then fills the DL grant (vrbgStart/numVrbg,
 * RIV via rgSCHCmnCalcRiv, xPDSCHRange, SCID, and PRB equivalents) and
 * advances the beam's VRBG accounting. */
25461 PRIVATE S16 rgSCHCmnNonDlfsUeRbAlloc
25469 PRIVATE S16 rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, isDlBwAvail)
25476 RgSchDlRbAlloc *allocInfo;
25477 RgSchCmnDlUe *dlUe;
25483 RgSch5gtfUeCb *ue5gtfCb = &(ue->ue5gtfCb);
25484 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[ue5gtfCb->BeamId]);
25486 dlUe = RG_SCH_CMN_GET_DL_UE(ue,cell);
25487 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
25488 *isDlBwAvail = TRUE;
/* Beam already at its VRBG budget: log and (per dropped lines) presumably
 * bail out - TODO confirm. */
25490 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
25492 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25493 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
25495 printf("5GTF_ERROR vrbg allocated > 25\n");
/* PDCCH allocation: TRUE when either TB's HARQ feedback was DTX. */
25499 if (dlUe->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX
25500 || dlUe->proc->tbInfo[1].isAckNackDtx)
25502 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat, TRUE);
25506 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ue, dlSf, dlUe->mimoInfo.cwInfo[0].cqi, allocInfo->dciFormat,FALSE);
25508 if (!(allocInfo->pdcch))
25510 /* Returning ROK since PDCCH might be available for another UE and
25511 * further allocations could be done */
25512 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25513 "5GTF_ERROR : PDCCH allocation failed :ue (%u)",
25515 printf("5GTF_ERROR PDCCH allocation failed\n");
25519 //maxPrb = RGSCH_MIN((allocInfo->vrbgReq * MAX_5GTF_VRBG_SIZE), ue5gtfCb->maxPrb);
25520 //maxPrb = RGSCH_MIN(maxPrb,
25521 //((beamInfo->totVrbgAvail - beamInfo->vrbgStart)* MAX_5GTF_VRBG_SIZE)));
25522 //TODO_SID Need to check for vrbg available after scheduling for same beam.
/* Fill the DL grant from the beam's running VRBG pivot. */
25523 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
25524 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
25525 //TODO_SID: Setting for max TP
25526 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
25527 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
25528 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
25529 allocInfo->tbInfo[0].tbCb->dlGrnt.SCID = 0;
25530 allocInfo->tbInfo[0].tbCb->dlGrnt.dciFormat = allocInfo->dciFormat;
25531 //Filling temporarily
25532 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
25533 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
/* Commit the VRBG consumption to the beam's per-subframe accounting. */
25535 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25536 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25537 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25545 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
25549 * Function : rgSCHCmnNonDlfsCcchSduAlloc
25551 * Processing Steps:
25552 * - For each element in the list, Call rgSCHCmnNonDlfsCcchSduRbAlloc().
25553 * - If allocation is successful, add the ueCb to scheduled list of CCCH
25555 * - else, add UeCb to non-scheduled list.
25557 * @param[in] RgSchCellCb *cell
25558 * @param[in, out] RgSchCmnCcchSduRbAlloc *allocInfo
25559 * @param[in] U8 isRetx
/* NOTE(review): damaged listing - braces/if(isRetx)/else lines dropped;
 * visible logic only. */
/* Purpose (from visible code): walk the CCCH-SDU HARQ list (retx or fresh-tx
 * set, chosen by isRetx), attempt an RB allocation per entry via
 * rgSCHCmnNonDlfsCcchSduRbAlloc(); on success append to the scheduled list,
 * on the first failure move the remaining entries to the non-scheduled list
 * and stop. */
25564 PRIVATE Void rgSCHCmnNonDlfsCcchSduAlloc
25567 RgSchCmnCcchSduRbAlloc *allocInfo,
25571 PRIVATE Void rgSCHCmnNonDlfsCcchSduAlloc(cell, allocInfo, isRetx)
25573 RgSchCmnCcchSduRbAlloc *allocInfo;
25578 CmLListCp *ccchSduLst = NULLP;
25579 CmLListCp *schdCcchSduLst = NULLP;
25580 CmLListCp *nonSchdCcchSduLst = NULLP;
25581 CmLList *schdLnkNode = NULLP;
25582 CmLList *toBeSchdLnk = NULLP;
25583 RgSchDlSf *dlSf = allocInfo->ccchSduDlSf;
25584 RgSchUeCb *ueCb = NULLP;
25585 RgSchDlHqProcCb *hqP = NULLP;
/* isRetx branch (the 'if' line was dropped from this listing). */
25589 /* Initialize re-transmitting lists */
25590 ccchSduLst = &(allocInfo->ccchSduRetxLst);
25591 schdCcchSduLst = &(allocInfo->schdCcchSduRetxLst);
25592 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduRetxLst);
25596 /* Initialize transmitting lists */
25597 ccchSduLst = &(allocInfo->ccchSduTxLst);
25598 schdCcchSduLst = &(allocInfo->schdCcchSduTxLst);
25599 nonSchdCcchSduLst = &(allocInfo->nonSchdCcchSduTxLst);
25602 /* Perform allocaations for the list */
25603 toBeSchdLnk = cmLListFirst(ccchSduLst);
25604 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
25606 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25607 ueCb = hqP->hqE->ue;
25608 schdLnkNode = &hqP->schdLstLnk;
25609 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25610 ret = rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf);
25613 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
25614 * list and return */
/* do/while over the tail of the list, parking every remaining HARQ proc. */
25617 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25618 ueCb = hqP->hqE->ue;
25619 schdLnkNode = &hqP->schdLstLnk;
25620 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25621 cmLListAdd2Tail(nonSchdCcchSduLst, schdLnkNode);
25622 toBeSchdLnk = toBeSchdLnk->next;
25623 } while(toBeSchdLnk);
25627 /* Allocation successful: Add UE to the scheduled list */
25628 cmLListAdd2Tail(schdCcchSduLst, schdLnkNode);
25636 * @brief Performs RB allocation for CcchSdu for frequency non-selective cell.
25640 * Function : rgSCHCmnNonDlfsCcchSduRbAlloc
25642 * Processing Steps:
25644 * - Allocate consecutively available RBs
25646 * @param[in] RgSchCellCb *cell
25647 * @param[in] RgSchUeCb *ueCb
25648 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): damaged listing - braces/#ifdef(LTEMAC_SPS)/else/return
 * lines dropped; visible logic only. */
/* Purpose (from visible code): allocate RBs for one CCCH SDU on a
 * frequency non-selective cell. Fails fast when the remaining subframe BW
 * cannot hold rbsReq (retry next TTI), allocates a DCI format 1A PDCCH
 * (DTX-aware), then performs a localized TYPE2 allocation starting at
 * type2Start, routing the pivot update through the SFR common-channel
 * variant when SFR is enabled. */
25654 PRIVATE S16 rgSCHCmnNonDlfsCcchSduRbAlloc
25661 PRIVATE S16 rgSCHCmnNonDlfsCcchSduRbAlloc(cell, ueCb, dlSf)
25667 RgSchDlRbAlloc *allocInfo;
25668 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ueCb,cell);
25672 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ueCb,cell);
25674 /* [ccpu00138802]-MOD-If Bw is less than required, return fail
25675 It will be allocated in next TTI */
/* SPS build: BW exhausted only when both SPS and normal BW are consumed. */
25677 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
25678 (dlSf->bwAlloced == dlSf->bw))
25680 if((dlSf->bwAlloced == dlSf->bw) ||
25681 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
25686 /* Retrieve PDCCH */
25687 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
25688 if (ueDl->proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
25690 /* allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, dlSf, y, ueDl->cqi,
25691 * TFU_DCI_FORMAT_1A, TRUE);*/
25692 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, TRUE);
25696 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, ueCb, dlSf, ueDl->mimoInfo.cwInfo[0].cqi, TFU_DCI_FORMAT_1A, FALSE);
25698 if (!(allocInfo->pdcch))
25700 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
25704 /* Update allocation information */
25705 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25706 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25707 allocInfo->allocInfo.raType2.isLocal = TRUE;
25709 /*Fix for ccpu00123918*/
25710 /* Push this harq process back to the free queue */
25711 allocInfo->allocInfo.raType2.rbStart = (U8)dlSf->type2Start;
25712 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25713 allocInfo->rbsAlloc = allocInfo->rbsReq;
25714 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25715 /* Update the sub-frame with new allocation */
25717 /* LTE_ADV_FLAG_REMOVED_START */
/* SFR enabled: common-channel update must also adjust the SFR pools. */
25719 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
25721 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf,
25722 allocInfo->allocInfo.raType2.rbStart,
25723 allocInfo->allocInfo.raType2.numRb);
25726 #endif /* end of ifndef LTE_TDD*/
25728 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf,
25729 allocInfo->allocInfo.raType2.rbStart,
25730 allocInfo->allocInfo.raType2.numRb);
25733 /* LTE_ADV_FLAG_REMOVED_END */
25734 /* ccpu00131941 - bwAlloced is updated from SPS bandwidth */
25742 * @brief Performs RB allocation for Msg4 for frequency non-selective cell.
25746 * Function : rgSCHCmnNonDlfsMsg4RbAlloc
25748 * Processing Steps:
25750 * - Allocate consecutively available RBs
25752 * @param[in] RgSchCellCb *cell
25753 * @param[in] RgSchRaCb *raCb
25754 * @param[in] RgSchDlSf *dlSf
/* NOTE(review): damaged listing - braces/#ifdef/else/return lines dropped;
 * visible logic only. The function appears to contain BOTH a legacy LTE
 * TYPE2 path (DCI 1A) and a 5GTF path (DCI B1, VRBG grant) selected by
 * dropped preprocessor conditionals - TODO confirm against original source. */
/* Purpose (from visible code): allocate RBs for a Msg4 transmission on a
 * frequency non-selective cell: BW/beam-budget admission check, PDCCH
 * allocation keyed on the Msg4 HARQ DTX state, then grant construction
 * (TYPE2 rbStart/numRb or 5GTF RIV) and allocation bookkeeping. */
25760 PRIVATE S16 rgSCHCmnNonDlfsMsg4RbAlloc
25767 PRIVATE S16 rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf)
25773 RgSchDlRbAlloc *allocInfo;
25776 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_RACB(raCb);
/* 5GTF path: Msg4 always uses beam 0. */
25779 RgSchSfBeamInfo *beamInfo = &(dlSf->sfBeamInfo[0]);
25780 if(beamInfo->totVrbgAllocated > MAX_5GTF_VRBG)
25782 RLOG_ARG1(L_ERROR ,DBG_CELLID,cell->cellId,
25783 "5GTF_ERROR : vrbg allocated > 25 :ue (%u)",
25785 printf("5GTF_ERROR vrbg allocated > 25\n");
/* Admission: fail when remaining (SPS-aware) subframe BW cannot hold rbsReq. */
25790 if ((dlSf->spsAllocdBw >= cell->spsBwRbgInfo.numRbs) &&
25791 (dlSf->bwAlloced == dlSf->bw))
25793 if((dlSf->bwAlloced == dlSf->bw) ||
25794 (allocInfo->rbsReq > (dlSf->bw - dlSf->bwAlloced)))
25801 /* DTX Changes: One Variable is passed to check whether it is DTX or Not */
25802 if (raCb->dlHqE->msg4Proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX)
25804 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, TRUE);
25808 allocInfo->pdcch = rgSCHCmnPdcchAlloc(cell, raCb->ue, dlSf, raCb->ccchCqi, TFU_DCI_FORMAT_B1, FALSE);
25810 if (!(allocInfo->pdcch))
25812 /* Returning RFAILED since PDCCH not available for any CCCH allocations */
25817 /* SR_RACH_STATS : MSG4 TX Failed */
25818 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
25820 /* Update allocation information */
25821 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
25822 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
25823 allocInfo->allocInfo.raType2.isLocal = TRUE;
25826 /*Fix for ccpu00123918*/
25827 allocInfo->allocInfo.raType2.rbStart = (U8)dlSf->type2Start;
25828 allocInfo->allocInfo.raType2.numRb = allocInfo->rbsReq;
25829 /* LTE_ADV_FLAG_REMOVED_START */
/* SFR enabled: route pivot update through the SFR common-channel variant. */
25831 if (cell->lteAdvCb.sfrCfg.status == RGR_ENABLE)
25833 rgSCHCmnNonDlfsSFRCmnChannelUpdTyp2Alloc(cell, dlSf, \
25834 allocInfo->allocInfo.raType2.rbStart, \
25835 allocInfo->allocInfo.raType2.numRb);
25838 #endif /* end of ifndef LTE_TDD */
25840 rgSCHCmnNonDlfsUpdTyp2Alloc(cell, dlSf, \
25841 allocInfo->allocInfo.raType2.rbStart, \
25842 allocInfo->allocInfo.raType2.numRb);
25844 /* LTE_ADV_FLAG_REMOVED_END */
25846 allocInfo->rbsAlloc = allocInfo->rbsReq;
25847 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
/* 5GTF grant construction (DCI format B1, VRBG-based RIV). */
25851 allocInfo->pdcch->dci.u.format1aInfo.t.pdschInfo.isTBMsg4 = TRUE;
25853 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart = beamInfo->vrbgStart;
25854 allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg = allocInfo->vrbgReq;
25856 /* Update allocation information */
25857 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
25859 allocInfo->tbInfo[0].tbCb->dlGrnt.xPDSCHRange = 1;
25860 allocInfo->tbInfo[0].tbCb->dlGrnt.rbAssign = rgSCHCmnCalcRiv(MAX_5GTF_VRBG,
25861 allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart, allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg);
25863 allocInfo->tbInfo[0].tbCb->dlGrnt.rbStrt = (allocInfo->tbInfo[0].tbCb->dlGrnt.vrbgStart * MAX_5GTF_VRBG_SIZE);
25864 allocInfo->tbInfo[0].tbCb->dlGrnt.numRb = (allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg * MAX_5GTF_VRBG_SIZE);
25867 beamInfo->vrbgStart += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25868 beamInfo->totVrbgAllocated += allocInfo->tbInfo[0].tbCb->dlGrnt.numVrbg;
25869 allocInfo->tbInfo[0].bytesAlloc = allocInfo->tbInfo[0].bytesReq;
25877 * @brief Performs RB allocation for Msg4 lists of frequency non-selective cell.
25881 * Function : rgSCHCmnNonDlfsMsg4Alloc
25883 * Processing Steps:
25884 * - For each element in the list, Call rgSCHCmnNonDlfsMsg4RbAlloc().
25885 * - If allocation is successful, add the raCb to scheduled list of MSG4.
25886 * - else, add RaCb to non-scheduled list.
25888 * @param[in] RgSchCellCb *cell
25889 * @param[in, out] RgSchCmnMsg4RbAlloc *allocInfo
25890 * @param[in] U8 isRetx
25895 PRIVATE Void rgSCHCmnNonDlfsMsg4Alloc
25898 RgSchCmnMsg4RbAlloc *allocInfo,
25902 PRIVATE Void rgSCHCmnNonDlfsMsg4Alloc(cell, allocInfo, isRetx)
25904 RgSchCmnMsg4RbAlloc *allocInfo;
25909 CmLListCp *msg4Lst = NULLP;
25910 CmLListCp *schdMsg4Lst = NULLP;
25911 CmLListCp *nonSchdMsg4Lst = NULLP;
25912 CmLList *schdLnkNode = NULLP;
25913 CmLList *toBeSchdLnk = NULLP;
25914 RgSchDlSf *dlSf = allocInfo->msg4DlSf;
25915 RgSchRaCb *raCb = NULLP;
25916 RgSchDlHqProcCb *hqP = NULLP;
/* List selection: retransmission lists vs fresh-transmission lists.
 * NOTE(review): the isRetx branch condition line is not visible in this
 * sampled excerpt — presumably `if (isRetx)` selects the Retx triple. */
25920 /* Initialize re-transmitting lists */
25921 msg4Lst = &(allocInfo->msg4RetxLst);
25922 schdMsg4Lst = &(allocInfo->schdMsg4RetxLst);
25923 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4RetxLst);
25927 /* Initialize transmitting lists */
25928 msg4Lst = &(allocInfo->msg4TxLst);
25929 schdMsg4Lst = &(allocInfo->schdMsg4TxLst);
25930 nonSchdMsg4Lst = &(allocInfo->nonSchdMsg4TxLst);
25933 /* Perform allocaations for the list */
25934 toBeSchdLnk = cmLListFirst(msg4Lst);
25935 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
/* Each list node is a DL HARQ process; its RA cb is the MSG4 target */
25937 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25938 raCb = hqP->hqE->raCb;
25939 schdLnkNode = &hqP->schdLstLnk;
25940 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25941 ret = rgSCHCmnNonDlfsMsg4RbAlloc(cell, raCb, dlSf);
25944 /* Allocation failed: Add remaining MSG4 nodes to non-scheduled
25945 * list and return */
/* Drain the rest of the pending list into nonSchdMsg4Lst — once one
 * allocation fails no later node is attempted in this subframe */
25948 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
25949 raCb = hqP->hqE->raCb;
25950 schdLnkNode = &hqP->schdLstLnk;
25951 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
25952 cmLListAdd2Tail(nonSchdMsg4Lst, schdLnkNode);
25953 toBeSchdLnk = toBeSchdLnk->next;
25954 } while(toBeSchdLnk);
25958 /* Allocation successful: Add UE to the scheduled list */
25959 cmLListAdd2Tail(schdMsg4Lst, schdLnkNode);
25970 * @brief Performs RB allocation for the list of UEs of a frequency
25971 * non-selective cell.
25975 * Function : rgSCHCmnNonDlfsDedRbAlloc
25977 * Processing Steps:
25978 * - For each element in the list, Call rgSCHCmnNonDlfsUeRbAlloc().
25979 * - If allocation is successful, add the ueCb to scheduled list of UEs.
25980 * - else, add ueCb to non-scheduled list of UEs.
25982 * @param[in] RgSchCellCb *cell
25983 * @param[in, out] RgSchCmnUeRbAlloc *allocInfo
25984 * @param[in] CmLListCp *ueLst,
25985 * @param[in, out] CmLListCp *schdHqPLst,
25986 * @param[in, out] CmLListCp *nonSchdHqPLst
25991 Void rgSCHCmnNonDlfsDedRbAlloc
25994 RgSchCmnUeRbAlloc *allocInfo,
25996 CmLListCp *schdHqPLst,
25997 CmLListCp *nonSchdHqPLst
26000 Void rgSCHCmnNonDlfsDedRbAlloc(cell, allocInfo, ueLst,
26001 schdHqPLst, nonSchdHqPLst)
26003 RgSchCmnUeRbAlloc *allocInfo;
26005 CmLListCp *schdHqPLst;
26006 CmLListCp *nonSchdHqPLst;
26010 CmLList *schdLnkNode = NULLP;
26011 CmLList *toBeSchdLnk = NULLP;
26012 RgSchDlSf *dlSf = allocInfo->dedDlSf;
26013 RgSchUeCb *ue = NULLP;
26014 RgSchDlHqProcCb *hqP = NULLP;
26018 /* Perform allocaations for the list */
26019 toBeSchdLnk = cmLListFirst(ueLst);
26020 for (; toBeSchdLnk; toBeSchdLnk = toBeSchdLnk->next)
/* List nodes are DL HARQ processes; ue is presumably taken from hqP
 * on a line not visible in this sampled excerpt */
26022 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
26024 schdLnkNode = &hqP->schdLstLnk;
26025 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
/* isDlBwAvail (declared on a missing line) lets the callee signal that
 * the subframe bandwidth is exhausted */
26027 ret = rgSCHCmnNonDlfsUeRbAlloc(cell, ue, dlSf, &isDlBwAvail);
26030 /* Allocation failed: Add remaining UEs to non-scheduled
26031 * list and return */
26034 hqP = (RgSchDlHqProcCb *)(toBeSchdLnk->node);
26036 schdLnkNode = &hqP->schdLstLnk;
26037 RG_SCH_CMN_INIT_SCHD_LNK(schdLnkNode, hqP);
26038 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
26039 toBeSchdLnk = toBeSchdLnk->next;
26040 } while(toBeSchdLnk);
26046 #if defined (TENB_STATS) && defined (RG_5GTF)
/* Stats counter: successful dedicated DL RB allocations (5GTF build) */
26047 cell->tenbStats->sch.dl5gtfRbAllocPass++;
26049 /* Allocation successful: Add UE to the scheduled list */
26050 cmLListAdd2Tail(schdHqPLst, schdLnkNode);
26054 #if defined (TENB_STATS) && defined (RG_5GTF)
26055 cell->tenbStats->sch.dl5gtfRbAllocFail++;
26057 /* Allocation failed : Add UE to the non-scheduled list */
26058 printf("5GTF_ERROR Dl rb alloc failed adding nonSchdHqPLst\n");
26059 cmLListAdd2Tail(nonSchdHqPLst, schdLnkNode);
26067 * @brief Handles RB allocation for frequency non-selective cell.
26071 * Function : rgSCHCmnNonDlfsRbAlloc
26073 * Invoking Module Processing:
26074 * - SCH shall invoke this if downlink frequency selective is disabled for
26075 * the cell for RB allocation.
26076 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
26077 * estimate and subframe for each allocation to be made to SCH.
26079 * Processing Steps:
26080 * - Allocate sequentially for common channels.
26081 * - For transmitting and re-transmitting UE list.
26083 * - Perform wide-band allocations for UE in increasing order of
26085 * - Determine Imcs for the allocation.
26086 * - Determine RA type.
26087 * - Determine DCI format.
26089 * @param[in] RgSchCellCb *cell
26090 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
26095 Void rgSCHCmnNonDlfsRbAlloc
26098 RgSchCmnDlRbAllocInfo *allocInfo
26101 Void rgSCHCmnNonDlfsRbAlloc(cell, allocInfo)
26103 RgSchCmnDlRbAllocInfo *allocInfo;
26107 RgSchDlRbAlloc *reqAllocInfo;
/* Order matters below: MSG4 retx, MSG4 tx, CCCH SDU retx/tx, RAR, then
 * dedicated UE lists (tx+retx, retx-only, tx-only) */
26109 /* Allocate for MSG4 retransmissions */
26110 if (allocInfo->msg4Alloc.msg4RetxLst.count)
26112 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc RetxLst\n");
26113 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), TRUE);
26116 /* Allocate for MSG4 transmissions */
26117 /* Assuming all the nodes in the list need allocations: rbsReq is valid */
26118 if (allocInfo->msg4Alloc.msg4TxLst.count)
26120 printf("5GTF_ERROR rgSCHCmnNonDlfsMsg4Alloc txLst\n");
26121 rgSCHCmnNonDlfsMsg4Alloc(cell, &(allocInfo->msg4Alloc), FALSE);
26124 /* Allocate for CCCH SDU (received after guard timer expiry)
26125 * retransmissions */
26126 if (allocInfo->ccchSduAlloc.ccchSduRetxLst.count)
26128 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
26129 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), TRUE);
26132 /* Allocate for CCCD SDU transmissions */
26133 /* Allocate for CCCH SDU (received after guard timer expiry) transmissions */
26134 if (allocInfo->ccchSduAlloc.ccchSduTxLst.count)
26136 printf("5GTF_ERROR rgSCHCmnNonDlfsCcchSduAlloc\n");
26137 rgSCHCmnNonDlfsCcchSduAlloc(cell, &(allocInfo->ccchSduAlloc), FALSE);
26141 /* Allocate for Random access response */
26142 for (raRspCnt = 0; raRspCnt < RG_SCH_CMN_MAX_CMN_PDCCH; ++raRspCnt)
26144 /* Assuming that the requests will be filled in sequentially */
26145 reqAllocInfo = &(allocInfo->raRspAlloc[raRspCnt]);
26146 if (!reqAllocInfo->rbsReq)
26150 printf("5GTF_ERROR calling RAR rgSCHCmnNonDlfsCmnRbAlloc\n");
26151 // if ((rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo)) != ROK)
26152 if ((rgSCHCmnNonDlfsCmnRbAllocRar(cell, reqAllocInfo)) != ROK)
26158 /* Allocate for RETX+TX UEs */
26159 if(allocInfo->dedAlloc.txRetxHqPLst.count)
26161 printf("5GTF_ERROR TX RETX rgSCHCmnNonDlfsDedRbAlloc\n");
26162 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26163 &(allocInfo->dedAlloc.txRetxHqPLst),
26164 &(allocInfo->dedAlloc.schdTxRetxHqPLst),
26165 &(allocInfo->dedAlloc.nonSchdTxRetxHqPLst));
26168 if((allocInfo->dedAlloc.retxHqPLst.count))
26170 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26171 &(allocInfo->dedAlloc.retxHqPLst),
26172 &(allocInfo->dedAlloc.schdRetxHqPLst),
26173 &(allocInfo->dedAlloc.nonSchdRetxHqPLst));
26176 /* Allocate for transmitting UEs */
26177 if((allocInfo->dedAlloc.txHqPLst.count))
26179 rgSCHCmnNonDlfsDedRbAlloc(cell, &(allocInfo->dedAlloc),
26180 &(allocInfo->dedAlloc.txHqPLst),
26181 &(allocInfo->dedAlloc.schdTxHqPLst),
26182 &(allocInfo->dedAlloc.nonSchdTxHqPLst));
/* Sanity check: scheduler-selected UE count should not exceed the cell's
 * configured per-subframe limit; log-only, no corrective action here */
26185 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cell);
26186 if ((allocInfo->dedAlloc.txRetxHqPLst.count +
26187 allocInfo->dedAlloc.retxHqPLst.count +
26188 allocInfo->dedAlloc.txHqPLst.count) >
26189 cmnCell->dl.maxUePerDlSf)
/* Format width of CmLList count differs between 32/64-bit builds, hence
 * the two RGSCHDBGERRNEW variants */
26191 #ifndef ALIGN_64BIT
26192 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
26193 " scheduler exceed maximumUePerDlSf(%u)tx-retx %ld retx %ld tx %ld\n",
26194 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
26195 allocInfo->dedAlloc.retxHqPLst.count,
26196 allocInfo->dedAlloc.txHqPLst.count));
26198 RGSCHDBGERRNEW(cell->instIdx,(rgSchPBuf(cell->instIdx),"UEs selected by"
26199 " scheduler exceed maximumUePerDlSf(%u)tx-retx %d retx %d tx %d\n",
26200 cmnCell->dl.maxUePerDlSf, allocInfo->dedAlloc.txRetxHqPLst.count,
26201 allocInfo->dedAlloc.retxHqPLst.count,
26202 allocInfo->dedAlloc.txHqPLst.count));
26207 /* LTE_ADV_FLAG_REMOVED_START */
/* DSFR: recompute RNTP (relative narrowband tx power) info when enabled */
26208 if(cell->lteAdvCb.dsfrCfg.status == RGR_ENABLE)
26210 printf("5GTF_ERROR RETX rgSCHCmnNonDlfsDsfrRntpComp\n");
26211 rgSCHCmnNonDlfsDsfrRntpComp(cell, allocInfo->dedAlloc.dedDlSf);
26213 /* LTE_ADV_FLAG_REMOVED_END */
26214 #endif /* LTE_TDD */
26218 /***********************************************************
26220 * Func : rgSCHCmnCalcRiv
26222 * Desc : This function calculates RIV.
26228 * File : rg_sch_utl.c
26230 **********************************************************/
/*
 * RIV (Resource Indication Value) for Type-2 localized resource
 * allocation, per 3GPP TS 36.213 sec 7.1.6.3:
 *   if (numRb-1) <= bw/2 : RIV = bw*(numRb-1) + rbStart
 *   else                 : RIV = bw*(bw-numRb+1) + (bw-1-rbStart)
 * bw is the number of allocatable units (RBs, or VRBGs on the 5GTF path).
 * NOTE(review): two prototype fragments appear below because this sampled
 * excerpt shows both ANSI and K&R declarations from the original #ifdef.
 */
26233 U32 rgSCHCmnCalcRiv
26240 U32 rgSCHCmnCalcRiv(bw, rbStart, numRb)
26247 U32 rgSCHCmnCalcRiv
26254 U32 rgSCHCmnCalcRiv(bw, rbStart, numRb)
26261 U8 numRbMinus1 = numRb - 1;
26265 if (numRbMinus1 <= bw/2)
26267 riv = bw * numRbMinus1 + rbStart;
26271 riv = bw * (bw - numRbMinus1) + (bw - rbStart - 1);
26274 } /* rgSCHCmnCalcRiv */
26278 * @brief This function allocates and copies the RACH response scheduling
26279 * related information into cell control block.
26283 * Function: rgSCHCmnDlCpyRachInfo
26284 * Purpose: This function allocates and copies the RACH response
26285 * scheduling related information into cell control block
26286 * for each DL subframe.
26289 * Invoked by: Scheduler
26291 * @param[in] RgSchCellCb* cell
26292 * @param[in] RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES]
26293 * @param[in] U8 raArrSz
26298 PRIVATE S16 rgSCHCmnDlCpyRachInfo
26301 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES],
26305 PRIVATE S16 rgSCHCmnDlCpyRachInfo(cell, rachRspLst, raArrSz)
26307 RgSchTddRachRspLst rachRspLst[][RGSCH_NUM_SUB_FRAMES];
26311 U8 ulDlCfgIdx = cell->ulDlCfgIdx;
26321 /* Allocate RACH response information for each DL
26322 * subframe in a radio frame */
/* Table is sized by the number of DL subframes for this TDD UL/DL config;
 * allocation failure handling is on lines not visible in this excerpt */
26323 ret = rgSCHUtlAllocSBuf(cell->instIdx, (Data **)&cell->rachRspLst,
26324 rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][RGSCH_NUM_SUB_FRAMES-1] *
26325 sizeof(RgSchTddRachRspLst));
/* Walk SFN indices from high to low; for each, visit DL subframes via the
 * TDD "highest DL subframe index" lookup.  NOTE(review): subfrmIdx is
 * reused as both loop counter and table result — confirm intent against
 * the full file before modifying. */
26331 for(sfnIdx=raArrSz-1; sfnIdx>=0; sfnIdx--)
26333 for(subfrmIdx=0; subfrmIdx < RGSCH_NUM_SUB_FRAMES; subfrmIdx++)
26335 subfrmIdx = rgSchTddHighDlSubfrmIdxTbl[ulDlCfgIdx][subfrmIdx];
26336 if(subfrmIdx == RGSCH_NUM_SUB_FRAMES)
26341 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rachRspLst[sfnIdx],subfrmIdx);
26343 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
26345 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, rgSchTddNumDlSubfrmTbl[ulDlCfgIdx],subfrmIdx);
/* sfNum: destination slot in cell->rachRspLst for this DL subframe */
26346 sfNum = rgSchTddNumDlSubfrmTbl[ulDlCfgIdx][subfrmIdx]-1;
26347 numRfs = cell->rachRspLst[sfNum].numRadiofrms;
26348 /* For each DL subframe in which RACH response can
26349 * be sent is updated */
26352 cell->rachRspLst[sfNum].rachRsp[numRfs].sfnOffset =
26353 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].sfnOffset;
26354 for(sfcount=0; sfcount < numSubfrms; sfcount++)
26356 cell->rachRspLst[sfNum].rachRsp[numRfs].\
26357 subframe[sfcount] =
26358 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].\
26361 cell->rachRspLst[sfNum].rachRsp[numRfs].numSubfrms =
26362 rachRspLst[sfnIdx][subfrmIdx].rachRsp[0].numSubfrms;
26363 cell->rachRspLst[sfNum].numRadiofrms++;
26366 /* Copy the subframes to be deleted at ths subframe */
26368 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
26371 cell->rachRspLst[sfNum].delInfo.sfnOffset =
26372 rachRspLst[sfnIdx][subfrmIdx].delInfo.sfnOffset;
26373 for(sfcount=0; sfcount < numSubfrms; sfcount++)
26375 cell->rachRspLst[sfNum].delInfo.subframe[sfcount] =
26376 rachRspLst[sfnIdx][subfrmIdx].delInfo.subframe[sfcount];
26378 cell->rachRspLst[sfNum].delInfo.numSubfrms =
26379 rachRspLst[sfnIdx][subfrmIdx].delInfo.numSubfrms;
26387 * @brief This function determines the iTbs based on the new CFI,
26388 * CQI and BLER based delta iTbs
26392 * Function: rgSchCmnFetchItbs
26393 * Purpose: Fetch the new iTbs when CFI changes.
26395 * @param[in] RgSchCellCb *cell
26396 * @param[in] RgSchCmnDlUe *ueDl
26397 * @param[in] U8 cqi
/* NOTE(review): two prototype fragments below — LTE_TDD builds take an
 * extra subFrm parameter (used for special-subframe handling); FDD builds
 * do not.  The #ifdef lines themselves are missing from this excerpt. */
26404 PRIVATE S32 rgSchCmnFetchItbs
26407 RgSchCmnDlUe *ueDl,
26415 PRIVATE S32 rgSchCmnFetchItbs (cell, ueDl, subFrm, cqi, cfi, cwIdx, noLyr)
26417 RgSchCmnDlUe *ueDl;
26426 PRIVATE S32 rgSchCmnFetchItbs
26429 RgSchCmnDlUe *ueDl,
26436 PRIVATE S32 rgSchCmnFetchItbs (cell, ueDl, cqi, cfi, cwIdx, noLyr)
26438 RgSchCmnDlUe *ueDl;
26447 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
26452 /* Special Handling for Spl Sf when CFI is 3 as
26453 * CFI in Spl Sf will be max 2 */
26454 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
26456 if((cellDl->currCfi == 3) ||
26457 ((cell->bwCfg.dlTotalBw <= 10) && (cellDl->currCfi == 1)))
26459 /* Use CFI 2 in this case */
/* deltaiTbs is a BLER-driven link-adaptation correction scaled by 100,
 * hence the *100 / 100 arithmetic around the CQI->TBS table lookup */
26460 iTbs = (ueDl->laCb[cwIdx].deltaiTbs +
26461 ((*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][2]))[cqi])* 100)/100;
26463 RG_SCH_CHK_ITBS_RANGE(iTbs, RGSCH_NUM_ITBS - 1);
/* CFI unchanged: reuse cached per-layer iTbs for this codeword */
26467 iTbs = ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1];
26469 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
26471 else /* CFI Changed. Update with new iTbs Reset the BLER*/
26474 S32 tmpiTbs = (*(RgSchCmnCqiToTbs *)(cellDl->cqiToTbsTbl[0][cfi]))[cqi];
26476 iTbs = (ueDl->laCb[cwIdx].deltaiTbs + tmpiTbs*100)/100;
26478 RG_SCH_CHK_ITBS_RANGE(iTbs, tmpiTbs);
26480 iTbs = RGSCH_MIN(iTbs, cell->thresholds.maxDlItbs);
/* Cache new iTbs, remember the CFI it was computed for, and reset the
 * accumulated BLER delta */
26482 ueDl->mimoInfo.cwInfo[cwIdx].iTbs[noLyr - 1] = iTbs;
26484 ueDl->lastCfi = cfi;
26485 ueDl->laCb[cwIdx].deltaiTbs = 0;
26492 * @brief This function determines the RBs and Bytes required for BO
26493 * transmission for UEs configured with TM 1/2/6/7.
26497 * Function: rgSCHCmnDlAllocTxRb1Tb1Cw
26498 * Purpose: Allocate TB1 on CW1.
26500 * Reference Parameter effBo is filled with alloced bytes.
26501 * Returns RFAILED if BO not satisfied at all.
26503 * Invoked by: rgSCHCmnDlAllocTxRbTM1/2/6/7
26505 * @param[in] RgSchCellCb *cell
26506 * @param[in] RgSchDlSf *subFrm
26507 * @param[in] RgSchUeCb *ue
26508 * @param[in] U32 bo
26509 * @param[out] U32 *effBo
26510 * @param[in] RgSchDlHqProcCb *proc
26511 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26516 PRIVATE Void rgSCHCmnDlAllocTxRb1Tb1Cw
26523 RgSchDlHqProcCb *proc,
26524 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26527 PRIVATE Void rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26533 RgSchDlHqProcCb *proc;
26534 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26537 RgSchDlRbAlloc *allocInfo;
26542 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* 5GTF path: DCI format keyed off UE rank (B2 for rank 2, else B1);
 * the non-5GTF path below picks a TM-specific format instead.
 * NOTE(review): the #ifdef lines separating these paths are missing
 * from this sampled excerpt. */
26544 if (ue->ue5gtfCb.rank == 2)
26546 allocInfo->dciFormat = TFU_DCI_FORMAT_B2;
26550 allocInfo->dciFormat = TFU_DCI_FORMAT_B1;
26553 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
26554 allocInfo->raType);
/* Single-codeword allocation for TB1; numRb and effBo are outputs */
26556 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
26557 bo, &numRb, effBo);
26558 if (ret == RFAILED)
26560 /* If allocation couldn't be made then return */
26563 /* Adding UE to RbAllocInfo TX Lst */
26564 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
26565 /* Fill UE alloc Info */
26566 allocInfo->rbsReq = numRb;
26567 allocInfo->dlSf = subFrm;
/* 5GTF: convert RB requirement to VRBG units */
26569 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
26577 * @brief This function determines the RBs and Bytes required for BO
26578 * retransmission for UEs configured with TM 1/2/6/7.
26582 * Function: rgSCHCmnDlAllocRetxRb1Tb1Cw
26583 * Purpose: Allocate TB1 on CW1.
26585 * Reference Parameter effBo is filled with alloced bytes.
26586 * Returns RFAILED if BO not satisfied at all.
26588 * Invoked by: rgSCHCmnDlAllocRetxRbTM1/2/6/7
26590 * @param[in] RgSchCellCb *cell
26591 * @param[in] RgSchDlSf *subFrm
26592 * @param[in] RgSchUeCb *ue
26593 * @param[in] U32 bo
26594 * @param[out] U32 *effBo
26595 * @param[in] RgSchDlHqProcCb *proc
26596 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26601 PRIVATE Void rgSCHCmnDlAllocRetxRb1Tb1Cw
26608 RgSchDlHqProcCb *proc,
26609 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26612 PRIVATE Void rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26618 RgSchDlHqProcCb *proc;
26619 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26622 RgSchDlRbAlloc *allocInfo;
26627 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
26630 /* 5GTF: RETX DCI format same as TX */
26631 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
26632 &allocInfo->raType);
26635 /* Get the Allocation in terms of RBs that are required for
26636 * this retx of TB1 */
26637 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
26639 if (ret == RFAILED)
26641 /* Allocation couldn't be made for Retx */
26642 /* Fix : syed If TxRetx allocation failed then add the UE along with the proc
26643 * to the nonSchdTxRetxUeLst and let spfc scheduler take care of it during
26645 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* Success: queue the UE on the cell-wide RETX list and record the
 * requirement in its allocation cb */
26648 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
26649 /* Fill UE alloc Info */
26650 allocInfo->rbsReq = numRb;
26651 allocInfo->dlSf = subFrm;
/* 5GTF: convert RB requirement to VRBG units */
26653 allocInfo->vrbgReq = numRb/MAX_5GTF_VRBG_SIZE;
26661 * @brief This function determines the RBs and Bytes required for BO
26662 * transmission for UEs configured with TM 2.
26666 * Function: rgSCHCmnDlAllocTxRbTM1
/* NOTE(review): @brief above says "TM 2" but this is the TM 1 handler —
 * the header text looks copy-pasted; the code below is the intended
 * single-TB/single-CW allocation. */
26669 * Reference Parameter effBo is filled with alloced bytes.
26670 * Returns RFAILED if BO not satisfied at all.
26672 * Invoked by: rgSCHCmnDlAllocTxRb
26674 * @param[in] RgSchCellCb *cell
26675 * @param[in] RgSchDlSf *subFrm
26676 * @param[in] RgSchUeCb *ue
26677 * @param[in] U32 bo
26678 * @param[out] U32 *effBo
26679 * @param[in] RgSchDlHqProcCb *proc
26680 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26685 PRIVATE Void rgSCHCmnDlAllocTxRbTM1
26692 RgSchDlHqProcCb *proc,
26693 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26696 PRIVATE Void rgSCHCmnDlAllocTxRbTM1(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26702 RgSchDlHqProcCb *proc;
26703 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM1 has one TB on one CW: delegate directly to the shared helper */
26706 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26712 * @brief This function determines the RBs and Bytes required for BO
26713 * retransmission for UEs configured with TM 2.
26717 * Function: rgSCHCmnDlAllocRetxRbTM1
/* NOTE(review): @brief above says "TM 2" but this is the TM 1 retx
 * handler — header text appears copy-pasted. */
26720 * Reference Parameter effBo is filled with alloced bytes.
26721 * Returns RFAILED if BO not satisfied at all.
26723 * Invoked by: rgSCHCmnDlAllocRetxRb
26725 * @param[in] RgSchCellCb *cell
26726 * @param[in] RgSchDlSf *subFrm
26727 * @param[in] RgSchUeCb *ue
26728 * @param[in] U32 bo
26729 * @param[out] U32 *effBo
26730 * @param[in] RgSchDlHqProcCb *proc
26731 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26736 PRIVATE Void rgSCHCmnDlAllocRetxRbTM1
26743 RgSchDlHqProcCb *proc,
26744 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26747 PRIVATE Void rgSCHCmnDlAllocRetxRbTM1(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26753 RgSchDlHqProcCb *proc;
26754 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM1 retx: one TB on one CW — delegate to the shared retx helper */
26757 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26763 * @brief This function determines the RBs and Bytes required for BO
26764 * transmission for UEs configured with TM 2.
26768 * Function: rgSCHCmnDlAllocTxRbTM2
26771 * Reference Parameter effBo is filled with alloced bytes.
26772 * Returns RFAILED if BO not satisfied at all.
26774 * Invoked by: rgSCHCmnDlAllocTxRb
26776 * @param[in] RgSchCellCb *cell
26777 * @param[in] RgSchDlSf *subFrm
26778 * @param[in] RgSchUeCb *ue
26779 * @param[in] U32 bo
26780 * @param[out] U32 *effBo
26781 * @param[in] RgSchDlHqProcCb *proc
26782 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26787 PRIVATE Void rgSCHCmnDlAllocTxRbTM2
26794 RgSchDlHqProcCb *proc,
26795 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26798 PRIVATE Void rgSCHCmnDlAllocTxRbTM2(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26804 RgSchDlHqProcCb *proc;
26805 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM2 (tx diversity) still carries one TB on one CW — shared helper */
26808 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26814 * @brief This function determines the RBs and Bytes required for BO
26815 * retransmission for UEs configured with TM 2.
26819 * Function: rgSCHCmnDlAllocRetxRbTM2
26822 * Reference Parameter effBo is filled with alloced bytes.
26823 * Returns RFAILED if BO not satisfied at all.
26825 * Invoked by: rgSCHCmnDlAllocRetxRb
26827 * @param[in] RgSchCellCb *cell
26828 * @param[in] RgSchDlSf *subFrm
26829 * @param[in] RgSchUeCb *ue
26830 * @param[in] U32 bo
26831 * @param[out] U32 *effBo
26832 * @param[in] RgSchDlHqProcCb *proc
26833 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26838 PRIVATE Void rgSCHCmnDlAllocRetxRbTM2
26845 RgSchDlHqProcCb *proc,
26846 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26849 PRIVATE Void rgSCHCmnDlAllocRetxRbTM2(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26855 RgSchDlHqProcCb *proc;
26856 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM2 retx: one TB on one CW — delegate to the shared retx helper */
26859 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
26865 * @brief This function determines the RBs and Bytes required for BO
26866 * transmission for UEs configured with TM 3.
26870 * Function: rgSCHCmnDlAllocTxRbTM3
26873 * Reference Parameter effBo is filled with alloced bytes.
26874 * Returns RFAILED if BO not satisfied at all.
26876 * Invoked by: rgSCHCmnDlAllocTxRb
26878 * @param[in] RgSchCellCb *cell
26879 * @param[in] RgSchDlSf *subFrm
26880 * @param[in] RgSchUeCb *ue
26881 * @param[in] U32 bo
26882 * @param[out] U32 *effBo
26883 * @param[in] RgSchDlHqProcCb *proc
26884 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26889 PRIVATE Void rgSCHCmnDlAllocTxRbTM3
26896 RgSchDlHqProcCb *proc,
26897 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26900 PRIVATE Void rgSCHCmnDlAllocTxRbTM3(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26906 RgSchDlHqProcCb *proc;
26907 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
26912 /* Both TBs free for TX allocation */
/* TM3 (open-loop spatial multiplexing): fresh TX may use both TBs */
26913 rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo,\
26914 proc, cellWdAllocInfo);
26921 * @brief This function determines the RBs and Bytes required for BO
26922 * retransmission for UEs configured with TM 3.
26926 * Function: rgSCHCmnDlAllocRetxRbTM3
26929 * Reference Parameter effBo is filled with alloced bytes.
26930 * Returns RFAILED if BO not satisfied at all.
26932 * Invoked by: rgSCHCmnDlAllocRetxRb
26934 * @param[in] RgSchCellCb *cell
26935 * @param[in] RgSchDlSf *subFrm
26936 * @param[in] RgSchUeCb *ue
26937 * @param[in] U32 bo
26938 * @param[out] U32 *effBo
26939 * @param[in] RgSchDlHqProcCb *proc
26940 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26945 PRIVATE Void rgSCHCmnDlAllocRetxRbTM3
26952 RgSchDlHqProcCb *proc,
26953 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
26956 PRIVATE Void rgSCHCmnDlAllocRetxRbTM3(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
26962 RgSchDlHqProcCb *proc;
26963 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* Dispatch on HARQ TB states: both NACKED -> dual-TB retx, otherwise
 * one TB needs retx (the other may get a fresh TX) */
26968 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
26969 (proc->tbInfo[1].state == HQ_TB_NACKED))
26972 printf ("RETX RB TM3 nack for both hqp %d cell %d \n", proc->procId, proc->hqE->cell->cellId);
26974 /* Both TBs require RETX allocation */
26975 rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo,\
26976 proc, cellWdAllocInfo);
26980 /* One of the TBs need RETX allocation. Other TB may/maynot
26981 * be available for new TX allocation. */
26982 rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo,\
26983 proc, cellWdAllocInfo);
26991 * @brief This function performs the DCI format selection in case of
26992 * Transmit Diversity scheme where there can be more
26993 * than 1 option for DCI format selection.
26997 * Function: rgSCHCmnSlctPdcchFrmt
26998 * Purpose: 1. If DLFS is enabled, then choose TM specific
26999 * DCI format for Transmit diversity. All the
27000 * TM Specific DCI Formats support Type0 and/or
27001 * Type1 resource allocation scheme. DLFS
27002 * supports only Type-0&1 Resource allocation.
27003 * 2. If DLFS is not enabled, select a DCI format
27004 * which is of smaller size. Since Non-DLFS
27005 * scheduler supports all Resource allocation
27006 * schemes, selection is based on efficiency.
27008 * Invoked by: DL UE Allocation by Common Scheduler.
27010 * @param[in] RgSchCellCb *cell
27011 * @param[in] RgSchUeCb *ue
27012 * @param[out] U8 *raType
27013 * @return TfuDciFormat
27017 TfuDciFormat rgSCHCmnSlctPdcchFrmt
27024 TfuDciFormat rgSCHCmnSlctPdcchFrmt(cell, ue, raType)
27030 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
27033 /* ccpu00140894- Selective DCI Format and RA type should be selected only
27034 * after TX Mode transition is completed*/
/* DLFS + completed TM transition: TM-specific format/RA-type from the
 * per-TM options table (txMode is 1-based, hence the -1) */
27035 if ((cellSch->dl.isDlFreqSel) && (ue->txModeTransCmplt))
27037 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciRAType;
27038 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].spfcDciFrmt);
/* Otherwise fall back to the preferred (smaller / more efficient) format */
27042 *raType = rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciRAType;
27043 return (rgSchCmnDciFrmtOptns[ue->mimoInfo.txMode-1].prfrdDciFrmt);
27049 * @brief This function handles Retx allocation in case of TM3 UEs
27050 * where both the TBs were NACKED previously.
27054 * Function: rgSCHCmnDlTM3RetxRetx
27055 * Purpose: If forceTD flag enabled
27056 * TD for TB1 on CW1.
27058 * DCI Frmt 2A and RA Type 0
27059 * RI layered SM of both TBs on 2 CWs
27060 * Add UE to cell Alloc Info.
27061 * Fill UE alloc Info.
27064 * Successful allocation is indicated by non-zero effBo value.
27066 * Invoked by: rgSCHCmnDlAllocRbTM3
27068 * @param[in] RgSchCellCb *cell
27069 * @param[in] RgSchDlSf *subFrm
27070 * @param[in] RgSchUeCb *ue
27071 * @param[in] U32 bo
27072 * @param[out] U32 *effBo
27073 * @param[in] RgSchDlHqProcCb *proc
27074 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27079 PRIVATE Void rgSCHCmnDlTM3RetxRetx
27086 RgSchDlHqProcCb *proc,
27087 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27090 PRIVATE Void rgSCHCmnDlTM3RetxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
27096 RgSchDlHqProcCb *proc;
27097 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
27101 RgSchDlRbAlloc *allocInfo;
27110 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
27112 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
27114 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
27115 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
/* Two-codeword retx RB computation; numRb/swpFlg are outputs */
27117 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
27119 if (ret == RFAILED)
27121 /* Allocation couldn't be made for Retx */
27122 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
27125 /* Fix for ccpu00123927: Retransmit 2 codewords irrespective of current rank */
27126 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
27127 #ifdef FOUR_TX_ANTENNA
27128 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1 should
27129 * have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
27130 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
27133 proc->cwSwpEnabled = TRUE;
/* Precoding info lookup: table row 0 is the TM3 (CDD) function family;
 * index is derived from the antenna-port count */
27136 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27137 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are tracked separately and not added to the cell-wide
 * retx list here */
27141 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
27144 /* Adding UE to allocInfo RETX Lst */
27145 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
27147 /* Fill UE alloc Info scratch pad */
27148 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
27149 precInfo, noTxLyrs, subFrm);
27156 * @brief This function handles Retx allocation in case of TM4 UEs
27157 * where both the TBs were NACKED previously.
27161 * Function: rgSCHCmnDlTM4RetxRetx
27162 * Purpose: If forceTD flag enabled
27163 * TD for TB1 on CW1.
27165 * DCI Frmt 2 and RA Type 0
27167 * 1 layer SM of TB1 on CW1.
27169 * RI layered SM of both TBs on 2 CWs
27170 * Add UE to cell Alloc Info.
27171 * Fill UE alloc Info.
27174 * Successful allocation is indicated by non-zero effBo value.
27176 * Invoked by: rgSCHCmnDlAllocRbTM4
27178 * @param[in] RgSchCellCb *cell
27179 * @param[in] RgSchDlSf *subFrm
27180 * @param[in] RgSchUeCb *ue
27181 * @param[in] U32 bo
27182 * @param[out] U32 *effBo
27183 * @param[in] RgSchDlHqProcCb *proc
27184 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27189 PRIVATE Void rgSCHCmnDlTM4RetxRetx
27196 RgSchDlHqProcCb *proc,
27197 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
27200 PRIVATE Void rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
27206 RgSchDlHqProcCb *proc;
27207 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
27211 RgSchDlRbAlloc *allocInfo;
27213 Bool swpFlg = FALSE;
27215 #ifdef FOUR_TX_ANTENNA
27222 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
27224 /* Irrespective of RI Schedule both CWs */
/* TM4 (closed-loop SM) uses DCI format 2 (vs 2A for TM3) */
27225 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
27226 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
27228 ret = rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc, &numRb, &swpFlg,\
27230 if (ret == RFAILED)
27232 /* Allocation couldn't be made for Retx */
27233 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
27236 noTxLyrs = proc->tbInfo[0].numLyrs + proc->tbInfo[1].numLyrs;
27238 #ifdef FOUR_TX_ANTENNA
27239 /*Chandra: For 4X4 MIM RETX with noTxLyrs=3, CW0 should be 1-LyrTB and CW1
27240 * should have 2-LyrTB as per Table 6.3.3.2-1 of 36.211 */
27241 if(noTxLyrs == 3 && proc->tbInfo[0].numLyrs==2)
27244 proc->cwSwpEnabled = TRUE;
/* Precoding info lookup: table row 1 is the TM4 function family */
27246 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27247 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are tracked separately from the cell-wide retx list */
27251 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
27254 /* Adding UE to allocInfo RETX Lst */
27255 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
27257 /* Fill UE alloc Info scratch pad */
27258 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
27259 precInfo, noTxLyrs, subFrm);
27267 * @brief This function determines Transmission attributes
27268 * incase of Spatial multiplexing for TX and RETX TBs.
27272 * Function: rgSCHCmnDlSMGetAttrForTxRetx
27273 * Purpose: 1. Reached here for a TM3/4 UE's HqP whose one of the TBs is
27274 * NACKED and the other TB is either NACKED or WAITING.
27275 * 2. Select the NACKED TB for RETX allocation.
27276 * 3. Allocation preference for RETX TB by mapping it to a better
27277 * CW (better in terms of efficiency).
27278 * 4. Determine the state of the other TB.
27279 * Determine if swapFlag were to be set.
27280 * Swap flag would be set if Retx TB is cross
27282 * 5. If UE has new data available for TX and if the other TB's state
27283 * is ACKED then set furtherScope as TRUE.
27285 * Invoked by: rgSCHCmnDlTM3[4]TxRetx
27287 * @param[in] RgSchUeCb *ue
27288 * @param[in] RgSchDlHqProcCb *proc
27289 * @param[out] RgSchDlHqTbCb **retxTb
27290 * @param[out] RgSchDlHqTbCb **txTb
27291 * @param[out] Bool *frthrScp
27292 * @param[out] Bool *swpFlg
/* Pick the NACKED TB as the RETX TB, the other TB as the (potential) new-TX
 * TB, decide CW swap for TM4 based on the better CW index, and report via
 * *frthrScp whether a fresh TX can ride on the other (ACKED) TB. */
27297 PRIVATE Void rgSCHCmnDlSMGetAttrForTxRetx
27300 RgSchDlHqProcCb *proc,
27301 RgSchDlHqTbCb **retxTb,
27302 RgSchDlHqTbCb **txTb,
27307 PRIVATE Void rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, frthrScp,\
27310 RgSchDlHqProcCb *proc;
27311 RgSchDlHqTbCb **retxTb;
27312 RgSchDlHqTbCb **txTb;
27317 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,proc->hqE->cell);
27318 RgSchDlRbAlloc *allocInfo;
/* TB1 NACKED: TB1 is retransmitted, TB2 is the candidate for a new TX */
27321 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27323 *retxTb = &proc->tbInfo[0];
27324 *txTb = &proc->tbInfo[1];
27325 /* TENB_BRDCM_TM4- Currently disabling swapflag for TM3/TM4, since
27326 * HqFeedback processing does not consider a swapped hq feedback */
/* For TM4, enable CW swap when the better codeword is CW1 so the RETX TB
 * lands on the more efficient codeword */
27327 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 1))
27330 proc->cwSwpEnabled = TRUE;
/* Further scope for a new TX only if the other TB is ACKED and the UE
 * actually has new data pending (hasNewTxData) */
27332 if (proc->tbInfo[1].state == HQ_TB_ACKED)
27334 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
27335 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
/* Mirror case: TB2 is the NACKED TB; TB1 may carry a new TX */
27340 *retxTb = &proc->tbInfo[1];
27341 *txTb = &proc->tbInfo[0];
27342 /* TENB_BRDCM_TM4 - Currently disabling swapflag for TM3/TM4, since
27343 * HqFeedback processing does not consider a swapped hq feedback */
27344 if ((ue->mimoInfo.txMode == RGR_UE_TM_4) && (ueDl->mimoInfo.btrCwIdx == 0))
27347 proc->cwSwpEnabled = TRUE;
27349 if (proc->tbInfo[0].state == HQ_TB_ACKED)
27351 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue, proc->hqE->cell);
27352 *frthrScp = allocInfo->mimoAllocInfo.hasNewTxData;
27360 * @brief Determine Precoding information for TM3 2 TX Antenna.
27364 * Function: rgSCHCmnDlTM3PrecInf2
27367 * Invoked by: rgSCHCmnDlGetAttrForTM3
27369 * @param[in] RgSchUeCb *ue
27370 * @param[in] U8 numTxLyrs
27371 * @param[in] Bool bothCwEnbld
/* Derive the DCI precoding-information field for a TM3 UE on a 2-antenna
 * cell (36.212 table for DCI format 2A); returns the precoding index. */
27376 PRIVATE U8 rgSCHCmnDlTM3PrecInf2
/* K&R-style duplicate definition; parameters: ue, numTxLyrs, bothCwEnbld */
27384 PRIVATE U8 rgSCHCmnDlTM3PrecInf2(ue, numTxLyrs, bothCwEnbld)
27397 * @brief Determine Precoding information for TM4 2 TX Antenna.
27401 * Function: rgSCHCmnDlTM4PrecInf2
27402 * Purpose: To determine a logic of deriving precoding index
27403 * information from 36.212 table 5.3.3.1.5-4
27405 * Invoked by: rgSCHCmnDlGetAttrForTM4
27407 * @param[in] RgSchUeCb *ue
27408 * @param[in] U8 numTxLyrs
27409 * @param[in] Bool bothCwEnbld
/* Derive the DCI format 2 precoding index for a TM4 UE on a 2-antenna cell
 * per 36.212 table 5.3.3.1.5-4, from the reported RI/PMI and whether both
 * codewords are enabled. */
27414 PRIVATE U8 rgSCHCmnDlTM4PrecInf2
27422 PRIVATE U8 rgSCHCmnDlTM4PrecInf2(ue, numTxLyrs, bothCwEnbld)
27429 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Scheduled layers match the reported RI: use the reported PMI directly */
27433 if (ueDl->mimoInfo.ri == numTxLyrs)
27435 if (ueDl->mimoInfo.ri == 2)
27437 /* PrecInfo corresponding to 2 CW
27439 if (ue->mimoInfo.puschFdbkVld)
27445 precIdx = ueDl->mimoInfo.pmi - 1;
27450 /* PrecInfo corresponding to 1 CW
27452 if (ue->mimoInfo.puschFdbkVld)
27458 precIdx = ueDl->mimoInfo.pmi + 1;
/* Scheduling fewer layers than RI: pick the column of the precoding
 * matrix corresponding to the best CQI */
27462 else if (ueDl->mimoInfo.ri > numTxLyrs)
27464 /* In case of choosing among the columns of a
27465 * precoding matrix, choose the column corresponding
27466 * to the MAX-CQI */
27467 if (ue->mimoInfo.puschFdbkVld)
27473 precIdx = (ueDl->mimoInfo.pmi- 1)* 2 + 1;
27476 else /* if RI < numTxLyrs */
27478 precIdx = (ueDl->mimoInfo.pmi < 2)? 0:1;
27485 * @brief Determine Precoding information for TM3 4 TX Antenna.
27489 * Function: rgSCHCmnDlTM3PrecInf4
27490 * Purpose: To determine a logic of deriving precoding index
27491 * information from 36.212 table 5.3.3.1.5A-2
27493 * Invoked by: rgSCHCmnDlGetAttrForTM3
27495 * @param[in] RgSchUeCb *ue
27496 * @param[in] U8 numTxLyrs
27497 * @param[in] Bool bothCwEnbld
/* Derive the DCI precoding index for a TM3 UE on a 4-antenna cell per
 * 36.212 table 5.3.3.1.5A-2; returns the precoding index. */
27502 PRIVATE U8 rgSCHCmnDlTM3PrecInf4
27510 PRIVATE U8 rgSCHCmnDlTM3PrecInf4(ue, numTxLyrs, bothCwEnbld)
/* Two-codeword case: index is a direct function of the layer count */
27522 precIdx = numTxLyrs - 2;
27524 else /* one 1 CW transmission */
27533 * @brief Determine Precoding information for TM4 4 TX Antenna.
27537 * Function: rgSCHCmnDlTM4PrecInf4
27538 * Purpose: To determine a logic of deriving precoding index
27539 * information from 36.212 table 5.3.3.1.5-5
27541 * Invoked by: rgSCHCmnDlGetAttrForTM4
27543 * @param[in] RgSchUeCb *ue
27544 * @param[in] U8 numTxLyrs
27545 * @param[in] Bool bothCwEnbld
/* Derive the DCI format 2 precoding index for a TM4 UE on a 4-antenna cell
 * per 36.212 table 5.3.3.1.5-5. Base index 16 signals "per PUSCH feedback";
 * otherwise the reported PMI is used. Each layer-count bucket spans 17
 * entries, hence the *17 stride. */
27550 PRIVATE U8 rgSCHCmnDlTM4PrecInf4
27558 PRIVATE U8 rgSCHCmnDlTM4PrecInf4(cell, ue, numTxLyrs, bothCwEnbld)
27565 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27566 U8 precInfoBaseIdx, precIdx;
27569 precInfoBaseIdx = (ue->mimoInfo.puschFdbkVld)? (16):
27570 (ueDl->mimoInfo.pmi);
/* Two-codeword transmission: buckets keyed off (numTxLyrs-2) */
27573 precIdx = precInfoBaseIdx + (numTxLyrs-2)*17;
27575 else /* one 1 CW transmission */
27577 precInfoBaseIdx += 1;
27578 precIdx = precInfoBaseIdx + (numTxLyrs-1)*17;
27585 * @brief This function determines Transmission attributes
27586 * in case of TM3 scheduling.
27590 * Function: rgSCHCmnDlGetAttrForTM3
27591 * Purpose: Determine retx TB and tx TB based on TB states.
27592 * If forceTD enabled
27593 * perform only retx TB allocation.
27594 * If retxTB == TB2 then DCI Frmt = 2A, RA Type = 0.
27595 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
27597 * perform retxTB allocation on CW1.
27599 * Determine further Scope and Swap Flag attributes
27600 * assuming a 2 CW transmission of RetxTB and new Tx TB.
27601 * If no further scope for new TX allocation
27602 * Allocate only retx TB using 2 layers if
27603 * this TB was previously transmitted using 2 layers AND
27604 * number of Tx antenna ports == 4.
27605 * otherwise do single layer precoding.
27607 * Invoked by: rgSCHCmnDlTM3TxRetx
27609 * @param[in] RgSchUeCb *ue
27610 * @param[in] RgSchDlHqProcCb *proc
27611 * @param[out] U8 *numTxLyrs
27612 * @param[out] Bool *isTraDiv
27613 * @param[out] U8 *prcdngInf
27614 * @param[out] U8 *raType
/* Decide the TM3 TX attributes for a HARQ proc with one NACKED TB:
 * which TB to retransmit, whether a new TX fits on the other TB
 * (*frthrScp), layer count, DCI format, RA type, precoding info and
 * CW-swap. SPS/forceTD/LAA cases fall back to transmit diversity. */
27619 PRIVATE Void rgSCHCmnDlGetAttrForTM3
27623 RgSchDlHqProcCb *proc,
27625 TfuDciFormat *dciFrmt,
27627 RgSchDlHqTbCb **retxTb,
27628 RgSchDlHqTbCb **txTb,
27634 PRIVATE Void rgSCHCmnDlGetAttrForTM3(cell, ue, proc, numTxLyrs, dciFrmt,\
27635 prcdngInf, retxTb, txTb, frthrScp, swpFlg, raType)
27638 RgSchDlHqProcCb *proc;
27640 TfuDciFormat *dciFrmt;
27642 RgSchDlHqTbCb **retxTb;
27643 RgSchDlHqTbCb **txTb;
27649 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27653 /* Avoiding Tx-Retx for LAA cell as firstSchedTime is associated with
27655 /* Integration_fix: SPS Proc shall always have only one Cw */
27657 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
27658 (ueDl->mimoInfo.forceTD))
27660 ||(TRUE == rgSCHLaaSCellEnabled(cell))
27664 if ((ueDl->mimoInfo.forceTD)
27666 || (TRUE == rgSCHLaaSCellEnabled(cell))
27671 /* Transmit Diversity. Format based on dlfsEnabled
27672 * No further scope */
27673 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27675 *retxTb = &proc->tbInfo[0];
27676 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* RETX of TB2 can only be signalled with format 2A / RA type 0 */
27680 *retxTb = &proc->tbInfo[1];
27681 *dciFrmt = TFU_DCI_FORMAT_2A;
27682 *raType = RG_SCH_CMN_RA_TYPE0;
27690 /* Determine the 2 TB transmission attributes */
27691 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
27695 /* Prefer allocation of RETX TB over 2 layers rather than combining
27696 * it with a new TX. */
27697 if ((ueDl->mimoInfo.ri == 2)
27698 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
27700 /* Allocate TB on CW1, using 2 Lyrs,
27701 * Format 2, precoding accordingly */
/* Total layers = RETX TB layers + layers of the other (better) CW */
27707 *numTxLyrs= ((*retxTb)->numLyrs + ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)].noLyr);
/* 3-layer split must be 1+2 per 36.211 table 6.3.3.2-1: swap CWs when
 * the RETX TB's layer count would put 2 layers on CW0 */
27709 if((*retxTb)->tbIdx == 0 && ((*retxTb)->numLyrs == 2 ) && *numTxLyrs ==3)
27712 proc->cwSwpEnabled = TRUE;
27714 else if((*retxTb)->tbIdx == 1 && ((*retxTb)->numLyrs == 1) && *numTxLyrs ==3)
27717 proc->cwSwpEnabled = TRUE;
/* getPrecInfoFunc[0][...] is the TM3 row; index by antenna-port count */
27721 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27722 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])\
27723 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
27724 *dciFrmt = TFU_DCI_FORMAT_2A;
27725 *raType = RG_SCH_CMN_RA_TYPE0;
27727 else /* frthrScp == FALSE */
27729 if (cell->numTxAntPorts == 2)
27731 /* Transmit Diversity */
27733 if ((*retxTb)->tbIdx == 0)
27735 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27739 /* If retxTB is TB2 then use format 2A */
27740 *dciFrmt = TFU_DCI_FORMAT_2A;
27741 *raType = RG_SCH_CMN_RA_TYPE0;
27746 else /* NumAntPorts == 4 */
27748 if ((*retxTb)->numLyrs == 2)
27750 /* Allocate TB on CW1, using 2 Lyrs,
27751 * Format 2A, precoding accordingly */
27753 *dciFrmt = TFU_DCI_FORMAT_2A;
27754 *raType = RG_SCH_CMN_RA_TYPE0;
27755 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27756 *prcdngInf = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, *numTxLyrs, *frthrScp);
27761 /* Transmit Diversity */
27763 if ((*retxTb)->tbIdx == 0)
27765 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
27769 /* If retxTB is TB2 then use format 2A */
27770 *dciFrmt = TFU_DCI_FORMAT_2A;
27771 *raType = RG_SCH_CMN_RA_TYPE0;
27785 * @brief This function determines Transmission attributes
27786 * in case of TM4 scheduling.
27790 * Function: rgSCHCmnDlGetAttrForTM4
27791 * Purpose: Determine retx TB and tx TB based on TB states.
27792 * If forceTD enabled
27793 * perform only retx TB allocation.
27794 * If retxTB == TB2 then DCI Frmt = 2, RA Type = 0.
27795 * Else DCI Frmt and RA Type based on cell->isDlfsEnbld
27797 * perform retxTB allocation on CW1.
27799 * Determine further Scope and Swap Flag attributes
27800 * assuming a 2 CW transmission of RetxTB and new Tx TB.
27801 * If no further scope for new TX allocation
27802 * Allocate only retx TB using 2 layers if
27803 * this TB was previously transmitted using 2 layers AND
27804 * number of Tx antenna ports == 4.
27805 * otherwise do single layer precoding.
27807 * Invoked by: rgSCHCmnDlTM4TxRetx
27809 * @param[in] RgSchUeCb *ue
27810 * @param[in] RgSchDlHqProcCb *proc
27811 * @param[out] U8 *numTxLyrs
27812 * @param[out] Bool *isTraDiv
27813 * @param[out] U8 *prcdngInf
27814 * @param[out] U8 *raType
/* Decide the TM4 TX attributes for a HARQ proc with one NACKED TB:
 * RETX/new-TX TB selection, further-scope flag, layer count, DCI format
 * (always 2 outside the TD fallback), RA type and precoding info.
 * SPS/forceTD/LAA cases fall back to transmit diversity. */
27819 PRIVATE Void rgSCHCmnDlGetAttrForTM4
27823 RgSchDlHqProcCb *proc,
27825 TfuDciFormat *dciFrmt,
27827 RgSchDlHqTbCb **retxTb,
27828 RgSchDlHqTbCb **txTb,
27834 PRIVATE Void rgSCHCmnDlGetAttrForTM4(cell, ue, proc, numTxLyrs, dciFrmt,\
27835 prcdngInf, retxTb, txTb, frthrScp, swpFlg, raType)
27838 RgSchDlHqProcCb *proc;
27840 TfuDciFormat *dciFrmt;
27842 RgSchDlHqTbCb **retxTb;
27843 RgSchDlHqTbCb **txTb;
27849 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
27854 /* Integration_fix: SPS Proc shall always have only one Cw */
27856 if (((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
27857 (ueDl->mimoInfo.forceTD))
27859 ||(TRUE == rgSCHLaaSCellEnabled(cell))
27863 if ((ueDl->mimoInfo.forceTD)
27865 || (TRUE == rgSCHLaaSCellEnabled(cell))
27870 /* Transmit Diversity. Format based on dlfsEnabled
27871 * No further scope */
27872 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27874 *retxTb = &proc->tbInfo[0];
27875 *dciFrmt = rgSCHCmnSlctPdcchFrmt(cell, ue, raType);
/* RETX of TB2 can only be signalled with format 2 / RA type 0 */
27879 *retxTb = &proc->tbInfo[1];
27880 *dciFrmt = TFU_DCI_FORMAT_2;
27881 *raType = RG_SCH_CMN_RA_TYPE0;
/* RI==1: single-layer closed-loop precoding; no room for a second TB */
27889 if (ueDl->mimoInfo.ri == 1)
27891 /* single layer precoding. Format 2.
27892 * No further scope */
27893 if (proc->tbInfo[0].state == HQ_TB_NACKED)
27895 *retxTb = &proc->tbInfo[0];
27899 *retxTb = &proc->tbInfo[1];
27902 *dciFrmt = TFU_DCI_FORMAT_2;
27903 *raType = RG_SCH_CMN_RA_TYPE0;
27905 *prcdngInf = 0; /*When RI= 1*/
27909 /* Determine the 2 TB transmission attributes */
27910 rgSCHCmnDlSMGetAttrForTxRetx(ue, proc, retxTb, txTb, \
27912 *dciFrmt = TFU_DCI_FORMAT_2;
27913 *raType = RG_SCH_CMN_RA_TYPE0;
27916 /* Prefer allocation of RETX TB over 2 layers rather than combining
27917 * it with a new TX. */
27918 if ((ueDl->mimoInfo.ri == 2)
27919 && ((*retxTb)->numLyrs == 2) && (cell->numTxAntPorts == 4))
27921 /* Allocate TB on CW1, using 2 Lyrs,
27922 * Format 2, precoding accordingly */
/* getPrecInfoFunc[1][...] is the TM4 row; index by antenna-port count */
27926 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27927 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])
27928 (cell, ue, ueDl->mimoInfo.ri, *frthrScp);
27930 else /* frthrScp == FALSE */
27932 if (cell->numTxAntPorts == 2)
27934 /* single layer precoding. Format 2. */
27936 *prcdngInf = (getPrecInfoFunc[1][cell->numTxAntPorts/2 - 1])\
27937 (cell, ue, *numTxLyrs, *frthrScp);
27940 else /* NumAntPorts == 4 */
27942 if ((*retxTb)->numLyrs == 2)
27944 /* Allocate TB on CW1, using 2 Lyrs,
27945 * Format 2, precoding accordingly */
27947 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27948 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
27949 (cell, ue, *numTxLyrs, *frthrScp);
27954 /* Allocate TB with 1 lyr precoding,
27955 * Format 2, precoding info accordingly */
27957 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
27958 *prcdngInf = (getPrecInfoFunc[1][precInfoAntIdx])\
27959 (cell, ue, *numTxLyrs, *frthrScp);
27970 * @brief This function handles Retx allocation in case of TM3 UEs
27971 * where previously one of the TBs was NACKED and the other
27972 * TB is either ACKED/WAITING.
27976 * Function: rgSCHCmnDlTM3TxRetx
27977 * Purpose: Determine the TX attributes for TM3 TxRetx Allocation.
27978 * If further Scope for New Tx Allocation on other TB
27979 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
27980 * Add UE to cell wide RetxTx List.
27982 * Perform only RETX alloc'n on CW1.
27983 * Add UE to cell wide Retx List.
27985 * effBo is set to a non-zero value if allocation is
27988 * Invoked by: rgSCHCmnDlAllocRbTM3
27990 * @param[in] RgSchCellCb *cell
27991 * @param[in] RgSchDlSf *subFrm
27992 * @param[in] RgSchUeCb *ue
27993 * @param[in] U32 bo
27994 * @param[out] U32 *effBo
27995 * @param[in] RgSchDlHqProcCb *proc
27996 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM3 Tx+Retx allocation: query rgSCHCmnDlGetAttrForTM3 for attributes,
 * then either allocate RETX+new-TX on 2 CWs (further scope) or RETX alone
 * on one CW, queueing the UE on the matching cell-wide list. Failed
 * allocations go to the non-scheduled RETX list. */
28001 PRIVATE Void rgSCHCmnDlTM3TxRetx
28008 RgSchDlHqProcCb *proc,
28009 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28012 PRIVATE Void rgSCHCmnDlTM3TxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28018 RgSchDlHqProcCb *proc;
28019 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28023 RgSchDlRbAlloc *allocInfo;
28025 RgSchDlHqTbCb *retxTb, *txTb;
28034 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28037 /* Determine the transmission attributes */
28038 rgSCHCmnDlGetAttrForTM3(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
28039 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
28040 &allocInfo->raType);
/* NOTE(review): debug print in a scheduler path — consider guarding or
 * routing through the RLOG facility used elsewhere in this file */
28045 printf ("TX RETX called from proc %d cell %d \n",proc->procId, cell->cellId);
28047 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
28049 if (ret == RFAILED)
28051 /* Allocation couldn't be made for Retx */
28052 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28055 /* Adding UE to RbAllocInfo RETX-TX Lst */
28056 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: retransmit the NACKED TB alone on one codeword */
28060 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
28061 numTxLyrs, &numRb, effBo);
28062 if (ret == RFAILED)
28064 /* Allocation couldn't be made for Retx */
28065 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* SPS HARQ procs are tracked by the SPS module, not the common lists */
28069 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28072 /* Adding UE to allocInfo RETX Lst */
28073 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
28076 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
28077 prcdngInf, numTxLyrs, subFrm);
28084 * @brief This function handles Retx allocation in case of TM4 UEs
28085 * where previously one of the TBs was NACKED and the other
28086 * TB is either ACKED/WAITING.
28090 * Function: rgSCHCmnDlTM4TxRetx
28091 * Purpose: Determine the TX attributes for TM4 TxRetx Allocation.
28092 * If further Scope for New Tx Allocation on other TB
28093 * Perform RETX alloc'n on 1 CW and TX alloc'n on other.
28094 * Add UE to cell wide RetxTx List.
28096 * Perform only RETX alloc'n on CW1.
28097 * Add UE to cell wide Retx List.
28099 * effBo is set to a non-zero value if allocation is
28102 * Invoked by: rgSCHCmnDlAllocRbTM4
28104 * @param[in] RgSchCellCb *cell
28105 * @param[in] RgSchDlSf *subFrm
28106 * @param[in] RgSchUeCb *ue
28107 * @param[in] U32 bo
28108 * @param[out] U32 *effBo
28109 * @param[in] RgSchDlHqProcCb *proc
28110 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 Tx+Retx allocation: query rgSCHCmnDlGetAttrForTM4 for attributes,
 * then either allocate RETX+new-TX on 2 CWs (further scope) or RETX alone
 * on one CW, queueing the UE on the matching cell-wide list. Failed
 * allocations go to the non-scheduled RETX list. */
28115 PRIVATE Void rgSCHCmnDlTM4TxRetx
28122 RgSchDlHqProcCb *proc,
28123 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28126 PRIVATE Void rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28132 RgSchDlHqProcCb *proc;
28133 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28137 RgSchDlRbAlloc *allocInfo;
28139 RgSchDlHqTbCb *retxTb, *txTb;
28147 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28150 /* Determine the transmission attributes */
28151 rgSCHCmnDlGetAttrForTM4(cell, ue, proc, &numTxLyrs, &allocInfo->dciFormat,\
28152 &prcdngInf, &retxTb, &txTb, &frthrScp, &swpFlg,\
28153 &allocInfo->raType);
28157 ret = rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, retxTb, txTb,\
28159 if (ret == RFAILED)
28161 /* Fix : syed If TxRetx allocation failed then add the UE along
28162 * with the proc to the nonSchdTxRetxUeLst and let spfc scheduler
28163 * take care of it during finalization. */
28164 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28167 /* Adding UE to RbAllocInfo RETX-TX Lst */
28168 rgSCHCmnDlRbInfoAddUeRetxTx(cell, cellWdAllocInfo, ue, proc);
/* No further scope: retransmit the NACKED TB alone on one codeword */
28172 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, retxTb,
28173 numTxLyrs, &numRb, effBo);
28174 if (ret == RFAILED)
28176 /* Allocation couldn't be made for Retx */
28177 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
/* SPS HARQ procs are tracked by the SPS module, not the common lists */
28181 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28184 /* Adding UE to allocInfo RETX Lst */
28185 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
/* Terminating ';' added for consistency with rgSCHCmnDlTM3TxRetx and the
 * other RG_SCH_CMN_FILL_DL_TXINFO call sites in this file */
28188 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, swpFlg, \
28189 prcdngInf, numTxLyrs, subFrm);
28196 * @brief This function handles fresh TX allocation in case of TM3 UEs
28197 * where previously both the TBs were ACKED
28202 * Function: rgSCHCmnDlTM3TxTx
28203 * Purpose: Reached here for a TM3 UE's HqP's fresh allocation
28204 * where both the TBs are free for TX scheduling.
28205 * If forceTD flag is set
28206 * perform TD on CW1 with TB1.
28211 * RI layered precoding 2 TB on 2 CW.
28212 * Set precoding info.
28213 * Add UE to cellAllocInfo.
28214 * Fill ueAllocInfo.
28216 * effBo is set to a non-zero value if allocation is
28219 * Invoked by: rgSCHCmnDlAllocRbTM3
28221 * @param[in] RgSchCellCb *cell
28222 * @param[in] RgSchDlSf *subFrm
28223 * @param[in] RgSchUeCb *ue
28224 * @param[in] U32 bo
28225 * @param[out] U32 *effBo
28226 * @param[in] RgSchDlHqProcCb *proc
28227 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Fresh TX allocation for a TM3 HARQ proc with both TBs free: transmit
 * diversity (single CW) when forced, else 2-CW spatial multiplexing with
 * RI layers; fills the UE's allocation scratch pad and cell-wide TX list. */
28232 PRIVATE Void rgSCHCmnDlTM3TxTx
28239 RgSchDlHqProcCb *proc,
28240 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28243 PRIVATE Void rgSCHCmnDlTM3TxTx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28249 RgSchDlHqProcCb *proc;
28250 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28253 RgSchCmnDlUe *ueDl;
28254 RgSchDlRbAlloc *allocInfo;
28263 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28264 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28266 /* Integration_fix: SPS Proc shall always have only one Cw */
28268 #ifdef FOUR_TX_ANTENNA
28269 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28270 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
28272 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28273 (ueDl->mimoInfo.forceTD))
28276 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
28279 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
28280 &allocInfo->raType);
28281 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28282 bo, &numRb, effBo);
28283 if (ret == RFAILED)
28285 /* If allocation couldn't be made then return */
28289 precInfo = 0; /* TD */
28291 else /* Precoding */
28293 allocInfo->dciFormat = TFU_DCI_FORMAT_2A;
28294 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
28296 /* Spatial Multiplexing using 2 CWs */
28297 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
28298 if (ret == RFAILED)
28300 /* If allocation couldn't be made then return */
/* Layer count follows the UE's reported RI; precoder from the TM3 row */
28303 noTxLyrs = ueDl->mimoInfo.ri;
28304 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
28305 RGSCH_ARRAY_BOUND_CHECK(cell->instIdx, getPrecInfoFunc[0], precInfoAntIdx);
28306 precInfo = (getPrecInfoFunc[0][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are tracked by the SPS module, not the common lists */
28310 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28313 /* Adding UE to RbAllocInfo TX Lst */
28314 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28316 /* Fill UE allocInfo scratch pad */
28317 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
28318 precInfo, noTxLyrs, subFrm);
28325 * @brief This function handles fresh TX allocation in case of TM4 UEs
28326 * where previously both the TBs were ACKED
28331 * Function: rgSCHCmnDlTM4TxTx
28332 * Purpose: Reached here for a TM4 UE's HqP's fresh allocation
28333 * where both the TBs are free for TX scheduling.
28334 * If forceTD flag is set
28335 * perform TD on CW1 with TB1.
28341 * Single layer precoding of TB1 on CW1.
28342 * Set precoding info.
28344 * RI layered precoding 2 TB on 2 CW.
28345 * Set precoding info.
28346 * Add UE to cellAllocInfo.
28347 * Fill ueAllocInfo.
28349 * effBo is set to a non-zero value if allocation is
28352 * Invoked by: rgSCHCmnDlAllocRbTM4
28354 * @param[in] RgSchCellCb *cell
28355 * @param[in] RgSchDlSf *subFrm
28356 * @param[in] RgSchUeCb *ue
28357 * @param[in] U32 bo
28358 * @param[out] U32 *effBo
28359 * @param[in] RgSchDlHqProcCb *proc
28360 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* Fresh TX allocation for a TM4 HARQ proc with both TBs free: transmit
 * diversity when forced; single-layer format 2 when RI==1; else 2-CW
 * spatial multiplexing with RI layers. Fills the UE's allocation scratch
 * pad and the cell-wide TX list. */
28365 PRIVATE Void rgSCHCmnDlTM4TxTx
28372 RgSchDlHqProcCb *proc,
28373 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28376 PRIVATE Void rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28382 RgSchDlHqProcCb *proc;
28383 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28386 RgSchCmnDlUe *ueDl;
28387 RgSchDlRbAlloc *allocInfo;
28396 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28397 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28399 /* Integration_fix: SPS Proc shall always have only one Cw */
28401 #ifdef FOUR_TX_ANTENNA
28402 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28403 (ueDl->mimoInfo.forceTD) || proc->hasDcch) /*Chandra Avoid DCCH to be SM */
28405 if ((RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc)) ||
28406 (ueDl->mimoInfo.forceTD))
28409 if (ueDl->mimoInfo.forceTD) /* Transmit Diversity (TD) */
28412 allocInfo->dciFormat = rgSCHCmnSlctPdcchFrmt(cell, ue, \
28413 &allocInfo->raType);
28415 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28416 bo, &numRb, effBo);
28417 if (ret == RFAILED)
28419 /* If allocation couldn't be made then return */
28423 precInfo = 0; /* TD */
28425 else /* Precoding */
28427 allocInfo->dciFormat = TFU_DCI_FORMAT_2;
28428 allocInfo->raType = RG_SCH_CMN_RA_TYPE0;
28430 if (ueDl->mimoInfo.ri == 1)
28432 /* Single Layer SM using FORMAT 2 */
28433 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28434 bo, &numRb, effBo);
28435 if (ret == RFAILED)
28437 /* If allocation couldn't be made then return */
28441 precInfo = 0; /* PrecInfo as 0 for RI=1*/
28445 /* Spatial Multiplexing using 2 CWs */
28446 ret = rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, &numRb, effBo);
28447 if (ret == RFAILED)
28449 /* If allocation couldn't be made then return */
/* Layer count follows the UE's reported RI; precoder from the TM4 row */
28452 noTxLyrs = ueDl->mimoInfo.ri;
28453 precInfoAntIdx = cell->numTxAntPorts/2 - 1;
28454 precInfo = (getPrecInfoFunc[1][precInfoAntIdx])(cell, ue, noTxLyrs, TRUE);
/* SPS HARQ procs are tracked by the SPS module, not the common lists */
28460 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28463 /* Adding UE to RbAllocInfo TX Lst */
28464 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28467 /* Fill UE allocInfo scratch pad */
28468 RG_SCH_CMN_FILL_DL_TXINFO(allocInfo, numRb, FALSE, \
28469 precInfo, noTxLyrs, subFrm);
28476 * @brief This function determines the RBs and Bytes required for BO
28477 * transmission for UEs configured with TM 4.
28481 * Function: rgSCHCmnDlAllocTxRbTM4
28482 * Purpose: Invokes the functionality particular to the
28483 * current state of the TBs of the "proc".
28485 * Reference Parameter effBo is filled with alloced bytes.
28486 * Returns RFAILED if BO not satisfied at all.
28488 * Invoked by: rgSCHCmnDlAllocTxRb
28490 * @param[in] RgSchCellCb *cell
28491 * @param[in] RgSchDlSf *subFrm
28492 * @param[in] RgSchUeCb *ue
28493 * @param[in] U32 bo
28494 * @param[out] U32 *effBo
28495 * @param[in] RgSchDlHqProcCb *proc
28496 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 fresh-TX dispatcher: both TBs of the proc are free here, so hand
 * straight off to the 2-TB TX routine. *effBo reports allocated bytes. */
28501 PRIVATE Void rgSCHCmnDlAllocTxRbTM4
28508 RgSchDlHqProcCb *proc,
28509 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28512 PRIVATE Void rgSCHCmnDlAllocTxRbTM4(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28518 RgSchDlHqProcCb *proc;
28519 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28523 /* Both TBs free for TX allocation */
28524 rgSCHCmnDlTM4TxTx(cell, subFrm, ue, bo, effBo,\
28525 proc, cellWdAllocInfo);
28532 * @brief This function determines the RBs and Bytes required for BO
28533 * retransmission for UEs configured with TM 4.
28537 * Function: rgSCHCmnDlAllocRetxRbTM4
28538 * Purpose: Invokes the functionality particular to the
28539 * current state of the TBs of the "proc".
28541 * Reference Parameter effBo is filled with alloced bytes.
28542 * Returns RFAILED if BO not satisfied at all.
28544 * Invoked by: rgSCHCmnDlAllocRetxRb
28546 * @param[in] RgSchCellCb *cell
28547 * @param[in] RgSchDlSf *subFrm
28548 * @param[in] RgSchUeCb *ue
28549 * @param[in] U32 bo
28550 * @param[out] U32 *effBo
28551 * @param[in] RgSchDlHqProcCb *proc
28552 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM4 RETX dispatcher: route to the both-TBs-NACKED routine or the
 * one-NACKED (Tx+Retx) routine based on TB states. *effBo reports
 * allocated bytes. */
28557 PRIVATE Void rgSCHCmnDlAllocRetxRbTM4
28564 RgSchDlHqProcCb *proc,
28565 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28568 PRIVATE Void rgSCHCmnDlAllocRetxRbTM4(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28574 RgSchDlHqProcCb *proc;
28575 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28579 if ((proc->tbInfo[0].state == HQ_TB_NACKED) &&
28580 (proc->tbInfo[1].state == HQ_TB_NACKED))
28582 /* Both TBs require RETX allocation */
28583 rgSCHCmnDlTM4RetxRetx(cell, subFrm, ue, bo, effBo,\
28584 proc, cellWdAllocInfo);
28588 /* One of the TBs need RETX allocation. Other TB may/maynot
28589 * be available for new TX allocation. */
28590 rgSCHCmnDlTM4TxRetx(cell, subFrm, ue, bo, effBo,\
28591 proc, cellWdAllocInfo);
28600 * @brief This function determines the RBs and Bytes required for BO
28601 * transmission for UEs configured with TM 5.
28605 * Function: rgSCHCmnDlAllocTxRbTM5
28608 * Reference Parameter effBo is filled with alloced bytes.
28609 * Returns RFAILED if BO not satisfied at all.
28611 * Invoked by: rgSCHCmnDlAllocTxRb
28613 * @param[in] RgSchCellCb *cell
28614 * @param[in] RgSchDlSf *subFrm
28615 * @param[in] RgSchUeCb *ue
28616 * @param[in] U32 bo
28617 * @param[out] U32 *effBo
28618 * @param[in] RgSchDlHqProcCb *proc
28619 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM5 is not supported by this scheduler: reaching here is an error, so
 * only log (under debug error class) and fall through. */
28624 PRIVATE Void rgSCHCmnDlAllocTxRbTM5
28631 RgSchDlHqProcCb *proc,
28632 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28635 PRIVATE Void rgSCHCmnDlAllocTxRbTM5(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28641 RgSchDlHqProcCb *proc;
28642 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28645 #if (ERRCLASS & ERRCLS_DEBUG)
28646 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
28653 * @brief This function determines the RBs and Bytes required for BO
28654 * retransmission for UEs configured with TM 5.
28658 * Function: rgSCHCmnDlAllocRetxRbTM5
28661 * Reference Parameter effBo is filled with alloced bytes.
28662 * Returns RFAILED if BO not satisfied at all.
28664 * Invoked by: rgSCHCmnDlAllocRetxRb
28666 * @param[in] RgSchCellCb *cell
28667 * @param[in] RgSchDlSf *subFrm
28668 * @param[in] RgSchUeCb *ue
28669 * @param[in] U32 bo
28670 * @param[out] U32 *effBo
28671 * @param[in] RgSchDlHqProcCb *proc
28672 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM5 RETX counterpart of the stub above: unsupported mode, log-only. */
28677 PRIVATE Void rgSCHCmnDlAllocRetxRbTM5
28684 RgSchDlHqProcCb *proc,
28685 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28688 PRIVATE Void rgSCHCmnDlAllocRetxRbTM5(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28694 RgSchDlHqProcCb *proc;
28695 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28698 #if (ERRCLASS & ERRCLS_DEBUG)
28699 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Invalid TM 5 for CRNTI:%d",ue->ueId);
28707 * @brief This function determines the RBs and Bytes required for BO
28708 * transmission for UEs configured with TM 6.
28712 * Function: rgSCHCmnDlAllocTxRbTM6
28715 * Reference Parameter effBo is filled with alloced bytes.
28716 * Returns RFAILED if BO not satisfied at all.
28718 * Invoked by: rgSCHCmnDlAllocTxRb
28720 * @param[in] RgSchCellCb *cell
28721 * @param[in] RgSchDlSf *subFrm
28722 * @param[in] RgSchUeCb *ue
28723 * @param[in] U32 bo
28724 * @param[out] U32 *effBo
28725 * @param[in] RgSchDlHqProcCb *proc
28726 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM6 fresh-TX allocation: format 1A (TD fallback) or 1B with the
 * single-layer PMI packed into precIdxInfo; allocates one CW for TB1 and
 * queues the UE on the cell-wide TX list. *effBo reports allocated bytes. */
28731 PRIVATE Void rgSCHCmnDlAllocTxRbTM6
28738 RgSchDlHqProcCb *proc,
28739 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28742 PRIVATE Void rgSCHCmnDlAllocTxRbTM6(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28748 RgSchDlHqProcCb *proc;
28749 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28752 RgSchDlRbAlloc *allocInfo;
28753 RgSchCmnDlUe *ueDl;
28759 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28760 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28762 if (ueDl->mimoInfo.forceTD)
28764 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
28765 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28769 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
28770 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28771 /* Fill precoding information for FORMAT 1B */
28772 /* First 4 least significant bits to indicate PMI.
28773 * 4th most significant corresponds to pmi Confirmation.
/* Bit 4 = PMI-confirmation flag (from PUSCH feedback), bits 0-3 = PMI */
28775 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
28776 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
28778 ret = rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, &proc->tbInfo[0],\
28779 bo, &numRb, effBo);
28780 if (ret == RFAILED)
28782 /* If allocation couldn't be made then return */
/* SPS HARQ procs are tracked by the SPS module, not the common lists */
28787 if (!RG_SCH_CMN_SPS_DL_IS_SPS_HQP(proc))
28790 /* Adding UE to RbAllocInfo TX Lst */
28791 rgSCHCmnDlRbInfoAddUeTx(cell, cellWdAllocInfo, ue, proc);
28793 /* Fill UE alloc Info */
28794 allocInfo->rbsReq = numRb;
28795 allocInfo->dlSf = subFrm;
28801 * @brief This function determines the RBs and Bytes required for BO
28802 * retransmission for UEs configured with TM 6.
28806 * Function: rgSCHCmnDlAllocRetxRbTM6
28809 * Reference Parameter effBo is filled with alloced bytes.
28810 * Returns RFAILED if BO not satisfied at all.
28812 * Invoked by: rgSCHCmnDlAllocRetxRb
28814 * @param[in] RgSchCellCb *cell
28815 * @param[in] RgSchDlSf *subFrm
28816 * @param[in] RgSchUeCb *ue
28817 * @param[in] U32 bo
28818 * @param[out] U32 *effBo
28819 * @param[in] RgSchDlHqProcCb *proc
28820 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
/* TM6 RETX allocation: same DCI/precoding selection as the TX variant,
 * then a one-CW RETX allocation for TB1; on failure the UE goes to the
 * non-scheduled RETX list. *effBo reports allocated bytes. */
28825 PRIVATE Void rgSCHCmnDlAllocRetxRbTM6
28832 RgSchDlHqProcCb *proc,
28833 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28836 PRIVATE Void rgSCHCmnDlAllocRetxRbTM6(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28842 RgSchDlHqProcCb *proc;
28843 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
28846 RgSchDlRbAlloc *allocInfo;
28847 RgSchCmnDlUe *ueDl;
28853 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
28854 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
28856 if (ueDl->mimoInfo.forceTD)
28858 allocInfo->dciFormat = TFU_DCI_FORMAT_1A;
28859 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28863 allocInfo->dciFormat = TFU_DCI_FORMAT_1B;
28864 allocInfo->raType = RG_SCH_CMN_RA_TYPE2;
28865 /* Fill precoding information for FORMAT 1B */
28866 /* First 4 least significant bits to indicate PMI.
28867 * 4th most significant corresponds to pmi Confirmation.
/* Bit 4 = PMI-confirmation flag (from PUSCH feedback), bits 0-3 = PMI */
28869 allocInfo->mimoAllocInfo.precIdxInfo |= ue->mimoInfo.puschFdbkVld << 4;
28870 allocInfo->mimoAllocInfo.precIdxInfo |= ueDl->mimoInfo.pmi;
28873 /* Get the Allocation in terms of RBs that are required for
28874 * this retx of TB1 */
28875 ret = rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, &proc->tbInfo[0],
28877 if (ret == RFAILED)
28879 /* Allocation couldn't be made for Retx */
28880 rgSCHCmnDlAdd2NonSchdRetxLst(cellWdAllocInfo, ue, proc);
28883 /* Adding UE to allocInfo RETX Lst */
28884 rgSCHCmnDlRbInfoAddUeRetx(cell, cellWdAllocInfo, ue, proc);
28885 /* Fill UE alloc Info */
28886 allocInfo->rbsReq = numRb;
28887 allocInfo->dlSf = subFrm;
28893 * @brief This function determines the RBs and Bytes required for BO
28894 * transmission for UEs configured with TM 7.
28898 * Function: rgSCHCmnDlAllocTxRbTM7
28901 * Reference Parameter effBo is filled with allocated bytes.
28902 * Returns RFAILED if BO not satisfied at all.
28904 * Invoked by: rgSCHCmnDlAllocTxRb
28906 * @param[in] RgSchCellCb *cell
28907 * @param[in] RgSchDlSf *subFrm
28908 * @param[in] RgSchUeCb *ue
28909 * @param[in] U32 bo
28910 * @param[out] U32 *effBo
28911 * @param[in] RgSchDlHqProcCb *proc
28912 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28917 PRIVATE Void rgSCHCmnDlAllocTxRbTM7
28924 RgSchDlHqProcCb *proc,
28925 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28928 PRIVATE Void rgSCHCmnDlAllocTxRbTM7(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28934 RgSchDlHqProcCb *proc;
28935 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM7 carries a single TB on one codeword: delegate to the 1-TB/1-CW allocator. */
28938 rgSCHCmnDlAllocTxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
28944 * @brief This function determines the RBs and Bytes required for BO
28945 * retransmission for UEs configured with TM 7.
28949 * Function: rgSCHCmnDlAllocRetxRbTM7
28952 * Reference Parameter effBo is filled with allocated bytes.
28953 * Returns RFAILED if BO not satisfied at all.
28955 * Invoked by: rgSCHCmnDlAllocRetxRb
28957 * @param[in] RgSchCellCb *cell
28958 * @param[in] RgSchDlSf *subFrm
28959 * @param[in] RgSchUeCb *ue
28960 * @param[in] U32 bo
28961 * @param[out] U32 *effBo
28962 * @param[in] RgSchDlHqProcCb *proc
28963 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28968 PRIVATE Void rgSCHCmnDlAllocRetxRbTM7
28975 RgSchDlHqProcCb *proc,
28976 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
28979 PRIVATE Void rgSCHCmnDlAllocRetxRbTM7(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
28985 RgSchDlHqProcCb *proc;
28986 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
/* TM7 retx also uses a single TB on one codeword: delegate to the 1-TB/1-CW retx allocator. */
28989 rgSCHCmnDlAllocRetxRb1Tb1Cw(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo);
28995 * @brief This function invokes the TM specific DL TX RB Allocation routine.
28999 * Function: rgSCHCmnDlAllocTxRb
29000 * Purpose: This function invokes the TM specific
29001 * DL TX RB Allocation routine.
29003 * Invoked by: Specific Schedulers
29005 * @param[in] RgSchCellCb *cell
29006 * @param[in] RgSchDlSf *subFrm
29007 * @param[in] RgSchUeCb *ue
29008 * @param[in] U32 bo
29009 * @param[out] U32 *effBo
29010 * @param[in] RgSchDlHqProcCb *proc
29011 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29016 S16 rgSCHCmnDlAllocTxRb
29023 RgSchDlHqProcCb *proc,
29024 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29027 S16 rgSCHCmnDlAllocTxRb(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
29033 RgSchDlHqProcCb *proc;
29034 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
29037 U32 newSchBits = 0;
29038 U32 prevSchBits = 0;
29039 RgSchDlRbAlloc *allocInfo;
/* New TTI for this UE: reset the per-TTI aggregate TB bit counter. */
29042 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
29044 ue->dl.aggTbBits = 0;
29048 /* Calculate totals bits previously allocated */
29049 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29050 if (allocInfo->tbInfo[0].schdlngForTb)
29052 prevSchBits += allocInfo->tbInfo[0].bytesReq;
29054 if (allocInfo->tbInfo[1].schdlngForTb)
29056 prevSchBits += allocInfo->tbInfo[1].bytesReq;
29059 /* Call TM specific RB allocation routine */
/* NOTE(review): txMode is assumed to be >= 1 so the table index does not
 * underflow -- confirm against dlAllocTxRbFunc[] sizing. */
29060 (dlAllocTxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
29061 proc, cellWdAllocInfo);
29065 /* Calculate totals bits newly allocated */
29066 if (allocInfo->tbInfo[0].schdlngForTb)
29068 newSchBits += allocInfo->tbInfo[0].bytesReq;
29070 if (allocInfo->tbInfo[1].schdlngForTb)
29072 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* prevSchBits/newSchBits actually accumulate bytesReq (bytes); the delta
 * is converted to bits here via the *8. */
29074 if (newSchBits > prevSchBits)
29076 ue->dl.aggTbBits += ((newSchBits - prevSchBits) * 8);
29077 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
29084 /* DwPTS Scheduling Changes Start */
29087 * @brief Retransmit decision for TDD. Retx is avoided in below cases
29088 * 1) DL Sf -> Spl Sf
29089 * 2) DL SF -> DL SF 0
29093 * Function: rgSCHCmnRetxAvoidTdd
29094 * Purpose: Avoid allocating RETX for cases 1, 2
29096 * Invoked by: rgSCHCmnRetxAvoidTdd
29098 * @param[in] RgSchDlSf *curSf
29099 * @param[in] RgSchCellCb *cell
29100 * @param[in] RgSchDlHqProcCb *proc
29105 Bool rgSCHCmnRetxAvoidTdd
29109 RgSchDlHqProcCb *proc
29112 Bool rgSCHCmnRetxAvoidTdd(curSf, cell, proc)
29115 RgSchDlHqProcCb *proc;
29118 RgSchTddSfType txSfType = 0;
29121 /* Get the RBs of TB that will be retransmitted */
29122 if (proc->tbInfo[0].state == HQ_TB_NACKED)
29124 txSfType = proc->tbInfo[0].sfType;
29126 #ifdef XEON_SPECIFIC_CHANGES
29127 #ifndef XEON_TDD_SPCL
29128 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
29129 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
29136 if (proc->tbInfo[1].state == HQ_TB_NACKED)
29138 /* Select the TxSf with the highest num of possible REs
29139 * In ascending order -> 1) SPL SF 2) DL_SF_0 3) DL_SF */
29140 txSfType = RGSCH_MAX(txSfType, proc->tbInfo[1].sfType);
29142 #ifdef XEON_SPECIFIC_CHANGES
29143 #ifndef XEON_TDD_SPCL
29144 /* Avoid re-transmission on Normal SF when the corresponding TB was transmitted on SPCL SF */
29145 if(txSfType <= RG_SCH_SPL_SF_DATA && curSf->sfType >= RG_SCH_DL_SF_0)
/* Retx subframe must offer at least as many REs as the original tx subframe. */
29153 if (txSfType > curSf->sfType)
29167 * @brief Avoid allocating RETX incase of collision
29168 * with reserved resources for BCH/PSS/SSS occassions.
29172 * Function: rgSCHCmnRetxAllocAvoid
29173 * Purpose: Avoid allocating RETX incase of collision
29174 * with reserved resources for BCH/PSS/SSS occassions
29176 * Invoked by: rgSCHCmnDlAllocRetxRb
29178 * @param[in] RgSchDlSf *subFrm
29179 * @param[in] RgSchUeCb *ue
29180 * @param[in] RgSchDlHqProcCb *proc
29185 Bool rgSCHCmnRetxAllocAvoid
29189 RgSchDlHqProcCb *proc
29192 Bool rgSCHCmnRetxAllocAvoid(subFrm, cell, proc)
29195 RgSchDlHqProcCb *proc;
29201 if (proc->tbInfo[0].state == HQ_TB_NACKED)
29203 reqRbs = proc->tbInfo[0].dlGrnt.numRb;
29207 reqRbs = proc->tbInfo[1].dlGrnt.numRb;
29209 /* Consider the dlGrnt.numRb of the Retransmitting proc->tbInfo
29210 * and current available RBs to determine if this RETX TB
29211 * will collide with the BCH/PSS/SSS occassion */
29212 if (subFrm->sfNum % 5 == 0)
29214 if ((subFrm->bwAssigned < cell->pbchRbEnd) &&
29215 (((subFrm->bwAssigned + reqRbs) - cell->pbchRbStart) > 0))
29227 * @brief This function invokes the TM specific DL RETX RB Allocation routine.
29231 * Function: rgSCHCmnDlAllocRetxRb
29232 * Purpose: This function invokes the TM specific
29233 * DL RETX RB Allocation routine.
29235 * Invoked by: Specific Schedulers
29237 * @param[in] RgSchCellCb *cell
29238 * @param[in] RgSchDlSf *subFrm
29239 * @param[in] RgSchUeCb *ue
29240 * @param[in] U32 bo
29241 * @param[out] U32 *effBo
29242 * @param[in] RgSchDlHqProcCb *proc
29243 * @param[out] RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29248 S16 rgSCHCmnDlAllocRetxRb
29255 RgSchDlHqProcCb *proc,
29256 RgSchCmnDlRbAllocInfo *cellWdAllocInfo
29259 S16 rgSCHCmnDlAllocRetxRb(cell, subFrm, ue, bo, effBo, proc, cellWdAllocInfo)
29265 RgSchDlHqProcCb *proc;
29266 RgSchCmnDlRbAllocInfo *cellWdAllocInfo;
29269 U32 newSchBits = 0;
29270 RgSchDlRbAlloc *allocInfo;
/* New TTI for this UE: reset the per-TTI aggregate TB bit counter. */
29273 if ( !RGSCH_TIMEINFO_SAME((cell->crntTime),(ue->dl.lstSchTime) ))
29275 ue->dl.aggTbBits = 0;
29279 /* Check for DL BW exhaustion */
29280 if (subFrm->bw <= subFrm->bwAssigned)
29284 /* Call TM specific RB allocation routine */
29285 (dlAllocRetxRbFunc[ue->mimoInfo.txMode - 1])(cell, subFrm, ue, bo, effBo, \
29286 proc, cellWdAllocInfo);
29290 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29291 /* Calculate totals bits newly allocated */
29292 if (allocInfo->tbInfo[0].schdlngForTb)
29294 newSchBits += allocInfo->tbInfo[0].bytesReq;
29296 if (allocInfo->tbInfo[1].schdlngForTb)
29298 newSchBits += allocInfo->tbInfo[1].bytesReq;
/* newSchBits accumulates bytesReq (bytes); converted to bits via the *8. */
29300 ue->dl.aggTbBits += (newSchBits * 8);
29301 RGSCHCPYTIMEINFO((cell->crntTime),(ue->dl.lstSchTime))
29309 * @brief This function determines the RBs and Bytes required for
29310 * Transmission on 1 CW.
29314 * Function: rgSCHCmnDlAlloc1CwTxRb
29315 * Purpose: This function determines the RBs and Bytes required
29316 * for Transmission of DL SVC BO on 1 CW.
29317 * Also, takes care of SVC by SVC allocation by tracking
29318 * previous SVCs allocations.
29319 * Returns RFAILED if BO not satisfied at all.
29321 * Invoked by: DL UE Allocation
29323 * @param[in] RgSchCellCb *cell
29324 * @param[in] RgSchDlSf *subFrm
29325 * @param[in] RgSchUeCb *ue
29326 * @param[in] RgSchDlHqTbCb *tbInfo
29327 * @param[in] U32 bo
29328 * @param[out] U8 *numRb
29329 * @param[out] U32 *effBo
29334 PRIVATE S16 rgSCHCmnDlAlloc1CwTxRb
29339 RgSchDlHqTbCb *tbInfo,
29345 PRIVATE S16 rgSCHCmnDlAlloc1CwTxRb(cell, subFrm, ue, tbInfo, bo, numRb, effBo)
29349 RgSchDlHqTbCb *tbInfo;
29358 RgSchCmnDlUe *ueDl;
29359 RgSchDlRbAlloc *allocInfo;
29362 /* Correcting wrap around issue.
29363 * This change has been done at multiple places in this function.*/
29367 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29368 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29369 oldReq = ueDl->outStndAlloc;
29372 //TODO_SID: Currently setting max Tb size wrt to 5GTF TM3
/* 5GTF path: MCS/rank come from the UE's 5GTF control block; RB count is
 * pinned to the full MAX_5GTF_PRBS rather than derived from REs. */
29373 iTbs = ue->ue5gtfCb.mcs;
29374 ueDl->maxTbSz = MAX_5GTF_TB_SIZE * ue->ue5gtfCb.rank;
29375 ueDl->maxRb = MAX_5GTF_PRBS;
29377 ueDl->outStndAlloc += bo;
29378 /* consider Cumulative amount of this BO and bytes so far allocated */
29379 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbSz/8);
29380 /* Get the number of REs needed for this bo. */
29381 //noRes = ((bo * 8 * 1024) / eff);
29383 /* Get the number of RBs needed for this transmission */
29384 /* Number of RBs = No of REs / No of REs per RB */
29385 //tempNumRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
29386 tempNumRb = MAX_5GTF_PRBS;
29387 tbSz = RGSCH_MIN(bo, (rgSch5gtfTbSzTbl[iTbs]/8) * ue->ue5gtfCb.rank);
29389 /* DwPts Scheduling Changes End */
/* effBo = bytes newly satisfied by this allocation, net of what was
 * already outstanding (oldReq), capped by the request. */
29390 *effBo = RGSCH_MIN(tbSz - oldReq, reqBytes);
29393 //RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs);
29398 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbSz, \
29399 iTbs, imcs, tbInfo, ue->ue5gtfCb.rank);
29400 *numRb = (U8) tempNumRb;
29402 /* Update the subframe Allocated BW field */
29403 subFrm->bwAssigned = subFrm->bwAssigned + tempNumRb - allocInfo->rbsReq;
29410 * @brief This function is invoked in the event of any TB's allocation
29411 * being underutilized by the specific scheduler. Here we reduce iMcs
29412 * to increase redundancy and hence increase reception quality at UE.
29416 * Function: rgSCHCmnRdcImcsTxTb
29417 * Purpose: This function shall reduce the iMcs in accordance with
29418 * the total consumed bytes by the UE at allocation
29421 * Invoked by: UE DL Allocation finalization routine
29422 * of specific scheduler.
29424 * @param[in] RgSchDlRbAlloc *allocInfo
29425 * @param[in] U8 tbInfoIdx
29426 * @param[in] U32 cnsmdBytes
29431 Void rgSCHCmnRdcImcsTxTb
29433 RgSchDlRbAlloc *allocInfo,
29438 Void rgSCHCmnRdcImcsTxTb(allocInfo, tbInfoIdx, cnsmdBytes)
29439 RgSchDlRbAlloc *allocInfo;
29445 /*The below functionality is not needed.*/
29451 iTbs = allocInfo->tbInfo[tbInfoIdx].iTbs;
29452 noLyr = allocInfo->tbInfo[tbInfoIdx].noLyr;
29453 numRb = allocInfo->rbsAlloc;
/* If the TBS at the current iTbs exactly matches the consumed bytes,
 * no MCS reduction is needed. */
29456 if ((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) == cnsmdBytes)
29461 /* Get iTbs as suitable for the consumed bytes */
/* Step iTbs down until the table TBS no longer exceeds the consumed bytes. */
29462 while((rgTbSzTbl[noLyr-1][iTbs][numRb-1]/8) > cnsmdBytes)
29466 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].\
29467 tbCb->dlGrnt.iMcs);
29473 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, allocInfo->tbInfo[tbInfoIdx].tbCb->dlGrnt.iMcs);
29480 * @brief This function determines the RBs and Bytes required for
29481 * Transmission on 2 CWs.
29485 * Function: rgSCHCmnDlAlloc2CwTxRb
29486 * Purpose: This function determines the RBs and Bytes required
29487 * for Transmission of DL SVC BO on 2 CWs.
29488 * Also, takes care of SVC by SVC allocation by tracking
29489 * previous SVCs allocations.
29490 * Returns RFAILED if BO not satisfied at all.
29492 * Invoked by: TM3 and TM4 DL UE Allocation
29494 * @param[in] RgSchCellCb *cell
29495 * @param[in] RgSchDlSf *subFrm
29496 * @param[in] RgSchUeCb *ue
29497 * @param[in] RgSchDlHqProcCb *proc
29498 * @param[in] RgSchDlHqProcCb bo
29499 * @param[out] U8 *numRb
29500 * @param[out] U32 *effBo
29505 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRb
29510 RgSchDlHqProcCb *proc,
29516 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRb(cell, subFrm, ue, proc, bo, numRbRef, effBo)
29520 RgSchDlHqProcCb *proc;
29532 RgSchCmnDlCell *cellDl;
29533 RgSchCmnDlUe *ueDl;
29534 RgSchDlRbAlloc *allocInfo;
29537 /* Fix: MUE_PERTTI_DL */
29539 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
29540 U8 cfi = cellSch->dl.currCfi;
29549 cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
29550 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29551 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29552 oldReq = ueDl->outStndAlloc;
29555 if (ueDl->maxTbBits > ue->dl.aggTbBits)
29557 availBits = ueDl->maxTbBits - ue->dl.aggTbBits;
29559 /* check if we can further allocate to this UE */
29560 if ((ue->dl.aggTbBits >= ueDl->maxTbBits) ||
29561 (allocInfo->tbInfo[0].bytesReq >= ueDl->maxTbSz/8) ||
29562 (allocInfo->tbInfo[1].bytesReq >= ueDl->maxTbSz/8) ||
29563 (allocInfo->rbsReq >= ueDl->maxRb))
29565 RLOG_ARG0(L_DEBUG,DBG_CELLID,cell->cellId,
29566 "rgSCHCmnDlAllocRb(): UEs max allocation exceed");
29570 noLyr1 = ueDl->mimoInfo.cwInfo[0].noLyr;
29571 noLyr2 = ueDl->mimoInfo.cwInfo[1].noLyr;
29573 /* If there is no CFI change, continue to use the BLER based
/* ...iTbs; on a CFI change, re-derive iTbs for each CW from its CQI. */
29575 if (ueDl->lastCfi == cfi)
29577 iTbs1 = ueDl->mimoInfo.cwInfo[0].iTbs[noLyr1 - 1];
29578 iTbs2 = ueDl->mimoInfo.cwInfo[1].iTbs[noLyr2 - 1];
29582 U8 cqi = ueDl->mimoInfo.cwInfo[0].cqi;
29584 iTbs1 = (U8) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 0, noLyr1);
29586 iTbs1 = (U8) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 0, noLyr1);
29589 cqi = ueDl->mimoInfo.cwInfo[1].cqi;
29591 iTbs2 = (U8) rgSchCmnFetchItbs(cell, ueDl, subFrm, cqi, cfi, 1, noLyr2);
29593 iTbs2 = (U8) rgSchCmnFetchItbs(cell, ueDl, cqi, cfi, 1, noLyr2);
29597 /*ccpu00131191 and ccpu00131317 - Fix for RRC Reconfig failure
29598 * issue for VoLTE call */
29599 //if ((proc->hasDcch) || (TRUE == rgSCHLaaSCellEnabled(cell)))
29619 else if(!cellSch->dl.isDlFreqSel)
29622 /* for Tdd reduce iTbs only for SF0. SF5 contains only
29623 * SSS and can be ignored */
29624 if (subFrm->sfNum == 0)
29626 (iTbs1 > 1)? (iTbs1 -= 1) : (iTbs1 = 0);
29627 (iTbs2 > 1)? (iTbs2 -= 1) : (iTbs2 = 0);
29629 /* For SF 3 and 8 CRC is getting failed in DL.
29630 Need to do proper fix after the replay from
29632 #ifdef CA_PHY_BRDCM_61765
29633 if ((subFrm->sfNum == 3) || (subFrm->sfNum == 8))
29635 (iTbs1 > 2)? (iTbs1 -= 2) : (iTbs1 = 0);
29636 (iTbs2 > 2)? (iTbs2 -= 2) : (iTbs2 = 0);
/* Special subframe carrying data: use the SPS special-subframe CFI. */
29644 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
29646 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
29650 eff1 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr1 - 1][cfi]))[iTbs1];
29651 eff2 = (*(RgSchCmnTbSzEff *)(cellSch->dl.cqiToEffTbl[noLyr2 - 1][cfi]))[iTbs2];
29654 bo = RGSCH_MIN(bo,availBits/8);
29655 ueDl->outStndAlloc += bo;
29656 /* consider Cumulative amount of this BO and bytes so far allocated */
29657 bo = RGSCH_MIN(ueDl->outStndAlloc, ueDl->maxTbBits/8);
/* Split the BO across the two CWs weighted by their spectral efficiency,
 * each share floored at the minimum grant size and capped at max TBS. */
29658 bo = RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff1)/(eff1+eff2)),
29660 RGSCH_MIN(RGSCH_MAX(RGSCH_CMN_MIN_GRNT_HDR, (bo*eff2)/(eff1+eff2)),
29661 (ueDl->maxTbSz)/8) +
29662 1; /* Add 1 to adjust the truncation at weighted averaging */
29663 /* Get the number of REs needed for this bo. */
29664 noRes = ((bo * 8 * 1024) / (eff1 + eff2));
29666 /* Get the number of RBs needed for this transmission */
29667 /* Number of RBs = No of REs / No of REs per RB */
29668 numRb = RGSCH_CEIL(noRes, cellDl->noResPerRb[cfi]);
29669 /* Cannot exceed the maximum number of RBs per UE */
29670 if (numRb > ueDl->maxRb)
29672 numRb = ueDl->maxRb;
29677 if(RFAILED == rgSCHLaaCmn2CwAdjustPrb(allocInfo, boTmp, &numRb, ueDl, noLyr1, noLyr2, iTbs1, iTbs2))
/* Grow the RB count while both CW TB sizes stay within limits and the
 * combined TBS still fits the BO. */
29680 while ((numRb <= ueDl->maxRb) &&
29681 (rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1] <= ueDl->maxTbSz) &&
29682 (rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1] <= ueDl->maxTbSz) &&
29683 ((rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8 +
29684 rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8) <= bo))
29690 availBw = subFrm->bw - subFrm->bwAssigned;
29691 /* Cannot exceed the total number of RBs in the cell */
29692 if ((S16)(numRb - allocInfo->rbsReq) > availBw)
29694 numRb = availBw + allocInfo->rbsReq;
29696 tb1Sz = rgTbSzTbl[noLyr1 - 1][iTbs1][numRb-1]/8;
29697 tb2Sz = rgTbSzTbl[noLyr2 - 1][iTbs2][numRb-1]/8;
29698 /* DwPts Scheduling Changes Start */
29700 if(subFrm->sfType == RG_SCH_SPL_SF_DATA)
29702 /* Max Rb for Special Sf is approximated as 4/3 of maxRb */
29703 rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, (U8*)&numRb, ueDl->maxRb*4/3,
29704 &iTbs1, &iTbs2, noLyr1,
29705 noLyr2, &tb1Sz, &tb2Sz, cfi);
29706 /* Check for available Bw */
29707 if ((S16)numRb - allocInfo->rbsReq > availBw)
29709 numRb = availBw + allocInfo->rbsReq;
29710 tb1Sz = rgTbSzTbl[noLyr1-1][iTbs1][RGSCH_MAX(numRb*3/4,1)-1]/8;
29711 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs2][RGSCH_MAX(numRb*3/4,1)-1]/8;
29715 /* DwPts Scheduling Changes End */
29716 /* Update the subframe Allocated BW field */
29717 subFrm->bwAssigned = subFrm->bwAssigned + numRb - \
29720 *effBo = RGSCH_MIN((tb1Sz + tb2Sz) - oldReq, reqBytes);
29723 if (ROK != rgSCHLaaCmn2TBPrbCheck(allocInfo, tb1Sz, tb2Sz, boTmp, effBo, iTbs1, iTbs2, numRb, proc))
29729 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs1, imcs1);
29730 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs2, imcs2);
29731 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tb1Sz, \
29732 iTbs1, imcs1, &proc->tbInfo[0], noLyr1);
29733 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
29734 iTbs2, imcs2, &proc->tbInfo[1], noLyr2);
29735 *numRbRef = (U8)numRb;
29743 * @brief This function determines the RBs and Bytes required for
29744 * Transmission & Retransmission on 2 CWs.
29748 * Function: rgSCHCmnDlAlloc2CwTxRetxRb
29749 * Purpose: This function determines the RBs and Bytes required
29750 * for Transmission & Retransmission on 2 CWs. Allocate
29751 * RETX TB on a better CW and restrict new TX TB by
29753 * Returns RFAILED if BO not satisfied at all.
29755 * Invoked by: TM3 and TM4 DL UE Allocation
29757 * @param[in] RgSchCellCb *cell
29758 * @param[in] RgSchDlSf *subFrm
29759 * @param[in] RgSchUeCb *ue
29760 * @param[in] RgSchDlHqTbCb *reTxTb
29761 * @param[in] RgSchDlHqTbCb *txTb
29762 * @param[out] U8 *numRb
29763 * @param[out] U32 *effBo
29768 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRetxRb
29773 RgSchDlHqTbCb *reTxTb,
29774 RgSchDlHqTbCb *txTb,
29779 PRIVATE S16 rgSCHCmnDlAlloc2CwTxRetxRb(cell, subFrm, ue, reTxTb, txTb, numRb,\
29784 RgSchDlHqTbCb *reTxTb;
29785 RgSchDlHqTbCb *txTb;
29790 RgSchCmnDlUe *ueDl;
29791 RgSchDlRbAlloc *allocInfo;
29795 RgSchCmnDlUeCwInfo *otherCw;
29797 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
29798 U8 cfi = cellDl->currCfi;
29802 ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
29803 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
/* The RETX TB rides on the better CW; the new TX TB goes on the other CW. */
29804 otherCw = &ueDl->mimoInfo.cwInfo[!(ueDl->mimoInfo.btrCwIdx)];
29807 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
29808 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
29810 availBw = subFrm->bw - subFrm->bwAssigned;
29811 *numRb = reTxTb->dlGrnt.numRb;
29813 #ifdef XEON_TDD_SPCL
29814 *numRb = (reTxTb->initTxNumRbs);
/* Original tx was on a special SF but retx is on a normal SF: scale RBs by 3/4. */
29815 if(reTxTb->sfType == RG_SCH_SPL_SF_DATA && subFrm->sfType != RG_SCH_SPL_SF_DATA)
29817 *numRb = (reTxTb->initTxNumRbs*3/4);
29821 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
29827 if ((S16)*numRb > availBw)
29831 /* Update the subframe Allocated BW field */
29832 subFrm->bwAssigned += *numRb;
29833 noLyr2 = otherCw->noLyr;
29834 RG_SCH_CMN_GET_MCS_FOR_RETX(reTxTb, imcs1);
29836 /* If there is no CFI change, continue to use the BLER based
/* ...iTbs for the new-TX CW; on a CFI change, re-derive it from the CQI. */
29838 if (ueDl->lastCfi == cfi)
29840 iTbs = otherCw->iTbs[noLyr2-1];
29845 iTbs = (U8) rgSchCmnFetchItbs(cell, ueDl, subFrm, otherCw->cqi, cfi,
29846 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
29848 iTbs = (U8) rgSchCmnFetchItbs(cell, ueDl, otherCw->cqi, cfi,
29849 !(ueDl->mimoInfo.btrCwIdx), noLyr2);
29852 tb2Sz = rgTbSzTbl[noLyr2-1][iTbs][*numRb-1]/8;
29853 /* DwPts Scheduling Changes Start */
29856 /* DwPts Scheduling Changes End */
29857 RG_SCH_CMN_DL_TBS_TO_MCS(iTbs, imcs2);
/* Fix: For a RETX TB the iTbs is irrelevant, hence 0 for tbInfo[0]. */
29859 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], reTxTb->tbSz, \
29860 0, imcs1, reTxTb, reTxTb->numLyrs);
29862 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], tb2Sz, \
29863 iTbs, imcs2, txTb, noLyr2);
29865 *effBo = reTxTb->tbSz + tb2Sz;
29872 * @brief This function determines the RBs and Bytes required for BO
29873 * Retransmission on 2 CWs.
29877 * Function: rgSCHCmnDlAlloc2CwRetxRb
29878 * Purpose: This function determines the RBs and Bytes required
29879 * for BO Retransmission on 2 CWs. Allocate larger TB
29880 * on a better CW and check if the smaller TB can be
29881 * accommodated on the other CW.
29882 * Returns RFAILED if BO not satisfied at all.
29884 * Invoked by: Common Scheduler
29886 * @param[in] RgSchCellCb *cell
29887 * @param[in] RgSchDlSf *subFrm
29888 * @param[in] RgSchUeCb *ue
29889 * @param[in] RgSchDlHqProcCb *proc
29890 * @param[out] U8 *numRb
29891 * @param[out] Bool *swpFlg
29892 * @param[out] U32 *effBo
29897 PRIVATE S16 rgSCHCmnDlAlloc2CwRetxRb
29902 RgSchDlHqProcCb *proc,
29908 PRIVATE S16 rgSCHCmnDlAlloc2CwRetxRb(cell, subFrm, ue, proc,\
29909 numRb, swpFlg, effBo)
29913 RgSchDlHqProcCb *proc;
29919 RgSchDlRbAlloc *allocInfo;
29922 RgSchDlHqTbCb *lrgTbInfo, *othrTbInfo;
29925 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
29928 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
29929 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
29931 lrgTbInfo = &proc->tbInfo[0];
29932 othrTbInfo = &proc->tbInfo[1];
29933 *numRb = lrgTbInfo->dlGrnt.numRb;
29934 #ifdef XEON_TDD_SPCL
/* If either TB was first transmitted on a special SF, base the RB count on
 * that TB's initial-tx RBs, scaling by 3/4 when retx lands on a normal SF. */
29935 if((lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA || othrTbInfo->sfType == RG_SCH_SPL_SF_DATA))
29937 if(lrgTbInfo->sfType == RG_SCH_SPL_SF_DATA)
29939 *numRb = (lrgTbInfo->initTxNumRbs);
29943 *numRb = (othrTbInfo->initTxNumRbs);
29946 if(subFrm->sfType != RG_SCH_SPL_SF_DATA)
29948 *numRb = (*numRb)*3/4;
29953 RLOG1(L_ERROR," Number of RBs [%d] are less than or equal to 3",*numRb);
29958 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
29962 /* Update the subframe Allocated BW field */
29963 subFrm->bwAssigned += *numRb;
29964 RG_SCH_CMN_GET_MCS_FOR_RETX(lrgTbInfo, imcs1);
29965 RG_SCH_CMN_GET_MCS_FOR_RETX(othrTbInfo, imcs2);
/* RETX TBs reuse the original TBS; iTbs is irrelevant, hence 0. */
29966 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], lrgTbInfo->tbSz, \
29967 0, imcs1, lrgTbInfo, lrgTbInfo->numLyrs);
29968 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[1], othrTbInfo->tbSz, \
29969 0, imcs2, othrTbInfo, othrTbInfo->numLyrs);
29970 *effBo = lrgTbInfo->tbSz + othrTbInfo->tbSz;
29979 * @brief This function determines the RBs and Bytes required for BO
29980 * Retransmission on 1 CW.
29984 * Function: rgSCHCmnDlAlloc1CwRetxRb
29985 * Purpose: This function determines the RBs and Bytes required
29986 * for BO Retransmission on 1 CW, the first CW.
29987 * Returns RFAILED if BO not satisfied at all.
29989 * Invoked by: Common Scheduler
29991 * @param[in] RgSchCellCb *cell
29992 * @param[in] RgSchDlSf *subFrm
29993 * @param[in] RgSchUeCb *ue
29994 * @param[in] RgSchDlHqTbCb *tbInfo
29995 * @param[in] U8 noLyr
29996 * @param[out] U8 *numRb
29997 * @param[out] U32 *effBo
30002 PRIVATE S16 rgSCHCmnDlAlloc1CwRetxRb
30007 RgSchDlHqTbCb *tbInfo,
30013 PRIVATE S16 rgSCHCmnDlAlloc1CwRetxRb(cell, subFrm, ue, tbInfo, noLyr,\
30018 RgSchDlHqTbCb *tbInfo;
30024 RgSchDlRbAlloc *allocInfo;
30028 allocInfo = RG_SCH_CMN_GET_ALLOCCB_FRM_UE(ue,cell);
30031 /* Fix for ccpu00123919: In case of RETX TB scheduling avoiding recomputation of RB
30032 * and Tbs. Set all parameters same as Init TX except RV(only for NACKED) and
/* Reuse the original grant's RB count; bail out if the subframe lacks BW. */
30034 *numRb = tbInfo->dlGrnt.numRb;
30035 if ((S16)*numRb > (S16)(subFrm->bw - subFrm->bwAssigned))
30039 /* Update the subframe Allocated BW field */
30040 subFrm->bwAssigned += *numRb;
30041 imcs = tbInfo->dlGrnt.iMcs;
30042 allocInfo->dciFormat = tbInfo->dlGrnt.dciFormat;
30043 /* Fix: For a RETX TB the iTbs is irrelevant, hence setting 0 */
30044 RG_SCH_CMN_FILL_DL_TBINFO(&allocInfo->tbInfo[0], tbInfo->tbSz, \
30045 0, imcs, tbInfo, tbInfo->numLyrs);
30046 *effBo = tbInfo->tbSz;
30054 * @brief This function is called to handle Release PDCCH feedback for SPS UE
30058 * Function: rgSCHCmnDlRelPdcchFbk
30059 * Purpose: Invokes SPS module to handle release PDCCH feedback
30063 * @param[in] RgSchCellCb *cell
30064 * @param[in] RgSchUeCb *ue
30065 * @param[in] Bool isAck
30070 Void rgSCHCmnDlRelPdcchFbk
30077 Void rgSCHCmnDlRelPdcchFbk(cell, ue, isAck)
/* Thin pass-through to the DL SPS module. */
30084 rgSCHCmnSpsDlRelPdcchFbk(cell, ue, isAck);
30091 * @brief This function is invoked to handle Ack processing for a HARQ proc.
30095 * Function: rgSCHCmnDlProcAck
30096 * Purpose: DTX processing for HARQ proc
/* NOTE(review): @brief says "Ack" while Purpose says "DTX" -- confirm which
 * feedback event actually routes here before relying on either. */
30100 * @param[in] RgSchCellCb *cell
30101 * @param[in] RgSchDlHqProcCb *hqP
30106 Void rgSCHCmnDlProcAck
30109 RgSchDlHqProcCb *hqP
30112 Void rgSCHCmnDlProcAck(cell, hqP)
30114 RgSchDlHqProcCb *hqP;
30119 if (RG_SCH_CMN_SPS_DL_IS_SPS_HQP(hqP))
30121 /* Invoke SPS module if SPS service was scheduled for this HARQ proc */
30122 rgSCHCmnSpsDlProcAck(cell, hqP);
30126 #ifdef RGSCH_SPS_STATS
30127 extern U32 rgSchStatCrntiCeRcvCnt;
30130 * @brief This function is invoked to handle CRNTI CE reception for an UE
30134 * Function: rgSCHCmnHdlCrntiCE
30135 * Purpose: Handle CRNTI CE reception
30139 * @param[in] RgSchCellCb *cell
30140 * @param[in] RgSchDlHqProcCb *hqP
30145 Void rgSCHCmnHdlCrntiCE
30151 Void rgSCHCmnHdlCrntiCE(cell, ue)
30157 #ifdef RGSCH_SPS_STATS
30158 rgSchStatCrntiCeRcvCnt++;
30161 /* When UL sync lost happened due to TA timer expiry UE is being moved to
30162 PDCCH order inactivity list.But when CRNTI CE received in msg3 from UE
30163 we are not moving UE into active state due to that RRC Reconfiguration is
30165 So here we are moving UE to active list whenever we receive the CRNTI CE and
30167 /* CR ccpu00144525 */
30168 if (RG_SCH_CMN_IS_UE_PDCCHODR_INACTV(ue))
30170 /* Activate this UE if it was inactive */
30171 RG_SCH_CMN_DL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
30172 RG_SCH_CMN_UL_UPDT_INACTV_MASK ( cell, ue, RG_PDCCHODR_INACTIVE);
30175 /* Handling is same as reception of UE RESET for both DL and UL */
/* Reset any configured SPS state in both directions on CRNTI CE. */
30176 if (ue->dl.dlSpsCfg.isDlSpsEnabled)
30178 rgSCHCmnSpsDlUeReset(cell, ue);
30180 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30182 rgSCHCmnSpsUlUeReset(cell, ue);
30190 * @brief This function is called to handle relInd from MAC for a UE
30194 * Function: rgSCHCmnUlSpsRelInd
30195 * Purpose: Invokes SPS module to handle UL SPS release for a UE
30197 * Invoked by: SCH_UTL
30199 * @param[in] RgSchCellCb *cell
30200 * @param[in] RgSchUeCb *ue
30201 * @param[in] Bool isExplRel
30206 Void rgSCHCmnUlSpsRelInd
30213 Void rgSCHCmnUlSpsRelInd(cell, ue, isExplRel)
/* Thin pass-through to the UL SPS module. */
30220 rgSCHCmnSpsUlProcRelInd(cell, ue, isExplRel);
30223 } /* end of rgSCHCmnUlSpsRelInd */
30226 * @brief This function is called to handle SPS Activate Ind from MAC for a UE
30230 * Function: rgSCHCmnUlSpsActInd
30231 * Purpose: Invokes SPS module to handle UL SPS activate for a UE
30233 * Invoked by: SCH_UTL
30235 * @param[in] RgSchCellCb *cell
30236 * @param[in] RgSchUeCb *ue
30241 Void rgSCHCmnUlSpsActInd
30248 Void rgSCHCmnUlSpsActInd(cell, ue,spsSduSize)
/* Forward only when UL SPS is configured for this UE. */
30256 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30258 rgSCHCmnSpsUlProcActInd(cell, ue,spsSduSize);
30262 } /* end of rgSCHCmnUlSpsActInd */
30265 * @brief This function is called to handle CRC in UL for UEs
30266 * undergoing SPS release
30270 * Function: rgSCHCmnUlCrcInd
30271 * Purpose: Invokes SPS module to handle CRC in UL for SPS UE
30273 * Invoked by: SCH_UTL
30275 * @param[in] RgSchCellCb *cell
30276 * @param[in] RgSchUeCb *ue
30277 * @param[in] CmLteTimingInfo crcTime
30282 Void rgSCHCmnUlCrcInd
30286 CmLteTimingInfo crcTime
30289 Void rgSCHCmnUlCrcInd(cell, ue, crcTime)
30292 CmLteTimingInfo crcTime;
/* Forward only when UL SPS is configured for this UE. */
30296 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30298 rgSCHCmnSpsUlProcCrcInd(cell, ue, crcTime);
30302 } /* end of rgSCHCmnUlCrcFailInd */
30305 * @brief This function is called to handle CRC failure in UL
30309 * Function: rgSCHCmnUlCrcFailInd
30310 * Purpose: Invokes SPS module to handle CRC failure in UL for SPS UE
30312 * Invoked by: SCH_UTL
30314 * @param[in] RgSchCellCb *cell
30315 * @param[in] RgSchUeCb *ue
30316 * @param[in] CmLteTimingInfo crcTime
30321 Void rgSCHCmnUlCrcFailInd
30325 CmLteTimingInfo crcTime
30328 Void rgSCHCmnUlCrcFailInd(cell, ue, crcTime)
30331 CmLteTimingInfo crcTime;
/* CRC failure is treated as DTX by the UL SPS module. */
30335 if (ue->ul.ulSpsCfg.isUlSpsEnabled == TRUE)
30337 rgSCHCmnSpsUlProcDtxInd(cell, ue, crcTime);
30341 } /* end of rgSCHCmnUlCrcFailInd */
30346 * @brief BCH,BCCH,PCCH Dowlink Scheduling Handler.
30350 * Function: rgSCHCmnDlBcchPcchAlloc
30351 * Purpose: This function calls common scheduler APIs to
30352 * schedule for BCCH/PCCH.
30353 * It then invokes Allocator for actual RB
30354 * allocations. It processes on the actual resources allocated
30355 * against requested to the allocator module.
30357 * Invoked by: Common Scheduler
30359 * @param[in] RgSchCellCb *cell
30363 PRIVATE Void rgSCHCmnDlBcchPcchAlloc
30368 PRIVATE Void rgSCHCmnDlBcchPcchAlloc(cell)
30373 U8 nextSfIdx = (cell->crntSfIdx) % RGSCH_SF_ALLOC_SIZE;
30375 #ifdef LTEMAC_HDFDD
30376 U8 nextSfIdx = (cell->crntSfIdx + RG_SCH_CMN_HARQ_INTERVAL) % RGSCH_NUM_SUB_FRAMES;
30378 U8 nextSfIdx = (cell->crntSfIdx) % RGSCH_NUM_SUB_FRAMES;
30381 RgInfSfAlloc *nextsfAlloc = &(cell->sfAllocArr[nextSfIdx]);
30382 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
30383 RgSchCmnDlRbAllocInfo *allocInfo = &cellSch->allocInfo;
30387 /*Reset the bitmask for BCCH/PCCH*/
30388 rgSCHUtlResetSfAlloc(nextsfAlloc,TRUE,FALSE);
30389 #ifndef DISABLE_MIB_SIB /* Not sending MIB and SIB to CL */
30391 rgSCHChkNUpdSiCfg(cell);
30392 rgSCHSelectSi(cell);
30395 /*Perform the scheduling for BCCH,PCCH*/
30396 rgSCHCmnDlBcchPcch(cell, allocInfo, nextsfAlloc);
30398 /* Call common allocator for RB Allocation */
30399 rgSCHBcchPcchDlRbAlloc(cell, allocInfo);
30401 /* Finalize the Allocations for reqested Against alloced */
30402 rgSCHCmnDlBcchPcchFnlz(cell, allocInfo);
30403 #endif /* DISABLE_MIB_SIB */
30408 * @brief Handles RB allocation for BCCH/PCCH for downlink.
30412 * Function : rgSCHBcchPcchDlRbAlloc
30414 * Invoking Module Processing:
30415 * - This function is invoked for DL RB allocation of BCCH/PCCH
30417 * Processing Steps:
30418 * - If cell is frequency selective,
30419 * - Call rgSCHDlfsBcchPcchAllocRb().
30421 * - Do the processing
30423 * @param[in] RgSchCellCb *cell
30424 * @param[in] RgSchDlRbAllocInfo *allocInfo
30429 PRIVATE Void rgSCHBcchPcchDlRbAlloc
30432 RgSchCmnDlRbAllocInfo *allocInfo
30435 PRIVATE Void rgSCHBcchPcchDlRbAlloc(cell, allocInfo)
30437 RgSchCmnDlRbAllocInfo *allocInfo;
30440 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
/* Dispatch to the frequency-selective or non-selective allocator. */
30444 if (cellSch->dl.isDlFreqSel)
30446 cellSch->apisDlfs->rgSCHDlfsBcchPcchAllocRb(cell, allocInfo);
30450 rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo);
30457 * @brief Handles RB allocation for BCCH,PCCH for frequency
30458 * non-selective cell.
30462 * Function : rgSCHCmnNonDlfsBcchPcchRbAlloc
30464 * Invoking Module Processing:
30465 * - SCH shall invoke this if downlink frequency selective is disabled for
30466 * the cell for RB allocation.
30467 * - MAX C/I/PFS/RR shall provide the requiredBytes, required RBs
30468 * estimate and subframe for each allocation to be made to SCH.
30470 * Processing Steps:
30471 * - Allocate sequentially for BCCH,PCCH common channels.
30473 * @param[in] RgSchCellCb *cell
30474 * @param[in] RgSchCmnDlRbAllocInfo *allocInfo
30479 PRIVATE Void rgSCHCmnNonDlfsBcchPcchRbAlloc
30482 RgSchCmnDlRbAllocInfo *allocInfo
30485 PRIVATE Void rgSCHCmnNonDlfsBcchPcchRbAlloc(cell, allocInfo)
30487 RgSchCmnDlRbAllocInfo *allocInfo;
30490 RgSchDlRbAlloc *reqAllocInfo;
30494 /* Allocate for PCCH */
30495 reqAllocInfo = &(allocInfo->pcchAlloc);
30496 if (reqAllocInfo->rbsReq)
30498 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
30500 /* Allocate for BCCH on DLSCH */
30501 reqAllocInfo = &(allocInfo->bcchAlloc);
30502 if (reqAllocInfo->rbsReq)
30504 rgSCHCmnNonDlfsCmnRbAlloc(cell, reqAllocInfo);
30512 * @brief This function implements the handling to check and
30513 update the SI cfg at the start of the modification period.
30517 * Function: rgSCHChkNUpdSiCfg
30518 * Purpose: This function implements handling for update of SI Cfg
30519 * at the start of modification period.
30521 * Invoked by: Scheduler
30523 * @param[in] RgSchCellCb* cell
/* Checks and applies pending System Information (SI) configuration updates.
 * PWS/Warning SIB1 updates are applied immediately at the SIB1 repetition
 * occasion; all other updates (MIB, SIB1, SIs, SI cfg) are deferred to the
 * start of the BCCH modification period. Pending updates are signalled via
 * bits in cell->siCb.siBitMask, which are cleared once consumed. */
30529 PRIVATE Void rgSCHChkNUpdSiCfg
/* K&R-style definition variant of the same function */
30534 PRIVATE Void rgSCHChkNUpdSiCfg(cell)
30538 CmLteTimingInfo pdSchTmInfo;
/* Work on a look-ahead copy of the current time (advanced below by the
 * scheduling delta) rather than the raw cell time. */
30542 pdSchTmInfo = cell->crntTime;
30543 #ifdef LTEMAC_HDFDD
30544 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30545 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30546 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30548 RGSCH_INCR_SUB_FRAME(pdSchTmInfo, RG_SCH_CMN_DL_DELTA);
30552 /* Updating the SIB1 for Warning SI message immediately after it is received
30553 * from application. No need to wait for next modification period.
30555 if((pdSchTmInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
30556 && (RGSCH_SIB1_TX_SF_NUM == (pdSchTmInfo.slot % RGSCH_NUM_SUB_FRAMES)))
30558 /*Check whether SIB1 with PWS has been updated*/
30559 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_PWS_UPD)
/* Swap in the new SIB1 PDU and its transmission parameters */
30561 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
30562 cell->siCb.newSiInfo.sib1Info.sib1);
30563 cell->siCb.crntSiInfo.sib1Info.mcs =
30564 cell->siCb.newSiInfo.sib1Info.mcs;
30565 cell->siCb.crntSiInfo.sib1Info.nPrb =
30566 cell->siCb.newSiInfo.sib1Info.nPrb;
30567 cell->siCb.crntSiInfo.sib1Info.msgLen =
30568 cell->siCb.newSiInfo.sib1Info.msgLen;
30569 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_PWS_UPD;
30573 /*Check if this SFN and SF No marks the start of next modification
30574 period. If current SFN,SF No doesn't marks the start of next
30575 modification period, then return. */
30576 if(!((pdSchTmInfo.sfn % cell->siCfg.modPrd == 0)
30577 && (0 == pdSchTmInfo.slot)))
30578 /*if(!((((pdSchTmInfo.hSfn * 1024) + pdSchTmInfo.sfn) % cell->siCfg.modPrd == 0)
30579 && (0 == pdSchTmInfo.slot)))*/
30584 /*Check whether MIB has been updated*/
30585 if(cell->siCb.siBitMask & RGSCH_SI_MIB_UPD)
30587 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.mib,
30588 cell->siCb.newSiInfo.mib);
30589 cell->siCb.siBitMask &= ~RGSCH_SI_MIB_UPD;
30592 /*Check whether SIB1 has been updated*/
30593 if(cell->siCb.siBitMask & RGSCH_SI_SIB1_UPD)
30595 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.sib1Info.sib1,
30596 cell->siCb.newSiInfo.sib1Info.sib1);
30597 cell->siCb.crntSiInfo.sib1Info.mcs = cell->siCb.newSiInfo.sib1Info.mcs;
30598 cell->siCb.crntSiInfo.sib1Info.nPrb = cell->siCb.newSiInfo.sib1Info.nPrb;
30599 cell->siCb.crntSiInfo.sib1Info.msgLen =
30600 cell->siCb.newSiInfo.sib1Info.msgLen;
30601 cell->siCb.siBitMask &= ~RGSCH_SI_SIB1_UPD;
30604 /*Check whether SIs have been updated*/
30605 if(cell->siCb.siBitMask & RGSCH_SI_SI_UPD)
30609 /*Check if SI cfg have been modified And Check if numSi have
30610 been changed, if yes then we would need to update the
30611 pointers for all the SIs */
30612 if((cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD) &&
30613 (cell->siCfg.numSi != cell->siCb.newSiCfg.numSi))
/* numSi changed: refresh pointers/parameters for every configured SI */
30615 for(idx = 0;idx < cell->siCb.newSiCfg.numSi;idx++)
30617 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
30618 cell->siCb.newSiInfo.siInfo[idx].si);
30619 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
30620 cell->siCb.siArray[idx].isWarningSi = FALSE;
30622 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
30623 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
30624 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
30627 /*If numSi have been reduced then we need to free the
30628 pointers at the indexes in crntSiInfo which haven't
30629 been exercised. If numSi has increased then nothing
30630 additional is requires as above handling has taken
30632 if(cell->siCfg.numSi > cell->siCb.newSiCfg.numSi)
30634 for(idx = cell->siCb.newSiCfg.numSi;
30635 idx < cell->siCfg.numSi;idx++)
30637 RGSCH_FREE_MSG(cell->siCb.crntSiInfo.siInfo[idx].si);
30638 cell->siCb.siArray[idx].si = NULLP;
30644 /*numSi has not been updated, we just need to update the
30645 pointers for the SIs which are set to NON NULLP */
30646 /*ccpu00118260 - Correct Update of SIB2 */
30647 for(idx = 0;idx < cell->siCfg.numSi;idx++)
30649 if(NULLP != cell->siCb.newSiInfo.siInfo[idx].si)
30651 RGSCH_SET_SI_INFO(cell->siCb.crntSiInfo.siInfo[idx].si,
30652 cell->siCb.newSiInfo.siInfo[idx].si);
30654 cell->siCb.siArray[idx].si = cell->siCb.crntSiInfo.siInfo[idx].si;
30655 cell->siCb.siArray[idx].isWarningSi = FALSE;
30656 cell->siCb.crntSiInfo.siInfo[idx].mcs = cell->siCb.newSiInfo.siInfo[idx].mcs;
30657 cell->siCb.crntSiInfo.siInfo[idx].nPrb = cell->siCb.newSiInfo.siInfo[idx].nPrb;
30658 cell->siCb.crntSiInfo.siInfo[idx].msgLen = cell->siCb.newSiInfo.siInfo[idx].msgLen;
30662 cell->siCb.siBitMask &= ~RGSCH_SI_SI_UPD;
30665 /*Check whether SI cfg have been updated*/
/* Apply the new SI configuration last, so the numSi comparison above
 * could still see the old value. */
30666 if(cell->siCb.siBitMask & RGSCH_SI_SICFG_UPD)
30668 cell->siCfg = cell->siCb.newSiCfg;
30669 cell->siCb.siBitMask &= ~RGSCH_SI_SICFG_UPD;
30677 * @brief This function implements the selection of the SI
30678 * that is to be scheduled.
30682 * Function: rgSCHSelectSi
30683 * Purpose: This function implements the selection of SI
30684 * that is to be scheduled.
30686 * Invoked by: Scheduler
30688 * @param[in] RgSchCellCb* cell
/* Selects the SI to transmit for the upcoming SI window. Selection happens
 * once per window (tracked via siCb.inWindow); at a window boundary the SI
 * set id and window id are derived from the look-ahead time, and the SI
 * context (siCtx) is loaded if a valid SI maps to that window and its
 * periodicity matches the current SI set. */
30694 PRIVATE Void rgSCHSelectSi
/* K&R-style definition variant of the same function */
30699 PRIVATE Void rgSCHSelectSi(cell)
30703 CmLteTimingInfo crntTmInfo;
/* Advance a copy of the current time by the DL scheduling delta */
30710 crntTmInfo = cell->crntTime;
30711 #ifdef LTEMAC_HDFDD
30712 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30713 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30714 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30716 RGSCH_INCR_SUB_FRAME(crntTmInfo, RG_SCH_CMN_DL_DELTA);
30719 siWinSize = cell->siCfg.siWinSize;
30721 /* Select SI only once at the starting of the new window */
30722 if(cell->siCb.inWindow)
30724 if ((crntTmInfo.sfn % cell->siCfg.minPeriodicity) == 0 &&
30725 crntTmInfo.slot == 0)
30727 /* Reinit inWindow at the beginning of every SI window */
30728 cell->siCb.inWindow = siWinSize - 1;
30732 cell->siCb.inWindow--;
30736 else /* New window. Re-init the winSize counter with the window length */
/* Drop an un-finished Warning SI PDU from the previous window before
 * selecting the next SI. */
30738 if((cell->siCb.siArray[cell->siCb.siCtx.siId - 1].isWarningSi == TRUE)&&
30739 (cell->siCb.siCtx.retxCntRem != 0))
30741 rgSCHUtlFreeWarningSiPdu(cell);
30742 cell->siCb.siCtx.warningSiFlag = FALSE;
30745 cell->siCb.inWindow = siWinSize - 1;
/* SI set id: which minPeriodicity-sized set the look-ahead time falls in */
30748 x = rgSCHCmnGetSiSetId(crntTmInfo.sfn, crntTmInfo.slot,
30749 cell->siCfg.minPeriodicity);
30751 /* Window Id within a SI set. This window Id directly maps to a
30753 windowId = (((crntTmInfo.sfn * RGSCH_NUM_SUB_FRAMES_5G) +
30754 crntTmInfo.slot) - (x * (cell->siCfg.minPeriodicity * 10)))
30757 if(windowId >= RGR_MAX_NUM_SI)
30760 /* Update the siCtx if there is a valid SI and its periodicity
30762 if (NULLP != cell->siCb.siArray[windowId].si)
30764 /* Warning SI Periodicity is same as SIB2 Periodicity */
30765 if(((cell->siCb.siArray[windowId].isWarningSi == FALSE) &&
30766 (x % (cell->siCfg.siPeriodicity[windowId]
30767 /cell->siCfg.minPeriodicity) == 0)) ||
30768 ((cell->siCb.siArray[windowId].isWarningSi == TRUE) &&
30769 (x % (cell->siCfg.siPeriodicity[0]
30770 /cell->siCfg.minPeriodicity) == 0)))
/* Load the SI context: siId is 1-based; timeToTx is the window start,
 * maxTimeToTx the window end (start + siWinSize - 1 subframes). */
30772 cell->siCb.siCtx.siId = windowId+1;
30773 cell->siCb.siCtx.retxCntRem = cell->siCfg.retxCnt;
30774 cell->siCb.siCtx.warningSiFlag = cell->siCb.siArray[windowId].
30776 cell->siCb.siCtx.timeToTx.sfn = crntTmInfo.sfn;
30777 cell->siCb.siCtx.timeToTx.slot = crntTmInfo.slot;
30779 RG_SCH_ADD_TO_CRNT_TIME(cell->siCb.siCtx.timeToTx,
30780 cell->siCb.siCtx.maxTimeToTx, (siWinSize - 1))
30784 {/* Update the siCtx with invalid si Id */
30785 cell->siCb.siCtx.siId = 0;
30793 * @brief This function implements scheduler DL allocation for
30798 * Function: rgSCHDlSiSched
30799 * Purpose: This function implements scheduler for DL allocation
30802 * Invoked by: Scheduler
30804 * @param[in] RgSchCellCb* cell
/* DL scheduling of broadcast system information for one subframe:
 *  1. MIB on BCH at (SFN % RGSCH_MIB_PERIODICITY == 0, sf 0), with the SFN
 *     bits patched into the first two MIB octets;
 *  2. SIB1 at its repetition occasions;
 *  3. the currently selected SI (siCtx), honoring its SI window,
 *     remaining retransmission count, and ABS muting.
 * Fills allocInfo->bcchAlloc with the resulting RB/MCS request. */
30810 PRIVATE Void rgSCHDlSiSched
30813 RgSchCmnDlRbAllocInfo *allocInfo,
30814 RgInfSfAlloc *subfrmAlloc
/* K&R-style definition variant of the same function */
30817 PRIVATE Void rgSCHDlSiSched(cell, allocInfo, subfrmAlloc)
30819 RgSchCmnDlRbAllocInfo *allocInfo;
30820 RgInfSfAlloc *subfrmAlloc;
30823 CmLteTimingInfo crntTimInfo;
30829 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
30830 /* DwPTS Scheduling Changes Start */
30833 U8 cfi = cellDl->currCfi;
30835 /* DwPTS Scheduling Changes End */
/* Look-ahead scheduling time = current time + DL delta */
30839 crntTimInfo = cell->crntTime;
30840 #ifdef LTEMAC_HDFDD
30841 /* For HDFDD we need scheduling information at least RG_SCH_CMN_DL_DELTA
30842 + RG_SCH_CMN_HARQ_INTERVAL (7) subframes ahead */
30843 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA + RG_SCH_CMN_HARQ_INTERVAL);
30845 RGSCH_INCR_SUB_FRAME(crntTimInfo, RG_SCH_CMN_DL_DELTA);
30848 /* Compute the subframe for which allocation is being made.
30849 Essentially, we need pointer to the dl frame for this subframe */
30850 sf = rgSCHUtlSubFrmGet(cell, crntTimInfo);
30852 /*Check if scheduling of MIB is required */
30854 /* since we are adding the MIB repetition logic for EMTC UEs, checking if
30855 * emtcEnabled or not, If enabled MIB would be repeted at as part of EMTC
30856 * feature, otherwise scheduling at (n,0) */
30857 if(0 == cell->emtcEnable)
30860 if((crntTimInfo.sfn % RGSCH_MIB_PERIODICITY == 0)
30861 && (RGSCH_MIB_TX_SF_NUM == crntTimInfo.slot))
30864 U8 sfnOctet, mibOct2 = 0;
30866 /*If MIB has not been yet setup by Application, return*/
30867 if(NULLP == cell->siCb.crntSiInfo.mib)
30870 SFndLenMsg(cell->siCb.crntSiInfo.mib, &mibLen);
30871 sf->bch.tbSize = mibLen;
30872 /*Fill the interface information */
30873 rgSCHUtlFillRgInfCmnLcInfo(sf, subfrmAlloc, NULLD, NULLD);
30875 /*Set the bits of MIB to reflect SFN */
30876 /*First get the Most signficant 8 bits of SFN */
30877 sfnOctet = (U8)(crntTimInfo.sfn >> 2);
30878 /*Get the first two octets of MIB, and then update them
30879 using the SFN octet value obtained above.*/
30880 if(ROK != SExamMsg((Data *)(&mibOct1),
30881 cell->siCb.crntSiInfo.mib, 0))
30884 if(ROK != SExamMsg((Data *)(&mibOct2),
30885 cell->siCb.crntSiInfo.mib, 1))
30888 /* ccpu00114572- Fix for improper way of MIB Octet setting for SFN */
/* Splice sfnOctet across the octet boundary: top 2 bits into octet 0,
 * remaining 6 bits into octet 1. */
30889 mibOct1 = (mibOct1 & 0xFC) | (sfnOctet >> 6);
30890 mibOct2 = (mibOct2 & 0x03) | (sfnOctet << 2);
30891 /* ccpu00114572- Fix ends*/
30893 /*Now, replace the two octets in MIB */
30894 if(ROK != SRepMsg((Data)(mibOct1),
30895 cell->siCb.crntSiInfo.mib, 0))
30898 if(ROK != SRepMsg((Data)(mibOct2),
30899 cell->siCb.crntSiInfo.mib, 1))
30902 /*Copy the MIB msg buff into interface buffer */
30903 SCpyMsgMsg(cell->siCb.crntSiInfo.mib,
30904 rgSchCb[cell->instIdx].rgSchInit.region,
30905 rgSchCb[cell->instIdx].rgSchInit.pool,
30906 &subfrmAlloc->cmnLcInfo.bchInfo.pdu);
30907 /* Added Dl TB count for MIB message transmission
30908 * This counter is incremented 4 times to consider
30909 * the retransmission at the PHY level on PBCH channel*/
30911 cell->dlUlTbCnt.tbTransDlTotalCnt += RG_SCH_MIB_CNT;
30918 allocInfo->bcchAlloc.schdFirst = FALSE;
30919 /*Check if scheduling of SIB1 is required.
30920 Check of (crntTimInfo.sfn % RGSCH_SIB1_PERIODICITY == 0)
30921 is not required here since the below check takes care
30922 of SFNs applicable for this one too.*/
30923 if((crntTimInfo.sfn % RGSCH_SIB1_RPT_PERIODICITY == 0)
30924 && (RGSCH_SIB1_TX_SF_NUM == crntTimInfo.slot))
30926 /*If SIB1 has not been yet setup by Application, return*/
30927 if(NULLP == (cell->siCb.crntSiInfo.sib1Info.sib1))
/* SIB1 occasion: SIB1 gets scheduled first; take its stored tx params */
30932 allocInfo->bcchAlloc.schdFirst = TRUE;
30933 mcs = cell->siCb.crntSiInfo.sib1Info.mcs;
30934 nPrb = cell->siCb.crntSiInfo.sib1Info.nPrb;
30935 msgLen = cell->siCb.crntSiInfo.sib1Info.msgLen;
30939 /*Check if scheduling of SI can be performed.*/
30940 Bool invalid = FALSE;
30942 if(cell->siCb.siCtx.siId == 0)
30945 /*Check if the Si-Window for the current Si-Context is completed*/
30946 invalid = rgSCHCmnChkPastWin(crntTimInfo, cell->siCb.siCtx.maxTimeToTx);
30949 /* LTE_ADV_FLAG_REMOVED_START */
/* Window expired while retransmissions remained: log and drop any
 * pending Warning SI PDU. */
30950 if(cell->siCb.siCtx.retxCntRem)
30952 RGSCHLOGERROR(cell->instIdx,ERRCLS_INT_PAR,ERG011,(ErrVal)cell->siCb.siCtx.siId,
30953 "rgSCHDlSiSched(): SI not scheduled and window expired");
30955 /* LTE_ADV_FLAG_REMOVED_END */
30956 if(cell->siCb.siCtx.warningSiFlag == TRUE)
30958 rgSCHUtlFreeWarningSiPdu(cell);
30959 cell->siCb.siCtx.warningSiFlag = FALSE;
30964 /*Check the timinginfo of the current SI-Context to see if its
30965 transmission can be scheduled. */
30966 if(FALSE == (rgSCHCmnChkInWin(crntTimInfo,
30967 cell->siCb.siCtx.timeToTx,
30968 cell->siCb.siCtx.maxTimeToTx)))
30973 /*Check if retransmission count has become 0*/
30974 if(0 == cell->siCb.siCtx.retxCntRem)
30979 /* LTE_ADV_FLAG_REMOVED_START */
30980 /* Check if ABS is enabled/configured */
30981 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
30983 /* The pattern type is RGR_ABS_MUTE, then eNB need to blank the subframe */
30984 if(cell->lteAdvCb.absCfg.absPatternType & RGR_ABS_MUTE)
30986 /* Determine next scheduling subframe is ABS or not */
30987 if(RG_SCH_ABS_ENABLED_ABS_SF == (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern
30988 [((crntTimInfo.sfn*RGSCH_NUM_SUB_FRAMES) + crntTimInfo.slot) % RGR_ABS_PATTERN_LEN]))
30990 /* Skip the SI scheduling to next tti */
30995 /* LTE_ADV_FLAG_REMOVED_END */
30997 /*Schedule the transmission of the current SI-Context */
30998 /*Find out the messg length for the SI message */
30999 /* warningSiFlag is to differentiate between Warning SI
31001 if((rgSCHUtlGetMcsAndNPrb(cell, &nPrb, &mcs, &msgLen)) != ROK)
/* 'i' records the subframe offset into the SI window for this tx */
31006 cell->siCb.siCtx.i = RGSCH_CALC_SF_DIFF(crntTimInfo,
31007 cell->siCb.siCtx.timeToTx);
31011 /*Get the number of rb required */
31012 /*rgSCHCmnClcRbAllocForFxdTb(cell, msgLen, cellDl->ccchCqi, &rb);*/
/* bitsPerRb == 0: derive RB count from the lowest TBS row (iTbs 0);
 * otherwise divide message bits by the per-RB capacity. */
31013 if(cellDl->bitsPerRb==0)
31015 while ((rgTbSzTbl[0][0][rb]) < (U32) (msgLen*8))
31023 rb = RGSCH_CEIL((msgLen*8), cellDl->bitsPerRb);
31025 /* DwPTS Scheduling Changes Start */
31027 if (sf->sfType == RG_SCH_SPL_SF_DATA)
31029 RGSCH_GET_SPS_SF_CFI(cell->bwCfg.dlTotalBw, cfi);
31031 /* Calculate the less RE's because of DwPTS */
31032 lostRe = rb * (cellDl->noResPerRb[cfi] - cellDl->numReDwPts[cfi]);
31034 /* Increase number of RBs in Spl SF to compensate for lost REs */
31035 rb += RGSCH_CEIL(lostRe, cellDl->numReDwPts[cfi]);
31038 /* DwPTS Scheduling Changes End */
31039 /*ccpu00115595- end*/
31040 /* Additional check to see if required RBs
31041 * exceeds the available */
31042 if (rb > sf->bw - sf->bwAssigned)
31044 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "rgSCHDlSiSched(): "
31045 "BW allocation failed CRNTI:%d",RGSCH_SI_RNTI);
31049 /* Update the subframe Allocated BW field */
31050 sf->bwAssigned = sf->bwAssigned + rb;
31052 /*Fill the parameters in allocInfo */
31053 allocInfo->bcchAlloc.rnti = RGSCH_SI_RNTI;
31054 allocInfo->bcchAlloc.dlSf = sf;
31055 allocInfo->bcchAlloc.rbsReq = rb;
31056 /*ccpu00116710- MCS is not getting assigned */
31057 allocInfo->bcchAlloc.tbInfo[0].imcs = mcs;
31059 /* ccpu00117510 - ADD - Assignment of nPrb and other information */
31060 allocInfo->bcchAlloc.nPrb = nPrb;
31061 allocInfo->bcchAlloc.tbInfo[0].bytesReq = msgLen;
/* Broadcast always uses a single layer */
31062 allocInfo->bcchAlloc.tbInfo[0].noLyr = 1;
31065 #endif /*RGR_SI_SCH*/
31068 /* ccpu00117452 - MOD - Changed macro name from
31069 RGR_RRM_DLPWR_CNTRL to RGR_CQI_REPT */
31070 #ifdef RGR_CQI_REPT
31072 * @brief This function Updates the DL CQI for the UE.
31076 * Function: rgSCHCmnUeDlPwrCtColltCqiRept
31077 * Purpose: Manages PUSH N CQI reporting
31078 * Step 1: Store the CQI in collation array
31079 * Step 2: Increment the tracking count
31080 * Step 3: Check is it time to to send the report
31081 * Step 4: if yes, Send StaInd to RRM
31082 * Step 4.1: Fill StaInd for sending collated N CQI reports
31083 * Step 4.2: Call utility function (rgSCHUtlRgrStaInd) to send rpts to RRM
31084 * Step 4.2.1: If sending was not successful, return RFAILED
31085 * Step 4.2.2: If sending was successful, return ROK
31086 * Step 5: If no, return
31087 * Invoked by: rgSCHCmnDlCqiInd
31089 * @param[in] RgSchCellCb *cell
31090 * @param[in] RgSchUeCb *ue
31091 * @param[in] RgrUeCqiRept *ueCqiRpt
/* Collates periodic DL CQI reports for a UE and, once the configured count
 * (numColltdCqiRept) is reached, allocates a RgrStaIndInfo and forwards the
 * collected reports to RRM via rgSCHUtlFillSndStaInd.
 * Returns S16: ROK on success, RFAILED on allocation failure (per the
 * visible error path). */
31096 PRIVATE S16 rgSCHCmnUeDlPwrCtColltCqiRept
31100 RgrUeCqiRept *ueCqiRpt
/* K&R-style definition variant of the same function */
31103 PRIVATE S16 rgSCHCmnUeDlPwrCtColltCqiRept(cell, ue, ueCqiRpt)
31106 RgrUeCqiRept *ueCqiRpt;
31109 U8 *cqiCount = NULLP;
31111 RgrStaIndInfo *staInfo = NULLP;
31114 /* Step 1: Store the CQI in collation array */
31115 /* Step 2: Increment the tracking count */
31116 cqiCount = &(ue->schCqiInfo.cqiCount);
31117 ue->schCqiInfo.cqiRept[(*cqiCount)++] =
31121 /* Step 3: Check if it is time to send the report */
31122 if(RG_SCH_CQIR_IS_TIMTOSEND_CQIREPT(ue))
31124 /* Step 4: if yes, Send StaInd to RRM */
31125 retVal = rgSCHUtlAllocSBuf (cell->instIdx,(Data**)&staInfo,
31126 sizeof(RgrStaIndInfo));
31129 RLOG_ARG1(L_ERROR,DBG_CELLID,cell->cellId, "Could not "
31130 "allocate memory for sending StaInd CRNTI:%d",ue->ueId);
31134 /* Step 4.1: Fill StaInd for sending collated N CQI reports */
/* Debug counter for reports delivered to the application */
31137 extern U32 gCqiReptToAppCount;
31138 gCqiReptToAppCount++;
31143 retVal = rgSCHUtlFillSndStaInd(cell, ue, staInfo,
31144 ue->cqiReptCfgInfo.numColltdCqiRept);
31150 } /* End of rgSCHCmnUeDlPwrCtColltCqiRept */
31152 #endif /* End of RGR_CQI_REPT */
31155 * @brief This function checks for the retransmission
31156 * for a DTX scenario.
31163 * @param[in] RgSchCellCb *cell
31164 * @param[in] RgSchUeCb *ue
/* Decides whether a HARQ retransmission is allowed when the feedback on
 * TB0 was DTX: in that case retransmission is disallowed via *reTxAllwd.
 * Out-parameter reTxAllwd is only written on the DTX path shown here. */
31170 Void rgSCHCmnChkRetxAllowDtx
31174 RgSchDlHqProcCb *proc,
/* K&R-style definition variant of the same function */
31178 Void rgSCHCmnChkRetxAllowDtx(cell, ueCb, proc, reTxAllwd)
31181 RgSchDlHqProcCb *proc;
/* TB0 feedback was DTX (UE likely missed the PDCCH) -> no retx now */
31189 if ((proc->tbInfo[0].isAckNackDtx == TFU_HQFDB_DTX))
31191 *reTxAllwd = FALSE;
31198 * @brief API for calculating the SI Set Id
31202 * Function: rgSCHCmnGetSiSetId
31204 * This API is used for calculating the SI Set Id, as shown below
31206 * siSetId = 0 siSetId = 1
31207 * |******************|******************|---------------->
31208 * (0,0) (8,0) (16,0) (SFN, SF)
31211 * @param[in] U16 sfn
31213 * @return U16 siSetId
/* Computes the SI set id for an absolute (sfn, sf) position: the absolute
 * subframe index divided by the SI set length (minPeriodicity frames *
 * 10 subframes... here scaled by RGSCH_NUM_SUB_FRAMES_5G per frame).
 * @param sfn             system frame number
 * @param sf              subframe/slot within the frame
 * @param minPeriodicity  minimum SI periodicity (frames)
 * @return U16 zero-based SI set id */
31216 U16 rgSCHCmnGetSiSetId
/* K&R-style definition variant of the same function */
31223 U16 rgSCHCmnGetSiSetId(sfn, sf, minPeriodicity)
31226 U16 minPeriodicity;
31229 /* 80 is the minimum SI periodicity in sf. Also
31230 * all other SI periodicities are multiples of 80 */
31231 return (((sfn * RGSCH_NUM_SUB_FRAMES_5G) + sf) / (minPeriodicity * 10));
31235 * @brief API for calculating the DwPts Rb, Itbs and tbSz
31239 * Function: rgSCHCmnCalcDwPtsTbSz
31241 * @param[in] RgSchCellCb *cell
31242 * @param[in] U32 bo
31243 * @param[in/out] U8 *rb
31244 * @param[in/out] U8 *iTbs
31245 * @param[in] U8 lyr
31246 * @param[in] U8 cfi
/* Computes the transport block size for a DwPTS (TDD special subframe)
 * allocation, single codeword. Converts the normal-subframe RB count into
 * an equivalent DwPTS RB count (fewer REs per RB), applies the static iTbs
 * delta adjustment, then grows the RB count until the TBS (looked up at
 * 3/4 of the DwPTS RBs, per 36.213 DwPTS TBS scaling) covers the buffer
 * occupancy 'bo' in bits, bounded by the per-UE max DL bandwidth.
 * In/out: *rb (RB count), *iTbs (adjusted); returns tbSz in bits
 * (presumably — caller-side units not visible here; TODO confirm). */
31250 PRIVATE U32 rgSCHCmnCalcDwPtsTbSz
/* K&R-style definition variant of the same function */
31260 PRIVATE U32 rgSCHCmnCalcDwPtsTbSz(cell, bo, rb, iTbs, lyr, cfi)
31270 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Re-express the requested RBs as DwPTS RBs via the RE ratio for this CFI */
31271 U32 numRE = *rb * cellDl->noResPerRb[cfi];
31272 U32 numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
31275 /* DwPts Rb cannot exceed the cell Bw */
31276 numDwPtsRb = RGSCH_MIN(numDwPtsRb, cellDl->maxDlBwPerUe);
31278 /* Adjust the iTbs for optimum usage of the DwPts region.
31279 * Using the same iTbs adjustment will not work for all
31280 * special subframe configurations and iTbs levels. Hence use the
31281 * static iTbs Delta table for adjusting the iTbs */
31282 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs);
/* Grow RBs until the 3/4-scaled TBS covers bo*8 bits or BW is exhausted */
31286 while(rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1] < bo*8 &&
31287 numDwPtsRb < cellDl->maxDlBwPerUe)
31292 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
31296 tbSz = rgTbSzTbl[lyr-1][*iTbs][RGSCH_MAX(numDwPtsRb*3/4,1)-1];
31304 * @brief API for calculating the DwPts Rb, Itbs and tbSz
31308 * Function: rgSCHCmnCalcDwPtsTbSz2Cw
31310 * @param[in] RgSchCellCb *cell
31311 * @param[in] U32 bo
31312 * @param[in/out] U8 *rb
31313 * @param[in] U8 maxRb
31314 * @param[in/out] U8 *iTbs1
31315 * @param[in/out] U8 *iTbs2
31316 * @param[in] U8 lyr1
31317 * @param[in] U8 lyr2
31318 * @return[in/out] U32 *tb1Sz
31319 * @return[in/out] U32 *tb2Sz
31320 * @param[in] U8 cfi
/* Two-codeword variant of rgSCHCmnCalcDwPtsTbSz: computes DwPTS transport
 * block sizes for both codewords. Same approach — convert RBs to DwPTS
 * RBs, adjust both iTbs values via the static delta table, grow the RB
 * count until the combined (3/4-scaled) TBS of both codewords covers the
 * buffer occupancy, capped at maxRb. Results are written to *tb1Sz/*tb2Sz
 * in bytes (table value divided by 8). */
31323 PRIVATE Void rgSCHCmnCalcDwPtsTbSz2Cw
/* K&R-style definition variant of the same function */
31338 PRIVATE Void rgSCHCmnCalcDwPtsTbSz2Cw(cell, bo, rb, maxRb, iTbs1, iTbs2,
31339 lyr1, lyr2, tb1Sz, tb2Sz, cfi)
31353 RgSchCmnDlCell *cellDl = RG_SCH_CMN_GET_DL_CELL(cell);
/* Re-express the requested RBs as DwPTS RBs via the RE ratio for this CFI */
31354 U32 numRE = *rb * cellDl->noResPerRb[cfi];
31355 U32 numDwPtsRb = RGSCH_CEIL(numRE, cellDl->numReDwPts[cfi]);
31358 /* DwPts Rb cannot exceed the cell Bw */
31359 numDwPtsRb = RGSCH_MIN(numDwPtsRb, maxRb);
31361 /* Adjust the iTbs for optimum usage of the DwPts region.
31362 * Using the same iTbs adjustment will not work for all
31363 * special subframe configurations and iTbs levels. Hence use the
31364 * static iTbs Delta table for adjusting the iTbs */
31365 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs1);
31366 RG_SCH_CMN_ADJ_DWPTS_ITBS(cellDl, *iTbs2);
/* Grow RBs until the summed TBS of both codewords covers bo*8 bits */
31368 while((rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1] +
31369 rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1])< bo*8 &&
31370 numDwPtsRb < maxRb)
31375 *tb1Sz = rgTbSzTbl[lyr1-1][*iTbs1][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
31376 *tb2Sz = rgTbSzTbl[lyr2-1][*iTbs2][RGSCH_MAX(numDwPtsRb*3/4,1)-1]/8;
31386 * @brief Updates the GBR LCGs when datInd is received from MAC
31390 * Function: rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
31391 * Purpose: This function updates the GBR LCGs
31392 * when datInd is received from MAC.
31396 * @param[in] RgSchCellCb *cell
31397 * @param[in] RgSchUeCb *ue
31398 * @param[in] RgInfUeDatInd *datInd
/* Updates per-LCG (logical channel group) UL accounting for a UE when a
 * data indication arrives from MAC. For each LCG with received bytes:
 *  - GBR LCG: drain effGbr first, spill the remainder into effDeltaMbr,
 *    then reduce the reported buffer status accordingly;
 *  - non-GBR LCG (lcgId != 0): drain the UE-level effAmbr and the
 *    non-GBR buffer-status aggregates.
 * Finally invokes the (EMTC or regular) UL scheduler's LCG-update hook. */
31402 Void rgSCHCmnUpdUeDataIndLcg
31406 RgInfUeDatInd *datInd
/* K&R-style definition variant of the same function */
31409 Void rgSCHCmnUpdUeDataIndLcg(cell, ue, datInd)
31412 RgInfUeDatInd *datInd;
31416 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31418 Inst inst = cell->instIdx;
/* NOTE(review): loop bound is RGINF_MAX_LCG_PER_UE - 1; presumably the
 * last entry is reserved/unused — confirm against RgInfUeDatInd layout. */
31422 for (idx = 0; (idx < RGINF_MAX_LCG_PER_UE - 1); idx++)
31424 if (datInd->lcgInfo[idx].bytesRcvd != 0)
31426 U8 lcgId = datInd->lcgInfo[idx].lcgId;
31427 U32 bytesRcvd = datInd->lcgInfo[idx].bytesRcvd;
31429 if (RGSCH_LCG_ISCFGD(&ue->ul.lcgArr[lcgId]))
31431 RgSchCmnLcg *cmnLcg = ((RgSchCmnLcg *)(ue->ul.lcgArr[lcgId].sch));
31432 if (RGSCH_IS_GBR_BEARER(cmnLcg->cfgdGbr))
/* Received more than remaining GBR: zero effGbr and charge the
 * excess against the delta-MBR allowance (floored at 0). */
31434 if(bytesRcvd > cmnLcg->effGbr)
31436 bytesRcvd -= cmnLcg->effGbr;
31437 cmnLcg->effDeltaMbr = (cmnLcg->effDeltaMbr > bytesRcvd) ? \
31438 (cmnLcg->effDeltaMbr - bytesRcvd) : (0);
31439 cmnLcg->effGbr = 0;
31443 cmnLcg->effGbr -= bytesRcvd;
31445 /* To keep BS updated with the amount of data received for the GBR */
31446 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31447 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31448 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, cmnLcg->effGbr+cmnLcg->effDeltaMbr);
/* Non-GBR LCG (excluding LCG0): drain AMBR-based allowances */
31450 else if(lcgId != 0)
31452 ue->ul.effAmbr = (ue->ul.effAmbr > datInd->lcgInfo[idx].bytesRcvd) ? \
31453 (ue->ul.effAmbr - datInd->lcgInfo[idx].bytesRcvd) : (0);
31454 cmnLcg->reportedBs = (cmnLcg->reportedBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31455 (cmnLcg->reportedBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31456 cmnLcg->bs = RGSCH_MIN(cmnLcg->reportedBs, ue->ul.effAmbr);
31457 ue->ul.nonGbrLcgBs = (ue->ul.nonGbrLcgBs > datInd->lcgInfo[idx].bytesRcvd) ? \
31458 (ue->ul.nonGbrLcgBs - datInd->lcgInfo[idx].bytesRcvd) : (0);
31460 ue->ul.nonLcg0Bs = (ue->ul.nonLcg0Bs > datInd->lcgInfo[idx].bytesRcvd) ? \
31461 (ue->ul.nonLcg0Bs - datInd->lcgInfo[idx].bytesRcvd) : (0);
/* Notify the appropriate UL scheduler (EMTC vs regular) of the update */
31470 if(TRUE == ue->isEmtcUe)
31472 if (cellSch->apisEmtcUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
31474 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
31481 if (cellSch->apisUl->rgSCHRgrUlLcgUpd(cell, ue, datInd) != ROK)
31483 RGSCHDBGERRNEW(inst, (rgSchPBuf(inst), "\n rgSCHCmnUpdUeDataIndLcg(): rgSCHRgrUlLcgUpd returned failure"));
31489 /** @brief This function initializes DL allocation lists and prepares
31494 * Function: rgSCHCmnInitRbAlloc
31496 * @param [in] RgSchCellCb *cell
/* Prepares per-TTI DL RB-allocation state: resets the common allocInfo,
 * resolves the DL subframe for the scheduling time, re-initializes the
 * 5GTF beam bookkeeping and per-SF UE budget, records the subframe in the
 * dedicated/msg4/ccchSdu allocation contexts, evaluates the ABS pattern
 * for this subframe, and folds in SPS allocations. */
31502 PRIVATE Void rgSCHCmnInitRbAlloc
/* K&R-style definition variant of the same function */
31507 PRIVATE Void rgSCHCmnInitRbAlloc (cell)
31511 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31512 CmLteTimingInfo frm;
31517 /* Initializing RgSchCmnUlRbAllocInfo structure.*/
31518 rgSCHCmnInitDlRbAllocInfo(&cellSch->allocInfo);
31520 frm = cellSch->dl.time;
31522 dlSf = rgSCHUtlSubFrmGet(cell, frm);
/* 5GTF: per-TTI UE-group limits and per-beam VRBG reset */
31524 dlSf->numGrpPerTti = cell->cell5gtfCb.ueGrpPerTti;
31525 dlSf->numUePerGrp = cell->cell5gtfCb.uePerGrpPerTti;
31526 for(idx = 0; idx < MAX_5GTF_BEAMS; idx++)
31528 dlSf->sfBeamInfo[idx].totVrbgAllocated = 0;
31529 dlSf->sfBeamInfo[idx].totVrbgRequired = 0;
31530 dlSf->sfBeamInfo[idx].vrbgStart = 0;
31533 dlSf->remUeCnt = cellSch->dl.maxUePerDlSf;
31534 /* Updating the Subframe information in RBAllocInfo */
31535 cellSch->allocInfo.dedAlloc.dedDlSf = dlSf;
31536 cellSch->allocInfo.msg4Alloc.msg4DlSf = dlSf;
31538 /* LTE_ADV_FLAG_REMOVED_START */
31539 /* Determine next scheduling subframe is ABS or not */
31540 if(RGR_ENABLE == cell->lteAdvCb.absCfg.status)
31542 cell->lteAdvCb.absPatternDlIdx =
31543 ((frm.sfn*RGSCH_NUM_SUB_FRAMES_5G) + frm.slot) % RGR_ABS_PATTERN_LEN;
31544 cell->lteAdvCb.absDlSfInfo = (RgSchAbsSfEnum)(cell->lteAdvCb.absCfg.absPattern[
31545 cell->lteAdvCb.absPatternDlIdx]);
31550 cell->lteAdvCb.absDlSfInfo = RG_SCH_ABS_DISABLED;
31552 /* LTE_ADV_FLAG_REMOVED_END */
31555 cellSch->allocInfo.ccchSduAlloc.ccchSduDlSf = dlSf;
31558 /* Update subframe-wide allocation information with SPS allocation */
31559 rgSCHCmnSpsDlUpdDlSfAllocWithSps(cell, frm, dlSf);
31568 * @brief Checks and updates the TM mode change threshold based on cqiiTbs and
31573 * Function: rgSCHCmnSendTxModeInd(cell, ueUl, newTxMode)
31574 * Purpose: This function sends the TX mode Change
31575 * indication to RRM
31580 * @param[in] RgSchCellCb *cell
31581 * @param[in] RgSchUeCb *ue
31582 * @param[in] U8 newTxMode
/* Sends a transmission-mode change indication (RgmTransModeInd) to RRM for
 * the given UE, unless a TX-mode reconfiguration is already pending
 * (forceTD flag). On success the up/down change counters and the link
 * adaptation deltaiTbs are reset. Allocation failure silently aborts the
 * indication (visible early-exit on SGetSBuf != ROK). */
31586 PRIVATE Void rgSCHCmnSendTxModeInd
/* K&R-style definition variant of the same function */
31593 PRIVATE Void rgSCHCmnSendTxModeInd(cell, ue, newTxMode)
31599 RgmTransModeInd *txModeChgInd;
31600 RgSchCmnDlUe *ueDl = RG_SCH_CMN_GET_DL_UE(ue,cell);
/* Skip if a TX-mode reconfig is not already flagged for this UE */
31603 if(!(ueDl->mimoInfo.forceTD & RG_SCH_CMN_TD_TXMODE_RECFG))
/* Allocate the indication from the RGM SAP's pst region/pool */
31606 if(SGetSBuf(cell->rgmSap->sapCfg.sapPst.region,
31607 cell->rgmSap->sapCfg.sapPst.pool, (Data**)&txModeChgInd,
31608 sizeof(RgmTransModeInd)) != ROK)
31612 RG_SCH_FILL_RGM_TRANSMODE_IND(ue->ueId, cell->cellId, newTxMode, txModeChgInd);
31613 RgUiRgmChangeTransModeInd(&(cell->rgmSap->sapCfg.sapPst),
31614 cell->rgmSap->sapCfg.suId, txModeChgInd);
/* Reset hysteresis counters and LA delta after issuing the indication */
31617 ue->mimoInfo.txModUpChgFactor = 0;
31618 ue->mimoInfo.txModDownChgFactor = 0;
31619 ueDl->laCb[0].deltaiTbs = 0;
31625 * @brief Checks and updates the TM mode change threshold based on cqiiTbs and
31630 * Function: rgSchCheckAndTriggerModeChange(cell, ueUl, iTbsNew)
31631 * Purpose: This function updates and checks the threshold for TM mode
31636 * @param[in] RgSchCellCb *cell
31637 * @param[in] RgSchUeCb *ue
31638 * @param[in] U8 iTbs
/* Hysteresis-based TM3 <-> TM4 transmission-mode switching driven by the
 * reported vs. previous iTbs. TM4 steps DOWN to TM3 when the reported
 * iTbs repeatedly exceeds the previous one by the step-down factor; TM3
 * steps UP to TM4 when the reported iTbs degrades (or previTbs is already
 * at max). Counters are clamped so a single good/bad report cannot swing
 * the decision; crossing a threshold triggers rgSCHCmnSendTxModeInd. */
31642 Void rgSchCheckAndTriggerModeChange
/* K&R-style definition variant of the same function */
31651 Void rgSchCheckAndTriggerModeChange(cell, ue, reportediTbs, previTbs, maxiTbs)
31659 RgrTxMode txMode; /*!< UE's Transmission Mode */
31660 RgrTxMode modTxMode; /*!< UE's Transmission Mode */
31663 txMode = ue->mimoInfo.txMode;
31665 /* Check for Step down */
31666 /* Step down only when TM4 is configured. */
31667 if(RGR_UE_TM_4 == txMode)
31669 if((previTbs <= reportediTbs) && ((reportediTbs - previTbs) >= RG_SCH_MODE_CHNG_STEPDOWN_CHECK_FACTOR))
31671 ue->mimoInfo.txModDownChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
31675 ue->mimoInfo.txModDownChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the counter so it cannot sink below -threshold */
31678 ue->mimoInfo.txModDownChgFactor =
31679 RGSCH_MAX(ue->mimoInfo.txModDownChgFactor, -(RG_SCH_MODE_CHNG_STEPDOWN_THRSHD));
31681 if(ue->mimoInfo.txModDownChgFactor >= RG_SCH_MODE_CHNG_STEPDOWN_THRSHD)
31683 /* Trigger Mode step down */
31684 modTxMode = RGR_UE_TM_3;
31685 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
31689 /* Check for Step up */
31690 /* Step Up only when TM3 is configured, Max possible Mode is TM4*/
31691 if(RGR_UE_TM_3 == txMode)
31693 if((previTbs > reportediTbs) || (maxiTbs == previTbs))
31695 ue->mimoInfo.txModUpChgFactor += RG_SCH_MODE_CHNG_STEPUP_FACTOR;
31699 ue->mimoInfo.txModUpChgFactor -= RG_SCH_MODE_CHNG_STEPDOWN_FACTOR;
/* Clamp the counter so it cannot sink below -threshold */
31702 ue->mimoInfo.txModUpChgFactor =
31703 RGSCH_MAX(ue->mimoInfo.txModUpChgFactor, -(RG_SCH_MODE_CHNG_STEPUP_THRSHD));
31705 /* Check if TM step up need to be triggered */
31706 if(ue->mimoInfo.txModUpChgFactor >= RG_SCH_MODE_CHNG_STEPUP_THRSHD)
31708 /* Trigger mode chnage */
31709 modTxMode = RGR_UE_TM_4;
31710 rgSCHCmnSendTxModeInd(cell, ue, modTxMode);
31719 * @brief Updates the GBR LCGs when datInd is received from MAC
31723 * Function: rgSCHCmnIsDlCsgPrio (cell)
31724 * Purpose: This function returns if csg UEs are
31725 * having priority at current time
31727 * Invoked by: Scheduler
31729 * @param[in] RgSchCellCb *cell
31730 * @param[in] RgSchUeCb *ue
31731 * @param[in] RgInfUeDatInd *datInd
/* Returns whether CSG (closed subscriber group) UEs currently get DL
 * priority: only meaningful for hybrid-access cells; otherwise decided by
 * whether non-CSG UEs have already consumed their minimum share of DL
 * PRBs (minDlResNonCsg, in percent). */
31735 Bool rgSCHCmnIsDlCsgPrio
/* K&R-style definition variant of the same function */
31740 Bool rgSCHCmnIsDlCsgPrio(cell)
31745 RgSchCmnDlCell *cmnDlCell = RG_SCH_CMN_GET_DL_CELL(cell);
31747 /* Calculating the percentage resource allocated */
/* Non-hybrid access mode: CSG prioritization does not apply */
31748 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* Non-CSG share below its guaranteed minimum -> non-CSG keeps priority */
31754 if(((cmnDlCell->ncsgPrbCnt * 100) / cmnDlCell->totPrbCnt) < cell->minDlResNonCsg)
31766 * @brief Updates the GBR LCGs when datInd is received from MAC
31770 * Function: rgSCHCmnIsUlCsgPrio (cell)
31771 * Purpose: This function returns if csg UEs are
31772 * having priority at current time
31774 * Invoked by: Scheduler
31776 * @param[in] RgSchCellCb *cell
31777 * @param[in] RgSchUeCb *ue
31778 * @param[in] RgInfUeDatInd *datInd
/* UL counterpart of rgSCHCmnIsDlCsgPrio: returns whether CSG UEs currently
 * get UL priority, based on the hybrid access mode and the percentage of
 * UL PRBs consumed by non-CSG UEs versus minUlResNonCsg. */
31782 Bool rgSCHCmnIsUlCsgPrio
/* K&R-style definition variant of the same function */
31787 Bool rgSCHCmnIsUlCsgPrio(cell)
31791 RgSchCmnUlCell *cmnUlCell = RG_SCH_CMN_GET_UL_CELL(cell);
31794 /* Calculating the percentage resource allocated */
/* Non-hybrid access mode: CSG prioritization does not apply */
31795 if(RGR_CELL_ACCS_HYBRID != rgSchCb[cell->instIdx].rgrSchedEnbCfg.accsMode)
/* Non-CSG share below its guaranteed minimum -> non-CSG keeps priority */
31801 if (((cmnUlCell->ncsgPrbCnt * 100) /cmnUlCell->totPrbCnt) < cell->minUlResNonCsg)
31812 /** @brief DL scheduler for SPS, and all other downlink data
31816 * Function: rgSchCmnPreDlSch
31818 * @param [in] Inst schInst;
/* Pre-DL-scheduling pass over a set of cells: runs the specific
 * scheduler's pre-sched hook on the first cell (when DL data is allowed
 * and scheduling is not stopped), then produces cellLst — the cells
 * ordered by ascending remUeCnt of their dedicated DL subframe (insertion
 * sort) so that cells with fewer remaining schedulable UEs are processed
 * first. If nCell exceeds CM_LTE_MAX_CELLS the function bails out early. */
31823 Void rgSchCmnPreDlSch
31825 RgSchCellCb **cell,
31827 RgSchCellCb **cellLst
/* K&R-style definition variant of the same function */
31830 Void rgSchCmnPreDlSch(cell, nCell, cellLst)
31831 RgSchCellCb **cell;
31833 RgSchCellCb **cellLst;
31836 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell[0]);
31841 if(nCell > CM_LTE_MAX_CELLS)
31846 if (cell[0]->isDlDataAllwd && (cell[0]->stopDlSch == FALSE))
31848 /* Specific DL scheduler to perform UE scheduling */
31849 cellSch->apisDl->rgSCHDlPreSched(cell[0]);
31851 /* Rearranging the cell entries based on their remueCnt in SF.
31852 * cells will be processed in the order of number of ue scheduled
31854 for (idx = 0; idx < nCell; idx++)
31857 cellSch = RG_SCH_CMN_GET_CELL(cell[idx]);
31858 sf = cellSch->allocInfo.dedAlloc.dedDlSf;
31862 cellLst[idx] = cell[idx];
/* Insertion sort: find this cell's slot among the already-placed ones */
31866 for(j = 0; j < idx; j++)
31868 RgSchCmnCell *cmnCell = RG_SCH_CMN_GET_CELL(cellLst[j]);
31869 RgSchDlSf *subfrm = cmnCell->allocInfo.dedAlloc.dedDlSf;
31871 if(sf->remUeCnt < subfrm->remUeCnt)
/* Shift entries right to open the insertion slot at j */
31874 for(k = idx; k > j; k--)
31876 cellLst[k] = cellLst[k-1];
31881 cellLst[j] = cell[idx];
/* Fallback path: copy cells in their original order (no sorting) */
31886 for (idx = 0; idx < nCell; idx++)
31888 cellLst[idx] = cell[idx];
31894 /** @brief DL scheduler for SPS, and all other downlink data
31897 * Function: rgSchCmnPstDlSch
31899 * @param [in] Inst schInst;
/* Post-DL-scheduling hook: invokes the specific scheduler's post-sched
 * callback for the cell, guarded by the same DL-data-allowed / not-stopped
 * conditions used in rgSchCmnPreDlSch. */
31904 Void rgSchCmnPstDlSch
/* K&R-style definition variant of the same function */
31909 Void rgSchCmnPstDlSch(cell)
31913 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
31916 if (cell->isDlDataAllwd && (cell->stopDlSch == FALSE))
31918 cellSch->apisDl->rgSCHDlPstSched(cell->instIdx);
/* Computes the periodic CQI (PCQI) report payload size in bits for a
 * UE, as a function of the configured periodic reporting mode, the
 * number of eNB transmit antennas and the last reported RI.
 * NOTE(review): sparse extract — most branch bodies (e.g. the
 * numTxAnt == 2 arms and the returns) are not visible here; only the
 * visible statements are documented. */
31923 U8 rgSCHCmnCalcPcqiBitSz
31929 U8 rgSCHCmnCalcPcqiBitSz(ueCb, numTxAnt)
31937 RgSchUePCqiCb *cqiCb = ueCb->nPCqiCb;
31940 confRepMode = cqiCb->cqiCfg.cqiSetup.prdModeEnum;
/* RI is only relevant for TM3/TM4; for other transmission modes the
 * (not visible) branch presumably forces a fixed RI — TODO confirm. */
31941 if((ueCb->mimoInfo.txMode != RGR_UE_TM_3) &&
31942 (ueCb->mimoInfo.txMode != RGR_UE_TM_4))
31948 ri = cqiCb->perRiVal;
/* Payload size per configured periodic reporting mode (Mode 1-0,
 * 1-1, 2-0, 2-1). */
31950 switch(confRepMode)
31952 case RGR_PRD_CQI_MOD10:
31958 case RGR_PRD_CQI_MOD11:
31971 else if(numTxAnt == 4)
31984 /* This is number of antenna case 1.
31985 * This is not applicable for Mode 1-1.
31986 * So setting it to invalid value */
31992 case RGR_PRD_CQI_MOD20:
/* Subband report: 4 CQI bits plus 'label' bits of subband position. */
32000 pcqiSz = 4 + cqiCb->label;
32005 case RGR_PRD_CQI_MOD21:
32020 else if(numTxAnt == 4)
32033 /* This might be number of antenna case 1.
32034 * For mode 2-1 wideband case only antenna port 2 or 4 is supported.
32035 * So setting invalid value.*/
32043 pcqiSz = 4 + cqiCb->label;
32047 pcqiSz = 7 + cqiCb->label;
32060 /** @brief DL scheduler for SPS, and all other downlink data
32064 * Function: rgSCHCmnDlSch
32066 * @param [in] RgSchCellCb *cell
/* Top-level common DL scheduler entry for one cell per TTI: optionally
 * skips the TTI under dynamic TDD, then runs cell-specific UE
 * scheduling, common RB allocation, allocation finalisation, PDCCH
 * ordering and PUCCH group power control.
 * NOTE(review): sparse extract — braces, returns and some statements
 * are not visible here. */
32077 Void rgSCHCmnDlSch (cell)
32082 RgSchCmnCell *cellSch = RG_SCH_CMN_GET_CELL(cell);
32084 RgSchDynTddCb *rgSchDynTddInfo = &(rgSchCb[cell->instIdx].rgSchDynTdd);
32089 dlSf = rgSCHUtlSubFrmGet(cell, cellSch->dl.time);
/* Dynamic TDD: if the DL-control subframe (current index advanced by
 * RG_SCH_CMN_DL_DELTA) is already marked for UL data, count and log
 * the event for cell 1 — presumably the TTI is then abandoned; confirm
 * against the full source. */
32091 if (rgSchDynTddInfo->isDynTddEnbld)
32093 RG_SCH_DYN_TDD_GET_SFIDX(dlCntrlSfIdx, rgSchDynTddInfo->crntDTddSfIdx,
32094 RG_SCH_CMN_DL_DELTA);
32095 if(RG_SCH_DYNTDD_DLC_ULD == rgSchDynTddInfo->sfInfo[dlCntrlSfIdx].sfType)
32097 if(1 == cell->cellId)
32099 ul5gtfsidDlAlreadyMarkUl++;
/* NOTE(review): raw printf in the scheduler fast path — debug aid;
 * consider the RLOG facility used elsewhere in this module. */
32101 printf("ul5gtfsidDlAlreadyMarkUl: %d, [sfn:sf] [%04d:%02d]\n",
32102 ul5gtfsidDlAlreadyMarkUl, cellSch->dl.time.sfn,
32103 cellSch->dl.time.slot);
32111 /* Specific DL scheduler to perform UE scheduling */
32112 cellSch->apisDl->rgSCHDlNewSched(cell, &cellSch->allocInfo);
32113 /* LTE_ADV_FLAG_REMOVED_END */
32115 /* call common allocator for RB Allocation */
32116 rgSCHCmnDlRbAlloc(cell, &cellSch->allocInfo);
32118 /* Finalize the Allocations for reqested Against alloced */
32119 rgSCHCmnDlAllocFnlz(cell);
32121 /* Perform Pdcch allocations for PDCCH Order Q.
32122 * As of now, giving this the least preference.
32123 * This func call could be moved above other allocations
32125 rgSCHCmnGenPdcchOrder(cell, dlSf);
32127 /* Do group power control for PUCCH */
32128 rgSCHCmnGrpPwrCntrlPucch(cell, dlSf);
32133 /**********************************************************************
32136 **********************************************************************/