1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /********************************************************************20**
21 Name: Multi-threaded System Services - Solaris
25 Desc: C source code for the MTSS-Solaris implementation of
30 *********************************************************************21*/
35 #ifndef _POSIX_C_SOURCE
36 #define _POSIX_C_SOURCE 199309L
38 /* mt003.301 moved env files to use the __USE_UNIX98 flag in sys includes */
45 #include <sys/types.h>
50 /* mt003.301: included sys/time.h
51 * for both solaris and linux
54 /* mt008.21: addition */
59 /* header include files (.h) */
62 #include "common_def.h"
63 #include "mt_ss.h" /* MTSS specific */
64 #include "mt_err.h" /* MTSS error defines */
66 #include "ss_queue.h" /* queues */
67 #include "ss_task.h" /* tasking */
68 #include "ss_msg.h" /* messaging */
69 #include "ss_mem.h" /* memory management interface */
70 #include "ss_gen.h" /* general */
71 /* mt003.301 Additions - Task deregistration */
72 #include "ss_err.h" /* error */
73 #include "cm_mem.h" /* common memory manager */
74 /* mt001.301 : Additions */
75 #ifdef SS_THREAD_PROFILE
78 #ifdef SS_LOCKLESS_MEMORY
83 /* multi-core support enhancement */
84 /* mt013.301: Added SS_AFFINITY_SUPPORT */
85 #if defined(SS_MULTICORE_SUPPORT) || defined(SS_AFFINITY_SUPPORT)
91 #include <sys/types.h>
92 #include <sys/processor.h>
93 #include <sys/procset.h>
96 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
97 /* mt001.301 : Additions */
99 #include <sys/types.h>
100 #include <sys/socket.h>
101 #include <netinet/in.h>
102 #include <arpa/inet.h>
103 #endif /* SS_WATCHDOG */
105 #ifdef SS_USE_WLS_MEM
106 #include <rte_common.h>
107 #include <rte_debug.h>
111 /* header/extern include files (.x) */
113 #include "gen.x" /* general layer */
114 #include "ssi.x" /* system services */
116 #include "cm5.x" /* common timers */
118 #include "mt_ss.x" /* MTSS specific */
119 #ifdef SS_LOCKLESS_MEMORY
120 #include "mt_ss_wl.x" /* MTSS specific */
121 #endif /* SS_LOCKLESS_MEMORY */
123 #include "ss_queue.x" /* queues */
124 #include "ss_task.x" /* tasking */
125 #include "ss_timer.x" /* timers */
126 #include "ss_strm.x" /* STREAMS */
127 #include "ss_msg.x" /* messaging */
128 #include "ss_mem.x" /* memory management interface */
129 #include "ss_drvr.x" /* driver tasks */
130 #include "ss_gen.x" /* general */
131 #ifdef SS_LOCKLESS_MEMORY
132 #include "cm_llist.x"
134 #include "cm_mem_wl.x" /* common memory manager */
136 #include "cm_mem.x" /* common memory manager */
137 #endif /* SS_LOCKLESS_MEMORY */
138 #include "cm_lte.x" /* common memory manager */
139 /* mt001.301 : Additions */
140 #ifdef SS_LOGGER_SUPPORT
142 #endif /* SS_LOGGER_SUPPORT */
144 /*mt005.301: Cavium Changes */
145 #ifdef SS_SEUM_CAVIUM
146 /* cvmx includes files */
147 #include "cvmx-config.h"
149 #include "cvmx-pow.h"
150 #include "cvmx-tim.h"
151 #include "cvmx-fpa.h"
152 #include "cvmx-helper-fpa.h"
153 #include "cvmx-malloc.h"
154 #endif /* SS_SEUM_CAVIUM */
157 #include "mt_plat_t33.h"
158 #include "mt_plat_t33.x"
159 #include "sys/syscall.h"
162 #if defined(RGL_SPECIFIC_CHANGES) || defined(INTEL_WLS) || defined(SS_USE_WLS_MEM)
164 #include <hugetlbfs.h>
167 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
168 S16 rgBatchProc (Void);
170 #ifdef RLC_MAC_DAT_REQ_RBUF
171 S16 rgDlDatReqBatchProc ARGS((
174 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
175 S16 rgBatchProc ARGS((
179 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
180 /* general purpose debug zone */
181 char my_buffer2[4096 * 4] = { 0 };
182 char my_buffer[4096] = { 0 };
183 int my_buffer_idx = 0;
187 #define sigsegv_print(x, ...) my_buffer_idx += sprintf(&my_buffer[my_buffer_idx], x "\n", ##__VA_ARGS__)
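/*
 * Illustrative use only (not from the original file): each call appends one
 * formatted line to my_buffer and advances my_buffer_idx, e.g.
 *
 *   sigsegv_print("fault address = %p", info->si_addr);
 *   sigsegv_print("signal        = %d", signum);
 *
 * so the accumulated text can be flushed with a single write later.
 */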
189 struct sigcontext my_uc_mcontext = { 0 };
194 #include <ucontext.h>
198 #define SIGSEGV_STACK_GENERIC
199 #define REGFORMAT "%x\n"
201 #ifdef XEON_SPECIFIC_CHANGES
202 Void cmPrcTmr ARGS((CmTqCp* tqCp, CmTqType* tq, PFV func));
205 void dump_external(void);
207 static Void mtDelSigals(Void)
211 memset(&sa, 0, sizeof(struct sigaction));
212 sigemptyset(&sa.sa_mask);
213 sa.sa_handler = SIG_DFL;
214 sigaction(SIGSEGV, &sa, NULL);
216 memset(&sa, 0, sizeof(struct sigaction));
217 sigemptyset(&sa.sa_mask);
218 sa.sa_handler = SIG_DFL;
219 sigaction(SIGILL, &sa, NULL);
223 static void signal_segv(int signum, siginfo_t * info, void *ptr)
225 static const char *si_codes[3] = { "", "SEGV_MAPERR", "SEGV_ACCERR" };
228 ucontext_t *ucontext = (ucontext_t *) ptr;
229 #ifdef XEON_SPECIFIC_CHANGES
231 int *p32 = (int *) 0x2fff0000;
236 printf("\nsegv ooops @ %p\n", info->si_addr);
239 printf("\nSegmentation Fault!\n");
240 printf("\ninfo.si_signo = %d\n", signum);
241 printf("\ninfo.si_errno = %d\n", info->si_errno);
242 printf("\ninfo.si_code = %d (%s)\n", info->si_code, si_codes[info->si_code]);
243 printf("\ninfo.si_addr = %p\n", info->si_addr);
245 memcpy(&my_uc_mcontext, &ucontext->uc_mcontext, sizeof(struct sigcontext));
248 #ifndef RGL_SPECIFIC_CHANGES
249 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r0);
250 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r1);
251 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r2);
252 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r3);
253 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r4);
254 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r5);
255 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r6);
256 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r7);
257 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r8);
258 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r9);
259 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r10);
260 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_fp);
261 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_ip);
262 printf("\nreg[sp] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_sp);
263 printf("\nreg[lr] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_lr);
264 printf("\nreg[pc] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_pc);
265 printf("\nreg[cpsr] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_cpsr);
268 printf("\nStack trace (non-dedicated):\n");
270 sz = backtrace(buffer, 50);
271 strings = backtrace_symbols(buffer, sz);
272 for (i = 0; i < sz; ++i)
273 printf("%s\n", strings[i]);
275 printf("\nEnd of stack trace\n");
277 #ifdef XEON_SPECIFIC_CHANGES
282 /* Let's first print our debug information */
283 printf("\nBefore dumping our Debug info\n");
285 printf("\nAfter dumping our Debug info\n");
287 /* Disable the signal and let the eNodeB dump. This will make the
288 * eNB generate the core dump along with the ccpu log
295 /* End printing debug information */
300 /*** TBD: IMPORTANT ***
301 *** The following definition is temporary. This must be removed
302 *** when all products have been updated with latest ssi.h file OR
303 *** all ssi.h files have been updated to contain this definition
305 /* New error class for FTHA added */
307 #define ERRCLS_FTHA 0x8
308 #endif /* ERRCLS_FTHA */
310 typedef struct _SPThreadCreateArg
312 void *argument; /* argument that is to be passed to the actual pthread */
313 void *(*start_routine) (void *); /* function from which pthread starts */
316 void *pthreadCreateHdlr(void* arg);
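/*
 * Minimal usage sketch (an assumption, not part of the original file): the
 * wrapper struct above is assumed to be typedef'ed as SPThreadCreateArg, and
 * pthreadCreateHdlr is assumed to unpack it and call start_routine(argument)
 * inside the newly created thread. A caller would then spawn a wrapped
 * thread roughly as follows.
 */
#if 0
static int spawnWrappedThread(pthread_t *tid, void *(*fn)(void *), void *fnArg)
{
   SPThreadCreateArg *wrapArg = (SPThreadCreateArg *)malloc(sizeof(SPThreadCreateArg));

   if (wrapArg == NULL)
   {
      return -1;
   }
   wrapArg->argument      = fnArg; /* forwarded to fn by pthreadCreateHdlr */
   wrapArg->start_routine = fn;    /* real thread entry point */

   /* the new thread runs pthreadCreateHdlr, which calls fn(fnArg) */
   return pthread_create(tid, NULL, pthreadCreateHdlr, (void *)wrapArg);
}
#endif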
318 #ifdef SS_LOCKLESS_MEMORY
319 Buffer *mtTskBuffer1;
320 Buffer *mtTskBuffer2;
322 pthread_t tmpRegTidMap[20];
324 S16 SGlobMemInfoShow(void);
325 #endif /* SS_LOCKLESS_MEMORY */
328 APP_CONTEXT AppContext;
332 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
333 unsigned int tlPost(void *handle);
336 /* forward references */
337 /* mt003.301 Modifications - Moved to ss_gen.x */
338 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
339 Void *mtTskHdlrT2kL2 ARGS((Void*));
340 void mtSigSegvHndlr ARGS((void));
341 void mtSigUsr2Hndlr ARGS((void));
344 static S16 ssdSetPthreadAttr ARGS ((S32 tskPrior, pthread_attr_t *attr));
345 static Void *mtTskHdlr ARGS((void *));
346 static S16 mtTskHdlMsg ARGS((SsSTskEntry *sTsk));
348 static Void *mtTmrHdlr ARGS((void *));
349 static Void mtTimeout ARGS((PTR tCb, S16 evnt));
351 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
352 static Void mtIntSigHndlr ARGS((int));
353 static Void mtExitClnup ARGS((void));
356 static Void *mtConHdlr ARGS((void *));
360 #ifdef SS_DRVR_SUPPORT
361 static Void *mtIsTskHdlr ARGS((void *));
365 /* mt020.201 - Addition for no command line available */
367 static Void mtGetOpts ARGS((void));
368 /* mt003.301 Additions - File Based task registration made
369 * common for both MULTICORE and NON-MULTICORE
371 static Bool fileBasedMemCfg = FALSE;
374 /* mt033.201 - addition of local function to print the statistics such as
375 * (size vs. numAttempts) and (allocations vs. deallocations)
377 #ifdef SSI_DEBUG_LEVEL1
378 static S16 SPrintRegMemStats ARGS((Region region));
379 #endif /* SSI_DEBUG_LEVEL1 */
381 #ifdef SS_MULTICORE_SUPPORT
382 static SsSTskEntry* ssdAddTmrSTsk(Void);
383 static SsSTskEntry* ssdReAddTmrSTsk ARGS((uint8_t idx));
384 #ifndef SS_LOCKLESS_MEMORY
385 #ifndef RGL_SPECIFIC_CHANGES
386 static S16 ssdInitMemInfo ARGS((void));
391 /* mt005.301: Cavium changes */
392 #ifdef SS_SEUM_CAVIUM
393 static Void *workRcvTsk ARGS((void *));
394 #endif /* SS_SEUM_CAVIUM */
396 #ifdef SS_THR_REG_MAP
397 S32 ssCheckAndAddMemoryRegionMap ARGS((pthread_t threadId,
399 S32 ssCheckAndDelMemoryRegionMap ARGS((pthread_t threadId));
400 #endif /* SS_THR_REG_MAP */
402 /* type declarations */
404 #ifdef SS_DRVR_SUPPORT
405 typedef struct mtIsFlag
415 /* public variable declarations */
417 Cntr cfgNumRegs = SS_MAX_REGS;
418 /* Set memory configuration as false.
419 * Set to true if memory configuration through file is successful.
421 Bool memConfigured = FALSE;
422 /* mt022.201 - Modification for shared memory relay region and memcal tool */
423 SsRegCfg cfgRegInfo[SS_MAX_REGS] =
426 SS_DFLT_REGION, SS_MAX_POOLS_PER_REG - 1,
428 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
429 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
430 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
431 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
432 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
433 { SS_POOL_STATIC, 0 }
439 SS_DFLT_REGION + 1, SS_MAX_POOLS_PER_REG - 1,
441 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
442 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
443 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
444 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
445 { SS_POOL_STATIC, 0 }
448 #endif /* INTEL_WLS */
450 #ifdef SS_LOCKLESS_MEMORY
453 SS_DFLT_REGION + 1, SS_MAX_POOLS_PER_REG - 1,
455 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
456 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
457 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
458 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
459 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
460 { SS_POOL_STATIC, 0 }
464 SS_DFLT_REGION + 2, SS_MAX_POOLS_PER_REG - 1,
466 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
467 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
468 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
469 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
470 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
471 { SS_POOL_STATIC, 0 }
475 SS_DFLT_REGION + 3, SS_MAX_POOLS_PER_REG - 1,
477 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
478 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
479 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
480 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
481 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
482 { SS_POOL_STATIC, 0 }
486 SS_DFLT_REGION + 4, SS_MAX_POOLS_PER_REG - 1,
488 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
489 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
490 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
491 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
492 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
493 { SS_POOL_STATIC, 0 }
497 SS_DFLT_REGION + 5, SS_MAX_POOLS_PER_REG - 1,
499 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
500 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
501 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
502 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
503 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
504 { SS_POOL_STATIC, 0 }
508 SS_DFLT_REGION + 6, SS_MAX_POOLS_PER_REG - 1,
510 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
511 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
512 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
513 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
514 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
515 { SS_POOL_STATIC, 0 }
519 #endif /* SS_LOCKLESS_MEMORY */
521 /* mt003.301 Modifications - File Based task registration made
522 * common for both MULTICORE and NON-MULTICORE
525 #ifdef SS_LOCKLESS_MEMORY
526 MtDynMemCfg mtDynMemoCfg =
528 SS_MAX_REGS, /* number of regions */
531 SS_DFLT_REGION, /* region id */
532 MT_MAX_BKTS, /* number of buckets */
534 /* block size, no. of blocks, Upper threshold, lower threshold */
535 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
536 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
537 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
538 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
539 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
543 SS_DFLT_REGION + 1, /* region id */
544 MT_MAX_BKTS, /* number of buckets */
546 /* block size, no. of blocks, Upper threshold, lower threshold */
547 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
548 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
549 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
550 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
551 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
555 SS_DFLT_REGION + 2, /* region id */
556 MT_MAX_BKTS, /* number of buckets */
558 /* block size, no. of blocks, Upper threshold, lower threshold */
559 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
560 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
561 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
562 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
563 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
567 SS_DFLT_REGION + 3, /* region id */
568 MT_MAX_BKTS, /* number of buckets */
570 /* block size, no. of blocks, Upper threshold, lower threshold */
571 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
572 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
573 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
574 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
575 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
579 SS_DFLT_REGION + 4, /* region id */
580 MT_MAX_BKTS, /* number of buckets */
582 /* block size, no. of blocks, Upper threshold, lower threshold */
583 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
584 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
585 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
586 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
587 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
591 SS_DFLT_REGION + 5, /* region id */
592 MT_MAX_BKTS, /* number of buckets */
594 /* block size, no. of blocks, Upper threshold, lower threshold */
595 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
596 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
597 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
598 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
599 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
603 SS_DFLT_REGION + 6, /* region id */
604 MT_MAX_BKTS, /* number of buckets */
606 /* block size, no. of blocks, Upper threshold, lower threshold */
607 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
608 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
609 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
610 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
611 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
614 #if ((defined (SPLIT_RLC_DL_TASK)) && (!defined (L2_L3_SPLIT)))
617 SS_DFLT_REGION + 7, /* region id */
618 MT_MAX_BKTS, /* number of buckets */
620 /* block size, no. of blocks, Upper threshold, lower threshold */
621 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
622 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
623 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
624 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
632 MtGlobMemCfg mtGlobMemoCfg =
634 MT_MAX_BKTS, /* number of buckets */
637 /* block size, no. of blocks, Upper threshold, lower threshold */
638 {MT_BKT_0_DSIZE, (MT_BKT_0_NUMBLKS + MT_BKT_0_NUMBLKS), SS_DFLT_MEM_BLK_SET_SIZE},
639 {MT_BKT_1_DSIZE, MT_BKT_1_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
640 {MT_BKT_2_DSIZE, MT_BKT_2_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
641 {MT_BKT_3_DSIZE, MT_BKT_3_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
642 {MT_BKT_4_DSIZE, MT_BKT_4_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE}
644 {1024, 12800 /* MT_BKT_0_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE},
645 {1664, 12800 /* MT_BKT_1_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE},
646 {4096, 3840 /* MT_BKT_2_NUMBLKS*/, SS_DFLT_MEM_BLK_SET_SIZE},
647 {MT_BKT_3_DSIZE, 12800 /* MT_BKT_3_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE}
651 #endif /* SS_LOCKLESS_MEMORY */
653 /* mt022.201 - Modification for memory calculator tool */
654 /* mt018.201 - added memory configuration matrix */
658 SS_MAX_REGS - 1, /* number of regions */
660 #ifndef XEON_SPECIFIC_CHANGES
661 SS_MAX_REGS, /* number of regions */
668 SS_DFLT_REGION, /* region id */
669 MT_MAX_BKTS, /* number of buckets */
670 MT_HEAP_SIZE, /* heap size */
672 #ifndef XEON_SPECIFIC_CHANGES
673 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
674 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
675 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
676 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
677 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS}
679 {256, 491520}, /* 60 pages of 2M*/
680 {512, 12288}, /* 3 pages of 2M */
681 {2048, 99328}, /* 97 Pages of 2M */
682 {8192, 75008}, /* 293 Pages of 2M */
683 {16384, 4096} /* 32 pages of 2M */
688 #ifndef SS_LOCKLESS_MEMORY
690 SS_DFLT_REGION + 1, /* region id */
691 MT_MAX_BKTS, /* number of buckets */
692 /*MT_HEAP_SIZE 7194304 */ 10485760, /* heap size */
694 //{MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
695 //{MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
696 //{MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
697 //{MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS} /* block size, no. of blocks */
705 #endif /* SS_LOCKLESS_MEMORY */
706 #endif /* INTEL_WLS */
707 #ifdef SS_LOCKLESS_MEMORY
709 SS_DFLT_REGION + 1, /* region id */
710 MT_MAX_BKTS, /* number of buckets */
711 MT_HEAP_SIZE, /* heap size */
713 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
714 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
715 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
716 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
717 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
721 SS_DFLT_REGION + 2, /* region id */
722 MT_MAX_BKTS, /* number of buckets */
723 MT_HEAP_SIZE, /* heap size */
725 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
726 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
727 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
728 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
729 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
733 SS_DFLT_REGION + 3, /* region id */
734 MT_MAX_BKTS, /* number of buckets */
735 MT_HEAP_SIZE, /* heap size */
737 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
738 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
739 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
740 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
741 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
745 SS_DFLT_REGION + 4, /* region id */
746 MT_MAX_BKTS, /* number of buckets */
747 MT_HEAP_SIZE, /* heap size */
749 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
750 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
751 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
752 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
753 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
757 SS_DFLT_REGION + 5, /* region id */
758 MT_MAX_BKTS, /* number of buckets */
759 MT_HEAP_SIZE, /* heap size */
761 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
762 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
763 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
764 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
765 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
769 SS_DFLT_REGION + 6, /* region id */
770 MT_MAX_BKTS, /* number of buckets */
771 MT_HEAP_SIZE, /* heap size */
773 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
774 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
775 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
776 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
777 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
780 #endif /* SS_LOCKLESS_MEMORY */
784 /* mt003.301 Modifications - File Based task registration made
785 * common for both MULTICORE and NON-MULTICORE
786 * bucket info, as different regions may request for different no.
789 MtBktCfg mtBktInfo[MT_MAX_BKTS];
790 S16 msArgc; /* argc */
791 Txt **msArgv; /* argv */
792 S16 msOptInd; /* SGetOpt vars */
793 S8 *msOptArg; /* SGetOpt vars */
796 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
797 typedef struct _MtRegMemSz
803 #ifdef SS_USE_WLS_MEM
804 static MtRegMemSz mtDynMemSz[MT_MAX_BKTS];
805 static S16 SPartitionWlsDynMem();
806 static S16 SAllocateWlsDynMem();
809 static MtRegMemSz mtRegMemSz[MT_MAX_BKTS+1];
814 /* private variable declarations */
815 /* mt018.201 - change mtCMMRegCfg as array of pointers */
816 static CmMmRegCfg *mtCMMRegCfg[SS_MAX_REGS];
817 static CmMmRegCb *mtCMMRegCb[SS_MAX_REGS];
818 /* mt003.301 - Fixed compilation warnings */
819 /* mt004.301 - added new variable for FAP */
820 /* mt010.301 - removed variable defined for FA */
823 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
826 void mtSetNtlHdl(unsigned int hdl)
831 unsigned int mtGetNtlHdl()
833 return(osCp.ntl.hdl);
837 void mtGetWlsHdl(void **hdlr)
839 *hdlr = osCp.wls.intf;
842 #ifdef XEON_MULTIPLE_CELL_CHANGES
843 S8 gWrWlsDeviceName[MAX_WLS_DEVICE_NAME_LEN];
844 S16 smWrReadWlsConfigParams (Void);
847 static int SOpenWlsIntf()
851 #define WLS_DEVICE_NAME "wls0"
853 char *my_argv[] = {"gnodeb", "-c3", "--proc-type=auto", "--file-prefix", WLS_DEVICE_NAME, "--iova-mode=pa"};
854 printf("\nCalling rte_eal_init: ");
855 for (i = 0; i < RTE_DIM(my_argv); i++)
857 printf("%s ", my_argv[i]);
861 if (rte_eal_init(RTE_DIM(my_argv), my_argv) < 0)
862 rte_panic("\nCannot init EAL\n");
865 #ifdef XEON_SPECIFIC_CHANGES
866 #ifdef XEON_MULTIPLE_CELL_CHANGES
867 hdl = WLS_Open(gWrWlsDeviceName, 1);
869 hdl = WLS_Open(WLS_DEVICE_NAME, 1);
872 hdl = WLS_Open(WLS_DEVICE_NAME, WLS_MASTER_CLIENT, WLS_MEM_SIZE);
879 printf("\nCould not open WLS Interface \n");
894 * Desc: This function is the entry point for the final binary. It
895 * calls SInit() in the common code. It can be replaced by a
896 * user function if required (SInit() must still be called).
898 * Ret: none on success
908 int argc, /* argument count */
909 char **argv /* argument vector */
913 #ifdef XEON_MULTIPLE_CELL_CHANGES
914 /* Read the WLS parameters from the file and copy into global control block */
915 if(smWrReadWlsConfigParams() != ROK)
917 fprintf(stderr, "Failed to read WLS params from file wr_cfg.txt");
919 } /* end of if statement */
922 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
925 #endif /* INTEL_WLS */
929 /* mt003.301 Modifications */
932 printf("\n SInit failed, SSI could not start \n");
933 /* pthread_exit(NULLP); */ /* commented out so that control comes out of the main thread */
937 /*mt010.301 cleanup part exposed to user*/
948 * Desc: This function is the entry point for the final binary. It
949 * calls SInit() in the common code. It can be replaced by a
950 * user function if required (SInit() must still be called).
952 * Ret: none on success
962 int argc, /* argument count */
963 char **argv /* argument vector */
979 * initialization functions
984 * Fun: Initialize OS control point
986 * Desc: This function initializes MTSS-specific information
987 * in the OS control point.
998 struct sigaction act;
1000 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1001 struct sigaction sa;
1005 /*mt014.301 : 4GMX release related changes*/
1006 #ifdef SS_4GMX_UCORE
1009 /* mt005.301 : Cavium changes */
1010 #ifdef SS_SEUM_CAVIUM
1011 /* set group mask for the core */
1012 cvmx_pow_set_group_mask(cvmx_get_core_num(), SS_CVMX_GRP_MASK);
1013 #endif /* SS_SEUM_CAVIUM */
1015 osCp.dep.sysTicks = 0;
1017 /* mt020.201 - Addition for no command line available */
1019 /* parse command line */
1021 /* mt003.301 Additions */
1022 if(fileBasedMemCfg == TRUE && memConfigured == FALSE)
1024 printf("\n File Based Memory configuration failed \n");
1029 #ifndef RGL_SPECIFIC_CHANGES /* ANOOP :: This ssdInitMemInfo() was present in 2.1 */
1030 #ifndef SS_LOCKLESS_MEMORY
1031 #ifdef SS_MULTICORE_SUPPORT
1032 if(memConfigured == FALSE)
1038 /* initialize the started semaphore */
1039 if (sem_init(&osCp.dep.ssStarted, 0, 0) != 0)
1044 /* mt028.201 added compile-time flag to allow signals to be left unmasked */
1046 /* mask all signals in the main thread */
1048 sigdelset(&set, SIGINT);
1049 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1050 sigdelset(&set, SIGSEGV);
1051 sigdelset(&set, SIGUSR2);
1052 sigdelset(&set, SIGILL);
1053 #ifdef XEON_SPECIFIC_CHANGES
1054 sigdelset(&set, SIGABRT);
1055 sigdelset(&set, SIGTERM);
1056 sigdelset(&set, SIGHUP);
1059 pthread_sigmask(SIG_SETMASK, &set, NULLP);
1060 #endif /* UNMASK_SIG */
1062 /* install a SIGINT handler to shutdown */
1063 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
1065 /*Initialize SIGSEGV Signal */
1066 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1068 memset(&sa, 0, sizeof(struct sigaction));
1069 sigemptyset(&sa.sa_mask);
1070 sa.sa_sigaction = signal_segv;
1071 sa.sa_flags = SA_SIGINFO;
1072 #ifndef XEON_SPECIFIC_CHANGES
1073 sigaction(SIGSEGV, &sa, NULL);
1075 memset(&sa, 0, sizeof(struct sigaction));
1076 sigemptyset(&sa.sa_mask);
1077 sa.sa_sigaction = signal_segv;
1078 sa.sa_flags = SA_SIGINFO;
1080 sigaction(SIGILL, &sa, NULL);
1082 if(sigaction(SIGILL, &sa, NULL) != 0)
1084 printf("\nFailed to process sigaction for the SIGILL\n");
1087 if(sigaction(SIGSEGV, &sa, NULL) != 0)
1089 printf("\nFailed to process sigaction for the SIGSEGV\n");
1092 if(sigaction(SIGABRT, &sa, NULL) != 0)
1094 printf("\nFailed to process sigaction for the SIGABRT\n");
1097 if(sigaction(SIGTERM, &sa, NULL) != 0)
1099 printf("\nFailed to process sigaction for the SIGTERM\n");
1102 if(sigaction(SIGHUP, &sa, NULL) != 0)
1104 printf("\nFailed to process sigaction for the SIGHUP\n");
1109 signal (SIGSEGV, mtSigSegvHndlr);
1110 signal (SIGKILL, mtSigSegvHndlr);
1111 signal (SIGUSR2, mtSigUsr2Hndlr);
1116 signal (SIGINT, mtStopHndlr);
1119 act.sa_handler = mtIntSigHndlr;
1120 sigfillset(&act.sa_mask);
1122 if (sigaction(SIGINT, &act, NULLP) != 0)
1128 /* mt040.201 initialise random seed */
1129 osCp.dep.randSeed = time(NULLP);
1137 * Fun: De-initialize OS control point
1139 * Desc: This function reverses the initialization in ssdInitGen().
1148 Void ssdDeinitGen(void)
1152 sem_destroy(&osCp.dep.ssStarted);
1157 #ifdef SS_LOCKLESS_MEMORY
1161 * Fun: ssPutDynMemBlkSet
1163 * Desc: Returns the set of dynamic Blocks into the global region
1166 * Ret: ROK - successful,
1167 * RFAILED - unsuccessful.
1174 S16 ssPutDynMemBlkSet
1176 uint8_t bktIdx, /* Index to bucket list */
1177 CmMmBlkSetElement *dynMemSetElem /* Memory set element which needs to be
1178 added to global region */
1181 CmMmGlobRegCb *globReg;
1182 CmMmGlobalBktCb *bktCb;
1186 globReg = osCp.globRegCb;
1188 #if (ERRCLASS & ERRCLS_INT_PAR)
1189 if(bktIdx >= globReg->numBkts)
1193 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1195 bktCb = &(globReg->bktTbl[bktIdx]);
1197 for(blkCnt = 0; blkCnt < bktCb->bucketSetSize; blkCnt++)
1199 blkPtr = dynMemSetElem->nextBktPtr;
1200 dynMemSetElem->nextBktPtr = *((CmMmEntry **)blkPtr);
1201 free((Void *)blkPtr);
1204 dynMemSetElem->nextBktPtr = NULLP;
1205 dynMemSetElem->numFreeBlks = 0;
1212 * Fun: ssGetDynMemBlkSet
1214 * Desc: Gets the set of dynamic memory blocks from the global region
1217 * Ret: ROK - successful,
1218 * RFAILED - unsuccessful.
1225 S16 ssGetDynMemBlkSet
1227 uint8_t bktIdx, /* Index to bucket list */
1228 CmMmBlkSetElement *dynMemSetElem /* Memory set element which is updated
1229 with new set values */
1233 CmMmGlobRegCb *globReg;
1234 CmMmGlobalBktCb *bktCb;
1239 globReg = osCp.globRegCb;
1241 #if (ERRCLASS & ERRCLS_INT_PAR)
1242 if(bktIdx >= globReg->numBkts)
1246 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1248 bktCb = &(globReg->bktTbl[bktIdx]);
1249 basePtr = &(dynMemSetElem->nextBktPtr);
1251 for(blkCnt = 0; blkCnt < bktCb->bucketSetSize; blkCnt++)
1253 blkPtr = (Data *)malloc(bktCb->size);
1255 basePtr = (CmMmEntry **)blkPtr;
1258 dynMemSetElem->numFreeBlks = bktCb->bucketSetSize;
1262 } /* ssGetDynMemBlkSet */
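/*
 * Hedged usage sketch (not taken from the original source): a dynamic region
 * that runs out of blocks in bucket bktIdx can refill its local set from the
 * global region with ssGetDynMemBlkSet(), and hand a fully re-assembled set
 * back with ssPutDynMemBlkSet() once all of its blocks are free again.
 */
#if 0
static S16 refillLocalBktSet(uint8_t bktIdx, CmMmBlkSetElement *localSet)
{
   /* pull a fresh set of blocks from the global region */
   if (ssGetDynMemBlkSet(bktIdx, localSet) != ROK)
   {
      return RFAILED;
   }

   /* ... blocks are allocated from and freed back to localSet ... */

   /* return the complete set to the global region when it is no longer needed */
   return ssPutDynMemBlkSet(bktIdx, localSet);
}
#endif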
1267 * Fun: ssPutDynMemBlkSet
1269 * Desc: Returns the set of dynamic Blocks into the global region
1272 * Ret: ROK - successful,
1273 * RFAILED - unsuccessful.
1280 S16 ssPutDynMemBlkSet
1282 uint8_t bktIdx, /* Index to bucket list */
1283 CmMmBlkSetElement *dynMemSetElem, /* Memory set element which needs to be
1284 added to global region */
1285 uint32_t doNotBlockForLock /* Boolean whether to block for lock or not */
1288 CmMmGlobRegCb *globReg;
1289 CmMmGlobalBktCb *bktCb;
1291 CmMmBlkSetElement *globMemNode;
1295 globReg = osCp.globRegCb;
1297 #if (ERRCLASS & ERRCLS_INT_PAR)
1298 if(bktIdx >= globReg->numBkts)
1302 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1304 bktCb = &(globReg->bktTbl[bktIdx]);
1306 /* Lock the global region first. If doNotBlockForLock is non-zero, a
1307 try-lock is used since there is no need to block; the set can be handled
1308 on the next attempt. Otherwise we block on the lock, as we have to get the
1311 SLock(&(bktCb->bucketLock));
1317 /* Get a free node from the free node linked list */
1318 lstNode = cmLListFirst(&(bktCb->listFreeBktSet));
1319 if(lstNode == NULLP)
1321 SUnlock(&(bktCb->bucketLock));
1325 cmLListDelFrm(&(bktCb->listFreeBktSet), lstNode);
1327 /* Copy the contents of the received element onto the free node
1328 * and add it to the valid linked list */
1329 globMemNode = (CmMmBlkSetElement *)lstNode->node;
1330 globMemNode->numFreeBlks = dynMemSetElem->numFreeBlks;
1331 globMemNode->nextBktPtr = dynMemSetElem->nextBktPtr;
1332 dynMemSetElem->numFreeBlks = 0;
1333 dynMemSetElem->nextBktPtr = NULLP;
1335 cmLListAdd2Tail(&(bktCb->listValidBktSet), &(globMemNode->memSetNode));
1337 SUnlock(&(bktCb->bucketLock));
1345 * Fun: ssGetDynMemBlkSet
1347 * Desc: Gets the set of dynamic memory blocks from the global region
1350 * Ret: ROK - successful,
1351 * RFAILED - unsuccessful.
1353 * Notes: The parameter doNotBlockForLock specifies whether to block for lock
1359 S16 ssGetDynMemBlkSet
1361 uint8_t bktIdx, /* Index to bucket list */
1362 CmMmBlkSetElement *dynMemSetElem, /* Memory set element which is updated
1363 with new set values */
1364 uint32_t doNotBlockForLock /* Boolean whether to block for lock or not */
1367 CmMmGlobRegCb *globReg;
1368 CmMmGlobalBktCb *bktCb;
1370 CmMmBlkSetElement *globMemNode;
1374 globReg = osCp.globRegCb;
1376 #if (ERRCLASS & ERRCLS_INT_PAR)
1377 if(bktIdx >= globReg->numBkts)
1381 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1383 bktCb = &(globReg->bktTbl[bktIdx]);
1385 /* Lock the global region first. If doNotBlockForLock is non-zero, a
1386 try-lock is used since there is no need to block; the set can be handled
1387 on the next attempt. Otherwise we block on the lock, as we have to get the
1390 SLock(&(bktCb->bucketLock));
1395 lstNode = cmLListFirst(&(bktCb->listValidBktSet));
1397 if(lstNode == NULLP)
1399 SUnlock(&(bktCb->bucketLock));
1403 /* Delete the node from the valid linked list and copy the values of the
1404 * structure elements into the pointer */
1405 cmLListDelFrm(&(bktCb->listValidBktSet), lstNode);
1406 globMemNode = (CmMmBlkSetElement *)lstNode->node;
1407 dynMemSetElem->numFreeBlks = globMemNode->numFreeBlks;
1408 dynMemSetElem->nextBktPtr = globMemNode->nextBktPtr;
1410 /* Add this node to the free node linked list */
1411 cmLListAdd2Tail(&(bktCb->listFreeBktSet), lstNode);
1413 SUnlock(&(bktCb->bucketLock));
1417 } /* ssGetDynMemBlkSet */
1420 #define NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN 100
1421 uint32_t gDynMemAlrm[4];
1422 static uint32_t memoryCheckCounter;
1424 uint32_t isMemThreshReached(Region reg)
1426 CmMmGlobRegCb *globReg;
1427 CmMmGlobalBktCb *bktCb;
1428 uint8_t bktIdx= reg;
1430 globReg = osCp.globRegCb;
1432 #if (ERRCLASS & ERRCLS_INT_PAR)
1433 if(bktIdx >= globReg->numBkts)
1437 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1439 bktCb = &(globReg->bktTbl[bktIdx]);
1441 if(gDynMemAlrm[bktIdx])
1443 // printf ("\nunder memory bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1444 SLock(&(bktCb->bucketLock));
1445 if(bktCb->listValidBktSet.count > 25)
1447 gDynMemAlrm[bktIdx] = FALSE;
1448 // printf ("\nrecoverd bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1450 SUnlock(&(bktCb->bucketLock));
1456 if(memoryCheckCounter++ >= NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN)
1458 // printf ("\nCHECK bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1459 SLock(&(bktCb->bucketLock));
1460 if(bktCb->listValidBktSet.count < 15 )
1461 gDynMemAlrm[bktIdx] = TRUE;
1462 memoryCheckCounter = 0;
1463 SUnlock(&(bktCb->bucketLock));
1469 #endif /* USE_MALLOC */
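/*
 * Hedged example (an assumption about intended use, not from the original
 * file): a caller in the data path might poll isMemThreshReached() before
 * committing more work to a region, backing off while the low-memory alarm
 * in gDynMemAlrm[] is raised for that region and resuming once enough valid
 * bucket sets are available again.
 */
#if 0
static S16 tryQueueWork(Region reg)
{
   if (isMemThreshReached(reg))
   {
      /* low-memory alarm is set for this region: defer or drop the work */
      return RFAILED;
   }
   /* ... enqueue the work item ... */
   return ROK;
}
#endif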
1470 #endif /* SS_LOCKLESS_MEMORY */
1472 #ifdef SS_USE_ICC_MEMORY
1475 * Fun: ssGetIccHdl
1477 * Desc: This function returns the ICC memory handle associated
1478 * with the given region when the ICC memory manager is
1479 * in use.
1488 Void * ssGetIccHdl(Region region)
1490 CmMmDynRegCb *dynRegCb;
1492 /* Klocwork fix ccpu00148484 */
1493 if(!(region < SS_MAX_REGS))
1498 dynRegCb = (CmMmDynRegCb *)osCp.dynRegionTbl[region].regCb;
1500 return (dynRegCb->iccHdl);
1502 #endif /* SS_USE_ICC_MEMORY */
1504 #ifdef T2K_MEM_LEAK_DBG
1505 RegionMemLeakInfo regMemLeakInfo;
1506 #endif /* T2K_MEM_LEAK_DBG */
1508 #ifdef SS_USE_WLS_MEM
1509 static S16 SPartitionWlsDynMem()
1512 uint8_t *bktMemStrtAddr = (uint8_t *)(((uint8_t*)osCp.wls.allocAddr) + (4 * 1024 * 1024));
1514 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1516 mtDynMemSz[i].startAddr = bktMemStrtAddr;
1517 bktMemStrtAddr += mtDynMemSz[i].reqdSz;
1520 printf("\nGlobal Memory Info: \n");
1521 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1523 printf("mtDynMemSz[%d]: [0x%016lx]\n", i, (unsigned long int)mtDynMemSz[i].startAddr);
1528 static S16 SAllocateWlsDynMem()
1533 memset(&mtDynMemSz[0], 0, sizeof(mtDynMemSz));
1535 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1537 reqdMemSz += (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1538 mtDynMemSz[i].reqdSz += (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1540 osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf,
1541 #ifdef INTEL_L1_V19_10
1544 (reqdMemSz + (4 * 1024 * 1024)));
1546 printf("\n *************** \n WLS memory: %lx, %d\n", (PTR)osCp.wls.allocAddr, reqdMemSz);
1547 SPartitionWlsDynMem();
1555 S16 SPartitionWlsMemory()
1560 uint64_t pageSize[1], hugePageSize;
1563 long int pageSize[1], hugePageSize;
1566 #define DIV_ROUND_OFFSET(X,Y) ( X/Y + ((X%Y)?1:0) )
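/*
 * Illustrative values only: DIV_ROUND_OFFSET rounds the division up, so
 * DIV_ROUND_OFFSET(5, 2) is 3 while DIV_ROUND_OFFSET(4, 2) is 2. Below it is
 * used to count how many whole huge pages a region's requested size spans.
 */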
1568 uint8_t *regMemStrtAddr = (uint8_t *)osCp.wls.allocAddr;
1570 gethugepagesizes(pageSize,1);
1571 hugePageSize = pageSize[0];
1572 for (i = 0; i < 1; i++)
1574 mtRegMemSz[i].startAddr = regMemStrtAddr;
1575 //CM_LOG_DEBUG(CM_LOG_ID_MT, "Global Region-->Bkt[%d] Addr:%p\n", i, mtRegMemSz[i].startAddr);
1577 numHugePg = DIV_ROUND_OFFSET(mtRegMemSz[i].reqdSz, hugePageSize);
1578 reqdSz = numHugePg * hugePageSize;
1579 regMemStrtAddr += reqdSz;
1580 #ifdef T2K_MEM_LEAK_DBG
1581 /* Since wls is region 0 */
1582 regMemLeakInfo.regStartAddr[i] = (uint64_t)mtRegMemSz[i].startAddr;
1583 regMemLeakInfo.numActvRegions++;
1584 #endif /* T2K_MEM_LEAK_DBG */
1586 //Store last region addr for validation
1587 mtRegMemSz[i].startAddr = regMemStrtAddr;
1591 #ifdef SS_MEM_WL_DEBUG
1592 Void SChkAddrValid(int type, int region, PTR ptr)
1594 char *tryPtr = NULL;
1595 if(type == 0) //Global
1597 if(ptr < mtRegMemSz[0].startAddr || ptr >=
1598 (mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr + mtGlobMemoCfg.heapSize))
1600 printf("\n****INVALID PTR in Global Region: ptr:%p start:%p end:%p***\n", ptr, mtRegMemSz[0].startAddr, mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr);
1606 if(ptr > mtRegMemSz[0].startAddr && ptr <= mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr )
1608 printf("\n****INVALID PTR in Static Region: ptr:%p start:%p end:%p***\n", ptr, mtRegMemSz[0].startAddr, mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr);
1614 #endif /* SS_MEM_WL_DEBUG */
1616 S16 SPartitionStaticMemory(uint8_t *startAddr)
1621 uint8_t *regMemStrtAddr = (uint8_t *)startAddr;
1624 //for (i = 0; i < mtMemoCfg.numRegions; i++)
1625 for (i = 1; i < mtMemoCfg.numRegions; i++)
1627 mtRegMemSz[i].startAddr = regMemStrtAddr;
1628 reqdSz = /* regMemStrtAddr + */mtRegMemSz[i].reqdSz;
1629 regMemStrtAddr += reqdSz;
1630 #ifdef T2K_MEM_LEAK_DBG
1631 { /* Since region 1 onwards are used for non wls */
1632 regMemLeakInfo.regStartAddr[i] = (uint64_t)mtRegMemSz[i].startAddr;
1633 regMemLeakInfo.numActvRegions++;
1635 #endif /* T2K_MEM_LEAK_DBG */
1639 S16 SAllocateWlsMem()
1647 //memset(&mtRegMemSz[0], sizeof(mtRegMemSz), 0);
1648 memset(&mtRegMemSz[0], 0, sizeof(mtRegMemSz));
1650 for (i = 0; i < 1; i++)
1652 /* allocate space for the region */
1653 region = &mtMemoCfg.region[i];
1654 reqdMemSz += region->heapsize;
1655 mtRegMemSz[i].reqdSz += region->heapsize;
1657 for (j = 0; j < region->numBkts; j++)
1659 reqdMemSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1660 mtRegMemSz[i].reqdSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1663 osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf, (512 *1024 * 1024));
1664 //osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf, (reqdMemSz + (1024 * 1024 * 2 * 2)));
1666 printf("\n ************* \n WLS memory: %llx, %ld\n ****** \n", osCp.wls.allocAddr, reqdMemSz);
1668 printf("\n ************* \n WLS memory: %lx, %d\n ****** \n", (PTR)osCp.wls.allocAddr, reqdMemSz);
1670 SPartitionWlsMemory();
1673 S16 SAllocateStaticMem()
1682 //memset(&mtRegMemSz[0], sizeof(mtRegMemSz), 0);
1684 //for (i = 0; i < mtMemoCfg.numRegions; i++)
1685 for (i = 1; i < mtMemoCfg.numRegions; i++)
1687 /* allocate space for the region */
1688 region = &mtMemoCfg.region[i];
1689 reqdMemSz += region->heapsize;
1690 mtRegMemSz[i].reqdSz += region->heapsize;
1692 for (j = 0; j < region->numBkts; j++)
1694 reqdMemSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1695 mtRegMemSz[i].reqdSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1699 startAddr = malloc(reqdMemSz + (1024 * 10));
1701 printf("\n ************* \n Static memory: %llx, %ld\n ****** \n", startAddr, reqdMemSz);
1703 printf("\n ************* \n Static memory: %lx, %d\n ****** \n", (PTR)startAddr, reqdMemSz);
1705 SPartitionStaticMemory(startAddr);
1708 #endif /* INTEL_WLS */
1714 * Fun: Initialize region/pool tables
1716 * Desc: This function initializes MTSS-specific information
1717 * in the region/pool tables and configures the common
1718 * memory manager for use.
1727 S16 ssdInitMem(void)
1729 /* mt018.201 - added local variable */
1733 MtRegCfg *region = NULLP;
1734 Txt errMsg[256] = {'\0'};
1735 #ifdef SS_LOCKLESS_MEMORY
1736 CmMmDynRegCb *dynRegCb =0;
1737 #ifdef SS_USE_ICC_MEMORY
1739 CmMmGlobRegCb *globReg = NULLP;
1742 #endif /* SS_LOCKLESS_MEMORY */
1745 /* Use the default SSI memory manager if the ICC memory manager is not
1746 * available. If the ICC memory manager is available, it will be used for
1747 * all sharable memory allocation and de-allocation */
1748 #ifdef SS_LOCKLESS_MEMORY
1749 #ifdef SS_USE_ICC_MEMORY
1750 #ifndef YS_PHY_3_8_2
1752 for (i = 0; i < mtDynMemoCfg.numRegions; i++)
1754 dynRegCb = (CmMmDynRegCb *)calloc(1, sizeof(CmMmDynRegCb));
1755 if(dynRegCb == NULLP)
1759 for(k = 0; k < mtDynMemoCfg.region[i].numBkts; k++)
1761 dynRegCb->bktSize[k] = mtGlobMemoCfg.bkt[k].blkSize;
1763 dynRegCb->region = i;
1764 cmMmDynRegInit(dynRegCb);
1765 printf("\niccHdl = %lx\n", (PTR)dynRegCb->iccHdl);
1768 /* ysIccHdl = dynRegCb->iccHdl; */
1771 /* Initialize the global region first */
1772 osCp.globRegCb = calloc(1, sizeof(CmMmGlobRegCb));
1774 if(osCp.globRegCb == NULLP)
1779 globReg = (CmMmGlobRegCb *)osCp.globRegCb;
1781 #ifdef SS_USE_WLS_MEM
1782 SAllocateWlsDynMem();
1785 for(i = 0; i < mtGlobMemoCfg.numBkts; i++)
1787 memSize = (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1788 #if !defined (INTEL_WLS) && defined (SS_USE_WLS_MEM)
1789 globReg->bktTbl[i].startAddr = (Data *)mtDynMemSz[i].startAddr;
1790 printf("\nStarting Address of Bkt Entry [%d]: [0x%016lx], memSize[%d]\n", i, (unsigned long int)globReg->bktTbl[i].startAddr, memSize);
1793 globReg->bktTbl[i].startAddr = (Data *)calloc(memSize, sizeof(Data));
1795 globReg->bktTbl[i].startAddr = (Data *)mtRegMemSz[i].startAddr;
1798 if(globReg->bktTbl[i].startAddr == NULLP)
1802 globReg->bktTbl[i].poolId = i;
1803 globReg->bktTbl[i].size = mtGlobMemoCfg.bkt[i].blkSize;
1804 globReg->bktTbl[i].numBlks = mtGlobMemoCfg.bkt[i].numBlks;
1805 globReg->bktTbl[i].bucketSetSize = mtGlobMemoCfg.bkt[i].bucketSetSize;
1808 globReg->numBkts = mtGlobMemoCfg.numBkts;
1809 cmMmGlobRegInit(globReg);
1811 /* Initialize the dynamic task regions and sanity-check the threshold
1813 for (i = 0; i < mtDynMemoCfg.numRegions; i++)
1815 dynRegCb = (CmMmDynRegCb *)calloc(1, sizeof(CmMmDynRegCb));
1816 if(dynRegCb == NULLP)
1820 for(k = 0; k < mtDynMemoCfg.region[i].numBkts; k++)
1822 if((mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold <
1823 mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold) ||
1824 (mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold == 0) ||
1825 (mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold == 0))
1827 #ifdef XEON_SPECIFIC_CHANGES
1832 dynRegCb->bktTbl[k].poolId = k;
1833 dynRegCb->bktTbl[k].size = mtGlobMemoCfg.bkt[k].blkSize;
1834 dynRegCb->bktTbl[k].blkSetRelThreshold = mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold;
1835 dynRegCb->bktTbl[k].blkSetAcquireThreshold = mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold;
1836 dynRegCb->bktTbl[k].bucketSetSize = mtGlobMemoCfg.bkt[k].bucketSetSize;
1837 if(dynRegCb->bktMaxBlkSize < dynRegCb->bktTbl[k].size)
1839 dynRegCb->bktMaxBlkSize = dynRegCb->bktTbl[k].size;
1842 dynRegCb->region = i;
1843 dynRegCb->numBkts = mtDynMemoCfg.region[i].numBkts;
1844 cmMmDynRegInit(dynRegCb);
1846 #endif /* SS_USE_ICC_MEMORY */
1847 #endif /* SS_LOCKLESS_MEMORY */
1849 #ifdef T2K_MEM_LEAK_DBG
1851 /* Initialize the memory-leak tool's memory for debugging */
1852 regMemLeakInfo.numActvRegions=0;
1853 for(reg=0; reg <SS_MAX_REGS; reg++)
1855 regMemLeakInfo.gMemLeakInfo[reg] = malloc(sizeof(T2kMeamLeakInfo)*T2K_MEM_LEAK_INFO_TABLE_SIZE);
1856 memset(regMemLeakInfo.gMemLeakInfo[reg],0x0,
1857 sizeof(T2kMeamLeakInfo)*T2K_MEM_LEAK_INFO_TABLE_SIZE);
1858 regMemLeakInfo.regStartAddr[reg] = 0;
1861 regMemLeakInfo.regStartAddr[reg] = 0;
1862 if (pthread_mutex_init(&(regMemLeakInfo.memLock[reg]), NULL) != 0)
1864 printf("\n mutex init failed\n");
1870 /* Now allocate WLS memory */
1872 SAllocateStaticMem();
1874 /* mt018.201 - CMM Initialization */
1875 for (i = 0; i < mtMemoCfg.numRegions; i++)
1877 /* allocate space for the region control block */
1878 mtCMMRegCb[i] = (CmMmRegCb *)calloc(1, sizeof(CmMmRegCb));
1879 #ifdef TENB_RTLIN_CHANGES
1880 mlock(mtCMMRegCb[i], sizeof(CmMmRegCb));
1882 if (mtCMMRegCb[i] == NULLP)
1884 sprintf(errMsg,"\n ssdInitMem(): Could not allocate memory \
1885 for the Region:%d control block\n",i);
1887 for (k = 0; k < i; k++)
1889 cmMmRegDeInit(mtCMMRegCb[k]);
1890 free(mtCMMRegCfg[k]->vAddr);
1891 free(mtCMMRegCb[k]);
1892 free(mtCMMRegCfg[k]);
1897 mtCMMRegCfg[i] = (CmMmRegCfg *)calloc(1, sizeof(CmMmRegCfg));
1898 #ifdef TENB_RTLIN_CHANGES
1899 mlock(mtCMMRegCfg[i], sizeof(CmMmRegCfg));
1901 if (mtCMMRegCfg[i] == NULLP)
1903 for (k = 0; k < i; k++)
1905 cmMmRegDeInit(mtCMMRegCb[k]);
1906 free(mtCMMRegCfg[k]->vAddr);
1907 free(mtCMMRegCb[k]);
1908 free(mtCMMRegCfg[k]);
1910 free(mtCMMRegCb[i]);
1915 /* allocate space for the region */
1916 region = &mtMemoCfg.region[i];
1917 mtCMMRegCfg[i]->size = region->heapsize;
1918 for (j = 0; j < region->numBkts; j++)
1920 /* mt033.201 - addition for including the header size while computing the total size */
1921 #ifdef SSI_DEBUG_LEVEL1
1922 mtCMMRegCfg[i]->size += (region->bkt[j].blkSize + sizeof(CmMmBlkHdr)) *\
1923 (region->bkt[j].numBlks);
1925 mtCMMRegCfg[i]->size += region->bkt[j].blkSize * region->bkt[j].numBlks;
1926 #endif /* SSI_DEBUG_LEVEL1 */
1929 mtCMMRegCfg[i]->vAddr = (Data *)mtRegMemSz[i].startAddr;
1931 mtCMMRegCfg[i]->vAddr = (Data *)calloc(mtCMMRegCfg[i]->size,
1934 #ifdef XEON_SPECIFIC_CHANGES
1935 CM_LOG_DEBUG(CM_LOG_ID_MT, "Static Region-->Bkt[%d] Addr:[%p] RegionId=[%d] Size=[%d] \n",
1936 i, mtCMMRegCfg[i]->vAddr, region->regionId, mtCMMRegCfg[i]->size);
1938 #ifdef TENB_RTLIN_CHANGES
1939 mlock(mtCMMRegCfg[i]->vAddr, mtCMMRegCfg[i]->size*sizeof(Data));
1942 if (mtCMMRegCfg[i]->vAddr == NULLP)
1944 sprintf(errMsg,"\n ssdInitMem(): Could not allocate memory \
1945 for the Region:%d \n",i);
1947 for (k = 0; k < i; k++)
1949 cmMmRegDeInit(mtCMMRegCb[k]);
1950 free(mtCMMRegCfg[k]->vAddr);
1951 free(mtCMMRegCb[k]);
1952 free(mtCMMRegCfg[k]);
1954 free(mtCMMRegCb[i]);
1955 free(mtCMMRegCfg[i]);
1960 /* set up the CMM configuration structure */
1961 mtCMMRegCfg[i]->lType = SS_LOCK_MUTEX;
1962 mtCMMRegCfg[i]->chFlag = 0;
1963 mtCMMRegCfg[i]->bktQnSize = MT_BKTQNSIZE;
1964 mtCMMRegCfg[i]->numBkts = region->numBkts;
1966 for (j = 0; j < region->numBkts; j++)
1968 mtCMMRegCfg[i]->bktCfg[j].size = region->bkt[j].blkSize;
1969 mtCMMRegCfg[i]->bktCfg[j].numBlks = region->bkt[j].numBlks;
1972 /* initialize the CMM */
1973 #ifdef SS_LOCKLESS_MEMORY
1974 if (cmMmStatRegInit(region->regionId, mtCMMRegCb[i], mtCMMRegCfg[i]) != ROK)
1976 if (cmMmRegInit(region->regionId, mtCMMRegCb[i], mtCMMRegCfg[i]) != ROK)
1977 #endif /* SS_LOCKLESS_MEMORY */
1979 for (k = 0; k < i; k++)
1981 cmMmRegDeInit(mtCMMRegCb[k]);
1982 free(mtCMMRegCfg[k]->vAddr);
1983 free(mtCMMRegCb[k]);
1984 free(mtCMMRegCfg[k]);
1986 free(mtCMMRegCfg[i]->vAddr);
1987 free(mtCMMRegCb[i]);
1988 free(mtCMMRegCfg[i]);
1993 /* initialize the STREAMS module */
1994 /* mt019.201: STREAMS module will only apply to DFLT_REGION */
1995 if (region->regionId == 0)
1997 if (ssStrmCfg(region->regionId, region->regionId) != ROK)
1999 for (k = 0; k < i; k++)
2001 cmMmRegDeInit(mtCMMRegCb[k]);
2002 free(mtCMMRegCfg[k]->vAddr);
2003 free(mtCMMRegCb[k]);
2004 free(mtCMMRegCfg[k]);
2006 cmMmRegDeInit(mtCMMRegCb[i]);
2007 free(mtCMMRegCfg[i]->vAddr);
2008 free(mtCMMRegCb[i]);
2009 free(mtCMMRegCfg[i]);
2014 /* mt001.301 : Additions */
2015 #ifdef SS_MEM_LEAK_STS
2017 #endif /* SS_MEM_LEAK_STS */
2025 * Fun: De-initialize region/pool tables
2027 * Desc: This function reverses the initialization in ssdInitMem().
2036 Void ssdDeinitMem(void)
2038 /* mt018.201 - added local variables */
2041 /* mt008.301 Additions */
2042 #ifdef SS_MEM_LEAK_STS
2043 cmDeinitMemLeakMdl();
2044 #endif /* SS_MEM_LEAK_STS */
2046 for (i = 0; i < mtMemoCfg.numRegions; i++)
2048 cmMmRegDeInit(mtCMMRegCb[i]);
2049 free(mtCMMRegCfg[i]->vAddr);
2050 free(mtCMMRegCb[i]);
2051 free(mtCMMRegCfg[i]);
2060 * Fun: Initialize task table
2062 * Desc: This function initializes MTSS-specific information
2063 * in the task table.
2072 S16 ssdInitTsk(void)
2074 /* mt001.301 : Additions */
2075 /* mt013.301: Added SS_AFFINITY_SUPPORT */
2076 #if defined(SS_MULTICORE_SUPPORT) || defined(SS_AFFINITY_SUPPORT)
2077 uint32_t tskInd = 0;
2078 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
2082 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
2083 #if defined(SS_MULTICORE_SUPPORT) || defined(SS_AFFINITY_SUPPORT)
2084 /* initialize system task information */
2085 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
2087 osCp.sTskTbl[tskInd].dep.lwpId = 0;
2089 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
2096 * Fun: Deinitialize task table
2098 * Desc: This function reverses the initialization performed in
2108 Void ssdDeinitTsk(void)
2115 #ifdef SS_DRVR_SUPPORT
2118 * Fun: Initialize driver task table
2120 * Desc: This function initializes MTSS-specific information
2121 * in the driver task table.
2130 S16 ssdInitDrvr(void)
2134 pthread_attr_t attr;
2139 /* initialize the dependent portion of the driver task entries */
2140 for (i = 0; i < SS_MAX_DRVRTSKS; i++)
2142 osCp.drvrTskTbl[i].dep.flag = FALSE;
2146 /* create pipe for communication between SSetIntPend() and
2147 * the isTskHdlr thread.
2149 if (pipe(osCp.dep.isFildes) != 0)
2155 /* create the isTskHdlr thread */
2156 pthread_attr_init(&attr);
2157 /* mt021.201 - Addition to set stack size */
2158 pthread_attr_setstacksize(&attr, (size_t)MT_ISTASK_STACK);
2159 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2160 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2161 if ((pthread_create(&osCp.dep.isTskHdlrTID, &attr, mtIsTskHdlr, NULLP)) != 0)
2163 /* mt020.201 - Addition for destroying thread attribute object attr */
2164 pthread_attr_destroy(&attr);
2170 /*mt014.301 : 4GMX release related changes*/
2171 #ifdef SS_4GMX_UCORE
2179 /* mt020.201 - Addition for destroying thread attribute object attr */
2180 pthread_attr_destroy(&attr);
2189 * Fun: Deinitialize driver information
2191 * Desc: This function reverses the initialization performed in
2201 Void ssdDeinitDrvr(void)
2203 /* mt008.301: Terminate the Driver Task on exit */
2204 while(pthread_cancel(osCp.dep.isTskHdlrTID));
2207 TL_Close(AppContext.hUAII);
2208 if (clusterMode == RADIO_CLUSTER_MODE)
2210 TL_Close(AppContext.hUAII_second);
2216 #endif /* SS_DRVR_SUPPORT */
2221 * Fun: Initialize timer table
2223 * Desc: This function initializes MTSS-specific information
2224 * in the timer table.
2233 S16 ssdInitTmr(void)
2235 pthread_attr_t attr;
2236 struct sched_param param_sched;
2237 /* mt010.21: addition */
2239 #ifdef SS_MULTICORE_SUPPORT
2241 #endif /* SS_MULTICORE_SUPPORT */
2242 #ifdef SS_THR_REG_MAP
2243 uint32_t threadCreated = FALSE;
2244 #endif /* SS_THR_REG_MAP */
2248 osCp.dep.tmrTqCp.tmrLen = SS_MAX_TMRS;
2249 /* mt010.21: addition */
2250 osCp.dep.tmrTqCp.nxtEnt = 0;
2251 for (i=0; i< SS_MAX_TMRS; i++)
2253 osCp.dep.tmrTq[i].first = (CmTimer *)NULLP;
2256 #ifdef SS_MULTICORE_SUPPORT
2257 sTsk = ssdAddTmrSTsk();
2262 #endif /* SS_MULTICORE_SUPPORT */
2263 /* create the timer handler thread */
2264 pthread_attr_init(&attr);
2265 /* mt021.201 - Addition to set stack size */
2266 pthread_attr_setstacksize(&attr, (size_t)MT_TMRTASK_STACK);
2267 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2268 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2269 pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
2270 param_sched.sched_priority = sched_get_priority_max(SCHED_FIFO);
2271 pthread_attr_setschedparam(&attr, &param_sched);
2274 #ifdef SS_THR_REG_MAP
2275 /* When the thread is created, we check whether its threadId can be placed
2276 * in the thread memory map table. If it cannot be placed there, the threadId
2277 * is stored in a temporary array. Once a thread is created successfully,
2278 * pthread_cancel is sent to every thread created before it. All the
2279 * threads are made to wait on a semaphore, which is a cancellation point for the thread.
2281 while(threadCreated == FALSE)
2284 if ((pthread_create(&osCp.dep.tmrHdlrTID, &attr, mtTmrHdlr, NULLP)) != 0)
2286 /* mt020.201 - Addition for destroying thread attribute object attr */
2287 pthread_attr_destroy(&attr);
2292 #ifdef SS_THR_REG_MAP
2293 threadCreated = ssCheckAndAddMemoryRegionMap(osCp.dep.tmrHdlrTID,
2296 #endif /* SS_THR_REG_MAP */
2297 #ifdef SS_MEM_WL_DEBUG
2298 tmpRegTidMap[sTsk->region] = osCp.dep.tmrHdlrTID;
2301 /* mt020.201 - Addition for destroying thread attribute object attr */
2302 pthread_attr_destroy(&attr);
2311 * Fun: Deinitialize timer table
2313 * Desc: This function reverses the initialization performed in
2323 Void ssdDeinitTmr(void)
2325 #ifdef SS_MULTICORE_SUPPORT
2328 #endif /* SS_MULTICORE_SUPPORT */
2331 #ifdef SS_MULTICORE_SUPPORT
2332 ret = SLock(&osCp.sTskTblLock);
2336 #if (ERRCLASS & ERRCLS_DEBUG)
2337 MTLOGERROR(ERRCLS_DEBUG, EMT008, (ErrVal) ret,
2338 "Could not lock system task table");
2342 sTsk = &osCp.sTskTbl[0]; /* first entry is timer entry always */
2343 /* clean up the system task entry */
2347 SDestroyLock(&sTsk->lock);
2348 ssDestroyDmndQ(&sTsk->dQ);
2351 /* make this entry available in the system task table */
2352 sTsk->nxt = osCp.nxtSTskEntry;
2353 osCp.nxtSTskEntry = 0;
2357 /* unlock the system task table */
2358 SUnlock(&osCp.sTskTblLock);
2360 #endif /* SS_MULTICORE_SUPPORT */
2361 /* mt008.301: Terminate the timer thread on exit */
2362 while(pthread_cancel(osCp.dep.tmrHdlrTID));
2372 * Desc: Pre-tst() initialization.
2381 S16 ssdInitLog(void)
2383 /* mt027.201 - Modification to fix warnings with no STDIN and STDOUT */
2387 pthread_attr_t attr;
2390 #endif /* CONSTDIO */
2395 /* mt008.301: ssdInitFinal changed to ssdInitLog */
2400 osCp.dep.conInFp = (FILE *) stdin;
2401 osCp.dep.conOutFp = (FILE *) stdout;
2402 /* added compile time flag CONRD: mt017.21 */
2406 /* disable canonical input processing */
2407 fd = fileno(osCp.dep.conInFp);
2408 if ((tcgetattr(fd, &tio)) != 0)
2410 printf("\nError: disable canonical input processing\n");
2414 tio.c_lflag &= ~ICANON;
2415 tio.c_cc[VMIN] = 1; /* wait for a minimum of 1 character input */
2416 tio.c_cc[VTIME] = 0;
2417 if ((tcsetattr(fd, TCSANOW, &tio)) != 0)
2419 printf("\nError: while tcsetattr() processing\n");
2423 #endif /* CONSTDIO */
2426 /* set up the input fd to block when no data is available */
2427 fd = fileno(osCp.dep.conInFp);
2428 flags = fcntl(fd, F_GETFL, &flags);
2429 flags &= ~O_NONBLOCK;
2430 if (fcntl(fd, F_SETFL, flags) == -1)
2432 printf("\nError: while fcntl processing\n");
2437 /* create the console handler thread */
2438 pthread_attr_init(&attr);
2439 /* mt021.201 - Addition to set stack size */
2440 pthread_attr_setstacksize(&attr, (size_t)MT_CONSOLE_STACK);
2441 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2442 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2445 if((SCreatePThread(&osCp.dep.conHdlrTID, &attr, mtConHdlr, NULLP)) != 0)
2447 /* mt020.201 - Addition for destroying thread attribute object attr */
2448 pthread_attr_destroy(&attr);
2450 printf("\nError: Logging Thread creation failed \n");
2454 /* mt020.201 - Addition for destroying thread attribute object attr */
2455 pthread_attr_destroy(&attr);
2469 * Desc: This function reverses the initialization performed in
2479 /* mt008.301: ssdDeinitFinal changed to ssdDeinitLog */
2480 Void ssdDeinitLog(void)
2482 /* mt008.301: ssdDeinitFinal changed to ssdDeinitLog */
2485 /* mt008.301: Terminate the console reader on exit */
2486 while(pthread_cancel(osCp.dep.conHdlrTID));
2492 /* mt001.301 : Additions */
2496 S16 ssdInitWatchDog(uint16_t port)
2499 Txt prntBuf[PRNTSZE];
2502 #ifdef SS_WATCHDOG_IPV6
2503 struct sockaddr_in6 tmpaddr;
2505 struct sockaddr_in tmpaddr;
2506 #endif /* SS_WATCHDOG_IPV6 */
2507 #ifdef SS_MULTIPLE_PROCS
2508 ProcId procId = SS_WD_WDPROC;
2509 if (SAddProcIdLst(1, &procId) != ROK)
2513 #endif /* SS_MULTIPLE_PROCS */
2516 SInitLock(&osCp.wdCp.wdLock, SS_LOCK_MUTEX);
2518 /* Create a watch dog system task */
2519 SCreateSTsk(0, &(osCp.wdCp.watchDgTskId));
2521 /* Create a watch dog receiver system task */
2522 SCreateSTsk(0, &(osCp.wdCp.watchDgRcvrTskId));
2524 /* Register and attach watch dog TAPA task */
2525 #ifdef SS_MULTIPLE_PROCS
2526 SRegTTsk (procId, ENTDW, INST0, TTNORM, PRIOR0, NULLP, watchDgActvTsk);
2527 SAttachTTsk (procId, ENTDW, INST0, osCp.wdCp.watchDgTskId);
2529 SRegTTsk ( ENTDW, INST0, TTNORM, PRIOR0, NULLP, watchDgActvTsk);
2530 SAttachTTsk ( ENTDW, INST0, osCp.wdCp.watchDgTskId);
2531 #endif /* SS_MULTIPLE_PROCS */
2532 /* Register and attach watch dog receiver TAPA task */
2533 #ifdef SS_MULTIPLE_PROCS
2534 SRegTTsk (procId, ENTHB, INST0, TTNORM, PRIOR0, NULLP, watchDgRcvrActvTsk);
2535 SAttachTTsk (procId, ENTHB, INST0, osCp.wdCp.watchDgRcvrTskId);
2537 SRegTTsk ( ENTHB, INST0, TTNORM, PRIOR0, NULLP, watchDgRcvrActvTsk);
2538 SAttachTTsk ( ENTHB, INST0, osCp.wdCp.watchDgRcvrTskId);
2539 #endif /* SS_MULTIPLE_PROCS */
2541 #ifndef SS_MULTIPLE_PROCS
2542 osCp.wdCp.watchDgPst.srcProcId = SFndProcId();
2543 osCp.wdCp.watchDgPst.dstProcId = SFndProcId();
2545 osCp.wdCp.watchDgPst.srcProcId = procId;
2546 osCp.wdCp.watchDgPst.dstProcId = procId;
2547 #endif /* SS_MULTIPLE_PROCS */
2549 /* Initialise the pst structure */
2550 ssdInitWatchDgPst(&(osCp.wdCp.watchDgPst));
2551 /* Initialize the watch dog timer; the default resolution is 1 sec */
2553 cmInitTimers(osCp.wdCp.watchDgTmr, (uint8_t)1);
2554 osCp.wdCp.watchDgTqCp.nxtEnt = 0;
2555 osCp.wdCp.watchDgTqCp.tmrLen = 1;
2556 for(idx = 0; idx < 1; idx++)
2558 osCp.wdCp.watchDgTs[idx].first = NULLP;
2559 osCp.wdCp.watchDgTs[idx].tail = NULLP;
2561 #ifdef SS_MULTIPLE_PROCS
2562 SRegCfgTmr(procId,ENTDW, INST0, 10, SS_100MS, ssdWatchDgActvTmr);
2564 SRegCfgTmr(ENTDW, INST0, 10, SS_100MS, ssdWatchDgActvTmr);
2565 #endif /* SS_MULTIPLE_PROCS */
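/* Illustrative note: assuming SS_100MS denotes a 100 ms timer resolution and
 * the third argument of SRegCfgTmr() is the period in those units, the
 * registration above (10 x SS_100MS) corresponds to roughly 1 second, which
 * matches the 1 sec default mentioned for the watch dog timer. */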
2567 /* Create the watch dog receiver socket */
2568 osCp.wdCp.globWd.sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
2569 if(osCp.wdCp.globWd.sock == -1)
2571 sprintf(prntBuf,"ssdInitWatchDog: socket failed errno [%d]\n", errno);
2575 #ifdef SS_WATCHDOG_IPV6
2576 tmpaddr.sin6_len = sizeof(tmpaddr);
2577 tmpaddr.sin6_family = AF_INET6;
2578 tmpaddr.sin6_addr = in6addr_any;
2579 tmpaddr.sin6_port = htons(port);
2581 tmpaddr.sin_family = AF_INET;
2582 tmpaddr.sin_addr.s_addr = htonl(INADDR_ANY);
2583 tmpaddr.sin_port = htons(port);
2584 #endif /* SS_WATCHDOG_IPV6 */
2586 if(bind(osCp.wdCp.globWd.sock, (struct sockaddr *)&tmpaddr, sizeof(struct sockaddr)) != 0
2589 sprintf(prntBuf,"ssdInitWatchDog: bind failed errno [%d]\n", errno);
2593 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
2597 #ifndef SS_MULTIPLE_PROCS
2598 pst.srcProcId = SFndProcId();
2599 pst.dstProcId = SFndProcId();
2601 pst.srcProcId = procId;
2602 pst.dstProcId = procId;
2603 #endif /* SS_MULTIPLE_PROCS */
2604 pst.event = EVTSSHRTBTREQ;
2605 ssdInitWatchDgPst(&pst);
2606 SPstTsk(&pst, mBuf);
2611 S16 ssdInitWatchDgPst(Pst *pst)
2614 pst->selector = SS_LOOSE_COUPLING;
2616 pst->region = DFLT_REGION; /* region */
2617 pst->pool = DFLT_POOL; /* pool */
2619 pst->prior = PRIOR0; /* priority */
2620 pst->route = RTESPEC; /* route */
2622 pst->dstEnt = ENTHB; /* destination entity */
2624 pst->srcEnt = ENTDW; /* source entity */
2630 #ifdef SS_MULTIPLE_PROCS
2631 S16 ssdWatchDgActvTmr
2638 S16 ssdWatchDgActvTmr(Void)
2639 #endif /* SS_MULTIPLE_PROCS */
2642 cmPrcTmr(&osCp.wdCp.watchDgTqCp, osCp.wdCp.watchDgTs, (PFV)ssdWatchDgTmrEvt);
2647 Void ssdWatchDgTmrEvt
2649 PTR cb, /* control block */
2650 S16 event /* timer number */
2653 /* mt003.301 Fixed warnings */
2657 Txt prntBuf[PRNTSZE];
2666 SPrint("Timer Heartbeat Request Expired");
2668 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2673 SLock(&osCp.wdCp.wdLock);
2674 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2676 if(osCp.wdCp.globWd.wdsta[i].status == 0)
2678 sprintf(prntBuf, "Node [ %s ] Down. Calling user callback\n", inet_ntoa(osCp.wdCp.globWd.wdsta[i].addr));
2680 if(osCp.wdCp.globWd.callback != 0)
2682 osCp.wdCp.globWd.callback(osCp.wdCp.globWd.data);
2686 SUnlock(&osCp.wdCp.wdLock);
2688 if(!osCp.wdCp.globWd.watchdogStop)
2690 ssdStartWatchDgTmr(NULLP, SS_TMR_HRTBT, osCp.wdCp.globWd.timeout);
2691 ssdSndHrtBtMsg(restartTmr, SS_WD_HB_REQ);
2701 Void ssdStartWatchDgTmr
2712 Txt prntBuf[PRNTSZE];
2716 /* mt003.301 Modifications */
2719 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2720 if(event == SS_TMR_HRTBT)
2722 SPrint("\nSTART SS_TMR_HRTBT");
2729 SLock(&osCp.wdCp.wdLock);
2730 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2732 osCp.wdCp.globWd.wdsta[i].status = 0;
2734 SUnlock(&osCp.wdCp.wdLock);
2736 arg.tq = osCp.wdCp.watchDgTs;
2737 arg.tqCp = &osCp.wdCp.watchDgTqCp;
2738 arg.timers = osCp.wdCp.watchDgTmr;
2739 arg.cb = (PTR)NULLP;
2741 arg.wait = osCp.wdCp.globWd.timeout = wait;
2749 Void ssdStopWatchDgTmr
2758 Txt prntBuf[PRNTSZE];
2762 /* mt003.301 Modifications */
2765 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2766 if(event == SS_TMR_HRTBT)
2768 SPrint("STOP SS_TMR_HRTBT");
2772 SLock(&osCp.wdCp.wdLock);
2773 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2775 osCp.wdCp.globWd.wdsta[i].status = 0;
2777 SUnlock(&osCp.wdCp.wdLock);
2780 arg.tq = osCp.wdCp.watchDgTs;
2781 arg.tqCp = &osCp.wdCp.watchDgTqCp;
2782 arg.timers = osCp.wdCp.watchDgTmr;
2783 arg.cb = (PTR)NULLP;
2802 Txt prntBuf[PRNTSZE];
2804 struct sockaddr_in tmpaddr;
2805 char hbMsg[SS_WD_HB_MSG_SIZE];
2812 sprintf(prntBuf,"TX HEARTBEAT REQ Time: %02d:%02d:%02d\n", dt.hour, dt.min, dt.sec);
2816 /* Pack the message */
2817 strcpy(hbMsg, "<HB>REQ</HB>");
2819 /* Send the heartbeat messages to all the configured nodes */
2820 SLock(&osCp.wdCp.wdLock);
2821 for (n=0; n < osCp.wdCp.globWd.numNodes; n++)
2823 if(osCp.wdCp.globWd.wdsta[n].addr.s_addr == 0)
2828 /* Identify the destination node */
2829 #ifdef SS_WATCHDOG_IPV6
2830 tmpaddr.sin6_len = sizeof(tmpaddr);
2831 tmpaddr.sin6_family = AF_INET6;
2832 tmpaddr.sin6_addr = osCp.wdCp.globWd.wdsta[n].addr;
2833 tmpaddr.sin6_port = osCp.wdCp.globWd.wdsta[n].port;
2835 tmpaddr.sin_family = AF_INET;
2836 tmpaddr.sin_addr.s_addr = osCp.wdCp.globWd.wdsta[n].addr.s_addr;
2837 tmpaddr.sin_port = osCp.wdCp.globWd.wdsta[n].port;
2838 #endif /* SS_WATCHDOG_IPV6 */
2840 err = sendto(osCp.wdCp.globWd.sock, hbMsg, strlen(hbMsg), 0, (struct sockaddr *)&tmpaddr, sizeof(struct sockaddr));
2844 sprintf(prntBuf,"ssdSndHrtBtMsg: HB to node [%s:%d] failed status[%d]\n",
2845 inet_ntoa(tmpaddr.sin_addr), tmpaddr.sin_port, errno);
2852 sprintf(prntBuf,"ssdSndHrtBtMsg: HB to node [%s:%d] sent[%d]\n", inet_ntoa(tmpaddr.sin_addr), tmpaddr.sin_port, err);
2857 SUnlock(&osCp.wdCp.wdLock);
2862 #endif /* SS_WATCHDOG */
2866 /* mt022.201 - Modification to fix problem when NOCMDLINE is defined */
2872 * Desc: This function gets command line options.
2881 static Void mtGetOpts(void)
2888 FILE *memOpt; /* memory options file pointer */
2891 /* mt007.301 : Fix related to file based mem config on 64 bit machine */
2897 /*KWORK_FIX: Initializing the variable to avoid corruption */
2899 /*mt010.301 Fix for reading the variables on 64 bit/32bit platforms correctly */
2905 #ifdef SS_LOCKLESS_MEMORY
2920 osCp.dep.fileOutFp = (FILE *)NULLP;
2922 /* initialize memOpt */
2923 memOpt = (FILE *) NULLP;
2930 while ((ret = SGetOpt(argc, argv, "o:f:s:m:c:")) != EOF)
2935 /* mt001.301 : Additions */
2936 #ifdef SS_MEM_LEAK_STS
2938 cmMemOpenMemLkFile(msOptArg);
2942 osCp.dep.fileOutFp = fopen(msOptArg, "w");
2945 fileBasedMemCfg = TRUE;
2946 memOpt = fopen(msOptArg, "r");
2948 /* if file does not exist or could not be opened then use the
2949 * default memory configuration as defined in mt_ss.h
2951 if (memOpt == (FILE *) NULLP)
2953 sprintf(pBuf, "\nMTSS: Memory configuration file: %s could not\
2954 be opened, using default mem configuration\n", msOptArg);
2959 while (fgets((Txt *)line, 256, memOpt) != NULLP)
2961 if(line[0] == '#' || line[0] < '0' || line[0] > '9') /* Comment line or non-numeric character, so skip it and read the next line */
2967 case 0: /*** INPUT: Number of regions ***/
2968 sscanf(line, "%ld", (long *) &numReg);
2969 mtMemoCfg.numRegions = numReg;
2970 if(mtMemoCfg.numRegions > SS_MAX_REGS)
2972 printf("\n No. of regions are > SS_MAX_REGS:%d \n",SS_MAX_REGS);
2978 case 1: /*** INPUT: Number of buckets and number of Pools ***/
2979 sscanf(line, "%ld %ld", (long *) &numBkts, (long *) &numPools);
2980 if(numBkts > MT_MAX_BKTS)
2982 printf("\n No. of buckets are > MT_MAX_BKTS :%d \n",MT_MAX_BKTS);
2986 if(numPools > SS_MAX_POOLS_PER_REG)
2988 printf("\n No. of pools are > SS_MAX_POOLS_PER_REG:%d \n",SS_MAX_POOLS_PER_REG);
2993 * Defer updating the global structure with the number of
2994 * regions and heap data from the local variables until the
2995 * error checks above have passed.
2997 for(idx = 0; idx < cfgNumRegs; idx++)
2999 mtMemoCfg.region[idx].numBkts = numBkts;
3000 cfgRegInfo[idx].region = idx;
3001 cfgRegInfo[idx].numPools = numPools;
3003 * Initialize the pool info as static type with size zero
3005 for(poolIdx = 0; poolIdx < numPools; poolIdx++)
3007 cfgRegInfo[idx].pools[poolIdx].type = SS_POOL_STATIC;
3008 cfgRegInfo[idx].pools[poolIdx].size = 0;
3013 case 2: /*** INPUT: Bucket Id and size of the bucket ***/
3014 if(bktUpdtCnt < numBkts) /* more set of bucket can be added */
3016 sscanf(line, "%ld %ld",(long *)&bktIdx, (long *) &bktSz);
3018 if(bktIdx >= numBkts)
3020 printf("\n Invalid Bucket Id, may be >= the No. of buckets:%ld\n",numBkts);
3025 mtBktInfo[bktIdx].blkSize = bktSz;
3027 if(bktUpdtCnt == numBkts)
3029 i++; /*done reading bkt info, start reading individual region info*/
3033 case 3: /*** INPUT: Region Id (ranges from 0 to numRegions-1) **/
3034 sscanf(line,"%ld",(long *) &regId);
3035 if(regId >= mtMemoCfg.numRegions)
3037 printf("\n Invalid Region Id, may be >= the No. of regions:%d\n",mtMemoCfg.numRegions);
3038 #ifndef XEON_SPECIFIC_CHANGES
3043 mtMemoCfg.region[regId].regionId = regId;
3046 case 4: /*** INPUT: BktId (ranges from 0 to numBkts-1), No. of blks ***/
3047 if(bktUpdtCnt < numBkts)
3049 sscanf(line, "%ld %ld",(long *)&bktIdx, (long *)&bktNum);
3050 if(bktIdx >= numBkts)
3052 printf("\n Invalid Bucket Id, may be >= the No. of buckets:%ld\n",numBkts);
3057 if(bktIdx < MT_MAX_BKTS)
3059 mtMemoCfg.region[regId].bkt[bktIdx].blkSize = mtBktInfo[bktIdx].blkSize;
3060 mtMemoCfg.region[regId].bkt[bktIdx].numBlks = bktNum;
3061 cfgRegInfo[regId].pools[bktIdx].type = SS_POOL_DYNAMIC;
3062 cfgRegInfo[regId].pools[bktIdx].size = mtBktInfo[bktIdx].blkSize - (sizeof(SsMblk)+sizeof(SsDblk));
3065 if(bktUpdtCnt == numBkts)
3072 case 5: /* INPUT: Heapsize ***/
3073 sscanf(line, "%ld", (long *) &heapSz);
3074 mtMemoCfg.region[regId].heapsize = heapSz;
3076 if(regUpdtCnt != mtMemoCfg.numRegions)
3085 #ifdef SS_LOCKLESS_MEMORY
3087 sscanf(line, "%ld", (long *) &numBkts);
3088 mtGlobMemoCfg.numBkts = numBkts;
3089 #ifndef XEON_SPECIFIC_CHANGES
3090 mtDynMemoCfg.numRegions = mtMemoCfg.numRegions;
3093 #ifdef XEON_SPECIFIC_CHANGES
3094 CM_LOG_DEBUG(CM_LOG_ID_MT, "numRegions = %d numBkts = %d\n",
3095 mtDynMemoCfg.numRegions, mtGlobMemoCfg.numBkts);
3096 for(idx = 0; idx < mtDynMemoCfg.numRegions; idx++)
3098 for(idx = 0; idx < mtMemoCfg.numRegions; idx++)
3101 mtDynMemoCfg.region[idx].regionId = idx;
3102 mtDynMemoCfg.region[idx].numBkts = numBkts;
3110 if(bktUpdtCnt < numBkts)
3112 sscanf(line, "%ld %ld %ld %ld %ld %ld", (long *) &bktIdx,
3113 (long *) &bktSz, (long *) &bktNum,
3114 (long *) &bktSetSize, (long *) &bktRelThr,
3115 (long *) &bktAqurThr);
3116 /* Klock work fix ccpu00148484 */
3117 if(bktIdx < SS_MAX_POOLS_PER_REG)
3119 mtGlobMemoCfg.bkt[bktIdx].blkSize = bktSz;
3120 mtGlobMemoCfg.bkt[bktIdx].numBlks = bktNum;
3121 mtGlobMemoCfg.bkt[bktIdx].bucketSetSize = bktSetSize;
3122 #ifdef XEON_SPECIFIC_CHANGES
3123 CM_LOG_DEBUG(CM_LOG_ID_MT, "Pool [%d] blkSize %d numBlks %d bucketSetSize %d\n",
3124 bktUpdtCnt, mtGlobMemoCfg.bkt[bktIdx].blkSize,
3125 mtGlobMemoCfg.bkt[bktIdx].numBlks, mtGlobMemoCfg.bkt[bktIdx].bucketSetSize);
3127 if(bktIdx >= SS_MAX_POOLS_PER_REG)
3129 printf("\nNo. of Buckets/pools are > SS_MAX_POOLS_PER_REG:%d\n",SS_MAX_POOLS_PER_REG);
3135 for(idx = 0; idx < mtMemoCfg.numRegions; idx++)
3137 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetRelThreshold = bktRelThr;
3138 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetAcquireThreshold = bktAqurThr;
3139 #ifdef XEON_SPECIFIC_CHANGES
3140 CM_LOG_DEBUG(CM_LOG_ID_MT, "Pool [%d] blkSetRelThreshold %d blkSetAcquireThreshold %d\n",
3141 bktUpdtCnt, mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetRelThreshold,
3142 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetAcquireThreshold);
3148 #ifdef XEON_SPECIFIC_CHANGES
3149 if(bktUpdtCnt == numBkts)
3155 case 8: /* INPUT: Global Heapsize ***/
3156 sscanf(line, "%ld", (long *) &heapSz);
3157 mtGlobMemoCfg.heapSize = heapSz;
3158 CM_LOG_DEBUG(CM_LOG_ID_MT, "Global Heap size = %d\n", mtGlobMemoCfg.heapSize);
3166 memConfigured = FALSE;
3170 memConfigured = TRUE;
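/* A minimal sketch of a memory configuration file for the parser above. The
 * layout simply follows the case labels of the switch (number of regions;
 * buckets and pools; bucket sizes; per-region bucket counts and heap size);
 * the concrete values are assumptions chosen only for illustration, and the
 * optional SS_LOCKLESS_MEMORY lines (cases 6-8) are omitted:
 *
 *   # lines beginning with '#' or a non-numeric character are skipped
 *   1              <- number of regions
 *   2 2            <- number of buckets, number of pools
 *   0 128          <- bucket 0: block size
 *   1 256          <- bucket 1: block size
 *   0              <- region id
 *   0 1000         <- bucket 0: number of blocks in this region
 *   1 500          <- bucket 1: number of blocks in this region
 *   100000         <- heap size for this region
 */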
3178 /* mt028.201: modification: multiple procs support related changes */
3179 #ifndef SS_MULTIPLE_PROCS
3182 osCp.procId = PID_STK((ProcId) strtol(msOptArg, NULLP, 0));
3184 osCp.procId = (ProcId) strtol(msOptArg, NULLP, 0);
3187 #else /* SS_MULTIPLE_PROCS */
3191 procId = PID_STK((ProcId) strtol(msOptArg, NULLP, 0));
3193 procId = (ProcId) strtol(msOptArg, NULLP, 0);
3195 SAddProcIdLst(1, &procId);
3198 #endif /* SS_MULTIPLE_PROCS */
3202 osCp.configFilePath = msOptArg;
3226 * Desc: Get options from command line
3228 * Ret: option - success
3230 * EOF - end of options
3232 * Notes: Handles command lines like the following
3235 * then command line should look like this...
3236 * -a foo -b foo1 -c -d foo
3240 * while ((ret = SGetOpt(msArgc, msArgv, "ls")) != EOF )
3245 * nloops = atoi(msArgv[msOptInd]);
3248 * state1 = atoi(msArgv[msOptInd]);
3260 int argc, /* argument count */
3261 char **argv, /* argument value */
3262 char *opts /* options */
3265 /* mt020.201 - Removed for no command line */
3273 /* mt020.201 - Addition for no command line */
3285 /*mt013.301 : Changes as per coding standards*/
3286 if (msOptInd >= (S16) argc || argv[msOptInd][0] == '\0')
3292 if (!strcmp(argv[msOptInd], "--"))
3297 else if (argv[msOptInd][0] != '-')
3305 c = argv[msOptInd][sp];
3306 if (c == ':' || (cp = (S8 *) strchr(opts, c)) == (S8 *) NULLP)
3308 if (argv[msOptInd][++sp] == '\0')
3319 if (argv[msOptInd][sp+1] != '\0') msOptArg = &argv[msOptInd++][sp+1];
3322 if (++msOptInd >= (S16) argc)
3327 else msOptArg = argv[msOptInd++];
3334 if (argv[msOptInd][++sp] == '\0')
3346 #endif /* NOCMDLINE */
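/* A minimal sketch of driving SGetOpt() from a caller, following the Notes
 * above; the option letters and the S16 return type used here are
 * assumptions for illustration, not the actual MTSS option set:
 *
 *   S16 ret;
 *
 *   while ((ret = SGetOpt(msArgc, msArgv, "a:b")) != EOF)
 *   {
 *      switch (ret)
 *      {
 *         case 'a':             // option taking an argument
 *            // the argument text is available in msOptArg
 *            break;
 *         case 'b':             // flag option without an argument
 *            break;
 *         default:              // unrecognised option
 *            break;
 *      }
 *   }
 */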
3354 * Desc: This function starts system services execution; the
3355 * permanent tasks are started and the system enters a
3372 /* mt025.201 - Modification for adding lock to timer handler */
3373 for (i = 0; i <= SS_MAX_STSKS + 5; i++)
3375 sem_post(&osCp.dep.ssStarted);
3384 * indirect interface functions to system services service user
3390 * Fun: ssdAttachTTsk
3392 * Desc: This function sends the initial tick message to a TAPA
3393 * task if the task is a permanent task.
3404 SsTTskEntry *tTsk /* pointer to TAPA task entry */
3411 if (tTsk->tskType == SS_TSK_PERMANENT)
3413 /* Send a permanent tick message to this task, to start
3416 ret = SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf);
3419 #if (ERRCLASS & ERRCLS_DEBUG)
3420 MTLOGERROR(ERRCLS_DEBUG, EMT001, ret, "SGetMsg() failed");
3425 mInfo = (SsMsgInfo *)mBuf->b_rptr;
3426 mInfo->eventInfo.event = SS_EVNT_PERMTICK;
3428 /* set up post structure */
3429 /* mt028.201: modification: multiple procs support related changes */
3430 #ifndef SS_MULTIPLE_PROCS
3431 mInfo->pst.dstProcId = SFndProcId();
3432 mInfo->pst.srcProcId = SFndProcId();
3433 #else /* SS_MULTIPLE_PROCS */
3434 mInfo->pst.dstProcId = tTsk->proc;
3435 mInfo->pst.srcProcId = tTsk->proc;
3436 #endif /* SS_MULTIPLE_PROCS */
3437 mInfo->pst.selector = SEL_LC_NEW;
3438 mInfo->pst.region = DFLT_REGION;
3439 mInfo->pst.pool = DFLT_POOL;
3440 mInfo->pst.prior = PRIOR3;
3441 mInfo->pst.route = RTESPEC;
3442 mInfo->pst.event = 0;
3443 mInfo->pst.dstEnt = tTsk->ent;
3444 mInfo->pst.dstInst = tTsk->inst;
3445 mInfo->pst.srcEnt = tTsk->ent;
3446 mInfo->pst.srcInst = tTsk->inst;
3448 ret = ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
3449 (tTsk->tskPrior * SS_MAX_MSG_PRI) + PRIOR3);
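/* A small worked example of the demand-queue priority computed above,
 * assuming purely for illustration that SS_MAX_MSG_PRI is 4 and PRIOR3 is 3:
 * a TAPA task with tskPrior 1 yields (1 * 4) + 3 = 7, i.e. the message is
 * queued at index 7 within the demand queue. */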
3455 #if (ERRCLASS & ERRCLS_DEBUG)
3456 MTLOGERROR(ERRCLS_DEBUG, EMT002, ret,
3457 "Could not write to demand queue");
3470 * Fun: ssdDetachTTsk
3472 * Desc: Does nothing.
3483 SsTTskEntry *tTsk /* pointer to TAPA task entry */
3493 * Fun: ssdCreateSTsk
3495 * Desc: This function creates a system task. A thread is started
3496 * on the system task handler function defined later.
3507 SsSTskEntry *sTsk /* pointer to system task entry */
3511 pthread_attr_t attr;
3512 /* struct sched_param param_sched;*/
3514 #ifdef SS_THR_REG_MAP
3515 uint32_t threadCreated = FALSE;
3520 #ifdef SS_SINGLE_THREADED
3521 /* mt001.301 : Additions */
3523 #ifdef SS_MULTICORE_SUPPORT
3524 if (osCp.numSTsks > 1)
3526 if (osCp.numSTsks > 0)
3527 #endif /* SS_MULTICORE_SUPPORT */
3529 #ifdef SS_MULTICORE_SUPPORT
3530 if (osCp.numSTsks > 3)
3532 if (osCp.numSTsks > 2)
3533 #endif /* SS_MULTICORE_SUPPORT */
3534 #endif /* SS_WATCHDOG */
3541 /* set the current executing entity and instance IDs to
3542 * 'not configured'. create the lock to access them.
3544 sTsk->dep.ent = ENTNC;
3545 sTsk->dep.inst = INSTNC;
3548 /* create the thread */
3549 pthread_attr_init(&attr);
3550 ssdSetPthreadAttr(sTsk->tskPrior, &attr);
3552 printf("\nCreating thread here %s %d\n", __FILE__, __LINE__);
3553 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
3554 if (sTsk->tskPrior == 0)
3556 printf("\nCreating RT thread #######################\n");
3557 #ifdef SS_THR_REG_MAP
3558 /* When the thread is created, we check the memory mapping table to see if
3559 * the threadId can be placed in the thread memory map table. If it cannot be placed,
3560 * the threadId is stored in a temporary array. Once the thread is created successfully,
3561 * pthread_cancel is sent to each thread created earlier. All the
3562 * threads are made to wait on a semaphore, which is a cancellation point for the thread.
3564 while(threadCreated == FALSE)
3567 ret = pthread_create(&sTsk->dep.tId, &attr, mtTskHdlr, (Ptr)sTsk);
3570 DU_LOG("\nDU APP : Failed to create thread. Cause[%d]",ret);
3571 pthread_attr_destroy(&attr);
3573 #if (ERRCLASS & ERRCLS_DEBUG)
3574 MTLOGERROR(ERRCLS_DEBUG, EMT004, ERRZERO, "Could not create thread");
3579 #ifdef SS_THR_REG_MAP
3580 threadCreated = ssCheckAndAddMemoryRegionMap(sTsk->dep.tId,
3588 #ifdef SS_THR_REG_MAP
3589 /* When the thread is created, we check the memory mapping table to see if
3590 * the threadId can be placed in the thread memory map table. If it cannot be placed,
3591 * the threadId is stored in a temporary array. Once the thread is created successfully,
3592 * pthread_cancel is sent to each thread created earlier. All the
3593 * threads are made to wait on a semaphore, which is a cancellation point for the thread.
3595 while(threadCreated == FALSE)
3598 ret = pthread_create(&sTsk->dep.tId, &attr, mtTskHdlr, (Ptr)sTsk);
3602 /* mt020.201 - Addition for destroying thread attribute object attr */
3603 pthread_attr_destroy(&attr);
3605 #if (ERRCLASS & ERRCLS_DEBUG)
3606 MTLOGERROR(ERRCLS_DEBUG, EMT004, ERRZERO, "Could not create thread");
3611 #ifdef SS_THR_REG_MAP
3612 threadCreated = ssCheckAndAddMemoryRegionMap(sTsk->dep.tId,
3619 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
3620 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
3622 static uint32_t stLwpId = 3;
3623 sTsk->dep.lwpId = ++stLwpId;
3625 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
3627 /* mt020.201 - Addition for destroying thread attribute object attr */
3628 pthread_attr_destroy(&attr);
3637 pthread_attr_t* attr,
3638 void *(*start_routine) (void *),
3643 #ifdef SS_THR_REG_MAP
3644 uint32_t threadCreated = FALSE;
3647 SPThreadCreateArg* threadArg = (SPThreadCreateArg*)malloc(sizeof(SPThreadCreateArg));
3648 /* Klock work fix ccpu00148484 */
3649 if(threadArg == NULLP)
3653 threadArg->argument = arg;
3654 threadArg->start_routine = start_routine;
3657 printf("\nCreating thread here %s %d\n", __FILE__, __LINE__);
3659 #ifdef SS_THR_REG_MAP
3660 /* When the thread is created, we check the memory mapping table to see if
3661 * the threadId can be placed in the thread memory map table. If it cannot be placed,
3662 * the threadId is stored in a temporary array. Once the thread is created successfully,
3663 * pthread_cancel is sent to each thread created earlier. All the
3664 * threads are made to wait on a semaphore, which is a cancellation point for the thread.
3666 while(threadCreated == FALSE)
3669 /*pthreadCreateHdlr */
3670 if (((retVal = pthread_create(tid, attr, pthreadCreateHdlr, threadArg))) != 0)
3675 #ifdef SS_THR_REG_MAP
3676 threadCreated = ssCheckAndAddMemoryRegionMap(*tid, SS_MAX_REGS - 1);
3687 * Fun: Set Pthread Attributes
3689 * Desc: This function is used to set various explicit
3690 * pthread attributes like, priority scheduling,etc
3700 static S16 ssdSetPthreadAttr
3703 pthread_attr_t *attr
3706 struct sched_param param;
3709 SMemSet(&param, 0, sizeof(param));
3711 #ifndef TENB_T2K3K_SPECIFIC_CHANGES
3712 param.sched_priority = 100 - 1 - tskPrior;
3714 param.sched_priority = 100 - 10 - tskPrior;
3717 #if 1/* Nawas:: Overriding DL RLC priority to one higher than iccserv */
3718 /* TODO:: This can be avoided by reducing the priority
3719 * of iccserv thread in l1_master.sh*/
3721 if (clusterMode == RADIO_CLUSTER_MODE)
3723 if(tskPrior == PRIOR1)
3725 param.sched_priority = 91;
3732 printf("\nSet priority %u\n", param.sched_priority);
3734 /* Set the scheduler to explicit; without this, none of the
3735 pthread attributes below take effect */
3736 #ifdef TENB_RTLIN_CHANGES
3737 pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
3740 pthread_attr_setstacksize(attr, (size_t)MT_TASK_STACK);
3741 pthread_attr_setscope(attr, PTHREAD_SCOPE_SYSTEM);
3742 pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED);
3743 #ifdef TENB_RTLIN_CHANGES
3744 pthread_attr_setschedpolicy(attr, SCHED_FIFO);
3746 pthread_attr_setschedparam(attr, &param);
3750 } /* ssdSetPthreadAttr */
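/* A minimal usage sketch of ssdSetPthreadAttr(), mirroring the thread
 * creation path in ssdCreateSTsk() above (tskPrior and sTsk stand for the
 * caller's own values):
 *
 *   pthread_attr_t attr;
 *   pthread_t      tId;
 *
 *   pthread_attr_init(&attr);
 *   ssdSetPthreadAttr(tskPrior, &attr);   // stack size, scope, SCHED_FIFO, priority
 *   if (pthread_create(&tId, &attr, mtTskHdlr, (Ptr)sTsk) != 0)
 *   {
 *      // handle the failure; attr must still be destroyed
 *   }
 *   pthread_attr_destroy(&attr);
 */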
3752 /************* multi-core support **************/
3753 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
3754 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
3758 * Fun: Get the current core/cpu affinity for a thread/lwp
3760 * Desc: This function is used to get the current processor/core
3761 * affinity for a system task (thread/lwp). It retrieves the
3762 * affinity based on the mode supplied by the caller.
3765 * RFAILED - failed, general (optional)
3774 SSTskId *tskId, /* filled in with system task ID */
3775 uint32_t *coreId /* the core/processor id to which the affinity is set */
3785 uint32_t cpuInd = 0;
3786 /*mt013.301 :Fix for TRACE5 feature crash due to missing TRC MACRO*/
3789 uint32_t lwpId = *tskId;
3793 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3795 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3797 tId = osCp.sTskTbl[tskInd].dep.tId;
3802 /* if tskId is not found in the tskTbl */
3803 if (tskInd == SS_MAX_STSKS)
3805 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3810 /* initialize the cpu mask */
3813 /* set thread affinity for linux */
3814 if (pthread_getaffinity_np(tId, sizeof(cpuSet), &cpuSet) < 0)
3816 #if (ERRCLASS & ERRCLS_DEBUG)
3817 MTLOGERROR(ERRCLS_DEBUG, EMT037, ERRZERO, "Could not get thread affinity\n");
3820 } /* end if pthread_getaffinity fails */
3822 for (cpuInd = 0; cpuInd <CPU_SETSIZE; cpuInd++)
3824 if (CPU_ISSET (cpuInd, & cpuSet))
3833 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3835 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3837 lwpId = osCp.sTskTbl[tskInd].dep.lwpId;
3842 /* if tskId is not found in the tskTbl */
3843 if (tskInd == SS_MAX_STSKS)
3845 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3849 /* set thread affinity for Solaris */
3850 if (processor_bind(P_LWPID, lwpId, PBIND_QUERY, (processorid_t*)coreId) < 0)
3852 #if (ERRCLASS & ERRCLS_DEBUG)
3853 MTLOGERROR(ERRCLS_DEBUG, EMT037, ERRZERO, "Could not get thread affinity\n");
3856 } /* end if processor_bind fails */
3859 #endif /* SS_LINUX */
3863 } /* ssdGetAffinity */
3868 * Fun: Set the core/cpu affinity for a thread/lwp
3870 * Desc: This function is used to set processor/core affinity for a
3871 * a system task (thread/lwp). It sets the affinity based on the
3872 * mode supplied by the caller.
3875 * RFAILED - failed, general (optional)
3884 SSTskId *tskId, /* filled in with system task ID */
3885 uint32_t coreId /* the core/processor id to which the affinity has to be set */
3889 uint32_t tskInd = 0;
3894 /*mt013.301 :Fix for TRACE5 feature crash due to missing TRC MACRO*/
3897 uint32_t lwpId = *tskId;
3903 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3905 /* Here tskId cannot be used as an index because the task may be terminated if
3906 there is a TERM event for that task, thus breaking the task Id numbering
3908 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3910 tId = osCp.sTskTbl[tskInd].dep.tId;
3915 /* if tskId is not found in the tskTbl */
3916 if (tskInd == SS_MAX_STSKS)
3918 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3922 /* initialize the cpu mask */
3925 /* set the cpu mask */
3926 CPU_SET(coreId, &cpuSet);
3928 /* set thread affinity for linux */
3929 if (pthread_setaffinity_np(tId, sizeof(cpuSet), &cpuSet) < 0)
3931 #if (ERRCLASS & ERRCLS_DEBUG)
3932 MTLOGERROR(ERRCLS_DEBUG, EMT038, ERRZERO, "Could not set thread affinity\n");
3935 } /* end if pthread_setaffinity fails */
3939 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3941 /* comment: modify to use tskId as lwpId to avoid the loop and the new lwpId variable in dep */
3942 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3944 lwpId = osCp.sTskTbl[tskInd].dep.lwpId;
3949 /* if tskId is not found in the tskTbl */
3950 if (tskInd == SS_MAX_STSKS)
3952 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3956 /* set thread affinity for Solaris */
3957 if (processor_bind(P_LWPID, lwpId, coreId, NULL) < 0)
3959 #if (ERRCLASS & ERRCLS_DEBUG)
3960 MTLOGERROR(ERRCLS_DEBUG, EMT038, ERRZERO, "Could not set thread affinity\n");
3963 } /* end if processor_bind fails */
3966 #endif /* SS_LINUX */
3968 } /* ssdSetAffinity */
3970 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
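/* A minimal sketch of how the affinity helpers above might be used together;
 * the caller, the task and the target core below are hypothetical:
 *
 *   SSTskId  tskId;      // system task created earlier, e.g. via SCreateSTsk()
 *   uint32_t curCore;
 *
 *   if (ssdGetAffinity(&tskId, &curCore) == ROK)   // query the current binding
 *   {
 *      ssdSetAffinity(&tskId, curCore + 1);        // rebind, assuming that core exists
 *   }
 */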
3971 /************ end multi-core support *************/
3976 * Fun: ssdDestroySTsk
3978 * Desc: This function destroys a system task. A terminate
3979 * event message is sent to the thread function.
3990 SsSTskEntry *sTsk /* pointer to system task entry */
3999 /* we send a message to this system task to tell it to die */
4000 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
4003 #if (ERRCLASS & ERRCLS_DEBUG)
4004 MTLOGERROR(ERRCLS_DEBUG, EMT005, ERRZERO, "Could not get a message");
4010 mInfo = (SsMsgInfo *)mBuf->b_rptr;
4011 mInfo->eventInfo.event = SS_EVNT_TERM;
4013 if (ssDmndQPutLast(&sTsk->dQ, mBuf, 0) != ROK)
4017 #if (ERRCLASS & ERRCLS_DEBUG)
4018 MTLOGERROR(ERRCLS_DEBUG, EMT006, ERRZERO,
4019 "Could not write to demand queue");
4029 /* mt023.201 - Added SThreadYield function to yield CPU
4033 * Desc: This function defers thread execution to any other ready
4044 S16 SThreadYield(void)
4048 /* mt024.201 - separated Linux and other UNIX implementations
4054 /* Set sleep value to 0 to yield CPU */
4058 return (select(0,0,0,0,&tw) == 0 ? ROK : RFAILED);
4060 #else /* other UNIXes */
4062 return (sleep(0) == 0 ? ROK : RFAILED);
4064 #endif /* SS_LINUX */
4071 * Fun: Register timer
4073 * Desc: This function is used to register a timer
4074 * function for the service user. System services
4075 * will invoke the timer activation function
4076 * passed to it at the specified intervals.
4080 * Notes: Timing is handled by the common timers. The
4081 * ticks are handled by a thread that uses
4082 * nanosleep() and thus timing precision will not
4090 SsTmrEntry *tmr /* pointer to timer entry */
4098 /* initialize common timers */
4099 cmInitTimers(tmr->dep.timers, TMR_DEF_MAX);
4102 /* start the timer */
4103 arg.tq = osCp.dep.tmrTq;
4104 arg.tqCp = &osCp.dep.tmrTqCp;
4105 arg.timers = tmr->dep.timers;
4110 arg.max = TMR_DEF_MAX;
4111 arg.wait = tmr->interval;
4121 * Fun: Deregister timer
4123 * Desc: This function is used to deregister a timer function.
4134 SsTmrEntry *tmr /* pointer to timer entry */
4142 /* stop the timer */
4143 arg.tq = osCp.dep.tmrTq;
4144 arg.tqCp = &osCp.dep.tmrTqCp;
4145 arg.timers = tmr->dep.timers;
4150 arg.max = TMR_DEF_MAX;
4151 arg.wait = tmr->interval;
4161 * Fun: Critical error
4163 * Desc: This function is called when a critical error occurs.
4174 Seq seq, /* sequence number */
4175 Reason reason /* reset reason */
4185 /* get calling task ID */
4186 tId = pthread_self();
4189 /* set up the message to display */
4190 sprintf(errBuf, "\n\nFATAL ERROR - taskid = %x, seq = %d,"
4191 " reason = %d\n\n", (uint8_t)tId, seq, reason);
4195 /* delete all system tasks */
4196 for (i = 0; i < SS_MAX_STSKS; i++)
4198 if (osCp.sTskTbl[i].used
4199 && !pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
4201 pthread_kill(osCp.sTskTbl[i].dep.tId, SIGKILL);
4207 pthread_exit(NULLP);
4210 /* won't reach here */
4219 * Desc: This function is called to log an error.
4230 Ent ent, /* Calling layer's entity id */
4231 Inst inst, /* Calling layer's instance id */
4232 ProcId procId, /* Calling layer's processor id */
4233 Txt *file, /* file name where error occurred */
4234 S32 line, /* line in file where error occurred */
4235 ErrCls errCls, /* error class */
4236 ErrCode errCode, /* layer unique error code */
4237 ErrVal errVal, /* error value */
4238 Txt *errDesc /* description of error */
4251 /* get calling task ID */
4253 tId = pthread_self();
4259 case ERRCLS_ADD_RES:
4260 errClsMsg = "ERRCLS_ADD_RES";
4263 case ERRCLS_INT_PAR:
4264 errClsMsg = "ERRCLS_INT_PAR";
4268 errClsMsg = "ERRCLS_DEBUG";
4271 /* mt028.201 : Addition - ERRCLS_FTHA changes */
4273 errClsMsg = "ERRCLS_FTHA";
4277 errClsMsg = "INVALID ERROR CLASS!";
4282 /*mt009.301 Fixed 64BIT compilation warnings*/
4285 "\nmtss(posix): sw error: ent: %03d inst: %03d proc id: %03d \n"
4286 "file: %s line: %03d errcode: %05d errcls: %s\n"
4287 "errval: %05d errdesc: %s\n",
4288 ent, inst, procId, file, line, errCode, errClsMsg, errVal, errDesc);
4291 "\nmtss(posix): sw error: ent: %03d inst: %03d proc id: %03d \n"
4292 "file: %s line: %03ld errcode: %05ld errcls: %s\n"
4293 "errval: %05ld errdesc: %s\n",
4294 ent, inst, procId, file, line, errCode, errClsMsg, errVal, errDesc);
4296 SDisplay(0, errBuf);
4297 /* mt001.301 : Additions */
4298 #ifdef SS_LOGGER_SUPPORT
4300 #endif /* SS_LOGGER_SUPPORT */
4304 /* debug errors halt the system */
4305 if (errCls == ERRCLS_DEBUG)
4307 /* mt001.301 : Additions */
4308 #ifdef SS_LOGGER_SUPPORT
4310 #endif /* SS_LOGGER_SUPPORT */
4311 /* delete all system tasks */
4312 for (i = 0; i < SS_MAX_STSKS; i++)
4314 if (osCp.sTskTbl[i].used
4315 && !pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
4317 pthread_kill(osCp.sTskTbl[i].dep.tId, SIGKILL);
4323 pthread_exit(NULLP);
4335 * Fun: Register driver task
4337 * Desc: This function is called to register the handlers for a
4349 SsDrvrTskEntry *drvrTsk /* driver task entry */
4356 /* mt001.30 : Additions */
4359 * Fun: Deregister driver task
4361 * Desc: This function is called to deregister the handlers for a
4373 SsDrvrTskEntry *drvrTsk /* driver task entry */
4386 * mt003.301 Additions - SDeRegTTsk fix
4388 #ifdef SS_MULTIPLE_PROCS
4395 #else /*SS_MULTIPLE_PROCS*/
4401 #endif /*SS_MULTIPLE_PROCS*/
4403 #ifdef SS_MULTIPLE_PROCS
4416 /* We check the sTsk element; if it is not NULLP, the
4417 * task is attached. So we have to detach it before
4418 * deregistering the task.
4420 ret = SLock(&osCp.sTskTblLock);
4423 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not lock system task table");
4426 SS_ACQUIRE_ALL_SEMA(&osCp.tTskTblSem, ret);
4429 #if (ERRCLASS & ERRCLS_DEBUG)
4430 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not lock TAPA task table");
4432 if ( SUnlock(&osCp.sTskTblLock) != ROK)
4434 #if (ERRCLASS & ERRCLS_DEBUG)
4435 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not Unlock system task table");
4443 #ifdef SS_MULTIPLE_PROCS
4445 if (tTsk->initTsk != NULLP)
4448 (Void)(*(tTsk->initTsk))(proc, ent, inst,
4451 &(osCp.tTskTbl[idx].xxCb));
4453 (Void)(*(tTsk->initTsk))(proc, ent, inst,
4456 &(osCp.tTskTbl[idx].xxCb));
4457 #endif /* USE_MEMCAL */
4459 #endif /* SS_MULTIPLE_PROCS */
4461 if (tTsk->sTsk != NULLP)
4465 sTsk->dep.ent = ent;
4466 sTsk->dep.inst = inst;
4468 for (n = 0; n < SS_MAX_TTSKS; n++)
4470 if (sTsk->tTsks[n] == idx)
4472 sTsk->tTsks[n] = SS_INVALID_IDX;
4478 /* call the implementation to detach the task */
4479 ssdDetachTTsk(tTsk);
4481 sTsk->dep.ent = ENTNC;
4482 sTsk->dep.inst = INSTNC;
4485 /* Now we empty the entry for this task and update the table
4488 #ifdef SS_MULTIPLE_PROCS
4489 osCp.tTskIds[procIdx][ent][inst] = SS_TSKNC;
4490 #else /* SS_MULTIPLE_PROCS */
4491 osCp.tTskIds[ent][inst] = SS_TSKNC;
4492 #endif /* SS_MULTIPLE_PROCS */
4495 #ifdef SS_MULTIPLE_PROCS
4496 tTsk->proc = PROCNC;
4497 #endif /* SS_MULTIPLE_PROCS */
4499 tTsk->inst = INSTNC;
4500 tTsk->tskType = TTUND;
4501 tTsk->initTsk = NULLP;
4502 tTsk->actvTsk = NULLP;
4505 tTsk->nxt = osCp.nxtTTskEntry;
4506 osCp.nxtTTskEntry = idx;
4509 #ifdef SS_MULTIPLE_PROCS
4510 /* mark the control block for this task as invalid */
4511 osCp.tTskTbl[idx].xxCb = NULLP;
4514 SS_RELEASE_ALL_SEMA(&osCp.tTskTblSem);
4515 if ( SUnlock(&osCp.sTskTblLock) != ROK)
4517 #if (ERRCLASS & ERRCLS_DEBUG)
4518 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not Unlock system task table");
4525 //#ifndef SPLIT_RLC_DL_TASK
4526 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
4527 #if defined (L2_L3_SPLIT) && defined(SPLIT_RLC_DL_TASK)
4528 Void ysMtTskHdlr(Void);
4529 Void ysMtPollPhyMsg(uint8_t region);
4530 Void ysMtRcvPhyMsg(Void);
4531 Void *mtTskHdlrT2kL2
4533 Ptr tskPtr /* pointer to task entry */
4539 /* wait for SS to come up */
4540 /* It is required to block on this semaphore before starting actual processing of
4541 the thread because the creator of this thread might want to cancel it without
4542 doing any processing. When this semaphore is released, it means the creator has given
4543 the go-ahead for actual processing and we should never come back to this point */
4544 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4553 ysMtPollPhyMsg(0); /* blocks, waiting for messages for L2
4554 * (processes L1 msgs) */
4560 Void ysMtTskHdlr(Void);
4561 Void YsPhyRecvMsg();
4562 Void *mtTskHdlrT2kL2
4564 Ptr tskPtr /* pointer to task entry */
4570 /* get out the system task entry from the parameter */
4571 sTsk = (SsSTskEntry *) tskPtr;
4573 /* wait for SS to come up */
4574 /* It is required to block on this semaphore before starting actual processing of
4575 the thread because the creator of this thread might want to cancel it without
4576 doing any processing. When this semaphore is released, it means the creator has given
4577 the go-ahead for actual processing and we should never come back to this point */
4578 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4581 #ifndef RGL_SPECIFIC_CHANGES
4589 #ifdef V5GTF_SPECIFIC_CHANGES
4592 ysMtTskHdlr(); /* blocks, waiting for messages for L2
4593 * (processes L1 msgs) */
4595 /* get a message from the demand queue */
4597 #ifdef RLC_MAC_DAT_REQ_RBUF
4598 rgDlDatReqBatchProc();
4601 ret = mtTskHdlMsg(sTsk);
4604 /* exit the for loop here */
4607 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
4614 #endif /* TENB_T2K3K_SPECIFIC_CHANGES */
4617 void *pthreadCreateHdlr(void * arg)
4620 SPThreadCreateArg* pthreadCreateArg = (SPThreadCreateArg*)arg;
4621 /* mt038.201 changed how sem_wait is called */
4622 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4625 pthreadCreateArg->start_routine(pthreadCreateArg->argument);
4633 * Desc: This is the system task handler function. It blocks on
4634 * the system task's demand queue. On receiving a message,
4635 * it identifies the target TAPA task, verifies that the
4636 * TAPA task belongs to this system task and if so, calls
4637 * the activation function of that TAPA task with the
4638 * received message. The task activation function or the
4639 * timer activation function may be called.
4641 * Ret: (thread function)
4650 Ptr tskPtr /* pointer to task entry */
4656 /* get out the system task entry from the parameter */
4657 sTsk = (SsSTskEntry *) tskPtr;
4660 /* wait for SS to come up */
4662 /* mt038.201 changed how sem_wait is called */
4663 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4665 #ifdef XEON_SPECIFIC_CHANGES
4666 printf("\n**********MT Task Handler********\n");
4670 /* Wait for a message from the demand queue */
4671 #ifdef SS_CDMNDQ_SUPPORT
4672 ret = ssCDmndQWait(&sTsk->dQ);
4674 ret = ssDmndQWait(&sTsk->dQ);
4679 ret = mtTskHdlMsg(sTsk);
4694 * Desc: This is the system task handler function. It blocks on
4695 * the system task's demand queue. On receiving a message,
4696 * it identifies the target TAPA task, verifies that the
4697 * TAPA task belongs to this system task and if so, calls
4698 * the activation function of that TAPA task with the
4699 * received message. The task activation function or the
4700 * timer activation function may be called.
4702 * Ret: (thread function)
4717 SsTTskEntry *tTsk=NULLP;
4720 Buffer *mBuf2=NULLP;
4722 SsMsgInfo *mInfo=NULLP;
4724 /* mt028.201: modification: multiple procs support related changes */
4725 #ifndef SS_MULTIPLE_PROCS
4727 PAIFTMRS16 tmrActvFnMt = NULLP;
4729 /* mt015.301 Initialized the timer activation functions with NULLP */
4730 PFS16 tmrActvFn = NULLP;
4732 PAIFTMRS16 tmrActvFn =NULLP;
4733 uint16_t procIdIdx =0;
4734 #endif /* SS_MULTIPLE_PROCS */
4735 /* mt003.301 Modifications */
4736 #ifdef SS_THREAD_PROFILE
4738 #endif /* SS_THREAD_PROFILE */
4741 ret = ssDmndQGet(&sTsk->dQ, &mBuf, SS_DQ_FIRST);
4744 /* nothing to receive */
4748 /* if we can't lock this system task entry, return the message */
4749 ret = SLock(&sTsk->lock);
4753 #if (ERRCLASS & ERRCLS_DEBUG)
4754 MTLOGERROR(ERRCLS_DEBUG, EMT007, (ErrVal) ret,
4755 "Could not lock system task entry");
4765 mBuf2 = mBuf->b_next;
4767 /* find out what kind of message this is */
4768 mInfo = (SsMsgInfo *)mBuf->b_rptr;
4769 #ifdef SS_MEM_WL_DEBUG
4770 mtTskBuffer1 = mBuf2;
4772 mtTskBuffer2 = mBuf2->b_next;
4774 if(mInfo == (SsMsgInfo *)0x5050505)
4778 cmAnalyseBtInfo((PTR) mBuf,4);
4780 printf("\n In trouble .... \n");
4782 else if (mInfo == (SsMsgInfo *)0x2020202)
4785 cmAnalyseBtInfo((PTR) mBuf,1);
4786 printf("\n In trouble .... \n");
4788 #endif /* SS_MEM_WL_DEBUG */
4789 switch (mInfo->eventInfo.event)
4791 /* this is a termination event, we die */
4793 /* release the message */
4796 /* Unlock the system task entry and lock the system
4797 * task table to clean our entry up.
4799 SUnlock(&sTsk->lock);
4801 ret = SLock(&osCp.sTskTblLock);
4805 #if (ERRCLASS & ERRCLS_DEBUG)
4806 MTLOGERROR(ERRCLS_DEBUG, EMT008, (ErrVal) ret,
4807 "Could not lock system task table");
4809 /* what to do here? */
4813 /* clean up the system task entry */
4816 /* mt003.301 Modifications - SDeRegTTsk */
4817 /* sTsk->numTTsks = 0; */
4818 SDestroyLock(&sTsk->lock);
4819 ssDestroyDmndQ(&sTsk->dQ);
4821 /* lock for current executing TAPA task ID */
4823 /* make this entry available in the system task table */
4824 sTsk->nxt = osCp.nxtSTskEntry;
4825 for (i = 0; i < SS_MAX_STSKS; i++)
4827 if (sTsk == &osCp.sTskTbl[i])
4829 osCp.nxtSTskEntry = i;
4836 /* unlock the system task table */
4837 SUnlock(&osCp.sTskTblLock);
4842 /* this is a data message or a permanent task keep-alive message */
4844 case SS_EVNT_PERMTICK:
4845 /* message to a task. find the destination task */
4846 /* mt028.201: modification: multiple procs support related changes */
4847 #ifdef SS_MULTIPLE_PROCS
4848 procIdIdx = SGetProcIdIdx(mInfo->pst.dstProcId);
4850 if (procIdIdx == SS_INV_PROCID_IDX)
4856 idx = osCp.tTskIds[procIdIdx][mInfo->pst.dstEnt][mInfo->pst.dstInst];
4857 #else /* SS_MULTIPLE_PROCS */
4858 idx = osCp.tTskIds[mInfo->pst.dstEnt][mInfo->pst.dstInst];
4859 #endif /* SS_MULTIPLE_PROCS */
4861 /* verify that it hasn't been deregistered */
4862 if (idx == SS_TSKNC)
4868 /* verify that this system task is still running it */
4869 tTsk = &osCp.tTskTbl[idx];
4870 if (tTsk->sTsk != sTsk)
4876 /* set the current executing TAPA task ID */
4877 sTsk->dep.ent = mInfo->pst.dstEnt;
4878 sTsk->dep.inst = mInfo->pst.dstInst;
4880 /* copy the Pst structure into a local duplicate */
4881 for (i = 0; i < (S16) sizeof(Pst); i++)
4882 *(((uint8_t *)(&nPst)) + i) = *(((uint8_t *)&mInfo->pst) + i);
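/* The byte-wise loop above simply duplicates the Pst structure; an
 * equivalent single-call sketch using the standard library would be:
 *
 *   memcpy(&nPst, &mInfo->pst, sizeof(Pst));
 */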
4884 /* Give the message to the task activation function. If
4885 * its a normal data message, we pass it, if this is a
4886 * keep-alive message for a permanent task then we pass
4887 * NULLP in place of the message to the task activation
4890 if (mInfo->eventInfo.event == SS_EVNT_DATA)
4892 #ifndef RGL_SPECIFIC_CHANGES
4893 #ifdef SS_TSKLOG_ENABLE
4894 uint32_t t = MacGetTick();
4897 /* mt003.301 Modifications */
4898 #if SS_THREAD_PROFILE
4899 tTsk->curEvent = nPst.event;
4901 #endif /* SS_THREAD_PROFILE */
4902 tTsk->actvTsk(&nPst, mBuf);
4903 #ifndef RGL_SPECIFIC_CHANGES
4904 #ifdef SS_TSKLOG_ENABLE
4905 SStopTask(t,PID_SSI_TSK);
4908 #if SS_THREAD_PROFILE
4910 tTsk->curEvtTime = (uint32_t)(et2 - et1);
4911 tTsk->totTime += (uint64_t)tTsk->curEvtTime;
4912 #endif /* SS_THREAD_PROFILE */
4916 #if (ERRCLASS & ERRCLS_DEBUG)
4917 /* this message should only come to a permanent task */
4918 if (tTsk->tskType != SS_TSK_PERMANENT)
4920 MTLOGERROR(ERRCLS_DEBUG, EMT009, ERRZERO, "Logic failure");
4924 tTsk->actvTsk(&nPst, NULLP);
4926 /* We need to re-send this message back to ourselves so
4927 * the permanent task continues to run.
4929 /* Check if this task got deregistered or detached
4930 * by the activation function; if so, there's nothing
4931 * more to do here, otherwise go ahead.
4934 if (tTsk->used == TRUE && tTsk->sTsk != NULLP)
4936 ret = ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
4937 ((tTsk->tskPrior) * SS_MAX_MSG_PRI) +
4941 /* failure here is a real problem */
4944 #if (ERRCLASS & ERRCLS_DEBUG)
4945 MTLOGERROR(ERRCLS_DEBUG, EMT010, ERRZERO,
4946 "Could not write to demand queue");
4952 /* unset the current executing TAPA task ID */
4953 sTsk->dep.ent = ENTNC;
4954 sTsk->dep.inst = INSTNC;
4959 /* timer event. find the timer entry */
4960 idx = mInfo->eventInfo.u.tmr.tmrIdx;
4962 /* lock the timer table, coz we're going to peek in it */
4963 ret = SLock(&osCp.tmrTblLock);
4967 #if (ERRCLASS & ERRCLS_DEBUG)
4968 MTLOGERROR(ERRCLS_DEBUG, EMT011, (ErrVal) ret,
4969 "Could not lock timer table");
4975 /* Verify that this timer entry is still around and that it
4976 * belongs to our task.
4978 if (osCp.tmrTbl[idx].used == FALSE
4979 /* mt028.201: modification: multiple procs support related changes */
4980 #ifdef SS_MULTIPLE_PROCS
4981 || osCp.tmrTbl[idx].ownerProc != mInfo->pst.dstProcId
4982 #endif /* SS_MULTIPLE_PROCS */
4983 || osCp.tmrTbl[idx].ownerEnt != mInfo->pst.dstEnt
4984 || osCp.tmrTbl[idx].ownerInst != mInfo->pst.dstInst)
4986 SUnlock(&osCp.tmrTblLock);
4991 /* mt005.21: addition */
4992 /* set the current executing TAPA task ID */
4993 sTsk->dep.ent = mInfo->pst.dstEnt;
4994 sTsk->dep.inst = mInfo->pst.dstInst;
4996 #ifndef SS_MULTIPLE_PROCS
4998 /*mt006.301 Added initialization of tmrActvFnMt*/
4999 tmrActvFnMt = NULLP;
5000 if (osCp.tmrTbl[idx].ssTmrActvFn.mtFlag == TRUE)
5002 tmrActvFnMt = osCp.tmrTbl[idx].ssTmrActvFn.actvFnc.tmrActvFnMt;
5008 tmrActvFn = osCp.tmrTbl[idx].ssTmrActvFn.actvFnc.tmrActvFn;
5011 /* unlock the timer table */
5012 SUnlock(&osCp.tmrTblLock);
5014 /* activate the timer function */
5015 /* mt028.201: modification: multiple procs support related changes */
5016 #ifndef SS_MULTIPLE_PROCS
5020 tmrActvFnMt(osCp.tmrTbl[idx].ownerEnt,
5021 osCp.tmrTbl[idx].ownerInst);
5029 tmrActvFn(osCp.tmrTbl[idx].ownerProc, osCp.tmrTbl[idx].ownerEnt,
5030 osCp.tmrTbl[idx].ownerInst);
5031 #endif /* SS_MULTIPLE_PROCS */
5033 /*mt005.21: addition */
5034 /* unset the current executing TAPA task ID */
5035 sTsk->dep.ent = ENTNC;
5036 sTsk->dep.inst = INSTNC;
5039 /* return the message buffer */
5043 * mt003.301 - SDeRegTTsk fix
5045 case SS_EVNT_TTSK_TERM:
5046 #ifdef SS_MULTIPLE_PROCS
5047 procIdIdx = SGetProcIdIdx(mInfo->pst.dstProcId);
5049 if (procIdIdx == SS_INV_PROCID_IDX)
5055 idx = osCp.tTskIds[procIdIdx][mInfo->pst.dstEnt][mInfo->pst.dstInst];
5056 #else /* SS_MULTIPLE_PROCS */
5057 idx = osCp.tTskIds[mInfo->pst.dstEnt][mInfo->pst.dstInst];
5058 #endif /* SS_MULTIPLE_PROCS */
5060 /* verify that it hasn't been deregistered */
5061 if (idx == SS_TSKNC)
5067 /* verify that this system task is still running it */
5068 tTsk = &osCp.tTskTbl[idx];
5069 if (tTsk->sTsk != sTsk)
5074 #ifdef SS_MULTIPLE_PROCS
5075 ssdProcTTskTerm(procIdIdx, tTsk, idx);
5077 ssdProcTTskTerm(tTsk, idx);
5083 #if (ERRCLASS & ERRCLS_DEBUG)
5084 MTLOGERROR(ERRCLS_DEBUG, EMT012, (ErrVal) ret,
5091 } while (mBuf != NULLP);
5094 /* unlock the system task entry */
5095 SUnlock(&sTsk->lock);
5098 /* yield for other threads */
5099 /* mt024.201 - changed to use SSI SThreadYield instead of sleep */
5108 * Fun: mtTmrHdlrPublic
5110 Void mtTmrHdlrPublic()
5112 if (SLock(&osCp.tmrTblLock) != ROK)
5114 #if (ERRCLASS & ERRCLS_DEBUG)
5115 MTLOGERROR(ERRCLS_DEBUG, EMT016, ERRZERO, "Could not lock timer table");
5119 cmPrcTmr(&osCp.dep.tmrTqCp, osCp.dep.tmrTq, mtTimeout);
5120 /* unlock the timer table */
5121 SUnlock(&osCp.tmrTblLock);
5129 * Desc: The timer handler thread function. Counts time
5130 * and invokes the common timer function on each
5133 * Ret: (thread function)
5140 /*mt041.201 Modified SSI tick handling in mtTmrHdlr() */
5141 static Void *mtTmrHdlr
5143 void *parm /* unused */
5146 /*mt004.301 - added new region*/
5147 /* mt010.301 Removed SS_FAP portion and
5148 * enabled original code in function mtTmrHdlr */
5152 uint32_t i, cnt, oldTicks, newTicks;
5153 struct timeval tv1,tv2;
5154 /* mt038.201 added return */
5156 /* mt039.201 changes for nanosleep */
5157 struct timespec tsN;
5158 static uint32_t err_in_usec;
5160 /*mt013.301 : doesn't need TRC macro, as this will never return*/
5165 /* mt027.201 - Modification for SRegCfgTmr support */
5166 /* check SS_TICKS_SEC */
5167 if (SS_1MS < SS_TICKS_SEC)
5169 MTLOGERROR(ERRCLS_DEBUG, EMT013, ERRZERO, "Minimum SSI ticks is 1ms");
5172 /* mt025.201 - Addition to stop timer handler till task registration is done */
5173 /* wait for SS to come up */
5174 /* mt038.201 changed how sem_wait is called */
5175 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
5178 /* mt027.201 - Modification for SRegCfgTmr support */
5179 /* set up parameter to nanosleep() for SS_TICKS_SEC */
5181 ts.tv_nsec = (MT_TICK_CNT * 1000);
5182 /* mt039.201 changes for nanosleep */
5188 if (gettimeofday(&tv1, NULL) == -1)
5190 #if (ERRCLASS & ERRCLS_DEBUG)
5191 MTLOGERROR(ERRCLS_DEBUG, EMT014, (ErrVal) errno,
5192 "Error in clock_gettime");
5202 #ifndef STUB_TTI_HANDLING_5GTF
5203 printf("\nReturning from mtTmrHdlr()\n");
5208 /* mt039.201 changes for nanosleep */
5209 /* sleep for one SSI tick, adjusted by the accumulated error (in microseconds) */
5210 ts.tv_nsec = (MT_TICK_CNT - err_in_usec) * 1000;
5211 while ((ret = nanosleep (&ts, &tsN) != ROK) && (errno == EINTR))
5213 ts.tv_nsec = tsN.tv_nsec;
5218 if (gettimeofday(&tv2,NULL) == -1)
5220 #if (ERRCLASS & ERRCLS_DEBUG)
5221 MTLOGERROR(ERRCLS_DEBUG, EMT015, (ErrVal) errno,
5222 "Error in clock_gettime");
5226 /*mt013.301 : changed check while calculating timer to fix
5227 * difference between MTSS time and real Unix time
5229 if ((tv2.tv_sec == tv1.tv_sec)&&(tv2.tv_usec > tv1.tv_usec))
5231 time_int = (tv2.tv_usec - tv1.tv_usec);
5233 else if (tv2.tv_sec > tv1.tv_sec)
5235 time_int = ((tv2.tv_sec - tv1.tv_sec)*1000000) + (tv2.tv_usec - tv1.tv_usec);
5237 else /* ts2 < ts1, this will not happen in normal scenario */
5239 /* to make sure cnt = 1 */
5241 time_int = MT_TICK_CNT;
5244 oldTicks = osCp.dep.sysTicks;
5245 osCp.dep.sysTicks += (time_int/(MT_TICK_CNT - err_in_usec));
5246 err_in_usec = (time_int % (MT_TICK_CNT - err_in_usec));
5247 newTicks = osCp.dep.sysTicks;
5248 tv1.tv_usec = tv2.tv_usec;
5249 tv1.tv_sec = tv2.tv_sec;
5251 cnt = newTicks - oldTicks;
5253 while(err_in_usec >= MT_TICK_CNT)
5256 err_in_usec -= MT_TICK_CNT;
5258 if( cnt >= MT_MAX_TICK_CNT_VAL)
5259 cnt = MT_MIN_TICK_CNT_VAL;
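/* A short worked example of the tick bookkeeping above, assuming purely for
 * illustration that MT_TICK_CNT is the tick period in microseconds (e.g.
 * 1000 for a 1 ms tick) and that err_in_usec starts at 0: if gettimeofday()
 * shows time_int = 2350 us elapsed, sysTicks advances by 2350 / 1000 = 2,
 * err_in_usec becomes 2350 % 1000 = 350, and the next nanosleep() above
 * waits only (1000 - 350) us so that the accumulated error is worked off. */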
5260 /* call the common timer tick handler */
5261 for (i = 0; i < cnt; i++)
5263 /* mt008.301: cmPrcTmr is guarded with a lock */
5264 /* lock the timer table */
5265 if (SLock(&osCp.tmrTblLock) != ROK)
5267 #if (ERRCLASS & ERRCLS_DEBUG)
5268 MTLOGERROR(ERRCLS_DEBUG, EMT016, ERRZERO, "Could not lock timer table");
5272 cmPrcTmr(&osCp.dep.tmrTqCp, osCp.dep.tmrTq, mtTimeout);
5273 /* unlock the timer table */
5274 SUnlock(&osCp.tmrTblLock);
5278 /* mt009.21: addition */
5279 return ( (Void *) NULLP);
5280 /* will not reach here */
5288 * Desc: Process timer event. Called from the common timer
5289 * code when a timeout occurs.
5300 PTR tCb, /* control block */
5301 S16 evnt /* event */
5310 #ifndef TENB_RTLIN_CHANGES
5313 /* mt028.201: modification: multiple procs support related changes */
5314 #ifdef SS_MULTIPLE_PROCS
5316 #endif /* SS_MULTIPLE_PROCS */
5317 #ifdef RGL_SPECIFIC_CHANGES
5318 #ifdef MSPD_MLOG_NEW
5319 uint32_t t = GetTIMETICK();
5325 /* get the timer entry */
5326 tEnt = (SsTmrEntry *) tCb;
5329 /* if the timer was deleted, this will be NULL, so drop it */
5335 /* mt008.301 Deletion: tmrTbl Lock is moved to mtTmrHdlr */
5338 /* Hmmmm, the timer might have been deleted while we've been
5339 * working at getting here, so we just skip this.
5341 if (tEnt->used == FALSE)
5347 /* Set up and send a timer message to the destination tasks'
5350 #ifndef SS_MULTICORE_SUPPORT
5351 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
5353 #ifdef RGL_SPECIFIC_CHANGES
5354 if (SGetMsg((SS_DFLT_REGION), SS_DFLT_POOL, &mBuf) != ROK)
5356 if (SGetMsg((osCp.sTskTbl[0].region), SS_DFLT_POOL, &mBuf) != ROK)
5361 #if (ERRCLASS & ERRCLS_DEBUG)
5362 MTLOGERROR(ERRCLS_DEBUG, EMT017, ERRZERO, "Could not get message");
5368 mInfo = (SsMsgInfo *)mBuf->b_rptr;
5369 mInfo->eventInfo.event = SS_EVNT_TIMER;
5370 mInfo->eventInfo.u.tmr.tmrIdx = tEnt->tmrId;
5372 mInfo->pst.dstEnt = tEnt->ownerEnt;
5373 mInfo->pst.dstInst = tEnt->ownerInst;
5374 mInfo->pst.srcEnt = tEnt->ownerEnt;
5375 mInfo->pst.srcInst = tEnt->ownerInst;
5376 /* mt028.201: modification: multiple procs support related changes */
5377 #ifndef SS_MULTIPLE_PROCS
5378 mInfo->pst.dstProcId = SFndProcId();
5379 mInfo->pst.srcProcId = SFndProcId();
5380 #else /* SS_MULTIPLE_PROCS */
5381 mInfo->pst.dstProcId = tEnt->ownerProc;
5382 mInfo->pst.srcProcId = tEnt->ownerProc;
5383 #endif /* SS_MULTIPLE_PROCS */
5384 mInfo->pst.selector = SEL_LC_NEW;
5385 #ifndef SS_MULTICORE_SUPPORT
5386 mInfo->pst.region = DFLT_REGION;
5389 mInfo->pst.pool = DFLT_POOL;
5390 mInfo->pst.prior = PRIOR0;
5391 mInfo->pst.route = RTESPEC;
5392 mInfo->pst.event = 0;
5395 #ifndef TENB_RTLIN_CHANGES
5396 /* get a semaphore for the TAPA task table */
5397 SS_ACQUIRE_SEMA(&osCp.tTskTblSem, ret);
5402 #if (ERRCLASS & ERRCLS_DEBUG)
5403 MTLOGERROR(ERRCLS_DEBUG, EMT018, ret, "Could not lock TAPA task table");
5411 /* find the owner TAPA task */
5412 /* mt028.201: modification: multiple procs support related changes */
5413 #ifdef SS_MULTIPLE_PROCS
5414 procIdIdx = SGetProcIdIdx(tEnt->ownerProc);
5415 idx = osCp.tTskIds[procIdIdx][tEnt->ownerEnt][tEnt->ownerInst];
5416 #else /* SS_MULTIPLE_PROCS */
5417 idx = osCp.tTskIds[tEnt->ownerEnt][tEnt->ownerInst];
5418 #endif /* SS_MULTIPLE_PROCS */
5419 if (idx == SS_TSKNC)
5421 #ifndef TENB_RTLIN_CHANGES
5422 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5429 /* ensure that the TAPA task is hale and hearty */
5430 tTsk = &osCp.tTskTbl[idx];
5433 #ifndef TENB_RTLIN_CHANGES
5434 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5439 /* Klock work fix ccpu00148484 */
5440 /* write the timer message to the queue of the destination task */
5441 /* mt008.301 : check sTsk before putting into it's DQ */
5442 if (tTsk->sTsk == NULLP)
5444 #ifndef TENB_RTLIN_CHANGES
5445 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5449 #if (ERRCLASS & ERRCLS_DEBUG)
5450 MTLOGERROR(ERRCLS_DEBUG, EMT019, ERRZERO,
5451 "Could not write to demand queue");
5456 #ifdef SS_LOCKLESS_MEMORY
5457 mInfo->pst.region = tTsk->sTsk->region;
5458 mInfo->region = tTsk->sTsk->region;
5459 #endif /* SS_LOCKLESS_MEMORY */
5460 if (ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
5461 (tTsk->tskPrior * SS_MAX_MSG_PRI) + PRIOR0) != ROK)
5463 #ifndef TENB_RTLIN_CHANGES
5464 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5468 #if (ERRCLASS & ERRCLS_DEBUG)
5469 MTLOGERROR(ERRCLS_DEBUG, EMT019, ERRZERO,
5470 "Could not write to demand queue");
5475 /* Fix for ccpu00130657 */
5476 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
5477 if (tTsk->sTsk->tskPrior == PRIOR0)
5480 WLS_WakeUp(mtGetWlsHdl());
5487 /* release the semaphore for the TAPA task table */
5488 #ifndef TENB_RTLIN_CHANGES
5489 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5493 /* restart the timer */
5494 arg.tq = osCp.dep.tmrTq;
5495 arg.tqCp = &osCp.dep.tmrTqCp;
5496 arg.timers = tEnt->dep.timers;
5497 arg.cb = (PTR) tEnt;
5501 arg.max = TMR_DEF_MAX;
5502 arg.wait = tEnt->interval;
5504 #ifdef RGL_SPECIFIC_CHANGES
5505 #ifdef MSPD_MLOG_NEW
5506 MLogTask(131313, RESOURCE_LARM, t, GetTIMETICK());
5518 * Desc: This thread reads the console and hands over any
5519 * data read to a user function.
5521 * Ret: (thread function)
5528 static Void *mtConHdlr
5530 Ptr parm /* unused */
5537 /*mt013.301 : doesn't need TRC macro, as this will never return*/
5543 /* check if we have a console input file handle */
5544 if (osCp.dep.conInFp == NULLP)
5550 fd = fileno(osCp.dep.conInFp);
5555 if ((read(fd, &data, 1)) != 1)
5561 /* call rdConQ, defined by the system service user */
5571 #ifdef SS_DRVR_SUPPORT
5574 * Fun: Interrupt service task handler
5576 * Desc: This is the interrupt service task handler. It blocks
5577 * on a pipe from which it reads an isFlag structure. The
5578 * structure indicates which interrupt service task is to
5579 * be executed. The thread identifies the task, calls the
5580 * isTsk function and sends itself a message to repeat
5581 * this operation until it receives a message to cease.
5590 /* mt009.21: addition */
5591 static Void *mtIsTskHdlr
5593 Ptr tskPtr /* pointer to task entry */
5596 #if (ERRCLASS & ERRCLS_DEBUG)
5603 if (read(osCp.dep.isFildes[0], &isFlag, sizeof(isFlag)) != sizeof(isFlag))
5608 switch (isFlag.action)
5611 osCp.drvrTskTbl[isFlag.id].dep.flag = TRUE;
5613 /* call the interrupt service task activation function */
5614 osCp.drvrTskTbl[isFlag.id].isTsk(isFlag.id);
5616 /* send self a message to keep doing this */
5617 isFlag.action = MT_IS_RESET;
5619 #if (ERRCLASS & ERRCLS_DEBUG)
5620 ret = write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5621 if (ret != sizeof(isFlag))
5623 MTLOGERROR(ERRCLS_DEBUG, EMT020, ERRZERO,
5624 "write() to pipe failed");
5627 write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5634 osCp.drvrTskTbl[isFlag.id].dep.flag = FALSE;
5639 if (osCp.drvrTskTbl[isFlag.id].dep.flag)
5641 /* call the interrupt service task activation function */
5642 osCp.drvrTskTbl[isFlag.id].isTsk(isFlag.id);
5644 #if (ERRCLASS & ERRCLS_DEBUG)
5645 /* send self a message to do this again */
5646 ret = write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5648 if (ret != sizeof(isFlag))
5650 MTLOGERROR(ERRCLS_DEBUG, EMT021, ERRZERO,
5651 "write() to pipe failed");
5654 write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5662 /* where did THIS come from?? */
5666 /* mt009.21: addition */
5667 return ( (Void *) NULLP);
5671 #endif /* SS_DRVR_SUPPORT */
5672 #endif /* L2_L3_SPLIT */
5674 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
5678 * Fun: mtIntSigHndlr
5680 * Desc: Exit function, shuts down.
5689 Void mtIntSigHndlr(int arg)
5692 osCp.dep.sigEvnt=TRUE;
5695 #ifdef TENB_RTLIN_CHANGES
5703 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
5708 * Desc: Exit cleanup function; shuts down.
5717 Void mtExitClnup(void)
5723 SGetSysTime(&ticks);
5725 sprintf(buf, "\n\nmtss(posix) ends\nticks: %u\n", ticks);
5727 sprintf(buf, "\n\nmtss(posix) ends\nticks: %lu\n", ticks);
5729 #ifdef SS_HISTOGRAM_SUPPORT
5733 osCp.dep.sigEvnt=FALSE;
5735 if (osCp.dep.fileOutFp)
5737 fclose(osCp.dep.fileOutFp);
5745 Void SIncrementTtiCount(Void)
5750 Ticks SGetTtiCount(Void)
5759 * Desc: This function displays a string to a given output
5764 * Notes: Buffer should be null terminated.
5766 * channel 0 is reserved for backwards compatibility
5774 S16 chan, /* channel */
5775 Txt *buf /* buffer */
5779 /* mt020.201 - Fixed typo */
5780 #if (ERRCLASS & ERRCLS_INT_PAR)
5783 MTLOGERROR(ERRCLS_INT_PAR, EMT022, ERRZERO, "Null pointer");
5788 #ifndef XEON_SPECIFIC_CHANGES
5789 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
5790 ssMemlog(buf, strlen(buf));
5795 /* mt012.301 :FIX for LOG RELATED ISSUE */
5803 if (osCp.dep.conOutFp) fwrite(buf, strlen(buf), 1, osCp.dep.conOutFp);
5809 if (osCp.dep.fileOutFp)
5810 fwrite(buf, strlen(buf), 1, osCp.dep.fileOutFp);
5811 /*mt031.201 added under compile time flag FLUSHBUFF a call to fflush() */
5814 fflush(osCp.dep.fileOutFp);
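/* Usage sketch (illustrative, not part of the original source): emitting a
 * diagnostic line through SDisplay on the default channel 0. Assumes a local
 * print buffer of size PRNTSZE, as used elsewhere in this file; the buffer
 * must be null terminated.
 *
 *    Txt prntBuf[PRNTSZE];
 *    sprintf(prntBuf, "initialization complete\n");
 *    SDisplay(0, prntBuf);
 */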
5827 * Desc: Exit function, shuts down.
5839 /* mt030.201 added under compile time flag SS_LINUX and SLES9_PLUS
5840 a loop to overcome the child processes being killed upon exiting the
5842 #ifdef SS_LINUX /* this should have already been defined */
5843 /* mt010.301 removed flag SLES9_PLUS */
5844 /* wait forever for children */
5848 if(osCp.dep.sigEvnt==TRUE)
5855 pthread_exit(NULLP);
5861 * Fun: Set date and time
5863 * Desc: This function is used to set the calendar
5868 * Notes: Unimplemented
5875 REG1 DateTime *dt /* date and time */
5888 * Fun: Get date and time
5890 * Desc: This function is used to determine the calendar
5891 * date and time. This information may be used for
5892 * some management functions.
5904 REG1 DateTime *dt /* date and time */
5907 /*-- mt035.201 : SSI enhancements for micro second in datetime struct --*/
5910 struct timespec ptime;
5912 struct timeval ptime;
5919 #if (ERRCLASS & ERRCLS_INT_PAR)
5922 MTLOGERROR(ERRCLS_INT_PAR, EMT023, ERRZERO, "Null pointer");
5931 localtime_r(&tt, &tme);
5934 clock_gettime(CLOCK_REALTIME, &ptime);
5936 gettimeofday(&ptime, NULL);
5938 localtime_r(&ptime.tv_sec, &tme);
5940 dt->month = (uint8_t) tme.tm_mon + 1;
5941 dt->day = (uint8_t) tme.tm_mday;
5942 dt->year = (uint8_t) tme.tm_year;
5943 dt->hour = (uint8_t) tme.tm_hour;
5944 dt->min = (uint8_t) tme.tm_min;
5945 dt->sec = (uint8_t) tme.tm_sec;
5948 #ifdef SS_DATETIME_USEC
5950 dt->usec = ptime.tv_nsec / 1000;
5952 dt->usec = ptime.tv_usec;
5954 #endif /*-- SS_DATETIME_USEC --*/
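/* Usage sketch (illustrative): reading the calendar date and time. The
 * function name SGetDateTime and the ROK return convention are assumed from
 * the SSI API; only the DateTime fields populated above are relied upon.
 *
 *    DateTime dt;
 *    if (SGetDateTime(&dt) == ROK)
 *    {
 *       ...use dt.hour, dt.min and dt.sec (and dt.usec with SS_DATETIME_USEC)...
 *    }
 */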
5960 * Get time from epoch in milliseconds
5962 * Fun: Get time from epoch in milliseconds
5964 * Desc: This function is used to get the time from epoch in milliseconds.
5965 * This information may be used for calculating a layer's activation function
5966 * execution time, which is used for thread profiling.
5975 /* mt003.301 Modifications */
5978 EpcTime *et /* date and time */
5981 /* mt003.301 Modifications */
5982 static uint64_t now;
5983 uint64_t to_sec = 1000000;
5984 uint64_t to_nsec = 1000;
5986 struct timespec ptime;
5988 struct timeval ptime;
5993 #if (ERRCLASS & ERRCLS_INT_PAR)
6002 clock_gettime(CLOCK_REALTIME, &ptime);
6004 gettimeofday(&ptime, NULL);
6005 #endif /* SS_LINUX */
6007 now = (ptime.tv_sec * to_sec);
6010 now += (ptime.tv_nsec / to_nsec);
6011 #else /* SS_LINUX */
6012 now += (ptime.tv_usec);
6014 #endif /* SS_LINUX */
6015 now = (now / to_nsec);
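/* Usage sketch (illustrative): timing an activation function in milliseconds,
 * as the Desc above suggests. The name SGetEpcTime is assumed from the SSI
 * API; EpcTime is the type used in the prototype above.
 *
 *    EpcTime start, end;
 *    SGetEpcTime(&start);
 *    ...invoke the activation function...
 *    SGetEpcTime(&end);
 *    ...end - start is the execution time in milliseconds...
 */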
6026 * Fun: Get system time
6028 * Desc: This function is used to determine the system time.
6032 * Notes: osCp.dep.sysTicks is updated by the timer thread.
6039 Ticks *sysTime /* system time */
6044 #if (ERRCLASS & ERRCLS_INT_PAR)
6045 if (sysTime == NULLP)
6047 MTLOGERROR(ERRCLS_INT_PAR, EMT024, ERRZERO, "Null pointer");
6053 *sysTime = osCp.dep.sysTicks;
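/* Usage sketch (illustrative): polling the tick counter maintained by the
 * timer thread.
 *
 *    Ticks now;
 *    SGetSysTime(&now);
 */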
6059 /* mt021.201 - Addition of SGetRefTime function */
6062 * Fun: Get referenced time
6064 * Desc: This function is used to determine the time in seconds
6065 * and microseconds from a reference time. The reference
6066 * time is expressed in seconds from the UTC epoch, January 1,
6072 * Notes: Macros are defined for reference times:
6073 * SS_REFTIME_01_01_1970
6074 * SS_REFTIME_01_01_2002
6081 uint32_t refTime, /* reference time */
6088 struct timespec ptime;
6090 struct timeval ptime;
6095 clock_gettime(CLOCK_REALTIME, &ptime);
6097 gettimeofday(&ptime, NULL);
6100 #if (ERRCLASS & ERRCLS_INT_PAR)
6101 if (sec == NULLP || usec == NULLP)
6103 MTLOGERROR(ERRCLS_INT_PAR, EMT025, ERRZERO, "Null pointer");
6106 /* mt022.201 - Modification to fix compile warning */
6107 if (refTime > (uint32_t)(ptime.tv_sec))
6109 MTLOGERROR(ERRCLS_INT_PAR, EMT026, ERRZERO, "Reference time exceeds present time");
6114 *sec = ptime.tv_sec - refTime;
6116 *usec = ptime.tv_nsec / 1000;
6118 *usec = ptime.tv_usec;
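/* Usage sketch (illustrative): elapsed time since one of the reference points
 * listed in the Notes above. The uint32_t types for the outputs are assumed
 * to match the prototype.
 *
 *    uint32_t sec, usec;
 *    SGetRefTime(SS_REFTIME_01_01_2002, &sec, &usec);
 */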
6128 * Fun: Get Random Number
6130 * Desc: Invoked by layer when a pseudorandom number is required.
6134 * Notes: Suggested approach uses shuffled Linear Congruential
6135 * Operators as described in Byte magazine October
6136 * 1984; "Generating and Testing Pseudorandom Numbers"
6143 Random *value /* random number */
6148 #if (ERRCLASS & ERRCLS_INT_PAR)
6151 /* mt011.21: addition */
6152 MTLOGERROR(ERRCLS_INT_PAR, EMT028, (ErrVal)0 , "Null pointer");
6158 *value = (Random) rand_r(&osCp.dep.randSeed);
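/* Usage sketch (illustrative): drawing one pseudorandom value. The function
 * name SRandom and the ROK return convention are assumed from the SSI API.
 *
 *    Random rnd;
 *    if (SRandom(&rnd) == ROK)
 *    {
 *       ...use rnd...
 *    }
 */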
6169 * Desc: This function exits from a task.
6173 * Notes: Currently does nothing.
6188 * Fun: Exit Interrupt
6190 * Desc: This function exits from an interrupt.
6194 * Notes: Currently does nothing.
6209 * Fun: Hold Interrupt
6211 * Desc: This function prohibits interrupts from being enabled until
6212 * release interrupt. This function should be called when
6213 * interrupts are disabled and prior to any call to system
6214 * services either by entry to an interrupt service routine or
6215 * by explicit call to disable interrupt.
6219 * Notes: Currently does nothing
6234 * Fun: Release Interrupt
6236 * Desc: This function allows interrupts to be enabled.
6240 * Notes: Currently does nothing.
6257 * Desc: Enable interrupts
6259 * Ret: ROK on success
6262 * Notes: Currently does nothing.
6267 inline S16 SEnbInt(void)
6279 * Desc: Disable interrupts
6281 * Ret: ROK on success
6284 * Notes: Currently does nothing.
6289 inline S16 SDisInt(void)
6301 * Desc: This function gets the function address stored at the
6302 * specified interrupt vector.
6306 * Notes: Currently does nothing.
6313 VectNmb vectNmb, /* vector number */
6314 PIF *vectFnct /* vector function */
6331 * Desc: This function installs the specified function at the
6332 * specified interrupt vector.
6336 * Notes: Currently does nothing.
6343 VectNmb vectNmb, /* vector number */
6344 PIF vectFnct /* vector function */
6356 /* mt028.201: modification: multiple procs support related changes */
6357 #ifndef SS_MULTIPLE_PROCS
6363 * Desc: This function gets the current entity and instance.
6366 * RFAILED - failed, general (optional)
6368 * Notes: This function may be called by the OS or Layer 1
6376 Ent *ent, /* entity */
6377 Inst *inst /* instance */
6388 #if (ERRCLASS & ERRCLS_INT_PAR)
6389 /* check pointers */
6390 if (ent == NULLP || inst == NULLP)
6392 MTLOGERROR(ERRCLS_INT_PAR, EMT029, ERRZERO, "Null pointer");
6398 /* get the thread id */
6399 tId = pthread_self();
6402 /* find the system task in whose context we're running */
6404 ret = SLock(&osCp.sTskTblLock);
6409 for (i = 0; i < SS_MAX_STSKS; i++)
6411 if (pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
6413 sTsk = &osCp.sTskTbl[i];
6419 *ent = sTsk->dep.ent;
6420 *inst = sTsk->dep.inst;
6422 SUnlock(&osCp.sTskTblLock);
6425 return (ret == ROK ? ROK : RFAILED);
6433 * Desc: This function sets the current entity and instance.
6444 Ent ent, /* entity */
6445 Inst inst /* instance */
6456 #if (ERRCLASS & ERRCLS_INT_PAR)
6457 /* check entity and instance IDs */
6458 if (ent >= ENTNC || inst >= INSTNC)
6460 MTLOGERROR(ERRCLS_INT_PAR, EMT030, ERRZERO, "Invalid entity/instance");
6466 /* get the thread id */
6467 tId = pthread_self();
6470 /* find the system task in whose context we're running */
6472 ret = SLock(&osCp.sTskTblLock);
6477 for (i = 0; i < SS_MAX_STSKS; i++)
6479 if (pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
6481 sTsk = &osCp.sTskTbl[i];
6487 sTsk->dep.ent = ent;
6488 sTsk->dep.inst = inst;
6490 SUnlock(&osCp.sTskTblLock);
6493 return (ret == ROK ? ROK : RFAILED);
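/* Usage sketch (illustrative, builds without SS_MULTIPLE_PROCS): tagging the
 * calling system task with an entity/instance and reading it back. The names
 * SSetEntInst/SGetEntInst are assumed from the SSI API; myEnt and myInst are
 * hypothetical identifiers below ENTNC/INSTNC.
 *
 *    Ent ent;
 *    Inst inst;
 *    SSetEntInst(myEnt, myInst);
 *    SGetEntInst(&ent, &inst);
 */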
6496 #endif /* SS_MULTIPLE_PROCS */
6498 #ifdef SS_DRVR_SUPPORT
6504 * Desc: Set interrupt pending flag
6506 * Ret: ROK on success
6514 inline S16 SSetIntPend
6516 uint16_t id, /* driver task identifier */
6517 Bool flag /* flag */
6525 #if (ERRCLASS & ERRCLS_INT_PAR)
6526 if (id >= SS_MAX_DRVRTSKS || osCp.drvrTskTbl[id].used == FALSE)
6528 MTLOGERROR(ERRCLS_INT_PAR, EMT031, id, "Invalid instance");
6535 isFlag.action = (flag ? MT_IS_SET : MT_IS_UNSET);
6537 if (write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag)) != sizeof(isFlag))
6545 #endif /* SS_DRVR_SUPPORT */
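/* Usage sketch (illustrative, SS_DRVR_SUPPORT builds): marking an interrupt
 * pending for a registered driver task. The write to the internal pipe wakes
 * mtIsTskHdlr, which then calls the task's isTsk activation function.
 * drvTskId below is a hypothetical identifier of an already registered
 * driver task.
 *
 *    if (SSetIntPend(drvTskId, TRUE) != ROK)
 *    {
 *       ...handle the failure...
 *    }
 */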
6548 #ifdef SS_LOCKLESS_MEMORY
6551 * Fun: SGlobMemInfoShow
6553 * Desc: This function displays the memory usage information
6554 * for the destined region. It will show the usage of
6555 * each configured bucket and the heap for the specified region.
6558 * RFAILED Region not registered
6563 S16 SGlobMemInfoShow(Void)
6567 CmMmGlobRegCb *globReg;
6570 globReg = osCp.globRegCb;
6572 sprintf(prntBuf, "--------------------------------------------------------------\n");
6573 SDisplay(0, prntBuf);
6574 sprintf(prntBuf, "Global Region Bucket Information\n");
6575 SDisplay(0, prntBuf);
6576 sprintf(prntBuf, "====================================================\n");
6577 SDisplay(0, prntBuf);
6578 sprintf(prntBuf, "Bucket Id Set Size Free Sets Allocated\n");
6579 SDisplay(0, prntBuf);
6580 sprintf(prntBuf, "====================================================\n");
6581 SDisplay(0, prntBuf);
6584 for (idx = 0; idx < globReg->numBkts; idx++)
6586 #ifdef XEON_SPECIFIC_CHANGES
6587 sprintf(prntBuf, "%2u %12lu %12lu %8lu %9lu\n",
6588 idx, globReg->bktTbl[idx].size, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6591 sprintf(prntBuf, "%2u %12lu %8lu %9lu\n",
6592 idx, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6594 sprintf(prntBuf, "%2u %12u %8u %9u\n",
6595 idx, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6598 SDisplay(0, prntBuf);
6600 sprintf(prntBuf, "--------------------------------------------------------------\n");
6601 SDisplay(0, prntBuf);
6606 #endif /* SS_LOCKLESS_MEMORY */
6609 Bool IsMemoryThresholdHit(Region reg, Pool pool)
6611 if((mtCMMRegCb[reg]->bktTbl[pool].numAlloc * 100 )/mtCMMRegCb[reg]->bktTbl[pool].numBlks > 70)
6613 MSPD_DBG("Threshold reached reg(%d) pool(%d) numAllc(%d) numBlks(%d)\n",
6616 mtCMMRegCb[reg]->bktTbl[pool].numAlloc,
6617 mtCMMRegCb[reg]->bktTbl[pool].numBlks);
6624 /* mt022.201 - Addition of SRegInfoShow function */
6629 * Desc: This function displays the memory usage information
6630 * for the destined region. It will show the usage of
6631 * each configured bucket and the heap for the specified region.
6634 * RFAILED Region not registered
6636 * Notes: A Sample Output from the function
6637 * Bucket Memory: region 1
6638 * ====================================================
6639 * Bucket Number of Blks configured Size Allocated
6640 * ====================================================
6648 * Heap Memory: region 1
6651 * Heap Segmented blocks: 0
6667 #if (ERRCLASS & ERRCLS_INT_PAR)
6668 if (region > (SS_MAX_REGS-1) )
6670 MTLOGERROR(ERRCLS_INT_PAR, EMT032, ERRZERO, "Invalid Region");
6677 #ifndef TENB_T2K3K_SPECIFIC_CHANGES
6678 sprintf(prntBuf, "\n\nBucket Memory: region %d\n", region);
6679 SDisplay(0, prntBuf);
6680 sprintf(prntBuf, "====================================================\n");
6681 SDisplay(0, prntBuf);
6682 sprintf(prntBuf, "Bucket Number of Blks configured Size Allocated\n");
6683 SDisplay(0, prntBuf);
6684 sprintf(prntBuf, "====================================================\n");
6685 SDisplay(0, prntBuf);
6689 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
6691 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
6693 sprintf((char *)prntBuf, "%2u %8u %5u %8u %8u\n",
6694 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6695 mtCMMRegCb[region]->bktTbl[idx].size,
6696 mtCMMRegCb[region]->bktTbl[idx].numAlloc,
6697 mtCMMRegCb[region]->bktTbl[idx].maxAlloc);
6699 sprintf((char *)prntBuf, "%2u %8lu %5lu %8lu %8lu\n",
6700 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6701 mtCMMRegCb[region]->bktTbl[idx].size,
6702 mtCMMRegCb[region]->bktTbl[idx].numAlloc,
6703 mtCMMRegCb[region]->bktTbl[idx].maxAlloc);
6706 /*mt009.301 Fixed 64BIT compilation warnings*/
6708 sprintf(prntBuf, "%2u %8u %5u %8u\n",
6709 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6710 mtCMMRegCb[region]->bktTbl[idx].size,
6711 mtCMMRegCb[region]->bktTbl[idx].numAlloc);
6713 sprintf(prntBuf, "%2u %8lu %5lu %8lu\n",
6714 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6715 mtCMMRegCb[region]->bktTbl[idx].size,
6716 mtCMMRegCb[region]->bktTbl[idx].numAlloc);
6718 #endif /* not TENB_RTLIN_CHANGES */
6719 SDisplay(0, prntBuf);
6720 *availmem = *availmem + (mtCMMRegCb[region]->bktTbl[idx].size * \
6721 (mtCMMRegCb[region]->bktTbl[idx].numBlks - \
6722 mtCMMRegCb[region]->bktTbl[idx].numAlloc));
6724 sprintf(prntBuf, "\n---------------\n");
6725 SDisplay(0, prntBuf);
6726 sprintf(prntBuf, "Heap Memory: region %d\n", region);
6727 SDisplay(0, prntBuf);
6728 /*mt009.301 Fixed 64BIT compilation warnings*/
6730 sprintf(prntBuf, "Heap Size: %u\n", mtCMMRegCb[region]->heapSize);
6732 sprintf(prntBuf, "Heap Size: %lu\n", mtCMMRegCb[region]->heapSize);
6734 SDisplay(0, prntBuf);
6735 /*mt009.301 Fixed 64BIT compilation warnings*/
6737 sprintf(prntBuf, "Heap Allocated: %u\n",
6738 (mtCMMRegCb[region]->heapSize - mtCMMRegCb[region]->heapCb.avlSize));
6740 sprintf(prntBuf, "Heap Allocated: %lu\n",
6741 (mtCMMRegCb[region]->heapSize - mtCMMRegCb[region]->heapCb.avlSize));
6743 SDisplay(0, prntBuf);
6744 *availmem = *availmem + mtCMMRegCb[region]->heapCb.avlSize;
6745 #if (ERRCLASS & ERRCLS_DEBUG)
6746 sprintf(prntBuf, "Heap Segmented blocks: %d\n",
6747 mtCMMRegCb[region]->heapCb.numFragBlk);
6748 SDisplay(0, prntBuf);
6753 #ifdef XEON_SPECIFIC_CHANGES
6754 #define SSI_MAX_BKT_THRESHOLD 6
6755 #define SSI_MAX_REG_THRESHOLD 2
6756 uint32_t SMemMaxThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6757 uint32_t SMemMidThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6758 uint32_t SMemLowThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6760 static Void SInitMemThreshold
6767 for (idx = 0; (idx < maxBkt && idx < mtCMMRegCb[region]->numBkts); idx++)
6769 SMemMaxThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*95)/100;
6770 SMemMidThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*85)/100;
6771 SMemLowThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*80)/100;
6772 printf("\nREGION:%d, BKT:%d max:%d mid:%d low:%d\n", region, idx, SMemMaxThreshold[region][idx], SMemMidThreshold[region][idx], SMemLowThreshold[region][idx]);
6776 S16 SRegReachedMemThreshold
6783 uint8_t memStatus = 3;
6784 static uint8_t initFlag = 1;
6788 SInitMemThreshold(region, maxBkt);
6791 for (idx = 0; (idx < maxBkt && idx < mtCMMRegCb[region]->numBkts); idx++)
6793 if(mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemMaxThreshold[region][idx])
6798 else if((mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemMidThreshold[region][idx]) && (memStatus >1))
6802 else if((mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemLowThreshold[region][idx]) && (memStatus >2))
6810 /* mt033.201 - addition of API to return the memory statistical data */
6815 * Desc: This function returns the memory usage information
6816 * for the destined region. It will return the usage of
6817 * each configured bucket and the heap for the specified region.
6820 * RFAILED Region not registered
6830 SsMemDbgInfo *dbgInfo
6836 #if (ERRCLASS & ERRCLS_INT_PAR)
6837 if (region >= mtMemoCfg.numRegions )
6839 MTLOGERROR(ERRCLS_INT_PAR, EMT033, ERRZERO, "Invalid Region");
6844 dbgInfo->availmem = 0;
6846 if (mtCMMRegCb[region]->numBkts > SS_MAX_BKT_PER_DBGTBL)
6847 dbgInfo->numBkts = SS_MAX_BKT_PER_DBGTBL;
6849 dbgInfo->numBkts = mtCMMRegCb[region]->numBkts;
6851 for (idx = 0; (idx < mtCMMRegCb[region]->numBkts) && (idx < SS_MAX_BKT_PER_DBGTBL); idx++)
6853 dbgInfo->bktDbgTbl[idx].numBlks = mtCMMRegCb[region]->bktTbl[idx].numBlks;
6854 dbgInfo->bktDbgTbl[idx].size = mtCMMRegCb[region]->bktTbl[idx].size;
6855 dbgInfo->bktDbgTbl[idx].numAlloc = mtCMMRegCb[region]->bktTbl[idx].numAlloc;
6857 dbgInfo->availmem += (mtCMMRegCb[region]->bktTbl[idx].size * \
6858 (mtCMMRegCb[region]->bktTbl[idx].numBlks - \
6859 mtCMMRegCb[region]->bktTbl[idx].numAlloc));
6862 dbgInfo->region = region;
6864 dbgInfo->heapSize = mtCMMRegCb[region]->heapSize;
6866 dbgInfo->heapAlloc = (mtCMMRegCb[region]->heapSize - \
6867 mtCMMRegCb[region]->heapCb.avlSize);
6869 dbgInfo->availmem += mtCMMRegCb[region]->heapCb.avlSize;
6871 #if (ERRCLASS & ERRCLS_DEBUG)
6872 dbgInfo->numFragBlk = mtCMMRegCb[region]->heapCb.numFragBlk;
6884 /* Send number of Regions available */
6885 *numRegion = mtMemoCfg.numRegions;
6886 /* Send number of Pools available */
6887 *numPool = cfgRegInfo[0].numPools;
6892 /* mt033.201 - addition of APIs to print the memory statistical data
6893 * as defined by SSI enhancements
6895 #ifdef SSI_DEBUG_LEVEL1
6898 * Fun: SPrintRegMemStatusInfo
6900 * Desc: This function displays the memory usage information
6901 * for the destined region. It will show the total memory
6902 * used for static and dynamic memory if typeFlag is
6903 * SS_MEM_BKT_ALLOC_PROFILE. It will show the number of
6904 * memory blocks allocated for a particular size if typeFlag
6905 * is SS_MEM_BLK_SIZE_PROFILE from the hash list by
6906 * calling SPrintRegMemStats.
6915 S16 SPrintRegMemStatusInfo
6923 uint32_t statMemSize;
6924 uint32_t dynMemSize;
6927 #if (ERRCLASS & ERRCLS_INT_PAR)
6928 if (region >= mtMemoCfg.numRegions )
6930 MTLOGERROR(ERRCLS_INT_PAR, EMT034, ERRZERO, "Invalid Region");
6935 /* initialize the counters*/
6939 if (typeFlag == SS_MEM_BKT_ALLOC_PROFILE)
6941 /* total static and dynamic memory allocated from all the buckets in region requested */
6942 sprintf(prntBuf, "\nAllocated Memory profile of Buckets from region: %d \n", region);
6943 SDisplay(0, prntBuf);
6944 sprintf(prntBuf, "===========================================\n");
6945 SDisplay(0, prntBuf);
6946 sprintf(prntBuf, "Bucket Static Memory Dynamic Memory\n");
6947 SDisplay(0, prntBuf);
6948 sprintf(prntBuf, "===========================================\n");
6949 SDisplay(0, prntBuf);
6950 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
6952 /*mt009.301 Fixed 64BIT compilation warnings*/
6954 sprintf(prntBuf, "%2u %8u %8u\n", idx,
6955 mtCMMRegCb[region]->bktTbl[idx].staticMemUsed,
6956 mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed);
6958 sprintf(prntBuf, "%2lu %8lu %8lu\n", idx,
6959 mtCMMRegCb[region]->bktTbl[idx].staticMemUsed,
6960 mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed);
6962 SDisplay(0, prntBuf);
6963 /* update the total count */
6964 statMemSize += mtCMMRegCb[region]->bktTbl[idx].staticMemUsed;
6965 dynMemSize += mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed;
6968 /*mt009.301 Fixed 64BIT compilation warnings*/
6970 sprintf(prntBuf, "Total Static Memory allocated from buckets: %u\n", statMemSize);
6971 SDisplay(0, prntBuf);
6972 sprintf(prntBuf, "Total Dynamic Memory allocated from buckets: %u\n", dynMemSize);
6974 sprintf(prntBuf, "Total Static Memory allocated from buckets: %lu\n", statMemSize);
6975 SDisplay(0, prntBuf);
6976 /*mt010.301 fix for compilation error*/
6977 sprintf(prntBuf, "Total Dynamic Memory allocated from buckets: %lu\n", dynMemSize);
6979 SDisplay(0, prntBuf);
6981 sprintf(prntBuf, "\n\nAllocated Memory profile from Heap of region: %d \n", region);
6982 SDisplay(0, prntBuf);
6983 /*mt009.301 Fixed 64BIT compilation warnings*/
6985 sprintf(prntBuf, "STATIC MEMORY: %u DYNAMIC MEMORY:%u \n",
6986 mtCMMRegCb[region]->heapCb.staticHeapMemUsed, mtCMMRegCb[region]->heapCb.dynamicHeapMemUsed);
6988 sprintf(prntBuf, "STATIC MEMORY: %lu DYNAMIC MEMORY:%lu \n",
6989 mtCMMRegCb[region]->heapCb.staticHeapMemUsed, mtCMMRegCb[region]->heapCb.dynamicHeapMemUsed);
6991 SDisplay(0, prntBuf);
6993 else if (typeFlag == SS_MEM_BLK_SIZE_PROFILE)
6995 /* Bucket Memory allocation Statistics */
6996 return (SPrintRegMemStats(region));
7001 sprintf(prntBuf, "\n Invalid choice \n");
7002 SDisplay(0, prntBuf);
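/* Usage sketch (illustrative, SSI_DEBUG_LEVEL1 builds): dumping both profiles
 * for region 0. The argument order (region, typeFlag) is assumed from the
 * parameter checks above.
 *
 *    SPrintRegMemStatusInfo(0, SS_MEM_BKT_ALLOC_PROFILE);
 *    SPrintRegMemStatusInfo(0, SS_MEM_BLK_SIZE_PROFILE);
 */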
7010 * Fun: SPrintRegMemStats
7012 * Desc: This function displays the memory usage information for
7013 * the destined region. It will show the number of memory
7014 * blocks allocated for a particular size from the hash list.
7023 static S16 SPrintRegMemStats(Region region)
7025 CmMmHashListCp *hashListCp;
7031 hashListCp = &mtCMMRegCb[region]->hashListCp;
7033 sprintf(prntBuf, "\n\nSize Vs. NumAttempts and Alloc/Dealloc profile of region %d\n", region);
7034 SDisplay(0, prntBuf);
7035 sprintf(prntBuf, "Maximum Entries: %u Current Entries: %u\n",
7036 hashListCp->numOfbins, hashListCp->numOfEntries);
7037 SDisplay(0, prntBuf);
7038 sprintf(prntBuf, "===================================\n");
7039 SDisplay(0, prntBuf);
7040 sprintf(prntBuf, "Block Size Total number of requests\n");
7041 SDisplay(0, prntBuf);
7042 sprintf(prntBuf, "===================================\n");
7043 SDisplay(0, prntBuf);
7045 for (idx = 0, cntEnt=0; (cntEnt < hashListCp->numOfEntries) &&
7046 (idx < CMM_STAT_HASH_TBL_LEN); idx++)
7048 if (hashListCp->hashList[idx].numAttempts)
7051 /*mt009.301 Fixed 64BIT compilation warnings*/
7053 sprintf(prntBuf, "%8u %8u\n", hashListCp->hashList[idx].size,
7054 hashListCp->hashList[idx].numAttempts);
7056 sprintf(prntBuf, "%8lu %8lu\n", hashListCp->hashList[idx].size,
7057 hashListCp->hashList[idx].numAttempts);
7059 SDisplay(0, prntBuf);
7063 sprintf(prntBuf, "\nAllocation/De-allocation profile in Buckets\n");
7064 SDisplay(0, prntBuf);
7065 sprintf(prntBuf, "=================================================\n");
7066 SDisplay(0, prntBuf);
7067 sprintf(prntBuf, "Bucket Num of Alloc Attempts Num of De-alloc Attempts\n");
7068 SDisplay(0, prntBuf);
7069 sprintf(prntBuf, "=================================================\n");
7070 SDisplay(0, prntBuf);
7072 /* Print the statistics of total number of alloc/de-alloc attempts in each bucket of this region */
7073 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
7075 /*mt009.301 Fixed 64BIT compilation warnings*/
7077 sprintf(prntBuf, "%4u %8u %8u\n", idx,
7078 mtCMMRegCb[region]->bktTbl[idx].numAllocAttempts,
7079 mtCMMRegCb[region]->bktTbl[idx].numDeallocAttempts);
7081 sprintf(prntBuf, "%4lu %8lu %8lu\n", idx,
7082 mtCMMRegCb[region]->bktTbl[idx].numAllocAttempts,
7083 mtCMMRegCb[region]->bktTbl[idx].numDeallocAttempts);
7085 SDisplay(0, prntBuf);
7087 sprintf(prntBuf, "\nAllocation/De-allocation profile in Heap\n");
7088 SDisplay(0, prntBuf);
7089 /*mt009.301 Fixed 64BIT compilation warnings*/
7091 sprintf(prntBuf, "Num of Alloc Attempts: %u Num of De-alloc Attempts: %u\n",
7092 mtCMMRegCb[region]->heapCb.numAllocAttempts,
7093 mtCMMRegCb[region]->heapCb.numDeallocAttempts);
7095 sprintf(prntBuf, "Num of Alloc Attempts: %lu Num of De-alloc Attempts: %lu\n",
7096 mtCMMRegCb[region]->heapCb.numAllocAttempts,
7097 mtCMMRegCb[region]->heapCb.numDeallocAttempts);
7099 SDisplay(0, prntBuf);
7100 sprintf(prntBuf, "\n");
7101 SDisplay(0, prntBuf);
7108 * Fun: SRegMemErrHdlr
7110 * Desc: This function handles the errors returned from the memory
7111 * related functions. Customers are suggested to modify this
7112 * API according to their specific requirement.
7131 if (errCode == RDBLFREE)
7133 sprintf(prntBuf, "\nDouble free attempted at location:%8p in region:%d\n", ptr, region);
7134 SDisplay(0, prntBuf);
7136 else if (errCode == RTRAMPLINGNOK)
7138 sprintf(prntBuf, "\nMemory Trampling crossed Threshold in region:%d\n", region);
7139 SDisplay(0, prntBuf);
7147 * Fun: SPrintRegMemProfile
7149 * Desc: This function displays the memory profile information
7150 * for the destined region. This function prints for:
7151 * 1) each memory bucket-Block address, size, size for which it is allocated, free/allocated, static/dynamic
7152 * 2) heap - memory block address, size, requested size, free/allocated, static/dynamic
7161 S16 SPrintRegMemProfile
7168 CmMmBlkHdr *curBktBlk;
7170 Size offsetToNxtBlk;
7178 #if (ERRCLASS & ERRCLS_INT_PAR)
7179 if (region >= mtMemoCfg.numRegions )
7181 MTLOGERROR(ERRCLS_INT_PAR, EMT035, ERRZERO, "Invalid Region");
7186 regCb = mtCMMRegCb[region];
7188 /* memory profile */
7189 sprintf(prntBuf, "\n\nFull Memory Profile of region %d\n", region);
7190 SDisplay(0, prntBuf);
7192 /* bucket profile */
7193 sprintf(prntBuf, "\nBucket Profile\n");
7194 SDisplay(0, prntBuf);
7196 for (idx = 0; idx < regCb->numBkts; idx++)
7199 /*mt009.301 Fixed 64BIT compilation warnings*/
7201 sprintf(prntBuf, "\nBucket number:%4u of Size:%u Num of Blocks: %u\n",
7202 idx, regCb->bktTbl[idx].size, regCb->bktTbl[idx].numBlks);
7204 sprintf(prntBuf, "\nBucket number:%4lu of Size:%lu Num of Blocks: %lu\n",
7205 idx, regCb->bktTbl[idx].size, regCb->bktTbl[idx].numBlks);
7207 SDisplay(0, prntBuf);
7209 sprintf(prntBuf, "==========================================================================\n");
7210 SDisplay(0, prntBuf);
7211 sprintf(prntBuf, " Block Location Free/Allocated Static/dynamic Size requested\n");
7212 SDisplay(0, prntBuf);
7213 sprintf(prntBuf, "==========================================================================\n");
7214 SDisplay(0, prntBuf);
7216 offsetToNxtBlk = regCb->bktTbl[idx].size + sizeof(CmMmBlkHdr);
7218 for (blkCnt=0, curBktBlk = (CmMmBlkHdr *)(regCb->bktTbl[idx].bktStartPtr);
7219 ((curBktBlk) && (blkCnt < regCb->bktTbl[idx].numBlks));
7220 curBktBlk = (CmMmBlkHdr *)((Data *)curBktBlk + offsetToNxtBlk), blkCnt++)
7222 /*mt009.301 Fixed 64BIT compilation warnings*/
7224 sprintf(prntBuf, "%6u %8p", blkCnt, (void *)curBktBlk);
7226 sprintf(prntBuf, "%6lu %8p", blkCnt, (void *)curBktBlk);
7228 SDisplay(0, prntBuf);
7229 /* check if it is a sane block, else jump to next block */
7230 if (cmMmRegIsBlkSane(curBktBlk) != ROK)
7232 sprintf(prntBuf, " Trampled \n");
7233 SDisplay(0, prntBuf);
7238 if (CMM_IS_STATIC(curBktBlk->memFlags))
7240 /*mt009.301 Fixed 64BIT compilation warnings*/
7242 sprintf(prntBuf, " Allocated Static %8u\n", curBktBlk->requestedSize);
7244 sprintf(prntBuf, " Allocated Static %8lu\n", curBktBlk->requestedSize);
7246 SDisplay(0, prntBuf);
7248 else if (CMM_IS_DYNAMIC(curBktBlk->memFlags))
7250 /*mt009.301 Fixed 64BIT compilation warnings*/
7252 sprintf(prntBuf, " Allocated Dynamic %8u\n", curBktBlk->requestedSize);
7254 sprintf(prntBuf, " Allocated Dynamic %8lu\n", curBktBlk->requestedSize);
7256 SDisplay(0, prntBuf);
7258 else if (CMM_IS_FREE(curBktBlk->memFlags))
7260 /*mt009.301 Fixed 64BIT compilation warnings*/
7262 sprintf(prntBuf, " Free %8u\n", curBktBlk->requestedSize);
7264 sprintf(prntBuf, " Free %8lu\n", curBktBlk->requestedSize);
7266 SDisplay(0, prntBuf);
7270 sprintf(prntBuf, " Trampled \n");
7271 SDisplay(0, prntBuf);
7277 sprintf(prntBuf, "\nHeap Profile\n");
7278 SDisplay(0, prntBuf);
7280 /* point to heapCb */
7281 heapCb = &(regCb->heapCb);
7283 sprintf(prntBuf, "\nHeap Start: %8p Heap End: %8p\n", heapCb->vStart, heapCb->vEnd);
7284 SDisplay(0, prntBuf);
7285 sprintf(prntBuf, "==========================================================================\n");
7286 SDisplay(0, prntBuf);
7287 sprintf(prntBuf, " Block Location Size Free/Allocated Static/dynamic Size requested\n");
7288 SDisplay(0, prntBuf);
7289 sprintf(prntBuf, "==========================================================================\n");
7290 SDisplay(0, prntBuf);
7292 /* traverse the entire heap to output the heap profile */
7293 hdrSize = sizeof(CmHEntry);
7294 for (blkCnt=0, curHBlk = (CmHEntry *)heapCb->vStart;
7295 ((curHBlk) && (curHBlk < (CmHEntry *)heapCb->vEnd)); blkCnt++)
7297 /*mt009.301 Fixed 64BIT compilation warnings*/
7299 sprintf(prntBuf, "%6u %8p", blkCnt, (void *)curHBlk);
7301 sprintf(prntBuf, "%6lu %8p", blkCnt, (void *)curHBlk);
7303 SDisplay(0, prntBuf);
7305 /* check if it is a sane block, else jump to next block */
7306 if (cmMmRegIsBlkSane((CmMmBlkHdr *)curHBlk) != ROK)
7308 sprintf(prntBuf, " Trampled \n");
7309 SDisplay(0, prntBuf);
7311 sprintf(prntBuf, "Trampled block encountered: Stopping heap profile\n");
7312 SDisplay(0, prntBuf);
7315 * To go to next block in the heap we do not have any offset value
7316 * other than curHBlk->size. As the block is already trampled
7317 * we cannot rely on this size. So it is better to stop here unless there
7318 * exists any other mechanism(?) to know the offset to next block.
7323 /*mt009.301 Fixed 64BIT compilation warnings*/
7325 sprintf(prntBuf, " %8u", curHBlk->size);
7327 sprintf(prntBuf, " %8lu", curHBlk->size);
7329 SDisplay(0, prntBuf);
7331 if (CMM_IS_STATIC(curHBlk->memFlags))
7333 /*mt009.301 Fixed 64BIT compilation warnings*/
7335 sprintf(prntBuf, " Allocated Static %8u\n", curHBlk->requestedSize);
7337 sprintf(prntBuf, " Allocated Static %8lu\n", curHBlk->requestedSize);
7339 SDisplay(0, prntBuf);
7341 else if (CMM_IS_DYNAMIC(curHBlk->memFlags))
7343 /*mt009.301 Fixed 64BIT compilation warnings*/
7345 sprintf(prntBuf, " Allocated Dynamic %8u\n", curHBlk->requestedSize);
7347 sprintf(prntBuf, " Allocated Dynamic %8lu\n", curHBlk->requestedSize);
7349 SDisplay(0, prntBuf);
7351 else if (CMM_IS_FREE(curHBlk->memFlags))
7353 /*mt009.301 Fixed 64BIT compilation warnings*/
7355 sprintf(prntBuf, " Free %8u\n", curHBlk->requestedSize);
7357 sprintf(prntBuf, " Free %8lu\n", curHBlk->requestedSize);
7359 SDisplay(0, prntBuf);
7363 sprintf(prntBuf, " Trampled \n");
7364 SDisplay(0, prntBuf);
7366 /* goto next block in the heap */
7367 curHBlk = (CmHEntry *)((Data *)curHBlk + hdrSize + curHBlk->size);
7373 #endif /* SSI_DEBUG_LEVEL1 */
7375 /*-- mt035.201 : Added new API for timestamp --*/
7378 * Fun: Get TimeStamp
7380 * Desc: This function is used to get the timestamp in microseconds
7397 struct timespec ptime;
7399 struct timeval ptime;
7408 clock_gettime(CLOCK_REALTIME, &ptime);
7410 gettimeofday(&ptime, NULL);
7413 /* Obtain the time of day, and convert it to a tm struct. --*/
7414 ptm = localtime (&ptime.tv_sec);
7415 /* Klocwork fix ccpu00148484 */
7418 /* Format the date and time, down to a single second. --*/
7419 strftime (time_string, sizeof (time_string), "%a %b %d %Y %H:%M:%S", ptm);
7422 /* Compute microseconds. --*/
7424 microseconds = ptime.tv_nsec / 1000;
7426 microseconds = ptime.tv_usec;
7429 /* Print the formatted time, in seconds, followed by a decimal point
7430 and the microseconds. --*/
7431 /*mt009.301 Fixed 64BIT compilation warnings*/
7433 sprintf(ts, "%s.%03d", time_string, microseconds);
7435 sprintf(ts, "%s.%03ld", time_string, microseconds);
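/* Usage sketch (illustrative): fetching a printable timestamp with
 * microsecond resolution. The function name SGetTimeStamp, the S8 buffer type
 * and the buffer size are assumptions; the caller must provide a buffer large
 * enough for the formatted string built above.
 *
 *    S8 ts[64];
 *    SGetTimeStamp(ts);
 *    SDisplay(0, ts);
 */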
7441 /*-- mt037.201 : Added new API for SGetSystemTsk --*/
7444 * Fun: SGetSystemTsk
7446 * Desc: This function is used to get the system task id
7455 uint32_t SGetSystemTsk(Void)
7458 return (pthread_self());
7460 } /* end of SGetSystemTsk */
7462 #ifdef SS_MULTICORE_SUPPORT
7465 * Fun: Add Timer thread into system task table
7467 * Desc: This function is used to add the system task
7468 * associated with Timer thread.
7477 static SsSTskEntry* ssdAddTmrSTsk(Void)
7483 /* lock the system task table */
7484 ret = SLock(&osCp.sTskTblLock);
7488 #if (ERRCLASS & ERRCLS_DEBUG)
7489 MTLOGERROR(ERRCLS_DEBUG, EMT039, (ErrVal) ret,
7490 "Could not lock system task table");
7496 /* check count of system tasks */
7497 if (osCp.numSTsks == SS_MAX_STSKS)
7500 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7502 #if (ERRCLASS & ERRCLS_DEBUG)
7503 MTLOGERROR(ERRCLS_DEBUG, EMT040, ERRZERO,
7504 "Could not give the Semaphore");
7509 #if (ERRCLASS & ERRCLS_ADD_RES)
7510 MTLOGERROR(ERRCLS_ADD_RES, EMT041, ERRZERO, "Too many system tasks");
7517 /* initialize the system task entry with the information we have */
7518 sTsk = &osCp.sTskTbl[osCp.nxtSTskEntry];
7520 /* store the system task priority */
7521 sTsk->tskPrior = SS_NORM_TSK_PRI;
7523 /* initialize the demand queue */
7524 if (ssInitDmndQ(&sTsk->dQ) != ROK)
7527 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7529 #if (ERRCLASS & ERRCLS_DEBUG)
7530 MTLOGERROR(ERRCLS_DEBUG, EMT042, ERRZERO,
7531 "Could not give the Semaphore");
7536 #if (ERRCLASS & ERRCLS_DEBUG)
7537 MTLOGERROR(ERRCLS_DEBUG, EMT043, (ErrVal) ret,
7538 "Could not initialize demand queue");
7544 /* initialize the system task entry lock */
7545 if (SInitLock(&sTsk->lock, SS_STSKENTRY_LOCK) != ROK)
7547 ssDestroyDmndQ(&sTsk->dQ);
7549 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7551 #if (ERRCLASS & ERRCLS_DEBUG)
7552 MTLOGERROR(ERRCLS_DEBUG, EMT044, ERRZERO,
7553 "Could not give the Semaphore");
7558 #if (ERRCLASS & ERRCLS_DEBUG)
7559 MTLOGERROR(ERRCLS_DEBUG, EMT045, (ErrVal) ret,
7560 "Could not initialize system task entry lock");
7567 /* success, update the table */
7568 sTsk->tskId = osCp.nxtSTskEntry;
7570 sTsk->termPend = FALSE;
7571 osCp.nxtSTskEntry = sTsk->nxt;
7574 /* unlock the system task table */
7576 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7578 #if (ERRCLASS & ERRCLS_DEBUG)
7579 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
7580 "Could not give the Semaphore");
7587 #endif /* SS_MULTICORE_SUPPORT */
7588 /* mt003.301 Readwrite lock and recursive mutex additions */
7589 #ifdef SS_LOCK_SUPPORT
7592 * Fun: ssdInitLockNew
7594 * Desc: This function is used to initialise lock/mutex
7603 S16 ssdInitLockNew(SLockInfo *lockId,uint8_t lockType)
7606 #ifdef SS_REC_LOCK_SUPPORT
7607 pthread_mutexattr_t attr;
7608 #endif /* SS_REC_LOCK_SUPPORT */
7609 Txt prntBuf[PRNTSZE];
7615 #ifdef SS_RDWR_LOCK_SUPPORT
7618 if((retVal = pthread_rwlock_init((&(lockId->l.rdWrLockId)), NULLP)) != ROK)
7620 sprintf(prntBuf, "\n\n ssdInitLockNew(): Initialization of read write lock failed,Error# retVal %d\n", retVal);
7621 SDisplay(0, prntBuf);
7626 #endif /* SS_RDWR_LOCK_SUPPORT */
7627 #ifdef SS_REC_LOCK_SUPPORT
7630 retVal = pthread_mutexattr_init(&attr);
7634 sprintf(prntBuf,"\n ssdInitLockNew(): mutexattr init failed,Error# %d \n",retVal);
7639 retVal = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
7641 retVal = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
7645 sprintf(prntBuf,"\n ssdInitLockNew(): mutexattr settype failed,Error# %d \n",retVal);
7646 pthread_mutexattr_destroy(&attr);
7650 retVal = pthread_mutex_init((pthread_mutex_t *)&(lockId->l.recurLock), &attr);
7653 sprintf(prntBuf,"\n ssdInitLockNew(): mutex init failed,Error# %d \n",retVal);
7654 pthread_mutexattr_destroy(&attr);
7660 #endif /* SS_REC_LOCK_SUPPORT */
7663 sprintf(prntBuf, "\n\n ssdInitLockNew(): Invalid lock type %d\n", lockType);
7664 SDisplay(0, prntBuf);
7674 * Desc: This function is used to acquire the read write lock
7683 S16 ssdLockNew(SLockInfo *lockId,uint8_t lockType)
7686 Txt prntBuf[PRNTSZE];
7692 #ifdef SS_RDWR_LOCK_SUPPORT
7695 if((retVal = pthread_rwlock_rdlock(&(lockId->l.rdWrLockId))) != ROK)
7697 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to acquire the read lock,Error# %d\n", retVal);
7698 SDisplay(0, prntBuf);
7705 if((retVal = pthread_rwlock_wrlock(&(lockId->l.rdWrLockId))) != ROK)
7707 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to acquire the write lock,Error# %d\n", retVal);
7708 SDisplay(0, prntBuf);
7715 if((retVal = pthread_rwlock_tryrdlock(&(lockId->l.rdWrLockId))) != ROK)
7717 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to acquire the read lock,Error# %d\n", retVal);
7718 SDisplay(0, prntBuf);
7725 if((retVal = pthread_rwlock_trywrlock(&(lockId->l.rdWrLockId))) != ROK)
7727 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to acquire the write lock,Error# %d\n", retVal);
7728 SDisplay(0, prntBuf);
7733 #endif /* SS_RDWR_LOCK_SUPPORT */
7734 #ifdef SS_REC_LOCK_SUPPORT
7737 if((retVal = pthread_mutex_lock(&(lockId->l.recurLock)) != ROK))
7739 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to acquire the recursive mutex,Error# %d\n", retVal);
7740 SDisplay(0, prntBuf);
7745 #endif /* SS_REC_LOCK_SUPPORT */
7748 sprintf(prntBuf, "\n\n ssdLockNew(): Invalid lock type %d\n", lockType);
7749 SDisplay(0, prntBuf);
7762 * Desc: This function is used to unlock the read write lock
7771 S16 ssdUnlockNew(SLockInfo *lockId,uint8_t lockType)
7774 Txt prntBuf[PRNTSZE];
7780 #ifdef SS_RDWR_LOCK_SUPPORT
7783 if((retVal = pthread_rwlock_unlock(&(lockId->l.rdWrLockId))) != ROK)
7785 sprintf(prntBuf, "\n\n ssdUnLockNew(): Failed to unlock the lock,Error# %d\n", retVal);
7786 SDisplay(0, prntBuf);
7791 #endif /* SS_RDWR_LOCK_SUPPORT */
7792 #ifdef SS_REC_LOCK_SUPPORT
7795 if((retVal = pthread_mutex_unlock(&(lockId->l.recurLock)) != ROK))
7797 sprintf(prntBuf, "\n\n ssdUnLockNew(): Failed to unlock the recursive mutex,Error# %d\n", retVal);
7798 SDisplay(0, prntBuf);
7803 #endif /* SS_REC_LOCK_SUPPORT */
7806 sprintf(prntBuf, "\n\n ssdUnlockNew(): Invalid lock type %d\n", lockType);
7807 SDisplay(0, prntBuf);
7816 * Fun: ssdDestroyLockNew
7818 * Desc: This function is used to destroy the read write lock
7827 S16 ssdDestroyLockNew(SLockInfo *lockId,uint8_t lockType)
7829 Txt prntBuf[PRNTSZE];
7835 #ifdef SS_RDWR_LOCK_SUPPORT
7838 if((retVal = pthread_rwlock_destroy(&(lockId->l.rdWrLockId))) != ROK)
7840 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Failed to destroy the lock,Error# %d\n", retVal);
7841 SDisplay(0, prntBuf);
7846 #endif /* SS_RDWR_LOCK_SUPPORT */
7847 #ifdef SS_REC_LOCK_SUPPORT
7850 if((retVal = pthread_mutex_destroy(&(lockId->l.recurLock)) != ROK))
7852 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Failed to destroy the mutex,Error# %d\n", retVal);
7853 SDisplay(0, prntBuf);
7858 #endif /* SS_REC_LOCK_SUPPORT */
7861 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Invalid lock type %d\n", lockType);
7862 SDisplay(0, prntBuf);
7868 #endif /* SS_LOCK_SUPPORT */
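/* Usage sketch (illustrative, SS_LOCK_SUPPORT builds): the expected lifecycle
 * of the lock primitives above, assuming the usual ROK return convention. The
 * lockType value is an assumption; the concrete read-write, try and recursive
 * lock type constants come from the SSI lock headers.
 *
 *    SLockInfo lock;
 *    if (ssdInitLockNew(&lock, lockType) == ROK)
 *    {
 *       ssdLockNew(&lock, lockType);
 *       ...critical section...
 *       ssdUnlockNew(&lock, lockType);
 *       ssdDestroyLockNew(&lock, lockType);
 *    }
 */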
7870 /* mt005.301 : Cavium Changes */
7871 #ifdef SS_SEUM_CAVIUM
7875 * Fun: ssInitRcvWork
7877 * Desc: This is the initialization function of receive
7881 * RFAILED - failed, general (optional)
7883 * Notes: Function to initialize the work queue packet
7884 * receiving thread. This creates the new thread to
7885 * receive the work and sets the affinity.
7890 S16 ssInitRcvWork(void)
7892 pthread_attr_t attr;
7896 /* set the required attributes */
7897 pthread_attr_init(&attr);
7898 pthread_attr_setstacksize(&attr, (size_t)MT_ISTASK_STACK);
7899 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
7900 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
7902 /* Create a new thread to receive the work queue messages */
7903 if ((pthread_create(&thread, &attr, workRcvTsk, NULLP)) != 0)
7905 pthread_attr_destroy(&attr);
7910 pthread_attr_destroy(&attr);
7914 }/* ssInitRcvWork */
7921 * Desc: This is the handler function of receive
7925 * RFAILED - failed, general (optional)
7927 * Notes: The handler function of the work queue receiver task.
7928 * This will be waiting for the work and after receiving
7929 * it, the work will be converted and posted to that entity
7935 static void *workRcvTsk(Ptr ptr)
7938 cvmx_wqe_t *workPtr;
7939 Buffer *mBuf, *rcvdBuf;
7940 SsMsgInfo *minfoPtr;
7949 /* get the work if it's available */
7950 workPtr = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
7952 if ( workPtr == NULLP )
7954 /* If there is no work then sleep briefly before polling again */
7956 ts.tv_nsec = 500000;
7958 nanosleep(&ts, NULLP);
7962 switch(workPtr->tag)
7964 /* Switch over according to the tag value */
7965 case SS_CVMX_MBUF_TAG:
7967 rcvdBuf = (Buffer*)workPtr->packet_ptr.ptr;
7969 /* Convert the physical address to Pointers */
7970 ret = SConvPhyPtr(&rcvdBuf);
7973 /* mt011.301: Cavium 32 bit changes */
7974 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
7978 /* Copy the buffer to this region */
7979 ret = SCpyFpaMsg(rcvdBuf, SS_DFLT_REGION, SS_DFLT_POOL, &mBuf);
7982 /* mt011.301: Cavium 32 bit changes */
7983 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
7987 /* mt011.301: Cavium 32 bit changes */
7988 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
7990 minfoPtr = (SsMsgInfo*)mBuf->b_rptr;
7992 /* Get the post structure and post the message */
7993 if ( minfoPtr != NULLP)
7995 SMemCpy( &pst, &minfoPtr->pst, sizeof(Pst));
7997 (Void)SPstTsk(&pst, mBuf);
7999 /* Free the buffer allocated if it cannot be sent */
8008 /* Invalid tag value, drop the work */
8009 /* mt011.301: Cavium 32 bit changes */
8010 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
8019 #endif /* SS_SEUM_CAVIUM */
8021 #ifdef TENB_RTLIN_CHANGES
8022 S16 SInitLock(SLockId *l, uint8_t t)
8025 pthread_mutexattr_t prior;
8026 pthread_mutexattr_init(&prior);
8027 #ifndef RGL_SPECIFIC_CHANGES
8028 pthread_mutexattr_setprotocol(&prior, PTHREAD_PRIO_INHERIT);
8030 r = pthread_mutex_init(l, &prior);
8031 pthread_mutexattr_destroy(&prior);
8035 #ifdef SS_THR_REG_MAP
8038 * Fun: ssRegMainThread
8040 * Desc: This function is used to add the memory region
8041 * mapping for the main thread.
8043 * Ret: VOID (Always successful)
8051 Void ssRegMainThread(Void)
8054 if(SS_INVALID_THREAD_REG_MAP != SS_GET_THREAD_MEM_REGION())
8056 printf("\nnot able to get different Id for main thread\n");
8059 /* Here the default region is added as we don't have any region associated with
8060 * the main thread. The thread should not perform any allocation except
8061 * the initial configuration
8063 #ifdef XEON_SPECIFIC_CHANGES
8064 SS_GET_THREAD_MEM_REGION() = mtMemoCfg.numRegions;
8066 SS_GET_THREAD_MEM_REGION() =
8073 * Fun: ssCheckAndAddMemoryRegionMap
8075 * Desc: This function is used to add the memory region
8076 * mapping for the provided sTsk associated thread.
8077 * The threadId is placed in the thread memory
8078 * region mapping table if a free entry is available,
8079 * and success is returned. If not, the thread ID is
8080 * kept in a static local array and the count is
8081 * incremented. Once a thread ID is successfully placed
8082 * in the mapping table, pthread_cancel is sent for all
8083 * the previous threads that could not be placed in the table.
8085 * Ret: TRUE - Thread ID successfully placed in thread memory region
8087 * FALSE - If thread Id is not placed in thread memory region
8090 * Notes: Thread memory region mapping table.
8095 S32 ssCheckAndAddMemoryRegionMap
8097 pthread_t threadId, /* Thread Id of system task */
8098 Region region /* Region associated with thread */
8101 static uint32_t createdThreads;
8102 static pthread_t createdThreadIds[SS_MAX_THREAD_CREATE_RETRY];
8106 /* Here 0xFF is considered as invalid region and if the mapping table
8107 * contains 0xFF, that mapping entry is free
8109 if(SS_INVALID_THREAD_REG_MAP !=
8110 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)])
8112 /* Klocwork fix ccpu00148484 */
8113 if(!(createdThreads < SS_MAX_THREAD_CREATE_RETRY))
8115 printf("\nfailed in index = %ld\n", ((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP));
8116 printf("\nNot able to get the different thread ID, exiting\n");
8119 createdThreadIds[createdThreads++] = threadId;
8122 /* If we found free mapping table entry, place the region and send pthread_cancel
8123 * for all the thread Ids which are created before this
8125 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)] = region;
8126 #ifdef XEON_SPECIFIC_CHANGES
8127 printf("\nThreadId %ld, Thread Idx %d, Region %d\n", threadId,
8128 ((threadId >> SS_MEM_THREAD_ID_SHIFT) %
8129 SS_MAX_THREAD_REGION_MAP), region);
8131 for(indx = 0; indx < createdThreads; indx++)
8133 #ifdef XEON_SPECIFIC_CHANGES
8134 printf("\nSending pthread cancel to thread Id %d \n",createdThreadIds[indx]);
8136 pthread_cancel(createdThreadIds[indx]);
8142 } /* ssCheckAndAddMemoryRegionMap */
8146 * Fun: ssCheckAndDelMemoryRegionMap
8148 * Desc: This function is used to remove the memory region
8149 * mapping for the provided thread. The entry for the
8150 * threadId in the thread memory region mapping table
8151 * is looked up; if no valid mapping exists, an error
8152 * is reported. Otherwise the entry is reset to the
8153 * invalid region value, freeing it for reuse.
8158 * Ret: TRUE - mapping entry for the thread successfully cleared
8160 * FALSE - no valid mapping entry found for the thread
8163 * Notes: Thread memory region mapping table.
8168 S32 ssCheckAndDelMemoryRegionMap
8170 pthread_t threadId /* Thread Id of system task */
8175 /* Raghu To-Do Check with team, is it necessary to acquire lock
8176 * as del and add may go parallel */
8177 /* Here 0xFF is considered as invalid region and if the mapping table
8178 * contains 0xFF, that mapping entry is free
8180 if(SS_INVALID_THREAD_REG_MAP ==
8181 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)])
8184 printf("\nInvalid Thread ID (%ld)\n", (uint32_t)threadId);
8186 printf("\nInvalid Thread ID (%d)\n", (uint32_t)threadId);
8190 /* A valid mapping entry exists for this thread: clear it by marking
8191 * the entry invalid so that it can be reused
8193 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)] = SS_INVALID_THREAD_REG_MAP;
8197 } /* ssCheckAndAddMemoryRegionMap */
8201 #ifdef SS_TSKLOG_ENABLE
8206 * Desc: This function will return current time through input parameter.
8209 * RFAILED - failed, general (optional)
8217 volatile uint32_t *startTime,
8221 #ifdef MSPD_MLOG_NEW
8222 *startTime = GetTIMETICK();
8231 * Desc: This function takes the difference between the current time and
8232 * the start time provided as an input parameter, and logs it for the given task.
8236 * RFAILED - failed, general (optional)
8244 volatile uint32_t startTime,
8248 /*uint32_t stopTime;*/
8251 case PID_MAC_HARQ_IND:
8252 case PID_SCH_TTI_IND:
8254 case PID_MAC_DAT_IND:
8255 case PID_MAC_SF_ALLOC_REQ:
8256 case PID_MAC_STA_RSP:
8257 case PID_MAC_DL_SCHD:
8258 case PID_MAC_DL_CQI_IND:
8259 case PID_MAC_UL_CQI_IND:
8260 case PID_MAC_UL_SCHD:
8261 case PID_MAC_TTI_IND:
8262 case PID_CL_RCV_PHY_MSG:
8263 case PID_CL_HARQ_STA_IND:
8264 case PID_MAC_AM_HARQ_RLS:
8265 case PID_CL_DL_BATCH_PROC:
8266 case PID_CL_DLM_PRC_TTI_IND:
8267 case PID_CRC_IND_REAL:
8268 case PID_CRC_IND_DUMMY:
8269 case PID_TTI_LATENCY:
8270 case PID_RECPREQ_PROC:
8273 MLogTask(0, taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8275 MLogTask(taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8278 MLogTask(taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8287 volatile uint32_t * startTime,
8297 volatile uint32_t startTime,
8304 #endif /*#ifdef SS_TSKLOG_ENABLE */
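/* Usage sketch (illustrative, SS_TSKLOG_ENABLE builds): bracketing a unit of
 * work with the task-logging pair above so its execution time is recorded via
 * MLogTask. The names SStartTask/SStopTask are assumed from this file's task
 * logging API; PID_MAC_TTI_IND is one of the task identifiers listed above.
 *
 *    volatile uint32_t t = 0;
 *    SStartTask(&t, PID_MAC_TTI_IND);
 *    ...per-TTI work...
 *    SStopTask(t, PID_MAC_TTI_IND);
 */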
8305 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
8307 * This primitive is used to calculate the CPU Utilization per Core
8312 * @return Void - function is always successful
8314 Void UpdateSocCpuInfo
8316 CmCpuStatsInfo *cpuInfo,
8321 S8 mipsStr[MIPS_STRING_LEN];
8328 /* Open the file which holds the MIPS available value */
8329 mipsFd = fopen(MIPS_FILE, "r");
8336 /* Get the free mips available value from the file */
8337 if(NULLP == fgets(mipsStr, 24, mipsFd))
8339 printf("\nfgets to get the free mips available failed\n");
8344 strtok(mipsStr, " ");
8346 strPart = strtok(NULLP, " ");
8348 if(idx == CM_L2_CPU_UTIL)
8350 if(strPart != NULLP)
8352 l2FreeCpu = atoi(strPart);
8353 l2CpuUsed = 100 - l2FreeCpu;
8354 cpuInfo->cpuUtil[0].totCpuUtil += l2CpuUsed;
8355 cpuInfo->cpuUtil[0].maxCpuUtil = GET_CPU_MAX((cpuInfo->cpuUtil[0].maxCpuUtil), l2CpuUsed);
8356 cpuInfo->cpuUtil[0].numSamples++;
8359 if(idx == CM_L3_CPU_UTIL)
8361 strPart = strtok(NULLP, " ");
8362 if(strPart != NULLP)
8364 l3FreeCpu = atoi(strPart);
8365 l3CpuUsed = 100 - l3FreeCpu;
8366 cpuInfo->cpuUtil[0].totCpuUtil += l3CpuUsed;
8367 cpuInfo->cpuUtil[0].maxCpuUtil = GET_CPU_MAX((cpuInfo->cpuUtil[0].maxCpuUtil), l3CpuUsed);
8368 cpuInfo->cpuUtil[0].numSamples++;
8371 if(idx == CM_L2_CPU_UTIL)
8373 cpuInfo->numCores = CM_NUM_L2_CORES ;
8375 else if(idx == CM_L3_CPU_UTIL)
8377 cpuInfo->numCores = CM_NUM_L3_CORES ;
8383 #endif /* TENB_T2K3K_SPECIFIC_CHANGES */
8384 #ifdef SS_MULTICORE_SUPPORT
8387 * Fun: Re-add Timer thread into system task table
8389 * Desc: This function is used to re-initialize the system task
8390 * entry associated with the Timer thread.
8399 static SsSTskEntry* ssdReAddTmrSTsk(
8407 /* lock the system task table */
8408 ret = SLock(&osCp.sTskTblLock);
8412 #if (ERRCLASS & ERRCLS_DEBUG)
8413 MTLOGERROR(ERRCLS_DEBUG, EMT039, (ErrVal) ret,
8414 "Could not lock system task table");
8420 /* initialize the system task entry with the information we have */
8421 sTsk = &osCp.sTskTbl[idx];
8426 SDestroyLock(&sTsk->lock);
8427 ssDestroyDmndQ(&sTsk->dQ);
8430 /* store the system task priority */
8431 sTsk->tskPrior = SS_NORM_TSK_PRI;
8433 /* initialize the demand queue */
8434 if (ssInitDmndQ(&sTsk->dQ) != ROK)
8437 if ( SUnlock(&osCp.sTskTblLock) != ROK)
8439 #if (ERRCLASS & ERRCLS_DEBUG)
8440 MTLOGERROR(ERRCLS_DEBUG, EMT042, ERRZERO,
8441 "Could not give the Semaphore");
8446 #if (ERRCLASS & ERRCLS_DEBUG)
8447 MTLOGERROR(ERRCLS_DEBUG, EMT043, (ErrVal) ret,
8448 "Could not initialize demand queue");
8454 /* initialize the system task entry lock */
8455 if (SInitLock(&sTsk->lock, SS_STSKENTRY_LOCK) != ROK)
8457 ssDestroyDmndQ(&sTsk->dQ);
8459 if ( SUnlock(&osCp.sTskTblLock) != ROK)
8461 #if (ERRCLASS & ERRCLS_DEBUG)
8462 MTLOGERROR(ERRCLS_DEBUG, EMT044, ERRZERO,
8463 "Could not give the Semaphore");
8468 #if (ERRCLASS & ERRCLS_DEBUG)
8469 MTLOGERROR(ERRCLS_DEBUG, EMT045, (ErrVal) ret,
8470 "Could not initialize system task entry lock");
8477 /* success, update the table */
8478 sTsk->tskId = idx + 1;
8480 sTsk->termPend = FALSE;
8482 /* unlock the system task table */
8484 if ( SUnlock(&osCp.sTskTblLock) != ROK)
8486 #if (ERRCLASS & ERRCLS_DEBUG)
8487 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
8488 "Could not give the Semaphore");
8495 #endif /* SS_MULTICORE_SUPPORT */
8500 * Fun: Re-initialize timer table
8502 * Desc: This function re-initializes MTSS-specific information
8503 * in the timer table.
8512 S16 ssdReInitTmr(void)
8514 pthread_attr_t attr;
8515 struct sched_param param_sched;
8516 #ifndef XEON_SPECIFIC_CHANGES
8519 #ifdef SS_MULTICORE_SUPPORT
8521 #endif /* SS_MULTICORE_SUPPORT */
8522 #ifdef SS_THR_REG_MAP
8523 uint32_t threadCreated = FALSE;
8524 #endif /* SS_THR_REG_MAP */
8527 #ifndef XEON_SPECIFIC_CHANGES
8528 ret = ssCheckAndDelMemoryRegionMap(osCp.dep.tmrHdlrTID);
8531 #if (ERRCLASS & ERRCLS_DEBUG)
8532 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
8533 "Could not give the Semaphore");
8539 osCp.dep.tmrTqCp.tmrLen = SS_MAX_TMRS;
8540 /* mt010.21: addition */
8542 #ifdef SS_MULTICORE_SUPPORT
8543 sTsk = ssdReAddTmrSTsk(0);
8548 #endif /* SS_MULTICORE_SUPPORT */
8549 /* create the timer handler thread */
8551 pthread_attr_init(&attr);
8552 /* mt021.201 - Addition to set stack size */
8553 pthread_attr_setstacksize(&attr, (size_t)MT_TMRTASK_STACK);
8554 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
8555 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
8556 pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
8557 param_sched.sched_priority = sched_get_priority_max(SCHED_FIFO);
8558 pthread_attr_setschedparam(&attr, ¶m_sched);
8561 #ifdef SS_THR_REG_MAP
8562 /* When the thread is created, we check the memory mapping table to see if
8563 * the threadId can be placed in the thread memory map table. If it cannot be
8564 * placed, the threadId is stored in a temporary array. Once a thread is created
8565 * successfully, pthread_cancel is sent for each thread created before it. All the
8566 * threads are made to wait on a semaphore which is the cancellation point for the thread.
8568 while(threadCreated == FALSE)
8571 if ((pthread_create(&osCp.dep.tmrHdlrTID, &attr, mtTmrHdlr, NULLP)) != 0)
8573 /* mt020.201 - Addition for destroying thread attribute object attr */
8574 pthread_attr_destroy(&attr);
8579 #ifdef SS_THR_REG_MAP
8580 threadCreated = ssCheckAndAddMemoryRegionMap(osCp.dep.tmrHdlrTID,
8583 #endif /* SS_THR_REG_MAP */
8584 #ifdef SS_MEM_WL_DEBUG
8585 tmpRegTidMap[sTsk->region] = osCp.dep.tmrHdlrTID;
8588 /* mt020.201 - Addition for destroying thread attribute object attr */
8589 pthread_attr_destroy(&attr);
8590 sem_post(&osCp.dep.ssStarted);
8594 /**********************************************************************
8596 **********************************************************************/