1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /********************************************************************20**
21 Name: Multi-threaded System Services - Solaris
25 Desc: C source code for the MTSS-Solaris implementation of
30 *********************************************************************21*/
35 #ifndef _POSIX_C_SOURCE
36 #define _POSIX_C_SOURCE 199309L
38 /* mt003.301 moved env files to use the __USE_UNIX98 flag in sys includes */
45 #include <sys/types.h>
50 /* mt003.301: included sys/time.h
51 * for both solaris and linux
54 /* mt008.21: addition */
59 /* header include files (.h) */
62 #include "common_def.h"
63 #include "mt_ss.h" /* MTSS specific */
64 #include "mt_err.h" /* MTSS error defines */
66 #include "ss_queue.h" /* queues */
67 #include "ss_task.h" /* tasking */
68 #include "ss_msg.h" /* messaging */
69 #include "ss_mem.h" /* memory management interface */
70 #include "ss_gen.h" /* general */
71 /* mt003.301 Additions - Task deregistration */
72 #include "ss_err.h" /* error */
73 #include "cm_mem.h" /* common memory manager */
74 /* mt001.301 : Additions */
75 #ifdef SS_THREAD_PROFILE
78 #ifdef SS_LOCKLESS_MEMORY
83 /* multi-core support enhancement */
84 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
85 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
91 #include <sys/types.h>
92 #include <sys/processor.h>
93 #include <sys/procset.h>
96 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
97 /* mt001.301 : Additions */
99 #include <sys/types.h>
100 #include <sys/socket.h>
101 #include <netinet/in.h>
102 #include <arpa/inet.h>
103 #endif /* SS_WATCHDOG */
105 #ifdef SS_USE_WLS_MEM
106 #include <rte_common.h>
107 #include <rte_debug.h>
111 /* header/extern include files (.x) */
113 #include "gen.x" /* general layer */
114 #include "ssi.x" /* system services */
116 #include "cm5.x" /* common timers */
118 #include "mt_ss.x" /* MTSS specific */
119 #ifdef SS_LOCKLESS_MEMORY
120 #include "mt_ss_wl.x" /* MTSS specific */
121 #endif /* SS_LOCKLESS_MEMORY */
123 #include "ss_queue.x" /* queues */
124 #include "ss_task.x" /* tasking */
125 #include "ss_timer.x" /* timers */
126 #include "ss_strm.x" /* STREAMS */
127 #include "ss_msg.x" /* messaging */
128 #include "ss_mem.x" /* memory management interface */
129 #include "ss_drvr.x" /* driver tasks */
130 #include "ss_gen.x" /* general */
131 #ifdef SS_LOCKLESS_MEMORY
132 #include "cm_llist.x"
134 #include "cm_mem_wl.x" /* common memory manager */
136 #include "cm_mem.x" /* common memory manager */
137 #endif /* SS_LOCKLESS_MEMORY */
138 #include "cm_lte.x" /* common memory manager */
139 /* mt001.301 : Additions */
140 #ifdef SS_LOGGER_SUPPORT
142 #endif /* SS_LOGGER_SUPPORT */
144 /*mt005.301: Cavium Changes */
145 #ifdef SS_SEUM_CAVIUM
146 /* cvmx includes files */
147 #include "cvmx-config.h"
149 #include "cvmx-pow.h"
150 #include "cvmx-tim.h"
151 #include "cvmx-fpa.h"
152 #include "cvmx-helper-fpa.h"
153 #include "cvmx-malloc.h"
154 #endif /* SS_SEUM_CAVIUM */
157 #include "mt_plat_t33.h"
158 #include "mt_plat_t33.x"
159 #include "sys/syscall.h"
162 #if defined(RGL_SPECIFIC_CHANGES) || defined(INTEL_WLS) || defined(SS_USE_WLS_MEM)
164 #include <hugetlbfs.h>
167 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
168 S16 rgBatchProc (Void);
170 #ifdef RLC_MAC_DAT_REQ_RBUF
171 S16 rgDlDatReqBatchProc ARGS((
174 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
175 S16 rgBatchProc ARGS((
179 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
180 /* general purpose debug zone */
181 char my_buffer2[4096 * 4] = { 0 };
182 char my_buffer[4096] = { 0 };
183 int my_buffer_idx = 0;
187 #define sigsegv_print(x, ...) my_buffer_idx += sprintf(&my_buffer[my_buffer_idx], x "\n", ##__VA_ARGS__)
189 struct sigcontext my_uc_mcontext = { 0 };
194 #include <ucontext.h>
198 #define SIGSEGV_STACK_GENERIC
199 #define REGFORMAT "%x\n"
201 #ifdef XEON_SPECIFIC_CHANGES
202 Void cmPrcTmr ARGS((CmTqCp* tqCp, CmTqType* tq, PFV func));
205 void dump_external(void);
/* Restore the default dispositions for the fatal signals this module hooks
 * (SIGSEGV, SIGILL) so that a subsequent fault kills the process normally
 * (core dump) instead of re-entering the custom crash handler.
 * NOTE(review): this view is a gappy extraction — the declaration of `sa`
 * and the function braces are not visible here; confirm against the full
 * source before editing. */
207 static Void mtDelSigals(Void)
211 memset(&sa, 0, sizeof(struct sigaction));
212 sigemptyset(&sa.sa_mask);
/* SIG_DFL: revert SIGSEGV to its default (terminate + core) disposition */
213 sa.sa_handler = SIG_DFL;
214 sigaction(SIGSEGV, &sa, NULL);
216 memset(&sa, 0, sizeof(struct sigaction));
217 sigemptyset(&sa.sa_mask);
/* same for SIGILL */
218 sa.sa_handler = SIG_DFL;
219 sigaction(SIGILL, &sa, NULL);
/* Crash handler installed (via sigaction with SA_SIGINFO) for SIGSEGV and
 * related fatal signals. Dumps siginfo details, the saved machine context
 * (ARM register set on non-RGL builds), and a backtrace to stdout, then —
 * on XEON builds — prints module debug info before letting the process die.
 * NOTE(review): many lines (braces, local declarations of i/sz/buffer/strings,
 * the exit path) are missing from this extraction; do not restructure without
 * the full source. */
223 static void signal_segv(int signum, siginfo_t * info, void *ptr)
/* textual names for the SEGV si_code values 1 and 2 */
225 static const char *si_codes[3] = { "", "SEGV_MAPERR", "SEGV_ACCERR" };
228 ucontext_t *ucontext = (ucontext_t *) ptr;
229 #ifdef XEON_SPECIFIC_CHANGES
231 int *p32 = (int *) 0x2fff0000;
236 printf("segv ooops @ %p\n", info->si_addr);
239 printf("Segmentation Fault!\n");
240 printf("info.si_signo = %d\n", signum);
241 printf("info.si_errno = %d\n", info->si_errno);
/* NOTE(review): si_codes has 3 entries; an si_code > 2 would index out of
 * bounds here — confirm whether a range check exists in the elided lines */
242 printf("info.si_code = %d (%s)\n", info->si_code, si_codes[info->si_code]);
243 printf("info.si_addr = %p\n", info->si_addr);
/* preserve the faulting machine context in the file-scope my_uc_mcontext
 * so it survives for post-mortem inspection */
245 memcpy(&my_uc_mcontext, &ucontext->uc_mcontext, sizeof(struct sigcontext));
248 #ifndef RGL_SPECIFIC_CHANGES
/* dump the ARM general-purpose registers from the saved context */
249 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r0);
250 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r1);
251 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r2);
252 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r3);
253 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r4);
254 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r5);
255 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r6);
256 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r7);
257 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r8);
258 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r9);
259 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r10);
260 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_fp);
261 printf("reg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_ip);
262 printf("reg[sp] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_sp);
263 printf("reg[lr] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_lr);
264 printf("reg[pc] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_pc);
265 printf("reg[cpsr] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_cpsr);
268 printf("Stack trace (non-dedicated):\n");
/* glibc backtrace: capture up to 50 frames and symbolize them.
 * NOTE(review): backtrace_symbols() mallocs `strings`; a free() is not
 * visible in this extraction — confirm it is not leaked (acceptable in a
 * dying process, but worth verifying) */
270 sz = backtrace(buffer, 50);
271 strings = backtrace_symbols(buffer, sz);
272 for (i = 0; i < sz; ++i)
273 printf("%s\n", strings[i]);
275 printf("End of stack trace.");
277 #ifdef XEON_SPECIFIC_CHANGES
282 /* Lets first print our debug information */
283 printf("Before dumping our Debug info\n");
285 printf("After dumping our Debug info\n");
287 /* Disable the signal and make the enodeb to dump. This will make
288 * eNB to generate the core with dumping the ccpu log
295 /* End printing debug information */
300 /*** TBD: IMPORTANT ***
301 *** The following definition is temporary. This must be removed
302 *** when all products have been updated with latest ssi.h file OR
303 *** all ssi.h files have been updated to contain this definitions
305 /* New error class for FTHA added */
307 #define ERRCLS_FTHA 0x8
308 #endif /* ERRCLS_FTHA */
/* Bundle passed to the intermediate thread entry pthreadCreateHdlr(): it
 * carries the user's real start routine plus its argument, so the wrapper
 * can perform setup before chaining to start_routine(argument). */
310 typedef struct _SPThreadCreateArg
312 void *argument; /* argument that is to be passed to the actual pthread */
313 void *(*start_routine) (void *); /* function from which pthread starts */
/* wrapper entry point handed to pthread_create(); receives an
 * SPThreadCreateArg* as arg */
316 void *pthreadCreateHdlr(void* arg);
318 #ifdef SS_LOCKLESS_MEMORY
319 Buffer *mtTskBuffer1;
320 Buffer *mtTskBuffer2;
322 pthread_t tmpRegTidMap[20];
324 S16 SGlobMemInfoShow(void);
325 #endif /* SS_LOCKLESS_MEMORY */
328 APP_CONTEXT AppContext;
332 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
333 unsigned int tlPost(void *handle);
336 /* forward references */
337 /* mt003.301 Modifications - Moved to ss_gen.x */
338 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
339 Void *mtTskHdlrT2kL2 ARGS((Void*));
340 void mtSigSegvHndlr ARGS((void));
341 void mtSigUsr2Hndlr ARGS((void));
344 static S16 ssdSetPthreadAttr ARGS ((S32 tskPrior, pthread_attr_t *attr));
345 static Void *mtTskHdlr ARGS((void *));
346 static S16 mtTskHdlMsg ARGS((SsSTskEntry *sTsk));
348 static Void *mtTmrHdlr ARGS((void *));
349 static Void mtTimeout ARGS((PTR tCb, S16 evnt));
351 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
352 static Void mtIntSigHndlr ARGS((int));
353 static Void mtExitClnup ARGS((void));
356 static Void *mtConHdlr ARGS((void *));
360 #ifdef SS_DRVR_SUPPORT
361 static Void *mtIsTskHdlr ARGS((void *));
365 /* mt020.201 - Addition for no command line available */
367 static Void mtGetOpts ARGS((void));
368 /* mt003.301 Additions - File Based task registration made
369 * common for both MULTICORE and NON-MULTICORE
371 static Bool fileBasedMemCfg = FALSE;
374 /* mt033.201 - addition of local function to print the statistics such as
375 * (size vs. numAttempts) and (allocations vs. deallocations)
377 #ifdef SSI_DEBUG_LEVEL1
378 static S16 SPrintRegMemStats ARGS((Region region));
379 #endif /* SSI_DEBUG_LEVEL1 */
381 #ifdef SS_MULTICORE_SUPPORT
382 static SsSTskEntry* ssdAddTmrSTsk(Void);
383 static SsSTskEntry* ssdReAddTmrSTsk ARGS((uint8_t idx));
384 #ifndef SS_LOCKLESS_MEMORY
385 #ifndef RGL_SPECIFIC_CHANGES
386 static S16 ssdInitMemInfo ARGS((void));
391 /* mt005.301: Cavium changes */
392 #ifdef SS_SEUM_CAVIUM
393 static Void *workRcvTsk ARGS((void *));
394 #endif /* SS_SEUM_CAVIUM */
396 #ifdef SS_THR_REG_MAP
397 S32 ssCheckAndAddMemoryRegionMap ARGS((pthread_t threadId,
399 S32 ssCheckAndDelMemoryRegionMap ARGS((pthread_t threadId));
400 #endif /* SS_THR_REG_MAP */
402 /* type declarations */
404 #ifdef SS_DRVR_SUPPORT
405 typedef struct mtIsFlag
415 /* public variable declarations */
417 Cntr cfgNumRegs = SS_MAX_REGS;
418 /* Set memory configuration as false.
419 * Set to true if memory configuration through file is successfull.
421 Bool memConfigured = FALSE;
422 /* mt022.201 - Modification for shared memory relay region and memcal tool */
423 SsRegCfg cfgRegInfo[SS_MAX_REGS] =
426 SS_DFLT_REGION, SS_MAX_POOLS_PER_REG - 1,
428 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
429 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
430 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
431 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
432 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
433 { SS_POOL_STATIC, 0 }
439 SS_DFLT_REGION + 1, SS_MAX_POOLS_PER_REG - 1,
441 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
442 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
443 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
444 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
445 { SS_POOL_STATIC, 0 }
448 #endif /* INTEL_WLS */
450 #ifdef SS_LOCKLESS_MEMORY
453 SS_DFLT_REGION + 1, SS_MAX_POOLS_PER_REG - 1,
455 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
456 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
457 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
458 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
459 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
460 { SS_POOL_STATIC, 0 }
464 SS_DFLT_REGION + 2, SS_MAX_POOLS_PER_REG - 1,
466 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
467 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
468 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
469 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
470 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
471 { SS_POOL_STATIC, 0 }
475 SS_DFLT_REGION + 3, SS_MAX_POOLS_PER_REG - 1,
477 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
478 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
479 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
480 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
481 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
482 { SS_POOL_STATIC, 0 }
486 SS_DFLT_REGION + 4, SS_MAX_POOLS_PER_REG - 1,
488 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
489 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
490 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
491 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
492 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
493 { SS_POOL_STATIC, 0 }
497 SS_DFLT_REGION + 5, SS_MAX_POOLS_PER_REG - 1,
499 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
500 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
501 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
502 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
503 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
504 { SS_POOL_STATIC, 0 }
508 SS_DFLT_REGION + 6, SS_MAX_POOLS_PER_REG - 1,
510 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
511 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
512 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
513 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
514 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
515 { SS_POOL_STATIC, 0 }
519 #endif /* SS_LOCKLESS_MEMORY */
521 /* mt003.301 Modifications - File Based task registration made
522 * common for both MULTICORE and NON-MULTICORE
525 #ifdef SS_LOCKLESS_MEMORY
526 MtDynMemCfg mtDynMemoCfg =
528 SS_MAX_REGS, /* number of regions */
531 SS_DFLT_REGION, /* region id */
532 MT_MAX_BKTS, /* number of buckets */
534 /* block size, no. of blocks, Upper threshold, lower threshold */
535 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
536 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
537 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
538 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
539 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
543 SS_DFLT_REGION + 1, /* region id */
544 MT_MAX_BKTS, /* number of buckets */
546 /* block size, no. of blocks, Upper threshold, lower threshold */
547 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
548 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
549 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
550 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
551 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
555 SS_DFLT_REGION + 2, /* region id */
556 MT_MAX_BKTS, /* number of buckets */
558 /* block size, no. of blocks, Upper threshold, lower threshold */
559 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
560 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
561 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
562 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
563 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
567 SS_DFLT_REGION + 3, /* region id */
568 MT_MAX_BKTS, /* number of buckets */
570 /* block size, no. of blocks, Upper threshold, lower threshold */
571 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
572 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
573 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
574 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
575 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
579 SS_DFLT_REGION + 4, /* region id */
580 MT_MAX_BKTS, /* number of buckets */
582 /* block size, no. of blocks, Upper threshold, lower threshold */
583 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
584 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
585 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
586 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
587 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
591 SS_DFLT_REGION + 5, /* region id */
592 MT_MAX_BKTS, /* number of buckets */
594 /* block size, no. of blocks, Upper threshold, lower threshold */
595 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
596 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
597 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
598 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
599 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
603 SS_DFLT_REGION + 6, /* region id */
604 MT_MAX_BKTS, /* number of buckets */
606 /* block size, no. of blocks, Upper threshold, lower threshold */
607 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
608 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
609 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
610 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
611 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
614 #if ((defined (SPLIT_RLC_DL_TASK)) && (!defined (L2_L3_SPLIT)))
617 SS_DFLT_REGION + 7, /* region id */
618 MT_MAX_BKTS, /* number of buckets */
620 /* block size, no. of blocks, Upper threshold, lower threshold */
621 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
622 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
623 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
624 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
632 MtGlobMemCfg mtGlobMemoCfg =
634 MT_MAX_BKTS, /* number of buckets */
637 /* block size, no. of blocks, Upper threshold, lower threshold */
638 {MT_BKT_0_DSIZE, (MT_BKT_0_NUMBLKS + MT_BKT_0_NUMBLKS), SS_DFLT_MEM_BLK_SET_SIZE},
639 {MT_BKT_1_DSIZE, MT_BKT_1_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
640 {MT_BKT_2_DSIZE, MT_BKT_2_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
641 {MT_BKT_3_DSIZE, MT_BKT_3_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
642 {MT_BKT_4_DSIZE, MT_BKT_4_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE}
644 {1024, 12800 /* MT_BKT_0_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE},
645 {1664, 12800 /* MT_BKT_1_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE},
646 {4096, 3840 /* MT_BKT_2_NUMBLKS*/, SS_DFLT_MEM_BLK_SET_SIZE},
647 {MT_BKT_3_DSIZE, 12800 /* MT_BKT_3_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE}
651 #endif /* SS_LOCKLESS_MEMORY */
653 /* mt022.201 - Modification for memory calculator tool */
654 /* mt018.201 - added memory configuration matrix */
658 SS_MAX_REGS - 1, /* number of regions */
660 #ifndef XEON_SPECIFIC_CHANGES
661 SS_MAX_REGS, /* number of regions */
668 SS_DFLT_REGION, /* region id */
669 MT_MAX_BKTS, /* number of buckets */
670 MT_HEAP_SIZE, /* heap size */
672 #ifndef XEON_SPECIFIC_CHANGES
673 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
674 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
675 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
676 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
677 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS}
679 {256, 491520}, /* 60 pages of 2M*/
680 {512, 12288}, /* 3 pages of 2M */
681 {2048, 99328}, /* 97 Pages of 2M */
682 {8192, 75008}, /* 293 Pages of 2M */
683 {16384, 4096} /* 32 pages of 2M */
688 #ifndef SS_LOCKLESS_MEMORY
690 SS_DFLT_REGION + 1, /* region id */
691 MT_MAX_BKTS, /* number of buckets */
692 /*MT_HEAP_SIZE 7194304 */ 10485760, /* heap size */
694 //{MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
695 //{MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
696 //{MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
697 //{MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS} /* block size, no. of blocks */
705 #endif /* SS_LOCKLESS_MEMORY */
706 #endif /* INTEL_WLS */
707 #ifdef SS_LOCKLESS_MEMORY
709 SS_DFLT_REGION + 1, /* region id */
710 MT_MAX_BKTS, /* number of buckets */
711 MT_HEAP_SIZE, /* heap size */
713 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
714 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
715 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
716 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
717 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
721 SS_DFLT_REGION + 2, /* region id */
722 MT_MAX_BKTS, /* number of buckets */
723 MT_HEAP_SIZE, /* heap size */
725 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
726 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
727 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
728 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
729 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
733 SS_DFLT_REGION + 3, /* region id */
734 MT_MAX_BKTS, /* number of buckets */
735 MT_HEAP_SIZE, /* heap size */
737 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
738 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
739 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
740 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
741 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
745 SS_DFLT_REGION + 4, /* region id */
746 MT_MAX_BKTS, /* number of buckets */
747 MT_HEAP_SIZE, /* heap size */
749 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
750 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
751 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
752 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
753 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
757 SS_DFLT_REGION + 5, /* region id */
758 MT_MAX_BKTS, /* number of buckets */
759 MT_HEAP_SIZE, /* heap size */
761 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
762 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
763 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
764 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
765 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
/* NOTE(review): this entry repeats region id SS_DFLT_REGION + 5 — the
 * previous table entry (original line 757) already uses the same id.
 * The surrounding entries step +1 per region, so this one was likely
 * intended to be SS_DFLT_REGION + 6; confirm against the full source
 * before changing, as region ids must be unique within the table. */
769 SS_DFLT_REGION + 5, /* region id */
770 MT_MAX_BKTS, /* number of buckets */
771 MT_HEAP_SIZE, /* heap size */
773 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
774 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
775 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
776 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
777 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
780 #endif /* SS_LOCKLESS_MEMORY */
784 /* mt003.301 Modifications - File Based task registration made
785 * common for both MULTICORE and NON-MULTICORE
786 * bucket info, as different regions may request for different no.
789 MtBktCfg mtBktInfo[MT_MAX_BKTS];
790 S16 msArgc; /* argc */
791 Txt **msArgv; /* argv */
792 S16 msOptInd; /* SGetOpt vars */
793 S8 *msOptArg; /* SGetOpt vars */
796 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
797 typedef struct _MtRegMemSz
803 #ifdef SS_USE_WLS_MEM
804 static MtRegMemSz mtDynMemSz[MT_MAX_BKTS];
805 static S16 SPartitionWlsDynMem();
806 static S16 SAllocateWlsDynMem();
809 static MtRegMemSz mtRegMemSz[MT_MAX_BKTS+1];
814 /* private variable declarations */
815 /* mt018.201 - change mtCMMRegCfg as array of pointers */
816 static CmMmRegCfg *mtCMMRegCfg[SS_MAX_REGS];
817 static CmMmRegCb *mtCMMRegCb[SS_MAX_REGS];
818 /* mt003.301 - Fixed compilation warnings */
819 /*mt004.301-addede new veriable for FAP*/
820 /*mt010.301 - removed veriable defined for FA*/
823 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
/* Accessors for the NTL/WLS handles stored in the global OS control point
 * (osCp). Bodies are partially elided in this extraction (braces and the
 * mtSetNtlHdl assignment are not visible). */
/* store the NTL handle into osCp (setter body elided in this view) */
826 void mtSetNtlHdl(unsigned int hdl)
/* return the NTL handle previously stored in osCp.ntl.hdl */
831 unsigned int mtGetNtlHdl()
833 return(osCp.ntl.hdl);
/* out-parameter accessor: writes the WLS interface handle into *hdlr */
837 void mtGetWlsHdl(void **hdlr)
839 *hdlr = osCp.wls.intf;
842 #ifdef XEON_MULTIPLE_CELL_CHANGES
843 S8 gWrWlsDeviceName[MAX_WLS_DEVICE_NAME_LEN];
844 S16 smWrReadWlsConfigParams (Void);
/* Open the WLS (wireless shared-memory) interface used to exchange buffers
 * with the L1. On DPDK-based builds this first initializes the EAL with a
 * fixed argv, then calls the build-specific WLS_Open() variant. Returns the
 * interface handle status (return statements elided in this extraction). */
847 static int SOpenWlsIntf()
851 #define WLS_DEVICE_NAME "wls0"
/* fixed EAL arguments; --iova-mode=pa forces physical-address IOVA */
853 char *my_argv[] = {"gnodeb", "-c3", "--proc-type=auto", "--file-prefix", WLS_DEVICE_NAME, "--iova-mode=pa"};
854 printf("Calling rte_eal_init: ");
855 for (i = 0; i < RTE_DIM(my_argv); i++)
857 printf("%s ", my_argv[i]);
/* rte_eal_init failure is unrecoverable for this process */
861 if (rte_eal_init(RTE_DIM(my_argv), my_argv) < 0)
862 rte_panic("Cannot init EAL\n");
/* build-variant selection of the WLS_Open signature/device name */
865 #ifdef XEON_SPECIFIC_CHANGES
866 #ifdef XEON_MULTIPLE_CELL_CHANGES
867 hdl = WLS_Open(gWrWlsDeviceName, 1);
869 hdl = WLS_Open(WLS_DEVICE_NAME, 1);
872 hdl = WLS_Open(WLS_DEVICE_NAME, WLS_MASTER_CLIENT, WLS_MEM_SIZE);
879 printf("Could not open WLS Interface \n");
894 * Desc: This function is the entry point for the final binary. It
895 * calls SInit() in the common code. It can be replaced by a
896 * user function if required (SInit() must still be called).
898 * Ret: none on success
/* Entry point for the final binary (per the header comment above): reads
 * WLS configuration where applicable, then calls SInit() to start SSI.
 * The `int main(` line and most of the body are elided in this extraction. */
908 int argc, /* argument count */
909 char **argv /* argument vector */
913 #ifdef XEON_MULTIPLE_CELL_CHANGES
914 /* Read the WLS parameters from the file and copy into global control block */
915 if(smWrReadWlsConfigParams() != ROK)
917 fprintf(stderr, "Failed to read WLS params from file wr_cfg.txt");
919 } /* end of if statement */
922 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
925 #endif /* INTEL_WLS */
929 /* mt003.301 Modifications */
/* reached when SInit() fails; process cannot start system services */
932 printf("\n SInit failed, SSI could not start \n");
933 /* pthread_exit(NULLP);*/ /* Commented to Come out of Main thread*/
937 /*mt010.301 cleanup part exposed to user*/
948 * Desc: This function is the entry point for the final binary. It
949 * calls SInit() in the common code. It can be replaced by a
950 * user function if required (SInit() must still be called).
952 * Ret: none on success
962 int argc, /* argument count */
963 char **argv /* argument vector */
979 * initialization functions
984 * Fun: Initialize OS control point
986 * Desc: This function initializes MTSS-specific information
987 * in the OS control point.
/* Body of ssdInitGen(): initializes MTSS-specific state in the OS control
 * point (osCp) — tick counter, startup semaphore, signal mask, and the
 * fatal-signal / SIGINT handlers. Many lines (braces, returns, the sigfillset
 * of `set`) are elided in this extraction. */
998 struct sigaction act;
1000 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1001 struct sigaction sa;
1005 /*mt014.301 : 4GMX release related changes*/
1006 #ifdef SS_4GMX_UCORE
1009 /* mt005.301 : Cavium changes */
1010 #ifdef SS_SEUM_CAVIUM
1011 /* set group mask for the core */
1012 cvmx_pow_set_group_mask(cvmx_get_core_num(), SS_CVMX_GRP_MASK);
1013 #endif /* SS_SEUM_CAVIUM */
/* reset the system tick counter maintained by the timer thread */
1015 osCp.dep.sysTicks = 0;
1017 /* mt020.201 - Addition for no command line available */
1019 /* parse command line */
1021 /* mt003.301 Additions */
/* file-based memory configuration was requested but did not complete */
1022 if(fileBasedMemCfg == TRUE && memConfigured == FALSE)
1024 printf("\n File Based Memory configuration failed \n")
1029 #ifndef RGL_SPECIFIC_CHANGES /* ANOOP :: This ssdInitMemInfo() was present in 2.1 */
1030 #ifndef SS_LOCKLESS_MEMORY
1031 #ifdef SS_MULTICORE_SUPPORT
1032 if(memConfigured == FALSE)
1038 /* initialize the started semaphore */
/* ssStarted: posted once system services are up; initial count 0 */
1039 if (sem_init(&osCp.dep.ssStarted, 0, 0) != 0)
1044 /* mt028.201 added compile time flag to allow not to mask signals */
1046 /* mask all signals in the main thread */
/* carve the signals we DO want delivered out of the (elided) full mask */
1048 sigdelset(&set, SIGINT);
1049 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1050 sigdelset(&set, SIGSEGV);
1051 sigdelset(&set, SIGUSR2);
1052 sigdelset(&set, SIGILL);
1053 #ifdef XEON_SPECIFIC_CHANGES
1054 sigdelset(&set, SIGABRT);
1055 sigdelset(&set, SIGTERM);
1056 sigdelset(&set, SIGHUP);
1059 pthread_sigmask(SIG_SETMASK, &set, NULLP);
1060 #endif /* UNMASK_SIG */
1062 /* install a SIGINT handler to shutdown */
1063 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
1065 /*Initialize SIGSEGV Signal */
1066 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
/* route SIGSEGV/SIGILL (and on XEON builds SIGABRT/SIGTERM/SIGHUP) to the
 * signal_segv crash handler; SA_SIGINFO selects the 3-argument handler form */
1068 memset(&sa, 0, sizeof(struct sigaction));
1069 sigemptyset(&sa.sa_mask);
1070 sa.sa_sigaction = signal_segv;
1071 sa.sa_flags = SA_SIGINFO;
1072 #ifndef XEON_SPECIFIC_CHANGES
1073 sigaction(SIGSEGV, &sa, NULL);
1075 memset(&sa, 0, sizeof(struct sigaction));
1076 sigemptyset(&sa.sa_mask);
1077 sa.sa_sigaction = signal_segv;
1078 sa.sa_flags = SA_SIGINFO;
1080 sigaction(SIGILL, &sa, NULL);
1082 if(sigaction(SIGILL, &sa, NULL) != 0)
1084 printf("Failed to process sigaction for the SIGILL\n");
1087 if(sigaction(SIGSEGV, &sa, NULL) != 0)
1089 printf("Failed to process sigaction for the SIGSEGV\n");
1092 if(sigaction(SIGABRT, &sa, NULL) != 0)
1094 printf("Failed to process sigaction for the SIGABRT\n");
1097 if(sigaction(SIGTERM, &sa, NULL) != 0)
1099 printf("Failed to process sigaction for the SIGTERM\n");
1102 if(sigaction(SIGHUP, &sa, NULL) != 0)
1104 printf("Failed to process sigaction for the SIGHUP\n");
/* legacy signal() registrations on non-T2K3K builds.
 * NOTE(review): POSIX forbids catching SIGKILL — the SIGKILL registration
 * below has no effect and could be removed */
1109 signal (SIGSEGV, mtSigSegvHndlr);
1110 signal (SIGKILL, mtSigSegvHndlr);
1111 signal (SIGUSR2, mtSigUsr2Hndlr);
1116 signal (SIGINT, mtStopHndlr);
/* SIGINT -> mtIntSigHndlr for orderly shutdown; block all other signals
 * while the handler runs */
1119 act.sa_handler = mtIntSigHndlr;
1120 sigfillset(&act.sa_mask);
1122 if (sigaction(SIGINT, &act, NULLP) != 0)
1128 /* mt040.201 initialise random seed */
/* seed SRand-style users from wall-clock time */
1129 osCp.dep.randSeed = time(NULLP);
1137 * Fun: De-initialize OS control point
1139 * Desc: This function reverses the initialization in ssdInitGen().
/* Reverse the initialization done by ssdInitGen(): tear down the startup
 * semaphore. (Function braces are elided in this extraction.) */
1148 Void ssdDeinitGen(void)
1152 sem_destroy(&osCp.dep.ssStarted);
1157 #ifdef SS_LOCKLESS_MEMORY
1161 * Fun: ssPutDynMemBlkSet
1163 * Desc: Returns the set of dynamic Blocks into the global region
1166 * Ret: ROK - successful,
1167 * RFAILED - unsuccessful.
/* Release a set of dynamic memory blocks back to the system: walks the
 * singly linked chain anchored at dynMemSetElem->nextBktPtr (each block's
 * first word is the pointer to the next block), free()ing bucketSetSize
 * blocks, then marks the element empty. Returns ROK/RFAILED (returns and
 * braces elided in this extraction). */
1174 S16 ssPutDynMemBlkSet
1176 uint8_t bktIdx, /* Index to bucket list */
1177 CmMmBlkSetElement *dynMemSetElem /* Memory set element which is needs to be
1178 added to global region */
1181 CmMmGlobRegCb *globReg;
1182 CmMmGlobalBktCb *bktCb;
/* global region control block kept in the OS control point */
1186 globReg = osCp.globRegCb;
1188 #if (ERRCLASS & ERRCLS_INT_PAR)
/* parameter sanity: bucket index must address an existing bucket */
1189 if(bktIdx >= globReg->numBkts)
1193 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1195 bktCb = &(globReg->bktTbl[bktIdx]);
1197 for(blkCnt = 0; blkCnt < bktCb->bucketSetSize; blkCnt++)
/* unlink the head block: its first word stores the next-block pointer */
1199 blkPtr = dynMemSetElem->nextBktPtr;
1200 dynMemSetElem->nextBktPtr = *((CmMmEntry **)blkPtr);
1201 free((Void *)blkPtr);
/* element now owns no blocks */
1204 dynMemSetElem->nextBktPtr = NULLP;
1205 dynMemSetElem->numFreeBlks = 0;
1212 * Fun: ssGetDynMemBlkSet
1214 * Desc: Gets the set of dynamic memory blocks from the global region
1217 * Ret: ROK - successful,
1218 * RFAILED - unsuccessful.
/* Acquire a fresh set of dynamic memory blocks: malloc()s bucketSetSize
 * blocks of the bucket's size and threads them into a singly linked chain
 * anchored at dynMemSetElem->nextBktPtr, then records the free count.
 * Returns ROK/RFAILED (returns and braces elided in this extraction). */
1225 S16 ssGetDynMemBlkSet
1227 uint8_t bktIdx, /* Index to bucket list */
1228 CmMmBlkSetElement *dynMemSetElem /* Memory set element which is updated
1229 with new set values */
1233 CmMmGlobRegCb *globReg;
1234 CmMmGlobalBktCb *bktCb;
1239 globReg = osCp.globRegCb;
1241 #if (ERRCLASS & ERRCLS_INT_PAR)
/* parameter sanity: bucket index must address an existing bucket */
1242 if(bktIdx >= globReg->numBkts)
1246 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1248 bktCb = &(globReg->bktTbl[bktIdx]);
/* basePtr tracks where the next allocated block's address is stored,
 * starting at the element's chain anchor */
1249 basePtr = &(dynMemSetElem->nextBktPtr);
1251 for(blkCnt = 0; blkCnt < bktCb->bucketSetSize; blkCnt++)
/* NOTE(review): a NULL check on this malloc is not visible in this
 * extraction — confirm the elided lines handle allocation failure */
1253 blkPtr = (Data *)malloc(bktCb->size);
/* the new block's first word becomes the link slot for the next block */
1255 basePtr = (CmMmEntry **)blkPtr;
1258 dynMemSetElem->numFreeBlks = bktCb->bucketSetSize;
1262 } /* ssGetDynMemBlkSet */
1267 * Fun: ssPutDynMemBlkSet
1269 * Desc: Returns the set of dynamic Blocks into the global region
1272 * Ret: ROK - successful,
1273 * RFAILED - unsuccessful.
/* Lockless-memory variant of ssPutDynMemBlkSet(): returns a block set to
 * the global region by moving it onto a free node taken from the bucket's
 * free-node list, under the bucket lock. doNotBlockForLock selects try-lock
 * vs blocking-lock behavior (the try-lock branch is elided in this
 * extraction; only the blocking SLock path is visible). Returns ROK/RFAILED
 * (returns and braces elided). */
1280 S16 ssPutDynMemBlkSet
1282 uint8_t bktIdx, /* Index to bucket list */
1283 CmMmBlkSetElement *dynMemSetElem, /* Memory set element which is needs to be
1284 added to global region */
1285 uint32_t doNotBlockForLock /* Boolean whether to block for lock or not */
1288 CmMmGlobRegCb *globReg;
1289 CmMmGlobalBktCb *bktCb;
1291 CmMmBlkSetElement *globMemNode;
1295 globReg = osCp.globRegCb;
1297 #if (ERRCLASS & ERRCLS_INT_PAR)
/* parameter sanity: bucket index must address an existing bucket */
1298 if(bktIdx >= globReg->numBkts)
1302 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1304 bktCb = &(globReg->bktTbl[bktIdx]);
1306 /* Lock the global region first. If the doNotBlockForLock is non-zero, the
1307 try lock is used as it is not required to block as it will be taken
1308 in the next go else it will be blocked for lock as we have to get the
1311 SLock(&(bktCb->bucketLock));
1317 /* Get a free node from the free node linked list */
1318 lstNode = cmLListFirst(&(bktCb->listFreeBktSet));
/* no free node available: release the lock and bail out */
1319 if(lstNode == NULLP)
1321 SUnlock(&(bktCb->bucketLock));
1325 cmLListDelFrm(&(bktCb->listFreeBktSet), lstNode);
1327 /* Copy the content of the received element information on to free node
1328 * and add it to valid linked list */
1329 globMemNode = (CmMmBlkSetElement *)lstNode->node;
/* transfer ownership of the block chain from the caller's element to the
 * global node, then empty the caller's element */
1330 globMemNode->numFreeBlks = dynMemSetElem->numFreeBlks;
1331 globMemNode->nextBktPtr = dynMemSetElem->nextBktPtr;
1332 dynMemSetElem->numFreeBlks = 0;
1333 dynMemSetElem->nextBktPtr = NULLP;
1335 cmLListAdd2Tail(&(bktCb->listValidBktSet), &(globMemNode->memSetNode));
1337 SUnlock(&(bktCb->bucketLock));
1345 * Fun: ssGetDynMemBlkSet
1347 * Desc: Gets the set of dynamic memory blocks from the global region
1350 * Ret: ROK - successful,
1351 * RFAILED - unsuccessful.
1353 * Notes: The parameter doNotBlockForLock specifies whether to block for lock
/* Lock-aware counterpart of the allocator above: pops the first node from
 * the bucket's valid-set list, copies its head pointer / free count into
 * the caller's dynMemSetElem, and recycles the node onto the free-node
 * list. doNotBlockForLock selects try-lock vs. blocking lock (the try-lock
 * branch is sampled out of this view). Returns ROK/RFAILED per the header
 * comment. */
1359 S16 ssGetDynMemBlkSet
1361 uint8_t bktIdx, /* Index to bucket list */
1362 CmMmBlkSetElement *dynMemSetElem, /* Memory set element which is updated
1363 with new set values */
1364 uint32_t doNotBlockForLock /* Boolean whether to block for lock or not */
1367 CmMmGlobRegCb *globReg;
1368 CmMmGlobalBktCb *bktCb;
1370 CmMmBlkSetElement *globMemNode;
1374 globReg = osCp.globRegCb;
1376 #if (ERRCLASS & ERRCLS_INT_PAR)
1377 if(bktIdx >= globReg->numBkts)
1381 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1383 bktCb = &(globReg->bktTbl[bktIdx]);
1385 /* Lock the global region first. If the doNotBlockForLock is non-zero, the
1386 try lock is used as it is not required to block as it will be taken
1387 in the next go else it will be blocked for lock as we have to get the
1390 SLock(&(bktCb->bucketLock));
1395 lstNode = cmLListFirst(&(bktCb->listValidBktSet));
1397 if(lstNode == NULLP)
/* Global region has no block set to hand out — unlock and fail. */
1399 SUnlock(&(bktCb->bucketLock));
1403 /* Delete the node from the valid linked list and copy the values of the
1404 * elements of structrues into pointer */
1405 cmLListDelFrm(&(bktCb->listValidBktSet), lstNode);
1406 globMemNode = (CmMmBlkSetElement *)lstNode->node;
1407 dynMemSetElem->numFreeBlks = globMemNode->numFreeBlks;
1408 dynMemSetElem->nextBktPtr = globMemNode->nextBktPtr;
1410 /* Add this node to the free node linked list */
1411 cmLListAdd2Tail(&(bktCb->listFreeBktSet), lstNode);
1413 SUnlock(&(bktCb->bucketLock));
1417 } /* ssGetDynMemBlkSet */
1420 #define NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN 100
1421 uint32_t gDynMemAlrm[4];
1422 static uint32_t memoryCheckCounter;
/* Hysteresis-style low-memory alarm for a global-region bucket. The alarm
 * state lives in gDynMemAlrm[]: once raised it is cleared when the bucket's
 * valid-set count recovers above 25; when not raised, every
 * NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN calls the count is re-checked and the
 * alarm is raised if it drops below 15. NOTE(review): 'reg' is used
 * directly as a bucket index (bktIdx = reg) — confirm that mapping against
 * the full source. Return value lines are sampled out of this view. */
1424 uint32_t isMemThreshReached(Region reg)
1426 CmMmGlobRegCb *globReg;
1427 CmMmGlobalBktCb *bktCb;
1428 uint8_t bktIdx= reg;
1430 globReg = osCp.globRegCb;
1432 #if (ERRCLASS & ERRCLS_INT_PAR)
1433 if(bktIdx >= globReg->numBkts)
1437 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1439 bktCb = &(globReg->bktTbl[bktIdx]);
1441 if(gDynMemAlrm[bktIdx])
1443 // printf ("under memory bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1444 SLock(&(bktCb->bucketLock));
/* Recovery threshold: more than 25 valid sets clears the alarm. */
1445 if(bktCb->listValidBktSet.count > 25)
1447 gDynMemAlrm[bktIdx] = FALSE;
1448 // printf ("recoverd bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1450 SUnlock(&(bktCb->bucketLock));
/* Periodic probe: only every NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN-th call
 * takes the lock to re-evaluate the low-water mark. NOTE(review):
 * memoryCheckCounter is a file-static shared by all callers — confirm
 * callers are serialized or tolerate races on it. */
1456 if(memoryCheckCounter++ >= NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN)
1458 // printf ("CHECK bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1459 SLock(&(bktCb->bucketLock));
/* Low-water mark: fewer than 15 valid sets raises the alarm. */
1460 if(bktCb->listValidBktSet.count < 15 )
1461 gDynMemAlrm[bktIdx] = TRUE;
1462 memoryCheckCounter = 0;
1463 SUnlock(&(bktCb->bucketLock));
1469 #endif /* USE_MALLOC */
1470 #endif /* SS_LOCKLESS_MEMORY */
1472 #ifdef SS_USE_ICC_MEMORY
1475 * Fun: Initialize region/pool tables
1477 * Desc: This function initializes MTSS-specific information
1478 * in the region/pool tables and configures the common
1479 * memory manager for use.
/* Returns the ICC memory handle stored in the dynamic-region control block
 * for 'region'. The range check (Klocwork fix ccpu00148484) guards the
 * dynRegionTbl index; the failure return on out-of-range is sampled out of
 * this view. */
1490 Void * ssGetIccHdl(Region region)
1492 CmMmDynRegCb *dynRegCb;
1494 /* Klock work fix ccpu00148484 */
1493 if(!(region < SS_MAX_REGS))
1498 dynRegCb = (CmMmDynRegCb *)osCp.dynRegionTbl[region].regCb;
1500 return (dynRegCb->iccHdl);
1502 #endif /* SS_USE_ICC_MEMORY */
1504 #ifdef T2K_MEM_LEAK_DBG
1505 RegionMemLeakInfo regMemLeakInfo;
1506 #endif /* T2K_MEM_LEAK_DBG */
1508 #ifdef SS_USE_WLS_MEM
/* Partitions the WLS allocation (osCp.wls.allocAddr) among the global
 * memory buckets: skips a leading 4 MiB, then assigns each bucket a
 * contiguous slice of mtDynMemSz[i].reqdSz bytes, and prints the resulting
 * start addresses. Assumes mtDynMemSz[].reqdSz was filled by
 * SAllocateWlsDynMem() — TODO confirm call ordering in the full source. */
1509 static S16 SPartitionWlsDynMem()
1512 uint8_t *bktMemStrtAddr = (uint8_t *)(((uint8_t*)osCp.wls.allocAddr) + (4 * 1024 * 1024));
1514 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1516 mtDynMemSz[i].startAddr = bktMemStrtAddr;
1517 bktMemStrtAddr += mtDynMemSz[i].reqdSz;
1520 printf("Global Memory Info: \n");
1521 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1523 printf("mtDynMemSz[%d]: [0x%016lx]\n", i, (unsigned long int)mtDynMemSz[i].startAddr);
/* Computes the total dynamic-memory requirement across all global buckets
 * (blkSize * numBlks each), requests that plus a 4 MiB header area from
 * the WLS interface, and then partitions it via SPartitionWlsDynMem().
 * NOTE(review): the WLS_Alloc return value is not checked in the visible
 * lines — confirm failure handling in the full source. */
1528 static S16 SAllocateWlsDynMem()
1533 memset(&mtDynMemSz[0], 0, sizeof(mtDynMemSz));
1535 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1537 reqdMemSz += (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1538 mtDynMemSz[i].reqdSz += (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1540 osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf,
1541 #ifdef INTEL_L1_V19_10
1544 (reqdMemSz + (4 * 1024 * 1024)));
1546 printf("\n *************** \n WLS memory: %lx, %d\n", (PTR)osCp.wls.allocAddr, reqdMemSz);
1547 SPartitionWlsDynMem();
/* Partitions the WLS allocation for static region 0, rounding the region's
 * requirement up to whole huge pages (gethugepagesizes). Records the
 * region start address for the T2K memory-leak tracker when enabled, and
 * leaves mtRegMemSz[i].startAddr pointing past the region for later
 * validation. The loop runs for exactly one region (WLS is region 0). */
1555 S16 SPartitionWlsMemory()
1560 uint64_t pageSize[1], hugePageSize;
1563 long int pageSize[1], hugePageSize;
/* Ceiling division: number of huge pages needed to cover X bytes. */
1566 #define DIV_ROUND_OFFSET(X,Y) ( X/Y + ((X%Y)?1:0) )
1568 uint8_t *regMemStrtAddr = (uint8_t *)osCp.wls.allocAddr;
1570 gethugepagesizes(pageSize,1);
1571 hugePageSize = pageSize[0];
1572 for (i = 0; i < 1; i++)
1574 mtRegMemSz[i].startAddr = regMemStrtAddr;
1575 //CM_LOG_DEBUG(CM_LOG_ID_MT, "Global Region-->Bkt[%d] Addr:%p\n", i, mtRegMemSz[i].startAddr);
1577 numHugePg = DIV_ROUND_OFFSET(mtRegMemSz[i].reqdSz, hugePageSize);
1578 reqdSz = numHugePg * hugePageSize;
1579 regMemStrtAddr += reqdSz;
1580 #ifdef T2K_MEM_LEAK_DBG
1581 /* Since wls is region 0 */
1582 regMemLeakInfo.regStartAddr[i] = (uint64_t)mtRegMemSz[i].startAddr;
1583 regMemLeakInfo.numActvRegions++;
1584 #endif /* T2K_MEM_LEAK_DBG */
1586 //Store last region addr for validation
/* NOTE(review): this overwrites startAddr with the END of the region —
 * intentional per the comment above, but surprising; verify consumers. */
1587 mtRegMemSz[i].startAddr = regMemStrtAddr;
1591 #ifdef SS_MEM_WL_DEBUG
/* Debug-only pointer sanity check (SS_MEM_WL_DEBUG): for type==0 the
 * pointer must lie inside the global (WLS) region span; otherwise it must
 * lie outside that span (static region). Violations are only printed, not
 * acted upon, in the visible lines. */
1592 Void SChkAddrValid(int type, int region, PTR ptr)
1594 char *tryPtr = NULL;
1595 if(type == 0) //Global
1597 if(ptr < mtRegMemSz[0].startAddr || ptr >=
1598 (mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr + mtGlobMemoCfg.heapSize))
1600 printf("****INVALID PTR in Global Region: ptr:%p start:%p end:%p***\n", ptr, mtRegMemSz[0].startAddr, mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr);
/* Static-region branch: a pointer inside the global span is invalid here. */
1606 if(ptr > mtRegMemSz[0].startAddr && ptr <= mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr )
1608 printf("****INVALID PTR in Static Region: ptr:%p start:%p end:%p***\n", ptr, mtRegMemSz[0].startAddr, mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr);
1614 #endif /* SS_MEM_WL_DEBUG */
/* Lays out the non-WLS (static) regions inside the buffer at startAddr:
 * regions 1..numRegions-1 each get a contiguous slice of their reqdSz
 * bytes (region 0 is WLS and handled elsewhere). Region start addresses
 * are also recorded for the T2K leak tracker when enabled. */
1616 S16 SPartitionStaticMemory(uint8_t *startAddr)
1621 uint8_t *regMemStrtAddr = (uint8_t *)startAddr;
1624 //for (i = 0; i < mtMemoCfg.numRegions; i++)
/* Starts at 1: region 0 belongs to WLS (see SPartitionWlsMemory). */
1625 for (i = 1; i < mtMemoCfg.numRegions; i++)
1627 mtRegMemSz[i].startAddr = regMemStrtAddr;
1628 reqdSz = /* regMemStrtAddr + */mtRegMemSz[i].reqdSz;
1629 regMemStrtAddr += reqdSz;
1630 #ifdef T2K_MEM_LEAK_DBG
1631 { /* Since region 1 onwards are used for non wls */
1632 regMemLeakInfo.regStartAddr[i] = (uint64_t)mtRegMemSz[i].startAddr;
1633 regMemLeakInfo.numActvRegions++;
1635 #endif /* T2K_MEM_LEAK_DBG */
/* Sums the static-memory requirement of region 0 (heap plus every
 * bucket's blkSize * numBlks), allocates a fixed 512 MiB WLS block, and
 * partitions it via SPartitionWlsMemory(). NOTE(review): the allocation
 * size is hard-coded to 512 MiB rather than reqdMemSz (the computed-size
 * call is commented out) and the WLS_Alloc result is not checked in the
 * visible lines — confirm both against the full source. */
1639 S16 SAllocateWlsMem()
1647 //memset(&mtRegMemSz[0], sizeof(mtRegMemSz), 0);
1648 memset(&mtRegMemSz[0], 0, sizeof(mtRegMemSz));
/* Only region 0 is sized here; the rest are static (SAllocateStaticMem). */
1650 for (i = 0; i < 1; i++)
1652 /* allocate space for the region */
1653 region = &mtMemoCfg.region[i];
1654 reqdMemSz += region->heapsize;
1655 mtRegMemSz[i].reqdSz += region->heapsize;
1657 for (j = 0; j < region->numBkts; j++)
1659 reqdMemSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1660 mtRegMemSz[i].reqdSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1663 osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf, (512 *1024 * 1024));
1664 //osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf, (reqdMemSz + (1024 * 1024 * 2 * 2)));
1666 printf("\n ************* \n WLS memory: %llx, %ld\n ****** \n", osCp.wls.allocAddr, reqdMemSz);
1668 printf("\n ************* \n WLS memory: %lx, %d\n ****** \n", (PTR)osCp.wls.allocAddr, reqdMemSz);
1670 SPartitionWlsMemory();
/* Sums the requirement of all non-WLS regions (1..numRegions-1: heap plus
 * every bucket's blkSize * numBlks), mallocs one buffer for them (plus a
 * 10 KiB pad), and hands it to SPartitionStaticMemory(). NOTE(review):
 * the malloc result is not checked in the visible lines — confirm failure
 * handling in the full source. */
1673 S16 SAllocateStaticMem()
1682 //memset(&mtRegMemSz[0], sizeof(mtRegMemSz), 0);
1684 //for (i = 0; i < mtMemoCfg.numRegions; i++)
/* Starts at 1: region 0 is covered by SAllocateWlsMem(). */
1685 for (i = 1; i < mtMemoCfg.numRegions; i++)
1687 /* allocate space for the region */
1688 region = &mtMemoCfg.region[i];
1689 reqdMemSz += region->heapsize;
1690 mtRegMemSz[i].reqdSz += region->heapsize;
1692 for (j = 0; j < region->numBkts; j++)
1694 reqdMemSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1695 mtRegMemSz[i].reqdSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1699 startAddr = malloc(reqdMemSz + (1024 * 10));
1701 printf("\n ************* \n Static memory: %llx, %ld\n ****** \n", startAddr, reqdMemSz);
1703 printf("\n ************* \n Static memory: %lx, %d\n ****** \n", (PTR)startAddr, reqdMemSz);
1705 SPartitionStaticMemory(startAddr);
1708 #endif /* INTEL_WLS */
1714 * Fun: Initialize region/pool tables
1716 * Desc: This function initializes MTSS-specific information
1717 * in the region/pool tables and configures the common
1718 * memory manager for use.
/* MTSS memory initialization. Depending on build flags it: (1) sets up
 * ICC dynamic regions (SS_USE_ICC_MEMORY), or (2) builds the lockless
 * global region and per-task dynamic regions (SS_LOCKLESS_MEMORY), then
 * (3) initializes the T2K leak tracker, WLS/static backing memory, and
 * finally the common memory manager (CMM) for every configured region.
 * On any allocation/init failure, all previously initialized regions are
 * torn down (cmMmRegDeInit + free) before returning. NOTE(review): this
 * view is sampled — braces, RFAILED returns and some declarations are not
 * visible; the comments below describe only what the visible lines show. */
1727 S16 ssdInitMem(void)
1729 /* mt018.201 - added local variable */
1734 Txt errMsg[256] = {'\0'};
1735 #ifdef SS_LOCKLESS_MEMORY
1736 CmMmDynRegCb *dynRegCb;
1737 #ifdef SS_USE_ICC_MEMORY
1739 CmMmGlobRegCb *globReg;
1742 #endif /* SS_LOCKLESS_MEMORY */
1745 /* Use the default SSI memory manager if the ICC memory manager is not
1746 * avilable. If ICC memory manager is avilable, it will be used for
1747 * all sharable memory allocation and de-allocation */
1748 #ifdef SS_LOCKLESS_MEMORY
1749 #ifdef SS_USE_ICC_MEMORY
1750 #ifndef YS_PHY_3_8_2
/* ICC path: one dynamic-region control block per configured region,
 * bucket sizes copied from the global memory configuration. */
1752 for (i = 0; i < mtDynMemoCfg.numRegions; i++)
1754 dynRegCb = (CmMmDynRegCb *)calloc(1, sizeof(CmMmDynRegCb));
1755 if(dynRegCb == NULLP)
1759 for(k = 0; k < mtDynMemoCfg.region[i].numBkts; k++)
1761 dynRegCb->bktSize[k] = mtGlobMemoCfg.bkt[k].blkSize;
1763 dynRegCb->region = i;
1764 cmMmDynRegInit(dynRegCb);
1765 printf("iccHdl = %lx\n", (PTR)dynRegCb->iccHdl);
1768 /* ysIccHdl = dynRegCb->iccHdl; */
1771 /* Initialize the global region first */
1772 osCp.globRegCb = calloc(1, sizeof(CmMmGlobRegCb));
1774 if(osCp.globRegCb == NULLP)
1779 globReg = (CmMmGlobRegCb *)osCp.globRegCb;
1781 #ifdef SS_USE_WLS_MEM
1782 SAllocateWlsDynMem();
/* Populate each global bucket: backing store comes from WLS partitions,
 * calloc, or the pre-partitioned region memory, per build flags. */
1785 for(i = 0; i < mtGlobMemoCfg.numBkts; i++)
1787 memSize = (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1788 #if !defined (INTEL_WLS) && defined (SS_USE_WLS_MEM)
1789 globReg->bktTbl[i].startAddr = (Data *)mtDynMemSz[i].startAddr;
1790 printf("Starting Address of Bkt Entry [%d]: [0x%016lx], memSize[%d]\n", i, (unsigned long int)globReg->bktTbl[i].startAddr, memSize);
1793 globReg->bktTbl[i].startAddr = (Data *)calloc(memSize, sizeof(Data));
1795 globReg->bktTbl[i].startAddr = (Data *)mtRegMemSz[i].startAddr;
1798 if(globReg->bktTbl[i].startAddr == NULLP)
1802 globReg->bktTbl[i].poolId = i;
1803 globReg->bktTbl[i].size = mtGlobMemoCfg.bkt[i].blkSize;
1804 globReg->bktTbl[i].numBlks = mtGlobMemoCfg.bkt[i].numBlks;
1805 globReg->bktTbl[i].bucketSetSize = mtGlobMemoCfg.bkt[i].bucketSetSize;
1808 globReg->numBkts = mtGlobMemoCfg.numBkts;
1809 cmMmGlobRegInit(globReg);
1811 /* Initialize the dynamic task regions and sanity check for the theshold
/* Thresholds must be non-zero and release >= acquire for each bucket. */
1813 for (i = 0; i < mtDynMemoCfg.numRegions; i++)
1815 dynRegCb = (CmMmDynRegCb *)calloc(1, sizeof(CmMmDynRegCb));
1816 if(dynRegCb == NULLP)
1820 for(k = 0; k < mtDynMemoCfg.region[i].numBkts; k++)
1822 if((mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold <
1823 mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold) ||
1824 (mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold == 0) ||
1825 (mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold == 0))
1827 #ifdef XEON_SPECIFIC_CHANGES
1832 dynRegCb->bktTbl[k].poolId = k;
1833 dynRegCb->bktTbl[k].size = mtGlobMemoCfg.bkt[k].blkSize;
1834 dynRegCb->bktTbl[k].blkSetRelThreshold = mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold;
1835 dynRegCb->bktTbl[k].blkSetAcquireThreshold = mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold;
1836 dynRegCb->bktTbl[k].bucketSetSize = mtGlobMemoCfg.bkt[k].bucketSetSize;
1837 if(dynRegCb->bktMaxBlkSize < dynRegCb->bktTbl[k].size)
1839 dynRegCb->bktMaxBlkSize = dynRegCb->bktTbl[k].size;
1842 dynRegCb->region = i;
1843 dynRegCb->numBkts = mtDynMemoCfg.region[i].numBkts;
1844 cmMmDynRegInit(dynRegCb);
1846 #endif /* SS_USE_ICC_MEMORY */
1847 #endif /* SS_LOCKLESS_MEMORY */
1849 #ifdef T2K_MEM_LEAK_DBG
1851 /* Initailize mem leak tool memorys for debguing */
1852 regMemLeakInfo.numActvRegions=0;
1853 for(reg=0; reg <SS_MAX_REGS; reg++)
/* NOTE(review): malloc result used by memset without a NULL check in the
 * visible lines — acceptable only for debug builds; confirm. */
1855 regMemLeakInfo.gMemLeakInfo[reg] = malloc(sizeof(T2kMeamLeakInfo)*T2K_MEM_LEAK_INFO_TABLE_SIZE);
1856 memset(regMemLeakInfo.gMemLeakInfo[reg],0x0,
1857 sizeof(T2kMeamLeakInfo)*T2K_MEM_LEAK_INFO_TABLE_SIZE);
1858 regMemLeakInfo.regStartAddr[reg] = 0;
1861 regMemLeakInfo.regStartAddr[reg] = 0;
1862 if (pthread_mutex_init(&(regMemLeakInfo.memLock[reg]), NULL) != 0)
1864 printf("\n mutex init failed\n");
1870 /* Now allocate WLS memory */
1872 SAllocateStaticMem();
1874 /* mt018.201 - CMM Initialization */
1875 for (i = 0; i < mtMemoCfg.numRegions; i++)
1877 /* allocate space for the region control block */
1878 mtCMMRegCb[i] = (CmMmRegCb *)calloc(1, sizeof(CmMmRegCb));
1879 #ifdef TENB_RTLIN_CHANGES
/* Pin the control block in RAM for real-time behavior. */
1880 mlock(mtCMMRegCb[i], sizeof(CmMmRegCb));
1882 if (mtCMMRegCb[i] == NULLP)
1884 sprintf(errMsg,"\n ssdInitMem(): Could not allocated memory \
1885 for the Region:%d control block\n",i);
/* Roll back every region initialized so far before failing. */
1887 for (k = 0; k < i; k++)
1889 cmMmRegDeInit(mtCMMRegCb[k]);
1890 free(mtCMMRegCfg[k]->vAddr);
1891 free(mtCMMRegCb[k]);
1892 free(mtCMMRegCfg[k]);
1897 mtCMMRegCfg[i] = (CmMmRegCfg *)calloc(1, sizeof(CmMmRegCfg));
1898 #ifdef TENB_RTLIN_CHANGES
1899 mlock(mtCMMRegCfg[i], sizeof(CmMmRegCfg));
1901 if (mtCMMRegCfg[i] == NULLP)
1903 for (k = 0; k < i; k++)
1905 cmMmRegDeInit(mtCMMRegCb[k]);
1906 free(mtCMMRegCfg[k]->vAddr);
1907 free(mtCMMRegCb[k]);
1908 free(mtCMMRegCfg[k]);
1910 free(mtCMMRegCb[i]);
1915 /* allocate space for the region */
1916 region = &mtMemoCfg.region[i];
1917 mtCMMRegCfg[i]->size = region->heapsize;
1918 for (j = 0; j < region->numBkts; j++)
1920 /* mt033.201 - addition for including the header size while computing the total size */
1921 #ifdef SSI_DEBUG_LEVEL1
1922 mtCMMRegCfg[i]->size += (region->bkt[j].blkSize + sizeof(CmMmBlkHdr)) *\
1923 (region->bkt[j].numBlks);
1925 mtCMMRegCfg[i]->size += region->bkt[j].blkSize * region->bkt[j].numBlks;
1926 #endif /* SSI_DEBUG_LEVEL1 */
1929 mtCMMRegCfg[i]->vAddr = (Data *)mtRegMemSz[i].startAddr;
1931 mtCMMRegCfg[i]->vAddr = (Data *)calloc(mtCMMRegCfg[i]->size,
1934 #ifdef XEON_SPECIFIC_CHANGES
1935 CM_LOG_DEBUG(CM_LOG_ID_MT, "Static Region-->Bkt[%d] Addr:[%p] RegionId=[%d] Size=[%d] \n",
1936 i, mtCMMRegCfg[i]->vAddr, region->regionId, mtCMMRegCfg[i]->size);
1938 #ifdef TENB_RTLIN_CHANGES
1939 mlock(mtCMMRegCfg[i]->vAddr, mtCMMRegCfg[i]->size*sizeof(Data));
1942 if (mtCMMRegCfg[i]->vAddr == NULLP)
1944 sprintf(errMsg,"\n ssdInitMem(): Could not allocate memory \
1945 for the Region:%d \n",i);
1947 for (k = 0; k < i; k++)
1949 cmMmRegDeInit(mtCMMRegCb[k]);
1950 free(mtCMMRegCfg[k]->vAddr);
1951 free(mtCMMRegCb[k]);
1952 free(mtCMMRegCfg[k]);
1954 free(mtCMMRegCb[i]);
1955 free(mtCMMRegCfg[i]);
1960 /* set up the CMM configuration structure */
1961 mtCMMRegCfg[i]->lType = SS_LOCK_MUTEX;
1962 mtCMMRegCfg[i]->chFlag = 0;
1963 mtCMMRegCfg[i]->bktQnSize = MT_BKTQNSIZE;
1964 mtCMMRegCfg[i]->numBkts = region->numBkts;
1966 for (j = 0; j < region->numBkts; j++)
1968 mtCMMRegCfg[i]->bktCfg[j].size = region->bkt[j].blkSize;
1969 mtCMMRegCfg[i]->bktCfg[j].numBlks = region->bkt[j].numBlks;
1972 /* initialize the CMM */
1973 #ifdef SS_LOCKLESS_MEMORY
1974 if (cmMmStatRegInit(region->regionId, mtCMMRegCb[i], mtCMMRegCfg[i]) != ROK)
1976 if (cmMmRegInit(region->regionId, mtCMMRegCb[i], mtCMMRegCfg[i]) != ROK)
1977 #endif /* SS_LOCKLESS_MEMORY */
1979 for (k = 0; k < i; k++)
1981 cmMmRegDeInit(mtCMMRegCb[k]);
1982 free(mtCMMRegCfg[k]->vAddr);
1983 free(mtCMMRegCb[k]);
1984 free(mtCMMRegCfg[k]);
1986 free(mtCMMRegCfg[i]->vAddr);
1987 free(mtCMMRegCb[i]);
1988 free(mtCMMRegCfg[i]);
1993 /* initialize the STREAMS module */
1994 /* mt019.201: STREAMS module will only apply to DFLT_REGION */
1995 if (region->regionId == 0)
1997 if (ssStrmCfg(region->regionId, region->regionId) != ROK)
1999 for (k = 0; k < i; k++)
2001 cmMmRegDeInit(mtCMMRegCb[k]);
2002 free(mtCMMRegCfg[k]->vAddr);
2003 free(mtCMMRegCb[k]);
2004 free(mtCMMRegCfg[k]);
2006 cmMmRegDeInit(mtCMMRegCb[i]);
2007 free(mtCMMRegCfg[i]->vAddr);
2008 free(mtCMMRegCb[i]);
2009 free(mtCMMRegCfg[i]);
2014 /* mt001.301 : Additions */
2015 #ifdef SS_MEM_LEAK_STS
2017 #endif /* SS_MEM_LEAK_STS */
2026 * Fun: De-initialize region/pool tables
2028 * Desc: This function reverses the initialization in ssdInitMem().
/* Reverses ssdInitMem(): closes the memory-leak statistics module when
 * built in, then de-initializes every CMM region and frees its backing
 * store, control block and configuration. */
2037 Void ssdDeinitMem(void)
2039 /* mt018.201 - added local variables */
2042 /* mt008.301 Additions */
2043 #ifdef SS_MEM_LEAK_STS
2044 cmDeinitMemLeakMdl();
2045 #endif /* SS_MEM_LEAK_STS */
2047 for (i = 0; i < mtMemoCfg.numRegions; i++)
2049 cmMmRegDeInit(mtCMMRegCb[i]);
2050 free(mtCMMRegCfg[i]->vAddr);
2051 free(mtCMMRegCb[i]);
2052 free(mtCMMRegCfg[i]);
2061 * Fun: Initialize task table
2063 * Desc: This function initializes MTSS-specific information
2064 * in the task table.
/* Initializes the MTSS task table. With multicore/affinity support the
 * per-system-task LWP ids are zeroed; otherwise the visible lines do
 * nothing (return is sampled out of this view). */
2073 S16 ssdInitTsk(void)
2075 /* mt001.301 : Additions */
2076 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
2077 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
2078 uint32_t tskInd = 0;
2079 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
2083 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
2084 #if defined(SS_MULTICORE_SUPPORT) || defined(SS_AFFINITY_SUPPORT)
2085 /* initialize system task information */
2086 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
2088 osCp.sTskTbl[tskInd].dep.lwpId = 0;
2090 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
2097 * Fun: Deinitialize task table
2099 * Desc: This function reverses the initialization perfomed in
/* Reverses ssdInitTsk(); body is sampled out of this view. */
2109 Void ssdDeinitTsk(void)
2116 #ifdef SS_DRVR_SUPPORT
2119 * Fun: Initialize driver task table
2121 * Desc: This function initializes MTSS-specific information
2122 * in the driver task table.
/* Initializes the driver-task table: clears each entry's flag, creates
 * the pipe used by SSetIntPend() to signal the isTskHdlr thread, then
 * spawns the detached mtIsTskHdlr thread with its own stack size and
 * system scope. The thread attribute object is destroyed on both the
 * failure and success paths. */
2131 S16 ssdInitDrvr(void)
2135 pthread_attr_t attr;
2140 /* initialize the dependent portion of the driver task entries */
2141 for (i = 0; i < SS_MAX_DRVRTSKS; i++)
2143 osCp.drvrTskTbl[i].dep.flag = FALSE;
2147 /* create pipe for communication between SSetIntPend() and
2148 * the isTskHdlr thread.
2150 if (pipe(osCp.dep.isFildes) != 0)
2156 /* create the isTskHdlr thread */
2157 pthread_attr_init(&attr);
2158 /* mt021.201 - Addition to set stack size */
2159 pthread_attr_setstacksize(&attr, (size_t)MT_ISTASK_STACK);
2160 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2161 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2162 if ((pthread_create(&osCp.dep.isTskHdlrTID, &attr, mtIsTskHdlr, NULLP)) != 0)
2164 /* mt020.201 - Addition for destroying thread attribute object attr */
2165 pthread_attr_destroy(&attr);
2171 /*mt014.301 : 4GMX release related changes*/
2172 #ifdef SS_4GMX_UCORE
2180 /* mt020.201 - Addition for destroying thread attribute object attr */
2181 pthread_attr_destroy(&attr);
2190 * Fun: Deinitialize driver information
2192 * Desc: This function reverses the initialization performed in
/* Reverses ssdInitDrvr(): cancels the isTskHdlr thread (retrying until
 * pthread_cancel succeeds) and closes the TL interface handle(s); the
 * second handle is closed only in radio-cluster mode. */
2202 Void ssdDeinitDrvr(void)
2204 /* mt008.301: Terminate the Driver Task on exit */
2205 while(pthread_cancel(osCp.dep.isTskHdlrTID));
2208 TL_Close(AppContext.hUAII);
2209 if (clusterMode == RADIO_CLUSTER_MODE)
2211 TL_Close(AppContext.hUAII_second);
2222 * Fun: Initialize timer table
2224 * Desc: This function initializes MTSS-specific information
2225 * in the timer table.
/* Initializes the MTSS timer service: resets the timer queue (SS_MAX_TMRS
 * entries), adds a dedicated timer system task when multicore support is
 * on, then spawns the detached mtTmrHdlr thread with SCHED_FIFO at max
 * priority. With SS_THR_REG_MAP the creation loops until the thread id
 * can be placed in the thread memory-map table (see comment below). */
2234 S16 ssdInitTmr(void)
2236 pthread_attr_t attr;
2237 struct sched_param param_sched;
2238 /* mt010.21: addition */
2240 #ifdef SS_MULTICORE_SUPPORT
2242 #endif /* SS_MULTICORE_SUPPORT */
2243 #ifdef SS_THR_REG_MAP
2244 uint32_t threadCreated = FALSE;
2245 #endif /* SS_THR_REG_MAP */
2249 osCp.dep.tmrTqCp.tmrLen = SS_MAX_TMRS;
2250 /* mt010.21: addition */
2251 osCp.dep.tmrTqCp.nxtEnt = 0;
2252 for (i=0; i< SS_MAX_TMRS; i++)
2254 osCp.dep.tmrTq[i].first = (CmTimer *)NULLP;
2257 #ifdef SS_MULTICORE_SUPPORT
2258 sTsk = ssdAddTmrSTsk();
2263 #endif /* SS_MULTICORE_SUPPORT */
2264 /* create the timer handler thread */
2265 pthread_attr_init(&attr);
2266 /* mt021.201 - Addition to set stack size */
2267 pthread_attr_setstacksize(&attr, (size_t)MT_TMRTASK_STACK);
2268 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2269 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2270 pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
/* Timer thread runs at the highest FIFO priority so timer ticks are not
 * starved by worker threads. */
2271 param_sched.sched_priority = sched_get_priority_max(SCHED_FIFO);
2272 pthread_attr_setschedparam(&attr, &param_sched);
2275 #ifdef SS_THR_REG_MAP
2276 /* When the thread is created, we check for the memory mapping table if
2277 * threadId can be placed in thread memory map table. If it is not able to place
2278 * threadId is stored in tmporary array. Once thread is created successful,
2279 * thread_cancel is sent for each thread which are created before. All the
2280 * threads are made to wait on sema which is cancel point for thread.
2282 while(threadCreated == FALSE)
2285 if ((pthread_create(&osCp.dep.tmrHdlrTID, &attr, mtTmrHdlr, NULLP)) != 0)
2287 /* mt020.201 - Addition for destroying thread attribute object attr */
2288 pthread_attr_destroy(&attr);
2293 #ifdef SS_THR_REG_MAP
2294 threadCreated = ssCheckAndAddMemoryRegionMap(osCp.dep.tmrHdlrTID,
2297 #endif /* SS_THR_REG_MAP */
2298 #ifdef SS_MEM_WL_DEBUG
2299 tmpRegTidMap[sTsk->region] = osCp.dep.tmrHdlrTID;
2302 /* mt020.201 - Addition for destroying thread attribute object attr */
2303 pthread_attr_destroy(&attr);
2312 * Fun: Deinitialize timer table
2314 * Desc: This function reverses the initialization performed in
/* Reverses ssdInitTmr(): under multicore support, locks the system task
 * table, tears down the timer system task (entry 0 is always the timer
 * entry), returns the entry to the free list, unlocks; finally cancels
 * the timer handler thread (retrying until pthread_cancel succeeds). */
2324 Void ssdDeinitTmr(void)
2326 #ifdef SS_MULTICORE_SUPPORT
2329 #endif /* SS_MULTICORE_SUPPORT */
2332 #ifdef SS_MULTICORE_SUPPORT
2333 ret = SLock(&osCp.sTskTblLock);
2337 #if (ERRCLASS & ERRCLS_DEBUG)
2338 MTLOGERROR(ERRCLS_DEBUG, EMT008, (ErrVal) ret,
2339 "Could not lock system task table");
2343 sTsk = &osCp.sTskTbl[0]; /* first entry is timer entry always */
2344 /* clean up the system task entry */
2348 SDestroyLock(&sTsk->lock);
2349 ssDestroyDmndQ(&sTsk->dQ);
2352 /* make this entry available in the system task table */
2353 sTsk->nxt = osCp.nxtSTskEntry;
2354 osCp.nxtSTskEntry = 0;
2358 /* unlock the system task table */
2359 SUnlock(&osCp.sTskTblLock);
2361 #endif /* SS_MULTICORE_SUPPORT */
2362 /* mt008.301: Terminate the timer thread on exit */
2363 while(pthread_cancel(osCp.dep.tmrHdlrTID));
2373 * Desc: Pre-tst() initialization.
/* Console/logging initialization (renamed from ssdInitFinal, mt008.301):
 * binds stdin/stdout to osCp, optionally disables canonical terminal
 * input (CONSTDIO), makes the input fd blocking, and spawns the detached
 * console-handler thread (mtConHdlr) via SCreatePThread. */
2382 S16 ssdInitLog(void)
2384 /* mt027.201 - Modification to fix warnings with no STDIN and STDOUT */
2388 pthread_attr_t attr;
2391 #endif /* CONSTDIO */
2396 /* mt008.301: ssdInitFinal changed to ssdInitLog */
2401 osCp.dep.conInFp = (FILE *) stdin;
2402 osCp.dep.conOutFp = (FILE *) stdout;
2403 /* added compile time flag CONRD: mt017.21 */
2407 /* disable canonical input processing */
2408 fd = fileno(osCp.dep.conInFp);
2409 if ((tcgetattr(fd, &tio)) != 0)
2411 printf("Error: disable canonical input processing\n");
/* Raw-ish mode: deliver input per character rather than per line. */
2415 tio.c_lflag &= ~ICANON;
2416 tio.c_cc[VMIN] = 1; /* wait for a minimum of 1 character input */
2417 tio.c_cc[VTIME] = 0;
2418 if ((tcsetattr(fd, TCSANOW, &tio)) != 0)
2420 printf("Error: while tcsetattr() processing\n");
2424 #endif /* CONSTDIO */
2427 /* set up the input fd to block when no data is available */
2428 fd = fileno(osCp.dep.conInFp);
2429 flags = fcntl(fd, F_GETFL, &flags);
2430 flags &= ~O_NONBLOCK;
2431 if (fcntl(fd, F_SETFL, flags) == -1)
2433 printf("Error: while fcntl processing\n");
2438 /* create the console handler thread */
2439 pthread_attr_init(&attr);
2440 /* mt021.201 - Addition to set stack size */
2441 pthread_attr_setstacksize(&attr, (size_t)MT_CONSOLE_STACK);
2442 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2443 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2446 if((SCreatePThread(&osCp.dep.conHdlrTID, &attr, mtConHdlr, NULLP)) != 0)
2448 /* mt020.201 - Addition for destroying thread attribute object attr */
2449 pthread_attr_destroy(&attr);
2451 printf("Error: Logging Thread creation failed \n");
2455 /* mt020.201 - Addition for destroying thread attribute object attr */
2456 pthread_attr_destroy(&attr);
2470 * Desc: This function reverses the initialization performed in
2480 /* mt008.301: ssdDeinitFinal changed to ssdDeinitLog */
/* Reverses ssdInitLog(): cancels the console reader thread, retrying
 * until pthread_cancel succeeds. */
2481 Void ssdDeinitLog(void)
2483 /* mt008.301: ssdDeinitFinal changed to ssdDeinitLog */
2486 /* mt008.301: Terminate the console reader on exit */
2487 while(pthread_cancel(osCp.dep.conHdlrTID));
2493 /* mt001.301 : Additions */
/* Watchdog subsystem bring-up: creates the watchdog and heartbeat-receiver
 * system tasks, registers/attaches their TAPA tasks (ENTDW sender, ENTHB
 * receiver), initializes the watchdog pst, timer queue and a 100 ms timer,
 * opens a UDP socket bound to INADDR_ANY/in6addr_any on 'port', and posts
 * an initial EVTSSHRTBTREQ message to kick off the heartbeat cycle. */
2497 S16 ssdInitWatchDog(uint16_t port)
2500 Txt prntBuf[PRNTSZE];
2503 #ifdef SS_WATCHDOG_IPV6
2504 struct sockaddr_in6 tmpaddr;
2506 struct sockaddr_in tmpaddr;
2507 #endif /* SS_WATCHDOG_IPV6 */
2508 #ifdef SS_MULTIPLE_PROCS
2509 ProcId procId = SS_WD_WDPROC;
2510 if (SAddProcIdLst(1, &procId) != ROK)
2514 #endif /* SS_MULTIPLE_PROCS */
2517 SInitLock(&osCp.wdCp.wdLock, SS_LOCK_MUTEX);
2519 /* Create a watch dog system task */
2520 SCreateSTsk(0, &(osCp.wdCp.watchDgTskId));
2522 /* Create a watch dog reveiver system task */
2523 SCreateSTsk(0, &(osCp.wdCp.watchDgRcvrTskId));
2525 /* Register and attach watch dog TAPA task */
2526 #ifdef SS_MULTIPLE_PROCS
2527 SRegTTsk (procId, ENTDW, INST0, TTNORM, PRIOR0, NULLP, watchDgActvTsk);
2528 SAttachTTsk (procId, ENTDW, INST0, osCp.wdCp.watchDgTskId);
2530 SRegTTsk ( ENTDW, INST0, TTNORM, PRIOR0, NULLP, watchDgActvTsk);
2531 SAttachTTsk ( ENTDW, INST0, osCp.wdCp.watchDgTskId);
2532 #endif /* SS_MULTIPLE_PROCS */
2533 /* Register and attach watch dog receiver TAPA task */
2534 #ifdef SS_MULTIPLE_PROCS
2535 SRegTTsk (procId, ENTHB, INST0, TTNORM, PRIOR0, NULLP, watchDgRcvrActvTsk);
2536 SAttachTTsk (procId, ENTHB, INST0, osCp.wdCp.watchDgRcvrTskId);
2538 SRegTTsk ( ENTHB, INST0, TTNORM, PRIOR0, NULLP, watchDgRcvrActvTsk);
2539 SAttachTTsk ( ENTHB, INST0, osCp.wdCp.watchDgRcvrTskId);
2540 #endif /* SS_MULTIPLE_PROCS */
2542 #ifndef SS_MULTIPLE_PROCS
2543 osCp.wdCp.watchDgPst.srcProcId = SFndProcId();
2544 osCp.wdCp.watchDgPst.dstProcId = SFndProcId();
2546 osCp.wdCp.watchDgPst.srcProcId = procId;
2547 osCp.wdCp.watchDgPst.dstProcId = procId;
2548 #endif /* SS_MULTIPLE_PROCS */
2550 /* Initialise the pst structure */
2551 ssdInitWatchDgPst(&(osCp.wdCp.watchDgPst));
2552 /* Initialize the watch dog timer resolution default is 1 sec */
2554 cmInitTimers(osCp.wdCp.watchDgTmr, (uint8_t)1);
2555 osCp.wdCp.watchDgTqCp.nxtEnt = 0;
2556 osCp.wdCp.watchDgTqCp.tmrLen = 1;
2557 for(idx = 0; idx < 1; idx++)
2559 osCp.wdCp.watchDgTs[idx].first = NULLP;
2560 osCp.wdCp.watchDgTs[idx].tail = NULLP;
2562 #ifdef SS_MULTIPLE_PROCS
2563 SRegCfgTmr(procId,ENTDW, INST0, 10, SS_100MS, ssdWatchDgActvTmr);
2565 SRegCfgTmr(ENTDW, INST0, 10, SS_100MS, ssdWatchDgActvTmr);
2566 #endif /* SS_MULTIPLE_PROCS */
2568 /* Create the watch dog receiver socket */
2569 osCp.wdCp.globWd.sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
2570 if(osCp.wdCp.globWd.sock == -1)
2572 sprintf(prntBuf,"ssdInitWatchDog: socket failed errno [%d]\n", errno);
2576 #ifdef SS_WATCHDOG_IPV6
/* NOTE(review): 'tmpadDr' in the sizeof below looks like a typo for
 * 'tmpaddr' — compiles only if such a symbol exists; verify the IPv6
 * build of the full source. */
2577 tmpaddr.sin6_len = sizeof(tmpadDr);
2578 tmpaddr.sin6_family = AF_INET6;
2579 tmpaddr.sin6_addr = in6addr_any;
2580 tmpaddr.sin6_port = htons(port);
2582 tmpaddr.sin_family = AF_INET;
2583 tmpaddr.sin_addr.s_addr = htonl(INADDR_ANY);
2584 tmpaddr.sin_port = htons(port);
2585 #endif /* SS_WATCHDOG_IPV6 */
2587 if(bind(osCp.wdCp.globWd.sock, (struct sockaddr *)&tmpaddr, sizeof(struct sockaddr)) != 0
2590 sprintf(prntBuf,"ssdInitWatchDog: bind failed errno [%d]\n", errno);
2594 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
2598 #ifndef SS_MULTIPLE_PROCS
2599 pst.srcProcId = SFndProcId();
2600 pst.dstProcId = SFndProcId();
2602 pst.srcProcId = procId;
2603 pst.dstProcId = procId;
2604 #endif /* SS_MULTIPLE_PROCS */
2605 pst.event = EVTSSHRTBTREQ;
2606 ssdInitWatchDgPst(&pst);
2607 SPstTsk(&pst, mBuf);
/* Fills the common fields of a watchdog Pst: loose coupling, default
 * region/pool, priority 0, specific route, ENTDW -> ENTHB. Proc ids and
 * the return are set by callers / sampled out of this view. */
2612 S16 ssdInitWatchDgPst(Pst *pst)
2615 pst->selector = SS_LOOSE_COUPLING;
2617 pst->region = DFLT_REGION; /* region */
2618 pst->pool = DFLT_POOL; /* pool */
2620 pst->prior = PRIOR0; /* priority */
2621 pst->route = RTESPEC; /* route */
2623 pst->dstEnt = ENTHB; /* destination entity */
2625 pst->srcEnt = ENTDW; /* source entity */
2631 #ifdef SS_MULTIPLE_PROCS
/* Watchdog timer activation entry: drives the watchdog timer queue,
 * invoking ssdWatchDgTmrEvt for each expired timer. The multi-proc
 * variant's parameter list is sampled out of this view. */
2632 S16 ssdWatchDgActvTmr
2639 S16 ssdWatchDgActvTmr(Void)
2640 #endif /* SS_MULTIPLE_PROCS */
2643 cmPrcTmr(&osCp.wdCp.watchDgTqCp, osCp.wdCp.watchDgTs, (PFV)ssdWatchDgTmrEvt);
/* Heartbeat-request timer expiry handler: under the watchdog lock, any
 * node whose status is still 0 (no heartbeat reply seen) is reported and
 * the user callback is invoked; then, unless the watchdog is stopped, the
 * timer is re-armed and a new heartbeat round is sent. */
2648 Void ssdWatchDgTmrEvt
2650 PTR cb, /* control block */
2651 S16 event /* timer number */
2654 /* mt003.301 Fixed warings */
2658 Txt prntBuf[PRNTSZE];
2667 SPrint("Timer Heartbeat Request Expired");
2669 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2674 SLock(&osCp.wdCp.wdLock);
2675 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
/* status == 0 means the node never answered the last heartbeat. */
2677 if(osCp.wdCp.globWd.wdsta[i].status == 0)
2679 sprintf(prntBuf, "Node [ %s ] Down. Calling user callback\n", inet_ntoa(osCp.wdCp.globWd.wdsta[i].addr));
2681 if(osCp.wdCp.globWd.callback != 0)
2683 osCp.wdCp.globWd.callback(osCp.wdCp.globWd.data);
2687 SUnlock(&osCp.wdCp.wdLock);
2689 if(!osCp.wdCp.globWd.watchdogStop)
2691 ssdStartWatchDgTmr(NULLP, SS_TMR_HRTBT, osCp.wdCp.globWd.timeout);
2692 ssdSndHrtBtMsg(restartTmr, SS_WD_HB_REQ);
/* Starts (re-arms) a watchdog timer. For the heartbeat timer it first
 * clears every node's status under the lock so the next expiry can detect
 * non-responders, then fills the cmPlcCbTq argument structure with the
 * watchdog timer queue and the requested wait (also cached as the global
 * timeout). The actual timer-placement call is sampled out of this view. */
2702 Void ssdStartWatchDgTmr
2713 Txt prntBuf[PRNTSZE];
2717 /* mt003.301 Modifications */
2720 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2721 if(event == SS_TMR_HRTBT)
2723 SPrint("\nSTART SS_TMR_HRTBT");
2730 SLock(&osCp.wdCp.wdLock);
/* Reset per-node status: replies received before the next expiry will
 * set it again. */
2731 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2733 osCp.wdCp.globWd.wdsta[i].status = 0;
2735 SUnlock(&osCp.wdCp.wdLock);
2737 arg.tq = osCp.wdCp.watchDgTs;
2738 arg.tqCp = &osCp.wdCp.watchDgTqCp;
2739 arg.timers = osCp.wdCp.watchDgTmr;
2740 arg.cb = (PTR)NULLP;
2742 arg.wait = osCp.wdCp.globWd.timeout = wait;
/* Stops a watchdog timer. Mirrors ssdStartWatchDgTmr: clears per-node
 * status under the lock for the heartbeat timer and fills the timer-queue
 * argument structure; the actual timer-removal call is sampled out of
 * this view. */
2750 Void ssdStopWatchDgTmr
2759 Txt prntBuf[PRNTSZE];
2763 /* mt003.301 Modifications */
2766 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2767 if(event == SS_TMR_HRTBT)
2769 SPrint("STOP SS_TMR_HRTBT");
2773 SLock(&osCp.wdCp.wdLock);
2774 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2776 osCp.wdCp.globWd.wdsta[i].status = 0;
2778 SUnlock(&osCp.wdCp.wdLock);
2781 arg.tq = osCp.wdCp.watchDgTs;
2782 arg.tqCp = &osCp.wdCp.watchDgTqCp;
2783 arg.timers = osCp.wdCp.watchDgTmr;
2784 arg.cb = (PTR)NULLP;
/* Body of ssdSndHrtBtMsg (its signature line is sampled out of this
 * view): builds the "<HB>REQ</HB>" heartbeat datagram and, under the
 * watchdog lock, sends it over the watchdog UDP socket to every
 * configured node (entries with a zero address are skipped). sendto
 * failures are formatted into prntBuf; successes likewise. */
2803 Txt prntBuf[PRNTSZE];
2805 struct sockaddr_in tmpaddr;
2806 char hbMsg[SS_WD_HB_MSG_SIZE];
2813 sprintf(prntBuf,"TX HEARTBEAT REQ Time: %02d:%02d:%02d\n", dt.hour, dt.min, dt.sec);
2817 /* Pack the message */
2818 strcpy(hbMsg, "<HB>REQ</HB>");
2820 /* Send the heartbeat messages to all the configured nodes */
2821 SLock(&osCp.wdCp.wdLock);
2822 for (n=0; n < osCp.wdCp.globWd.numNodes; n++)
/* Unconfigured slot (zero address) — nothing to ping. */
2824 if(osCp.wdCp.globWd.wdsta[n].addr.s_addr == 0)
2829 /* Identify the destination node */
2830 #ifdef SS_WATCHDOG_IPV6
2831 tmpaddr.sin6_len = sizeof(tmpaddr);
2832 tmpaddr.sin6_family = AF_INET6;
2833 tmpaddr.sin6_addr = osCp.wdCp.globWd.wdsta[n].addr;
2834 tmpaddr.sin_port = osCp.wdCp.globWd.wdsta[n].port;
2836 tmpaddr.sin_family = AF_INET;
2837 tmpaddr.sin_addr.s_addr = osCp.wdCp.globWd.wdsta[n].addr.s_addr;
2838 tmpaddr.sin_port = osCp.wdCp.globWd.wdsta[n].port;
2839 #endif /* SS_WATCHDOG_IPV6 */
2841 err = sendto(osCp.wdCp.globWd.sock, hbMsg, strlen(hbMsg), 0, (struct sockaddr *)&tmpaddr, sizeof(struct sockaddr));
2845 sprintf(prntBuf,"ssdSndHrtBtMsg: HB to node [%s:%d] failed status[%d]\n",
2846 inet_ntoa(tmpaddr.sin_addr), tmpaddr.sin_port, errno);
2853 sprintf(prntBuf,"ssdSndHrtBtMsg: HB to node [%s:%d] sent[%d]\n", inet_ntoa(tmpaddr.sin_addr), tmpaddr.sin_port, err);
2858 SUnlock(&osCp.wdCp.wdLock);
2863 #endif /* SS_WATCHDOG */
2867 /* mt022.201 - Modification to fix problem when NOCMDLINE is defined */
2873 * Desc: This function gets command line options.
2882 static Void mtGetOpts(void)
2889 FILE *memOpt; /* memory options file pointer */
2892 /* mt007.301 : Fix related to file based mem config on 64 bit machine */
2898 /*KWORK_FIX: Initializing the variable for avoidning corruption */
2900 /*mt010.301 Fix for reading the variables on 64 bit/32bit platforms correctly */
2906 #ifdef SS_LOCKLESS_MEMORY
2921 osCp.dep.fileOutFp = (FILE *)NULLP;
2923 /* initialize memOpt */
2924 memOpt = (FILE *) NULLP;
2931 while ((ret = SGetOpt(argc, argv, "o:f:s:m:c:")) != EOF)
2936 /* mt001.301 : Additions */
2937 #ifdef SS_MEM_LEAK_STS
2939 cmMemOpenMemLkFile(msOptArg);
2943 osCp.dep.fileOutFp = fopen(msOptArg, "w");
2946 fileBasedMemCfg = TRUE;
2947 memOpt = fopen(msOptArg, "r");
2949 /* if file does not exist or could not be opened then use the
2950 * default memory configuration as defined in mt_ss.h
2952 if (memOpt == (FILE *) NULLP)
2954 sprintf(pBuf, "\nMTSS: Memory configuration file: %s could not\
2955 be opened, using default mem configuration\n", msOptArg);
2960 while (fgets((Txt *)line, 256, memOpt) != NULLP)
2962 if(line[0] == '#' || line[0] < '0' || line[0] > '9') /* Comment line or non numeric character, so skip it and read next line */
2968 case 0: /*** INPUT: Number of regions ***/
2969 sscanf(line, "%ld", (long *) &numReg);
2970 mtMemoCfg.numRegions = numReg;
2971 if(mtMemoCfg.numRegions > SS_MAX_REGS)
2973 printf("\n No. of regions are > SS_MAX_REGS:%d \n",SS_MAX_REGS);
2979 case 1: /*** INPUT: Number of buckets and number of Pools ***/
2980 sscanf(line, "%ld %ld", (long *) &numBkts, (long *) &numPools);
2981 if(numBkts > MT_MAX_BKTS)
2983 printf("\n No. of buckets are > MT_MAX_BKTS :%d \n",MT_MAX_BKTS);
2987 if(numPools > SS_MAX_POOLS_PER_REG)
2989 printf("\n No. of pools are > SS_MAX_POOLS_PER_REG:%d \n",SS_MAX_POOLS_PER_REG);
2994 * Delay updation from local variable to global
2995 * structure of number of regions and heap data to
2996 * counter error conditions present above.
2998 for(idx = 0; idx < cfgNumRegs; idx++)
3000 mtMemoCfg.region[idx].numBkts = numBkts;
3001 cfgRegInfo[idx].region = idx;
3002 cfgRegInfo[idx].numPools = numPools;
3004 * Initialize the pool info as static type with size zero
3006 for(poolIdx = 0; poolIdx < numPools; poolIdx++)
3008 cfgRegInfo[idx].pools[poolIdx].type = SS_POOL_STATIC;
3009 cfgRegInfo[idx].pools[poolIdx].size = 0;
3014 case 2: /*** INPUT: Bucket Id and size of the bucket ***/
3015 if(bktUpdtCnt < numBkts) /* more set of bucket can be added */
3017 sscanf(line, "%ld %ld",(long *)&bktIdx, (long *) &bktSz);
3019 if(bktIdx >= numBkts)
3021 printf("\n Invalid Bucket Id, may be >= the No. of buckets:%ld\n",numBkts);
3026 mtBktInfo[bktIdx].blkSize = bktSz;
3028 if(bktUpdtCnt == numBkts)
3030 i++; /*done reading bkt info, start reading individual region info*/
3034 case 3: /*** INPUT: Region Id (ranges from 0 to numRegions-1) **/
3035 sscanf(line,"%ld",(long *) &regId); /* read region id; was garbled to '(long *) ®Id' — HTML-entity mojibake ('&reg;' -> ®) of '&regId' */
3036 if(regId >= mtMemoCfg.numRegions)
3038 printf("\n Invalid Region Id, may be >= the No. of regions:%d\n",mtMemoCfg.numRegions);
3039 #ifndef XEON_SPECIFIC_CHANGES
3044 mtMemoCfg.region[regId].regionId = regId;
3047 case 4: /*** INPUT: BktId (ranges from 0 to numBkts-1), No. of blks ***/
3048 if(bktUpdtCnt < numBkts)
3050 sscanf(line, "%ld %ld",(long *)&bktIdx, (long *)&bktNum);
3051 if(bktIdx >= numBkts)
3053 printf("\n Invalid Bucket Id, may be >= the No. of buckets:%ld\n",numBkts);
3058 if(bktIdx < MT_MAX_BKTS)
3060 mtMemoCfg.region[regId].bkt[bktIdx].blkSize = mtBktInfo[bktIdx].blkSize;
3061 mtMemoCfg.region[regId].bkt[bktIdx].numBlks = bktNum;
3062 cfgRegInfo[regId].pools[bktIdx].type = SS_POOL_DYNAMIC;
3063 cfgRegInfo[regId].pools[bktIdx].size = mtBktInfo[bktIdx].blkSize - (sizeof(SsMblk)+sizeof(SsDblk));
3066 if(bktUpdtCnt == numBkts)
3073 case 5: /* INPUT: Heapsize ***/
3074 sscanf(line, "%ld", (long *) &heapSz);
3075 mtMemoCfg.region[regId].heapsize = heapSz;
3077 if(regUpdtCnt != mtMemoCfg.numRegions)
3086 #ifdef SS_LOCKLESS_MEMORY
3088 sscanf(line, "%ld", (long *) &numBkts);
3089 mtGlobMemoCfg.numBkts = numBkts;
3090 #ifndef XEON_SPECIFIC_CHANGES
3091 mtDynMemoCfg.numRegions = mtMemoCfg.numRegions;
3094 #ifdef XEON_SPECIFIC_CHANGES
3095 CM_LOG_DEBUG(CM_LOG_ID_MT, "numRegions = %d numBkts = %d\n",
3096 mtDynMemoCfg.numRegions, mtGlobMemoCfg.numBkts);
3097 for(idx = 0; idx < mtDynMemoCfg.numRegions; idx++)
3099 for(idx = 0; idx < mtMemoCfg.numRegions; idx++)
3102 mtDynMemoCfg.region[idx].regionId = idx;
3103 mtDynMemoCfg.region[idx].numBkts = numBkts;
3111 if(bktUpdtCnt < numBkts)
3113 sscanf(line, "%ld %ld %ld %ld %ld %ld", (long *) &bktIdx,
3114 (long *) &bktSz, (long *) &bktNum,
3115 (long *) &bktSetSize, (long *) &bktRelThr,
3116 (long *) &bktAqurThr);
3117 /* Klock work fix ccpu00148484 */
3118 if(bktIdx < SS_MAX_POOLS_PER_REG)
3120 mtGlobMemoCfg.bkt[bktIdx].blkSize = bktSz;
3121 mtGlobMemoCfg.bkt[bktIdx].numBlks = bktNum;
3122 mtGlobMemoCfg.bkt[bktIdx].bucketSetSize = bktSetSize;
3123 #ifdef XEON_SPECIFIC_CHANGES
3124 CM_LOG_DEBUG(CM_LOG_ID_MT, "Pool [%d] blkSize %d numBlks %d bucketSetSize %d\n",
3125 bktUpdtCnt, mtGlobMemoCfg.bkt[bktIdx].blkSize,
3126 mtGlobMemoCfg.bkt[bktIdx].numBlks, mtGlobMemoCfg.bkt[bktIdx].bucketSetSize);
3128 if(bktIdx >= SS_MAX_POOLS_PER_REG)
3130 printf("\nNo. of Buckets/pools are > SS_MAX_POOLS_PER_REG:%d\n",SS_MAX_POOLS_PER_REG);
3136 for(idx = 0; idx < mtMemoCfg.numRegions; idx++)
3138 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetRelThreshold = bktRelThr;
3139 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetAcquireThreshold = bktAqurThr;
3140 #ifdef XEON_SPECIFIC_CHANGES
3141 CM_LOG_DEBUG(CM_LOG_ID_MT, "Pool [%d] blkSetRelThreshold %d blkSetAcquireThreshold %d\n",
3142 bktUpdtCnt, mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetRelThreshold,
3143 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetAcquireThreshold);
3149 #ifdef XEON_SPECIFIC_CHANGES
3150 if(bktUpdtCnt == numBkts)
3156 case 8: /* INPUT: Global Heapsize ***/
3157 sscanf(line, "%ld", (long *) &heapSz);
3158 mtGlobMemoCfg.heapSize = heapSz;
3159 CM_LOG_DEBUG(CM_LOG_ID_MT, "Global Heap size = %d\n", mtGlobMemoCfg.heapSize);
3167 memConfigured = FALSE;
3171 memConfigured = TRUE;
3179 /* mt028.201: modification: multiple procs support related changes */
3180 #ifndef SS_MULTIPLE_PROCS
3183 osCp.procId = PID_STK((ProcId) strtol(msOptArg, NULLP, 0));
3185 osCp.procId = (ProcId) strtol(msOptArg, NULLP, 0);
3188 #else /* SS_MULTIPLE_PROCS */
3192 procId = PID_STK((ProcId) strtol(msOptArg, NULLP, 0));
3194 procId = (ProcId) strtol(msOptArg, NULLP, 0);
3196 SAddProcIdLst(1, &procId);
3199 #endif /* SS_MULTIPLE_PROCS */
3203 osCp.configFilePath = msOptArg;
3227 * Desc: Get options from command line
3229 * Ret: option - success
3231 * EOF - end of options
3233 * Notes: Handles command lines like the following
3236 * then command line should look like this...
3237 * -a foo -b foo1 -c -d foo
3241 * while ((ret = SGetOpt(msArgc, msArgv, "ls")) != EOF )
3246 * nloops = atoi(msArgv[msOptInd]);
3249 * state1 = atoi(msArgv[msOptInd]);
3261 int argc, /* argument count */
3262 char **argv, /* argument value */
3263 char *opts /* options */
3266 /* mt020.201 - Removed for no command line */
3274 /* mt020.201 - Addition for no command line */
3286 /*mt013.301 : Changes as per coding standards*/
3287 if (msOptInd >= (S16) argc || argv[msOptInd][0] == '\0')
3293 if (!strcmp(argv[msOptInd], "--"))
3298 else if (argv[msOptInd][0] != '-')
3306 c = argv[msOptInd][sp];
3307 if (c == ':' || (cp = (S8 *) strchr(opts, c)) == (S8 *) NULLP)
3309 if (argv[msOptInd][++sp] == '\0')
3320 if (argv[msOptInd][sp+1] != '\0') msOptArg = &argv[msOptInd++][sp+1];
3323 if (++msOptInd >= (S16) argc)
3328 else msOptArg = argv[msOptInd++];
3335 if (argv[msOptInd][++sp] == '\0')
3347 #endif /* NOCMDLINE */
3355 * Desc: This function starts system services execution; the
3356 * permanent tasks are started and the system enters a
3373 /* mt025.201 - Modification for adding lock to timer handler */
3374 for (i = 0; i <= SS_MAX_STSKS + 5; i++)
3376 sem_post(&osCp.dep.ssStarted);
3385 * indirect interface functions to system services service user
3391 * Fun: ssdAttachTTsk
3393 * Desc: This function sends the initial tick message to a TAPA
3394 * task if the task is a permanent task.
3405 SsTTskEntry *tTsk /* pointer to TAPA task entry */
3412 if (tTsk->tskType == SS_TSK_PERMANENT)
3414 /* Send a permanent tick message to this task, to start
3417 ret = SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf);
3420 #if (ERRCLASS & ERRCLS_DEBUG)
3421 MTLOGERROR(ERRCLS_DEBUG, EMT001, ret, "SGetMsg() failed");
3426 mInfo = (SsMsgInfo *)mBuf->b_rptr;
3427 mInfo->eventInfo.event = SS_EVNT_PERMTICK;
3429 /* set up post structure */
3430 /* mt028.201: modification: multiple procs support related changes */
3431 #ifndef SS_MULTIPLE_PROCS
3432 mInfo->pst.dstProcId = SFndProcId();
3433 mInfo->pst.srcProcId = SFndProcId();
3434 #else /* SS_MULTIPLE_PROCS */
3435 mInfo->pst.dstProcId = tTsk->proc;
3436 mInfo->pst.srcProcId = tTsk->proc;
3437 #endif /* SS_MULTIPLE_PROCS */
3438 mInfo->pst.selector = SEL_LC_NEW;
3439 mInfo->pst.region = DFLT_REGION;
3440 mInfo->pst.pool = DFLT_POOL;
3441 mInfo->pst.prior = PRIOR3;
3442 mInfo->pst.route = RTESPEC;
3443 mInfo->pst.event = 0;
3444 mInfo->pst.dstEnt = tTsk->ent;
3445 mInfo->pst.dstInst = tTsk->inst;
3446 mInfo->pst.srcEnt = tTsk->ent;
3447 mInfo->pst.srcInst = tTsk->inst;
3449 ret = ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
3450 (tTsk->tskPrior * SS_MAX_MSG_PRI) + PRIOR3);
3456 #if (ERRCLASS & ERRCLS_DEBUG)
3457 MTLOGERROR(ERRCLS_DEBUG, EMT002, ret,
3458 "Could not write to demand queue");
3471 * Fun: ssdDetachTTsk
3473 * Desc: Does nothing.
3484 SsTTskEntry *tTsk /* pointer to TAPA task entry */
3494 * Fun: ssdCreateSTsk
3496 * Desc: This function creates a system task. A thread is started
3497 * on the system task handler function defined later.
3508 SsSTskEntry *sTsk /* pointer to system task entry */
3512 pthread_attr_t attr;
3513 /* struct sched_param param_sched;*/
3515 #ifdef SS_THR_REG_MAP
3516 uint32_t threadCreated = FALSE;
3521 #ifdef SS_SINGLE_THREADED
3522 /* mt001.301 : Additions */
3524 #ifdef SS_MULTICORE_SUPPORT
3525 if (osCp.numSTsks > 1)
3527 if (osCp.numSTsks > 0)
3528 #endif /* SS_MULTICORE_SUPPORT */
3530 #ifdef SS_MULTICORE_SUPPORT
3531 if (osCp.numSTsks > 3)
3533 if (osCp.numSTsks > 2)
3534 #endif /* SS_MULTICORE_SUPPORT */
3535 #endif /* SS_WATCHDOG */
3542 /* set the current executing entity and instance IDs to
3543 * 'not configured'. create the lock to access them.
3545 sTsk->dep.ent = ENTNC;
3546 sTsk->dep.inst = INSTNC;
3549 /* create the thread */
3550 pthread_attr_init(&attr);
3551 ssdSetPthreadAttr(sTsk->tskPrior, &attr);
3553 printf("Creating thread here %s %d\n", __FILE__, __LINE__);
3554 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
3555 if (sTsk->tskPrior == 0)
3557 printf("Creating RT thread #######################\n");
3558 #ifdef SS_THR_REG_MAP
3559 /* When the thread is created, we check for the memory mapping table if
3560 * threadId can be placed in thread memory map table. If it is not able to place
3561 * threadId is stored in tmporary array. Once thread is created successful,
3562 * thread_cancel is sent for each thread which are created before. All the
3563 * threads are made to wait on sema which is cancel point for thread.
3565 while(threadCreated == FALSE)
3568 ret = pthread_create(&sTsk->dep.tId, &attr, mtTskHdlr, (Ptr)sTsk);
3571 DU_LOG("\nDU APP : Failed to create thread. Cause[%d]",ret);
3572 pthread_attr_destroy(&attr);
3574 #if (ERRCLASS & ERRCLS_DEBUG)
3575 MTLOGERROR(ERRCLS_DEBUG, EMT004, ERRZERO, "Could not create thread");
3580 #ifdef SS_THR_REG_MAP
3581 threadCreated = ssCheckAndAddMemoryRegionMap(sTsk->dep.tId,
3589 #ifdef SS_THR_REG_MAP
3590 /* When the thread is created, we check for the memory mapping table if
3591 * threadId can be placed in thread memory map table. If it is not able to place
3592 * threadId is stored in tmporary array. Once thread is created successful,
3593 * thread_cancel is sent for each thread which are created before. All the
3594 * threads are made to wait on sema which is cancel point for thread.
3596 while(threadCreated == FALSE)
3599 ret = pthread_create(&sTsk->dep.tId, &attr, mtTskHdlr, (Ptr)sTsk);
3603 /* mt020.201 - Addition for destroying thread attribute object attr */
3604 pthread_attr_destroy(&attr);
3606 #if (ERRCLASS & ERRCLS_DEBUG)
3607 MTLOGERROR(ERRCLS_DEBUG, EMT004, ERRZERO, "Could not create thread");
3612 #ifdef SS_THR_REG_MAP
3613 threadCreated = ssCheckAndAddMemoryRegionMap(sTsk->dep.tId,
3620 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
3621 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
3623 static uint32_t stLwpId = 3;
3624 sTsk->dep.lwpId = ++stLwpId;
3626 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
3628 /* mt020.201 - Addition for destroying thread attribute object attr */
3629 pthread_attr_destroy(&attr);
3638 pthread_attr_t* attr,
3639 void *(*start_routine) (void *),
3644 #ifdef SS_THR_REG_MAP
3645 uint32_t threadCreated = FALSE;
3648 SPThreadCreateArg* threadArg = (SPThreadCreateArg*)malloc(sizeof(SPThreadCreateArg));
3649 /* Klock work fix ccpu00148484 */
3650 if(threadArg == NULLP)
3654 threadArg->argument = arg;
3655 threadArg->start_routine = start_routine;
3658 printf("Creating thread here %s %d\n", __FILE__, __LINE__);
3660 #ifdef SS_THR_REG_MAP
3661 /* When the thread is created, we check for the memory mapping table if
3662 * threadId can be placed in thread memory map table. If it is not able to place
3663 * threadId is stored in tmporary array. Once thread is created successful,
3664 * thread_cancel is sent for each thread which are created before. All the
3665 * threads are made to wait on sema which is cancel point for thread.
3667 while(threadCreated == FALSE)
3670 /*pthreadCreateHdlr */
3671 if (((retVal = pthread_create(tid, attr, pthreadCreateHdlr, threadArg))) != 0)
3676 #ifdef SS_THR_REG_MAP
3677 threadCreated = ssCheckAndAddMemoryRegionMap(*tid, SS_MAX_REGS - 1);
3688 * Fun: Set Pthread Attributes
3690 * Desc: This function is used to set various explicit
3691 * pthread attributes like, priority scheduling,etc
3701 static S16 ssdSetPthreadAttr
3704 pthread_attr_t *attr
3707 struct sched_param param;
3710 SMemSet(¶m, 0, sizeof(param));
3712 #ifndef TENB_T2K3K_SPECIFIC_CHANGES
3713 param.sched_priority = 100 - 1 - tskPrior;
3715 param.sched_priority = 100 - 10 - tskPrior;
3718 #if 1/* Nawas:: Overriding DL RLC prority to one higher than iccserv */
3719 /* TODO:: This can be avoided by reducing the priority
3720 * of iccserv thread in l1_master.sh*/
3722 if (clusterMode == RADIO_CLUSTER_MODE)
3724 if(tskPrior == PRIOR1)
3726 param.sched_priority = 91;
3733 printf("Set priority %u\n", param.sched_priority);
3735 /* Set Scheduler to explicit, without this non of the below
3736 pthread attr works */
3737 #ifdef TENB_RTLIN_CHANGES
3738 pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
3741 pthread_attr_setstacksize(attr, (size_t)MT_TASK_STACK);
3742 pthread_attr_setscope(attr, PTHREAD_SCOPE_SYSTEM);
3743 pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED);
3744 #ifdef TENB_RTLIN_CHANGES
3745 pthread_attr_setschedpolicy(attr, SCHED_FIFO);
3747 pthread_attr_setschedparam(attr, ¶m);
3751 } /* ssdSetPthreadAttr */
3753 /************* multi-core support **************/
3754 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
3755 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
3759 * Fun: Get the current core/cpu affinity for a thread/lwp
3761 * Desc: This function is used to get the current processor/core
3762 * affinity for a a system task (thread/lwp). It sets the
3763 * affinity based on the mode supplied by the caller.
3766 * RFAILED - failed, general (optional)
3775 SSTskId *tskId, /* filled in with system task ID */
3776 uint32_t *coreId /* the core/processor id to which the affinity is set */
3786 uint32_t cpuInd = 0;
3787 /*mt013.301 :Fix for TRACE5 feature crash due to missing TRC MACRO*/
3790 uint32_t lwpId = *tskId;
3794 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3796 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3798 tId = osCp.sTskTbl[tskInd].dep.tId;
3803 /* if tskId is not found in the tskTbl */
3804 if (tskInd == SS_MAX_STSKS)
3806 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3811 /* initialize the cpu mask */
3814 /* set thread affinity for linux */
3815 if (pthread_getaffinity_np(tId, sizeof(cpuSet), &cpuSet) < 0)
3817 #if (ERRCLASS & ERRCLS_DEBUG)
3818 MTLOGERROR(ERRCLS_DEBUG, EMT037, ERRZERO, "Could not get thread affinity\n");
3821 } /* end if pthread_setaffinity fails */
3823 for (cpuInd = 0; cpuInd <CPU_SETSIZE; cpuInd++)
3825 if (CPU_ISSET (cpuInd, & cpuSet))
3834 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3836 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3838 lwpId = osCp.sTskTbl[tskInd].dep.lwpId;
3843 /* if tskId is not found in the tskTbl */
3844 if (tskInd == SS_MAX_STSKS)
3846 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3850 /* set thread affinity for Solaris */
3851 if (processor_bind(P_LWPID, lwpId, PBIND_QUERY, (processorid_t*)coreId) < 0)
3853 #if (ERRCLASS & ERRCLS_DEBUG)
3854 MTLOGERROR(ERRCLS_DEBUG, EMT037, ERRZERO, "Could not get thread affinity\n");
3857 } /* end if processor_bind fails */
3860 #endif /* SS_LINUX */
3864 } /* ssdGetAffinity */
3869 * Fun: Set the core/cpu affinity for a thread/lwp
3871 * Desc: This function is used to set processor/core affinity for a
3872 * a system task (thread/lwp). It sets the affinity based on the
3873 * mode supplied by the caller.
3876 * RFAILED - failed, general (optional)
3885 SSTskId *tskId, /* filled in with system task ID */
3886 uint32_t coreId /* the core/processor id to which the affinity has to be set */
3890 uint32_t tskInd = 0;
3895 /*mt013.301 :Fix for TRACE5 feature crash due to missing TRC MACRO*/
3898 uint32_t lwpId = *tskId;
3904 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3906 /* Here tskId can not be used as index as the task may be terminated if
3907 there is a TERM even for that tsk, thus breaking the task Id numbering
3909 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3911 tId = osCp.sTskTbl[tskInd].dep.tId;
3916 /* if tskId is not found in the tskTbl */
3917 if (tskInd == SS_MAX_STSKS)
3919 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3923 /* initialize the cpu mask */
3926 /* set the cpu mask */
3927 CPU_SET(coreId, &cpuSet);
3929 /* set thread affinity for linux */
3930 if (pthread_setaffinity_np(tId, sizeof(cpuSet), &cpuSet) < 0)
3932 #if (ERRCLASS & ERRCLS_DEBUG)
3933 MTLOGERROR(ERRCLS_DEBUG, EMT038, ERRZERO, "Could not set thread affinity\n");
3936 } /* end if pthread_setaffinity fails */
3940 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3942 /* comment: modify to use tskId as lwpId to avoid the loop and the new lwpId variable in dep */
3943 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3945 lwpId = osCp.sTskTbl[tskInd].dep.lwpId;
3950 /* if tskId is not found in the tskTbl */
3951 if (tskInd == SS_MAX_STSKS)
3953 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3957 /* set thread affinity for Solaris */
3958 if (processor_bind(P_LWPID, lwpId, coreId, NULL) < 0)
3960 #if (ERRCLASS & ERRCLS_DEBUG)
3961 MTLOGERROR(ERRCLS_DEBUG, EMT038, ERRZERO, "Could not set thread affinity\n");
3964 } /* end if processor_bind fails */
3967 #endif /* SS_LINUX */
3969 } /* ssdSetAffinity */
3971 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
3972 /************ end multi-core support *************/
3977 * Fun: ssdDestroySTsk
3979 * Desc: This function destroys a system task. A terminate
3980 * event message is sent to the thread function.
3991 SsSTskEntry *sTsk /* pointer to system task entry */
4000 /* we send a message to this system task to tell it to die */
4001 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
4004 #if (ERRCLASS & ERRCLASS_DEBUG)
4005 MTLOGERROR(ERRCLS_DEBUG, EMT005, ERRZERO, "Could not get a message");
4011 mInfo = (SsMsgInfo *)mBuf->b_rptr;
4012 mInfo->eventInfo.event = SS_EVNT_TERM;
4014 if (ssDmndQPutLast(&sTsk->dQ, mBuf, 0) != ROK)
4018 #if (ERRCLASS & ERRCLASS_DEBUG)
4019 MTLOGERROR(ERRCLS_DEBUG, EMT006, ERRZERO,
4020 "Could not write to demand queue");
4030 /* mt023.201 - Added SThreadYield function to yield CPU
4034 * Desc: This function defers thread execution to any other ready
4045 S16 SThreadYield(void)
/* Defers execution of the calling thread to any other ready thread.
 * Linux branch: select() with a zero timeout; other UNIXes: sleep(0).
 * Returns ROK on success, RFAILED otherwise.
 * NOTE(review): the '#ifdef SS_LINUX' line and the declaration and
 * zero-initialisation of 'tw' (the timeval read by select) are elided
 * in this dump — confirm against the full file. */
4049 /* mt024.201 - separated Linux and other UNIX implementations
4055 /* Set sleep value to 0 to yield CPU */
4059 return (select(0,0,0,0,&tw) == 0 ? ROK : RFAILED);
4061 #else /* other UNICes */
4063 return (sleep(0) == 0 ? ROK : RFAILED);
4065 #endif /* SS_LINUX */
4072 * Fun: Register timer
4074 * Desc: This function is used to register a timer
4075 * function for the service user. System services
4076 * will invoke the timer activation function
4077 * passed to it at the specified intervals.
4081 * Notes: Timing is handled by the common timers. The
4082 * ticks are handled by a thread that uses
4083 * nanosleep() and thus timing precision will not
4091 SsTmrEntry *tmr /* pointer to timer entry */
4099 /* initialize common timers */
4100 cmInitTimers(tmr->dep.timers, TMR_DEF_MAX);
4103 /* start the timer */
4104 arg.tq = osCp.dep.tmrTq;
4105 arg.tqCp = &osCp.dep.tmrTqCp;
4106 arg.timers = tmr->dep.timers;
4111 arg.max = TMR_DEF_MAX;
4112 arg.wait = tmr->interval;
4122 * Fun: Deregister timer
4124 * Desc: This function is used to deregister a timer function.
4135 SsTmrEntry *tmr /* pointer to timer entry */
4143 /* stop the timer */
4144 arg.tq = osCp.dep.tmrTq;
4145 arg.tqCp = &osCp.dep.tmrTqCp;
4146 arg.timers = tmr->dep.timers;
4151 arg.max = TMR_DEF_MAX;
4152 arg.wait = tmr->interval;
4162 * Fun: Critical error
4164 * Desc: This function is called when a critical error occurs.
4175 Seq seq, /* sequence number */
4176 Reason reason /* reset reason */
4186 /* get calling task ID */
4187 tId = pthread_self();
4190 /* set up the message to display */
4191 sprintf(errBuf, "\n\nFATAL ERROR - taskid = %x, errno = %d,"
4192 "reason = %d\n\n", (uint8_t)tId, seq, reason);
4196 /* delete all system tasks */
4197 for (i = 0; i < SS_MAX_STSKS; i++)
4199 if (osCp.sTskTbl[i].used
4200 && !pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
4202 pthread_kill(osCp.sTskTbl[i].dep.tId, SIGKILL);
4208 pthread_exit(NULLP);
4211 /* won't reach here */
4220 * Desc: This function is called to log an error.
4231 Ent ent, /* Calling layer's entity id */
4232 Inst inst, /* Calling layer's instance id */
4233 ProcId procId, /* Calling layer's processor id */
4234 Txt *file, /* file name where error occured */
4235 S32 line, /* line in file where error occured */
4236 ErrCls errCls, /* error class */
4237 ErrCode errCode, /* layer unique error code */
4238 ErrVal errVal, /* error value */
4239 Txt *errDesc /* description of error */
4252 /* get calling task ID */
4254 tId = pthread_self();
4260 case ERRCLS_ADD_RES:
4261 errClsMsg = "ERRCLS_ADD_RES";
4264 case ERRCLS_INT_PAR:
4265 errClsMsg = "ERRCLS_INT_PAR";
4269 errClsMsg = "ERRCLS_DEBUG";
4272 /* mt028.201 : Addition - ERRCLS_FTHA changes */
4274 errClsMsg = "ERRCLS_FTHA";
4278 errClsMsg = "INVALID ERROR CLASS!";
4283 /*mt009.301 Fixed 64BIT compilation warnings*/
4286 "\nmtss(posix): sw error: ent: %03d inst: %03d proc id: %03d \n"
4287 "file: %s line: %03d errcode: %05d errcls: %s\n"
4288 "errval: %05d errdesc: %s\n",
4289 ent, inst, procId, file, line, errCode, errClsMsg, errVal, errDesc);
4292 "\nmtss(posix): sw error: ent: %03d inst: %03d proc id: %03d \n"
4293 "file: %s line: %03ld errcode: %05ld errcls: %s\n"
4294 "errval: %05ld errdesc: %s\n",
4295 ent, inst, procId, file, line, errCode, errClsMsg, errVal, errDesc);
4297 SDisplay(0, errBuf);
4298 /* mt001.301 : Additions */
4299 #ifdef SS_LOGGER_SUPPORT
4301 #endif /* SS_LOGGER_SUPPORT */
4305 /* debug errors halt the system */
4306 if (errCls == ERRCLS_DEBUG)
4308 /* mt001.301 : Additions */
4309 #ifdef SS_LOGGER_SUPPORT
4311 #endif /* SS_LOGGER_SUPPORT */
4312 /* delete all system tasks */
4313 for (i = 0; i < SS_MAX_STSKS; i++)
4315 if (osCp.sTskTbl[i].used
4316 && !pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
4318 pthread_kill(osCp.sTskTbl[i].dep.tId, SIGKILL);
4324 pthread_exit(NULLP);
4336 * Fun: Register driver task
4338 * Desc: This function is called to register the handlers for a
4350 SsDrvrTskEntry *drvrTsk /* driver task entry */
4357 /* mt001.30 : Additions */
4360 * Fun: Deregister driver task
4362 * Desc: This function is called to deregister the handlers for a
4374 SsDrvrTskEntry *drvrTsk /* driver task entry */
4387 * mt003.301 Additions - SDeRegTTsk fix
4389 #ifdef SS_MULTIPLE_PROCS
4396 #else /*SS_MULTIPLE_PROCS*/
4402 #endif /*SS_MULTIPLE_PROCS*/
4404 #ifdef SS_MULTIPLE_PROCS
4417 /* We check the sTsk element; if it is not NULLP, the
4418 * task is attached. So we have to detach it before
4419 * deregistering the task.
4421 ret = SLock(&osCp.sTskTblLock);
4424 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not lock system task table");
4427 SS_ACQUIRE_ALL_SEMA(&osCp.tTskTblSem, ret);
4430 #if (ERRCLASS & ERRCLS_DEBUG)
4431 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not lock TAPA task table");
4433 if ( SUnlock(&osCp.sTskTblLock) != ROK)
4435 #if (ERRCLASS & ERRCLS_DEBUG)
4436 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not Unlock system task table");
4444 #ifdef SS_MULTIPLE_PROCS
4446 if (tTsk->initTsk != NULLP)
4449 (Void)(*(tTsk->initTsk))(proc, ent, inst,
4452 &(osCp.tTskTbl[idx].xxCb));
4454 (Void)(*(tTsk->initTsk))(proc, ent, inst,
4457 &(osCp.tTskTbl[idx].xxCb));
4458 #endif /* USE_MEMCAL */
4460 #endif /* SS_MULTIPLE_PROCS */
4462 if (tTsk->sTsk != NULLP)
4466 sTsk->dep.ent = ent;
4467 sTsk->dep.inst = inst;
4469 for (n = 0; n < SS_MAX_TTSKS; n++)
4471 if (sTsk->tTsks[n] == idx)
4473 sTsk->tTsks[n] = SS_INVALID_IDX;
4479 /* call the implementation to detach the task */
4480 ssdDetachTTsk(tTsk);
4482 sTsk->dep.ent = ENTNC;
4483 sTsk->dep.inst = INSTNC;
4486 /* Now we empty the entry for this task and update the table
4489 #ifdef SS_MULTIPLE_PROCS
4490 osCp.tTskIds[procIdx][ent][inst] = SS_TSKNC;
4491 #else /* SS_MULTIPLE_PROCS */
4492 osCp.tTskIds[ent][inst] = SS_TSKNC;
4493 #endif /* SS_MULTIPLE_PROCS */
4496 #ifdef SS_MULTIPLE_PROCS
4497 tTsk->proc = PROCNC;
4498 #endif /* SS_MULTIPLE_PROCS */
4500 tTsk->inst = INSTNC;
4501 tTsk->tskType = TTUND;
4502 tTsk->initTsk = NULLP;
4503 tTsk->actvTsk = NULLP;
4506 tTsk->nxt = osCp.nxtTTskEntry;
4507 osCp.nxtTTskEntry = idx;
4510 #ifdef SS_MULTIPLE_PROCS
4511 /* mark the control block for this task as invalid */
4512 osCp.tTskTbl[idx].xxCb = NULLP;
4515 SS_RELEASE_ALL_SEMA(&osCp.tTskTblSem);
4516 if ( SUnlock(&osCp.sTskTblLock) != ROK)
4518 #if (ERRCLASS & ERRCLS_DEBUG)
4519 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not Unlock system task table");
4526 //#ifndef SPLIT_RLC_DL_TASK
4527 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
4528 #if defined (L2_L3_SPLIT) && defined(SPLIT_RLC_DL_TASK)
4529 Void ysMtTskHdlr(Void);
4530 Void ysMtPollPhyMsg(uint8_t region);
4531 Void ysMtRcvPhyMsg(Void);
4532 Void *mtTskHdlrT2kL2
4534 Ptr tskPtr /* pointer to task entry */
4540 /* wait for SS to come up */
4541 /* It is required to block on this semaphore before starting actual processing of
4542 the thread becasue the creator of this thread might want to cance it without
4543 doing any processing. When this semaphore is released, means the creator gives
4544 the go ahead for actual processing and we should never come back to this point */
4545 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4554 ysMtPollPhyMsg(0); /* blocks, waiting for messages for L2
4555 * (processes L1 msgs) */
4561 Void ysMtTskHdlr(Void);
4562 Void YsPhyRecvMsg();
4563 Void *mtTskHdlrT2kL2
4565 Ptr tskPtr /* pointer to task entry */
4571 /* get out the system task entry from the parameter */
4572 sTsk = (SsSTskEntry *) tskPtr;
4574 /* wait for SS to come up */
4575 /* It is required to block on this semaphore before starting actual processing of
4576 the thread becasue the creator of this thread might want to cance it without
4577 doing any processing. When this semaphore is released, means the creator gives
4578 the go ahead for actual processing and we should never come back to this point */
4579 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4582 #ifndef RGL_SPECIFIC_CHANGES
4590 #ifdef V5GTF_SPECIFIC_CHANGES
4593 ysMtTskHdlr(); /* blocks, waiting for messages for L2
4594 * (processes L1 msgs) */
4596 /* get a message from the demand queue */
4598 #ifdef RLC_MAC_DAT_REQ_RBUF
4599 rgDlDatReqBatchProc();
4602 ret = mtTskHdlMsg(sTsk);
4605 /* exit the for loop here */
4608 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
4615 #endif /* TENB_T2K3K_SPECIFIC_CHANGES */
4618 void *pthreadCreateHdlr(void * arg)
/* Thread trampoline used by SPThreadCreate: blocks on the ssStarted
 * semaphore (the go-ahead / cancellation point for newly created
 * threads), then invokes the caller-supplied start routine with its
 * argument. */
4621 SPThreadCreateArg* pthreadCreateArg = (SPThreadCreateArg*)arg;
4622 /* mt038.201 changed how sem_wait is called */
/* NOTE(review): '=' binds looser than '!=', so 'ret' receives the
 * boolean result of (sem_wait(...) != ROK), not sem_wait's raw return
 * value. The EINTR retry still works; confirm 'ret' is not relied on
 * as the raw return code afterwards. */
4623 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4626 pthreadCreateArg->start_routine(pthreadCreateArg->argument);
4634 * Desc: This is the system task handler function. It blocks on
4635 * the system task's demand queue. On receiving a message,
4636 * it identifies the target TAPA task, verifies that the
4637 * TAPA task belongs to this system task and if so, calls
4638 * the activation function of that TAPA task with the
4639 * received message. The task activation function or the
4640 * timer activation function may be called.
4642 * Ret: (thread function)
4651 Ptr tskPtr /* pointer to task entry */
4657 /* get out the system task entry from the parameter */
4658 sTsk = (SsSTskEntry *) tskPtr;
4661 /* wait for SS to come up */
4663 /* mt038.201 changed how sem_wait is called */
4664 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4666 #ifdef XEON_SPECIFIC_CHANGES
4667 printf("\n**********MT Task Handler********\n");
4671 /* Wait for a message from the demand queue */
4672 #ifdef SS_CDMNDQ_SUPPORT
4673 ret = ssCDmndQWait(&sTsk->dQ);
4675 ret = ssDmndQWait(&sTsk->dQ);
4680 ret = mtTskHdlMsg(sTsk);
4695 * Desc: This is the system task handler function. It blocks on
4696 * the system task's demand queue. On receiving a message,
4697 * it identifies the target TAPA task, verifies that the
4698 * TAPA task belongs to this system task and if so, calls
4699 * the activation function of that TAPA task with the
4700 * received message. The task activation function or the
4701 * timer activation function may be called.
4703 * Ret: (thread function)
4725 /* mt028.201: modification: multiple procs support related changes */
4726 #ifndef SS_MULTIPLE_PROCS
4728 PAIFTMRS16 tmrActvFnMt = NULLP;
4730 /* mt015.301 Initialized the timer activation functions with NULLP */
4731 PFS16 tmrActvFn = NULLP;
4733 PAIFTMRS16 tmrActvFn;
4735 #endif /* SS_MULTIPLE_PROCS */
4736 /* mt003.301 Modifications */
4737 #ifdef SS_THREAD_PROFILE
4739 #endif /* SS_THREAD_PROFILE */
4742 ret = ssDmndQGet(&sTsk->dQ, &mBuf, SS_DQ_FIRST);
4745 /* nothing to receive */
4749 /* if we can't lock this system task entry, return the message */
4750 ret = SLock(&sTsk->lock);
4754 #if (ERRCLASS & ERRCLS_DEBUG)
4755 MTLOGERROR(ERRCLS_DEBUG, EMT007, (ErrVal) ret,
4756 "Could not lock system task entry");
4766 mBuf2 = mBuf->b_next;
4768 /* find out what kind of message this is */
4769 mInfo = (SsMsgInfo *)mBuf->b_rptr;
4770 #ifdef SS_MEM_WL_DEBUG
4771 mtTskBuffer1 = mBuf2;
4773 mtTskBuffer2 = mBuf2->b_next;
4775 if(mInfo == 0x5050505)
4779 cmAnalyseBtInfo((PTR) mBuf,4);
4781 printf("\n In trouble .... \n");
4783 else if (mInfo == 0x2020202)
4786 cmAnalyseBtInfo((PTR) mBuf,1);
4787 printf("\n In trouble .... \n");
4789 #endif /* SS_MEM_WL_DEBUG */
4790 switch (mInfo->eventInfo.event)
4792 /* this is a termination event, we die */
4794 /* release the message */
4797 /* Unlock the system task entry and lock the system
4798 * task table to clean our entry up.
4800 SUnlock(&sTsk->lock);
4802 ret = SLock(&osCp.sTskTblLock);
4806 #if (ERRCLASS & ERRCLS_DEBUG)
4807 MTLOGERROR(ERRCLS_DEBUG, EMT008, (ErrVal) ret,
4808 "Could not lock system task table");
4810 /* what to do here? */
4814 /* clean up the system task entry */
4817 /* mt003.301 Modifications - SDeRegTTsk */
4818 /* sTsk->numTTsks = 0; */
4819 SDestroyLock(&sTsk->lock);
4820 ssDestroyDmndQ(&sTsk->dQ);
4822 /* lock for current executing TAPA task ID */
4824 /* make this entry available in the system task table */
4825 sTsk->nxt = osCp.nxtSTskEntry;
4826 for (i = 0; i < SS_MAX_STSKS; i++)
4828 if (sTsk == &osCp.sTskTbl[i])
4830 osCp.nxtSTskEntry = i;
4837 /* unlock the system task table */
4838 SUnlock(&osCp.sTskTblLock);
4843 /* this is a data message or a permanent task keep-alive message */
4845 case SS_EVNT_PERMTICK:
4846 /* message to a task. find the destination task */
4847 /* mt028.201: modification: multiple procs support related changes */
4848 #ifdef SS_MULTIPLE_PROCS
4849 procIdIdx = SGetProcIdIdx(mInfo->pst.dstProcId);
4851 if (procIdIdx == SS_INV_PROCID_IDX)
4857 idx = osCp.tTskIds[procIdIdx][mInfo->pst.dstEnt][mInfo->pst.dstInst];
4858 #else /* SS_MULTIPLE_PROCS */
4859 idx = osCp.tTskIds[mInfo->pst.dstEnt][mInfo->pst.dstInst];
4860 #endif /* SS_MULTIPLE_PROCS */
4862 /* verify that it hasn't been deregistered */
4863 if (idx == SS_TSKNC)
4869 /* verify that this system task is still running it */
4870 tTsk = &osCp.tTskTbl[idx];
4871 if (tTsk->sTsk != sTsk)
4877 /* set the current executing TAPA task ID */
4878 sTsk->dep.ent = mInfo->pst.dstEnt;
4879 sTsk->dep.inst = mInfo->pst.dstInst;
4881 /* copy the Pst structure into a local duplicate */
4882 for (i = 0; i < (S16) sizeof(Pst); i++)
4883 *(((uint8_t *)(&nPst)) + i) = *(((uint8_t *)&mInfo->pst) + i);
4885 /* Give the message to the task activation function. If
4886 * its a normal data message, we pass it, if this is a
4887 * keep-alive message for a permanent task then we pass
4888 * NULLP in place of the message to the task activation
4891 if (mInfo->eventInfo.event == SS_EVNT_DATA)
4893 #ifndef RGL_SPECIFIC_CHANGES
4894 #ifdef SS_TSKLOG_ENABLE
4895 uint32_t t = MacGetTick();
4898 /* mt003.301 Modifications */
4899 #if SS_THREAD_PROFILE
4900 tTsk->curEvent = nPst.event;
4902 #endif /* SS_THREAD_PROFILE */
4903 tTsk->actvTsk(&nPst, mBuf);
4904 #ifndef RGL_SPECIFIC_CHANGES
4905 #ifdef SS_TSKLOG_ENABLE
4906 SStopTask(t,PID_SSI_TSK);
4909 #if SS_THREAD_PROFILE
4911 tTsk->curEvtTime = (uint32_t)(et2 - et1);
4912 tTsk->totTime += (uint64_t)tTsk->curEvtTime;
4913 #endif /* SS_THREAD_PROFILE */
4917 #if (ERRCLASS & ERRCLS_DEBUG)
4918 /* this message should only come to a permanent task */
4919 if (tTsk->tskType != SS_TSK_PERMANENT)
4921 MTLOGERROR(ERRCLS_DEBUG, EMT009, ERRZERO, "Logic failure");
4925 tTsk->actvTsk(&nPst, NULLP);
4927 /* We need to re-send this message back to ourselves so
4928 * the permanent task continues to run.
4930 /* Check if this task got deregistered or detached
4931 * by the activation function; if so, there's nothing
4932 * more to do here, otherwise go ahead.
4935 if (tTsk->used == TRUE && tTsk->sTsk != NULLP)
4937 ret = ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
4938 ((tTsk->tskPrior) * SS_MAX_MSG_PRI) +
4942 /* failure here is a real problem */
4945 #if (ERRCLASS & ERRCLS_DEBUG)
4946 MTLOGERROR(ERRCLS_DEBUG, EMT010, ERRZERO,
4947 "Could not write to demand queue");
4953 /* unset the current executing TAPA task ID */
4954 sTsk->dep.ent = ENTNC;
4955 sTsk->dep.inst = INSTNC;
4960 /* timer event. find the timer entry */
4961 idx = mInfo->eventInfo.u.tmr.tmrIdx;
4963 /* lock the timer table, coz we're going to peek in it */
4964 ret = SLock(&osCp.tmrTblLock);
4968 #if (ERRCLASS & ERRCLS_DEBUG)
4969 MTLOGERROR(ERRCLS_DEBUG, EMT011, (ErrVal) ret,
4970 "Could not lock timer table");
4976 /* Verify that this timer entry is still around and that it
4977 * belongs to our task.
4979 if (osCp.tmrTbl[idx].used == FALSE
4980 /* mt028.201: modification: multiple procs support related changes */
4981 #ifdef SS_MULTIPLE_PROCS
4982 || osCp.tmrTbl[idx].ownerProc != mInfo->pst.dstProcId
4983 #endif /* SS_MULTIPLE_PROCS */
4984 || osCp.tmrTbl[idx].ownerEnt != mInfo->pst.dstEnt
4985 || osCp.tmrTbl[idx].ownerInst != mInfo->pst.dstInst)
4987 SUnlock(&osCp.tmrTblLock);
4992 /* mt005.21: addition */
4993 /* set the current executing TAPA task ID */
4994 sTsk->dep.ent = mInfo->pst.dstEnt;
4995 sTsk->dep.inst = mInfo->pst.dstInst;
4997 #ifndef SS_MULTIPLE_PROCS
4999 /*mt006.301 Adding Initializing the tmrActvFnMt*/
5000 tmrActvFnMt = NULLP;
5001 if (osCp.tmrTbl[idx].ssTmrActvFn.mtFlag == TRUE)
5003 tmrActvFnMt = osCp.tmrTbl[idx].ssTmrActvFn.actvFnc.tmrActvFnMt;
5009 tmrActvFn = osCp.tmrTbl[idx].ssTmrActvFn.actvFnc.tmrActvFn;
5012 /* unlock the timer table */
5013 SUnlock(&osCp.tmrTblLock);
5015 /* activate the timer function */
5016 /* mt028.201: modification: multiple procs support related changes */
5017 #ifndef SS_MULTIPLE_PROCS
5021 tmrActvFnMt(osCp.tmrTbl[idx].ownerEnt,
5022 osCp.tmrTbl[idx].ownerInst);
5030 tmrActvFn(osCp.tmrTbl[idx].ownerProc, osCp.tmrTbl[idx].ownerEnt,
5031 osCp.tmrTbl[idx].ownerInst);
5032 #endif /* SS_MULTIPLE_PROCS */
5034 /*mt005.21: addition */
5035 /* unset the current executing TAPA task ID */
5036 sTsk->dep.ent = ENTNC;
5037 sTsk->dep.inst = INSTNC;
5040 /* return the message buffer */
5044 * mt003.301 - SDeRegTTsk fix
5046 case SS_EVNT_TTSK_TERM:
5047 #ifdef SS_MULTIPLE_PROCS
5048 procIdIdx = SGetProcIdIdx(mInfo->pst.dstProcId);
5050 if (procIdIdx == SS_INV_PROCID_IDX)
5056 idx = osCp.tTskIds[procIdIdx][mInfo->pst.dstEnt][mInfo->pst.dstInst];
5057 #else /* SS_MULTIPLE_PROCS */
5058 idx = osCp.tTskIds[mInfo->pst.dstEnt][mInfo->pst.dstInst];
5059 #endif /* SS_MULTIPLE_PROCS */
5061 /* verify that it hasn't been deregistered */
5062 if (idx == SS_TSKNC)
5068 /* verify that this system task is still running it */
5069 tTsk = &osCp.tTskTbl[idx];
5070 if (tTsk->sTsk != sTsk)
5075 #ifdef SS_MULTIPLE_PROCS
5076 ssdProcTTskTerm(procIdIdx, tTsk, idx);
5078 ssdProcTTskTerm(tTsk, idx);
5084 #if (ERRCLASS & ERRCLS_DEBUG)
5085 MTLOGERROR(ERRCLS_DEBUG, EMT012, (ErrVal) ret,
5092 } while (mBuf != NULLP);
5095 /* unlock the system task entry */
5096 SUnlock(&sTsk->lock);
5099 /* yield for other threads */
5100 /* mt024.201 - changed to use SSI SThreadYield instead of sleep */
5109 * Fun: mtTmrHdlrPublic
/* Public entry to drive one pass of the common timer processing: takes the
 * timer-table lock, runs cmPrcTmr() over the MTSS timer queue with mtTimeout
 * as the per-expiry callback, then releases the lock.
 * NOTE(review): lines are missing from this listing (braces/#endif/return
 * dropped by extraction) — reconcile against the full file before editing. */
5111 Void mtTmrHdlrPublic()
5113 if (SLock(&osCp.tmrTblLock) != ROK)
5115 #if (ERRCLASS & ERRCLS_DEBUG)
5116 MTLOGERROR(ERRCLS_DEBUG, EMT016, ERRZERO, "Could not lock timer table");
/* cmPrcTmr walks the timer queue and invokes mtTimeout for each expired
 * entry; it runs under tmrTblLock (see mt008.301 note elsewhere in file). */
5120 cmPrcTmr(&osCp.dep.tmrTqCp, osCp.dep.tmrTq, mtTimeout);
5121 /* unlock the timer table */
5122 SUnlock(&osCp.tmrTblLock);
5130 * Desc: The timer handler thread function. Counts time
5131 * and invokes the common timer function on each
5134 * Ret: (thread function)
5141 /*mt041.201 Modified SSI tick handling in mtTmrHdlr() */
/* Timer handler thread function. Sleeps in MT_TICK_CNT-microsecond ticks via
 * nanosleep(), measures real elapsed time with gettimeofday(), converts the
 * elapsed time into a number of SSI ticks (carrying the sub-tick remainder in
 * err_in_usec so MTSS time tracks wall-clock time), and invokes the common
 * timer routine cmPrcTmr() once per elapsed tick under tmrTblLock.
 * Runs forever; the trailing return is never reached in normal operation. */
5142 static Void *mtTmrHdlr
5144 void *parm /* unused */
5147 /*mt004.301-addede new region*/
5148 /* mt010.301 Removed SS_FAP portion and
5149 * enabled oroginal code in function mtTmrHdlr */
5153 uint32_t i, cnt, oldTicks, newTicks;
5154 struct timeval tv1,tv2;
5155 /* mt038.201 added return */
5157 /* mt039.201 changes for nanosleep */
5158 struct timespec tsN;
/* static: the sub-tick error in microseconds persists across loop iterations
 * (and across calls, though this thread function is entered only once). */
5159 static uint32_t err_in_usec;
5161 /*mt013.301 : doesn't need TRC macro ,as this will never return*/
5166 /* mt027.201 - Modification for SRegCfgTmr support */
5167 /* check SS_TICKS_SEC */
5168 if (SS_1MS < SS_TICKS_SEC)
5170 MTLOGERROR(ERRCLS_DEBUG, EMT013, ERRZERO, "Minimum SSI ticks is 1ms");
5173 /* mt025.201 - Addition to stop timer handler till task registration is done */
5174 /* wait for SS to come up */
5175 /* mt038.201 changed how sem_wait is called */
/* NOTE(review): due to precedence, ret receives the result of
 * (sem_wait(...) != ROK), not sem_wait's return value. Loop behavior is the
 * same (retry while interrupted by EINTR) but ret is not the errno-style
 * return — confirm ret is unused afterwards before relying on it. */
5176 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
5179 /* mt027.201 - Modification for SRegCfgTmr support */
5180 /* set up parameter to nanosleep() for SS_TICKS_SEC */
5182 ts.tv_nsec = (MT_TICK_CNT * 1000);
5183 /* mt039.201 changes for nanosleep */
/* tv1 is the reference timestamp for the elapsed-time computation below. */
5189 if (gettimeofday(&tv1, NULL) == -1)
5191 #if (ERRCLASS & ERRCLS_DEBUG)
5192 MTLOGERROR(ERRCLS_DEBUG, EMT014, (ErrVal) errno,
5193 "Error in clock_gettime");
5203 #ifndef STUB_TTI_HANDLING_5GTF
5204 printf("Returning from mtTmrHdlr()\n");
5209 /* mt039.201 changes for nanosleep */
5210 /* sleep for MT_TICK_CNT milli seconds */
/* Shorten the nominal sleep by the accumulated error so ticks stay aligned
 * with wall-clock time. (Units: MT_TICK_CNT and err_in_usec are in usec,
 * scaled to nsec for nanosleep.) */
5211 ts.tv_nsec = (MT_TICK_CNT - err_in_usec) * 1000;
/* NOTE(review): same precedence quirk as the sem_wait loop above; on EINTR
 * the remaining time from tsN is used to resume the sleep. */
5212 while ((ret = nanosleep (&ts, &tsN) != ROK) && (errno == EINTR))
5214 ts.tv_nsec = tsN.tv_nsec;
5219 if (gettimeofday(&tv2,NULL) == -1)
5221 #if (ERRCLASS & ERRCLS_DEBUG)
5222 MTLOGERROR(ERRCLS_DEBUG, EMT015, (ErrVal) errno,
5223 "Error in clock_gettime");
5227 /*mt013.301 : changed check while calculating timer to fix
5228 * diffrence between MTSS time and real unix time
/* time_int = elapsed microseconds between tv1 and tv2. */
5230 if ((tv2.tv_sec == tv1.tv_sec)&&(tv2.tv_usec > tv1.tv_usec))
5232 time_int = (tv2.tv_usec - tv1.tv_usec);
5234 else if (tv2.tv_sec > tv1.tv_sec)
5236 time_int = ((tv2.tv_sec - tv1.tv_sec)*1000000) + (tv2.tv_usec - tv1.tv_usec);
5238 else /* ts2 < ts1, this will not happen in normal scenario */
5240 /* to make sure cnt = 1 */
5242 time_int = MT_TICK_CNT;
/* Advance the global tick counter by the number of whole (adjusted) ticks
 * that elapsed; the remainder is carried into the next iteration's sleep. */
5245 oldTicks = osCp.dep.sysTicks;
5246 osCp.dep.sysTicks += (time_int/(MT_TICK_CNT - err_in_usec));
5247 err_in_usec = (time_int % (MT_TICK_CNT - err_in_usec));
5248 newTicks = osCp.dep.sysTicks;
5249 tv1.tv_usec = tv2.tv_usec;
5250 tv1.tv_sec = tv2.tv_sec;
5252 cnt = newTicks - oldTicks;
5254 while(err_in_usec >= MT_TICK_CNT)
5257 err_in_usec -= MT_TICK_CNT;
/* Clamp a runaway tick count (e.g. after a long stall) to a sane minimum. */
5259 if( cnt >= MT_MAX_TICK_CNT_VAL)
5260 cnt = MT_MIN_TICK_CNT_VAL;
5261 /* call the common timer tick handler */
5262 for (i = 0; i < cnt; i++)
5264 /* mt008.301: cmPrcTmr is guarded with a lock */
5265 /* lock the timer table */
5266 if (SLock(&osCp.tmrTblLock) != ROK)
5268 #if (ERRCLASS & ERRCLS_DEBUG)
5269 MTLOGERROR(ERRCLS_DEBUG, EMT016, ERRZERO, "Could not lock timer table");
5273 cmPrcTmr(&osCp.dep.tmrTqCp, osCp.dep.tmrTq, mtTimeout);
5274 /* unlock the timer table */
5275 SUnlock(&osCp.tmrTblLock);
5279 /* mt009.21: addition */
5280 return ( (Void *) NULLP);
5281 /* will not reach here */
5289 * Desc: Process timer event. Called from the common timer
5290 * code when a timeout occurs.
5301 PTR tCb, /* control block */
5302 S16 evnt /* event */
5311 #ifndef TENB_RTLIN_CHANGES
5314 /* mt028.201: modification: multiple procs support related changes */
5315 #ifdef SS_MULTIPLE_PROCS
5317 #endif /* SS_MULTIPLE_PROCS */
5318 #ifdef RGL_SPECIFIC_CHANGES
5319 #ifdef MSPD_MLOG_NEW
5320 uint32_t t = GetTIMETICK();
5326 /* get the timer entry */
5327 tEnt = (SsTmrEntry *) tCb;
5330 /* if the timer was deleted, this will be NULL, so drop it */
5336 /* mt008.301 Deletion: tmrTbl Lock is moved to mtTmrHdlr */
5339 /* Hmmmm, the timer might have been deleted while we've been
5340 * working at getting here, so we just skip this.
5342 if (tEnt->used == FALSE)
5348 /* Set up and send a timer message to the destination tasks'
5351 #ifndef SS_MULTICORE_SUPPORT
5352 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
5354 #ifdef RGL_SPECIFIC_CHANGES
5355 if (SGetMsg((SS_DFLT_REGION), SS_DFLT_POOL, &mBuf) != ROK)
5357 if (SGetMsg((osCp.sTskTbl[0].region), SS_DFLT_POOL, &mBuf) != ROK)
5362 #if (ERRCLASS & ERRCLS_DEBUG)
5363 MTLOGERROR(ERRCLS_DEBUG, EMT017, ERRZERO, "Could not get message");
5369 mInfo = (SsMsgInfo *)mBuf->b_rptr;
5370 mInfo->eventInfo.event = SS_EVNT_TIMER;
5371 mInfo->eventInfo.u.tmr.tmrIdx = tEnt->tmrId;
5373 mInfo->pst.dstEnt = tEnt->ownerEnt;
5374 mInfo->pst.dstInst = tEnt->ownerInst;
5375 mInfo->pst.srcEnt = tEnt->ownerEnt;
5376 mInfo->pst.srcInst = tEnt->ownerInst;
5377 /* mt028.201: modification: multiple procs support related changes */
5378 #ifndef SS_MULTIPLE_PROCS
5379 mInfo->pst.dstProcId = SFndProcId();
5380 mInfo->pst.srcProcId = SFndProcId();
5381 #else /* SS_MULTIPLE_PROCS */
5382 mInfo->pst.dstProcId = tEnt->ownerProc;
5383 mInfo->pst.srcProcId = tEnt->ownerProc;
5384 #endif /* SS_MULTIPLE_PROCS */
5385 mInfo->pst.selector = SEL_LC_NEW;
5386 #ifndef SS_MULTICORE_SUPPORT
5387 mInfo->pst.region = DFLT_REGION;
5390 mInfo->pst.pool = DFLT_POOL;
5391 mInfo->pst.prior = PRIOR0;
5392 mInfo->pst.route = RTESPEC;
5393 mInfo->pst.event = 0;
5396 #ifndef TENB_RTLIN_CHANGES
5397 /* get a semaphore for the TAPA task table */
5398 SS_ACQUIRE_SEMA(&osCp.tTskTblSem, ret);
5403 #if (ERRCLASS & ERRCLS_DEBUG)
5404 MTLOGERROR(ERRCLS_DEBUG, EMT018, ret, "Could not lock TAPA task table");
5412 /* find the owner TAPA task */
5413 /* mt028.201: modification: multiple procs support related changes */
5414 #ifdef SS_MULTIPLE_PROCS
5415 procIdIdx = SGetProcIdIdx(tEnt->ownerProc);
5416 idx = osCp.tTskIds[procIdIdx][tEnt->ownerEnt][tEnt->ownerInst];
5417 #else /* SS_MULTIPLE_PROCS */
5418 idx = osCp.tTskIds[tEnt->ownerEnt][tEnt->ownerInst];
5419 #endif /* SS_MULTIPLE_PROCS */
5420 if (idx == SS_TSKNC)
5422 #ifndef TENB_RTLIN_CHANGES
5423 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5430 /* ensure that the TAPA task is hale and hearty */
5431 tTsk = &osCp.tTskTbl[idx];
5434 #ifndef TENB_RTLIN_CHANGES
5435 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5440 /* Klock work fix ccpu00148484 */
5441 /* write the timer message to the queue of the destination task */
5442 /* mt008.301 : check sTsk before putting into it's DQ */
5443 if (tTsk->sTsk == NULLP)
5445 #ifndef TENB_RTLIN_CHANGES
5446 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5450 #if (ERRCLASS & ERRCLS_DEBUG)
5451 MTLOGERROR(ERRCLS_DEBUG, EMT019, ERRZERO,
5452 "Could not write to demand queue");
5457 #ifdef SS_LOCKLESS_MEMORY
5458 mInfo->pst.region = tTsk->sTsk->region;
5459 mInfo->region = tTsk->sTsk->region;
5460 #endif /* SS_LOCKLESS_MEMORY */
5461 if (ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
5462 (tTsk->tskPrior * SS_MAX_MSG_PRI) + PRIOR0) != ROK)
5464 #ifndef TENB_RTLIN_CHANGES
5465 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5469 #if (ERRCLASS & ERRCLS_DEBUG)
5470 MTLOGERROR(ERRCLS_DEBUG, EMT019, ERRZERO,
5471 "Could not write to demand queue");
5476 /* Fix for ccpu00130657 */
5477 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
5478 if (tTsk->sTsk->tskPrior == PRIOR0)
5481 WLS_WakeUp(mtGetWlsHdl());
5488 /* release the semaphore for the TAPA task table */
5489 #ifndef TENB_RTLIN_CHANGES
5490 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5494 /* restart the timer */
5495 arg.tq = osCp.dep.tmrTq;
5496 arg.tqCp = &osCp.dep.tmrTqCp;
5497 arg.timers = tEnt->dep.timers;
5498 arg.cb = (PTR) tEnt;
5502 arg.max = TMR_DEF_MAX;
5503 arg.wait = tEnt->interval;
5505 #ifdef RGL_SPECIFIC_CHANGES
5506 #ifdef MSPD_MLOG_NEW
5507 MLogTask(131313, RESOURCE_LARM, t, GetTIMETICK());
5519 * Desc: This thread reads the console and hands over any
5520 * data read to a user function.
5522 * Ret: (thread function)
/* Console handler thread function: reads single characters from the console
 * input stream (osCp.dep.conInFp) and hands each one to the user-supplied
 * console-queue routine (rdConQ, per the comment below). Never returns in
 * normal operation. NOTE(review): loop braces and the rdConQ call itself are
 * missing from this listing — see the full file. */
5529 static Void *mtConHdlr
5531 Ptr parm /* unused */
5538 /*mt013.301 : doesn't need TRC macro ,as this will never return*/
5544 /* check if we have a console input file handle */
5545 if (osCp.dep.conInFp == NULLP)
/* Use the raw descriptor so we can read() one byte at a time. */
5551 fd = fileno(osCp.dep.conInFp);
5556 if ((read(fd, &data, 1)) != 1)
5562 /* call rdConQ, defined by the system service user */
5572 #ifdef SS_DRVR_SUPPORT
5575 * Fun: Interrupt service task handler
5577 * Desc: This is the interrupt service task handler. It blocks
5578 * on a pipe from which it reads an isFlag structure. The
5579 * structure indicates which interrupt service task is to
5580 * be executed. The thread identifies the task, calls the
5581 * isTsk function and sends itself a message to repeat
5582 * this operation until it receives a message to cease.
5591 /* mt009.21: addition */
/* Interrupt-service task handler thread. Blocks reading an isFlag record from
 * the IS pipe (osCp.dep.isFildes[0]); the record names a driver task and an
 * action. On SET it marks the task pending, runs its isTsk() activation, and
 * writes a RESET record back to itself so the task keeps running; on RESET it
 * re-runs the activation only while the task's pending flag remains TRUE.
 * Under ERRCLS_DEBUG the pipe writes are checked; otherwise they are
 * fire-and-forget. */
5592 static Void *mtIsTskHdlr
5594 Ptr tskPtr /* pointer to task entry */
5597 #if (ERRCLASS & ERRCLS_DEBUG)
/* Partial pipe reads are treated as failure — isFlag must arrive whole. */
5604 if (read(osCp.dep.isFildes[0], &isFlag, sizeof(isFlag)) != sizeof(isFlag))
5609 switch (isFlag.action)
/* presumably the MT_IS_SET case — confirm against the full file */
5612 osCp.drvrTskTbl[isFlag.id].dep.flag = TRUE;
5614 /* call the interrupt service task activation function */
5615 osCp.drvrTskTbl[isFlag.id].isTsk(isFlag.id);
5617 /* send self a message to keep doing this */
5618 isFlag.action = MT_IS_RESET;
5620 #if (ERRCLASS & ERRCLS_DEBUG)
5621 ret = write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5622 if (ret != sizeof(isFlag))
5624 MTLOGERROR(ERRCLS_DEBUG, EMT020, ERRZERO,
5625 "write() to pipe failed");
/* non-debug build: write result intentionally ignored */
5628 write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
/* presumably the MT_IS_UNSET case: clear the pending flag */
5635 osCp.drvrTskTbl[isFlag.id].dep.flag = FALSE;
/* presumably the MT_IS_RESET case: continue only while still pending */
5640 if (osCp.drvrTskTbl[isFlag.id].dep.flag)
5642 /* call the interrupt service task activation function */
5643 osCp.drvrTskTbl[isFlag.id].isTsk(isFlag.id);
5645 #if (ERRCLASS & ERRCLS_DEBUG)
5646 /* send self a message to do this again */
5647 ret = write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5649 if (ret != sizeof(isFlag))
5651 MTLOGERROR(ERRCLS_DEBUG, EMT021, ERRZERO,
5652 "write() to pipe failed");
5655 write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5663 /* where did THIS come from?? */
5667 /* mt009.21: addition */
5668 return ( (Void *) NULLP);
5672 #endif /* SS_DRVR_SUPPORT */
5673 #endif /* L2_L3_SPLIT */
5675 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
5679 * Fun: mtIntSigHndlr
5681 * Desc: Exit function, shuts down.
/* SIGINT handler: only records that the signal occurred by setting
 * osCp.dep.sigEvnt; actual cleanup happens later (see mtExitClnup), keeping
 * the handler async-signal-safe-minimal. */
5690 Void mtIntSigHndlr(int arg)
5693 osCp.dep.sigEvnt=TRUE;
5696 #ifdef TENB_RTLIN_CHANGES
5704 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
5709 * Desc: function, shuts down.
/* Exit cleanup: prints the final tick count, clears the pending-signal flag
 * set by mtIntSigHndlr, and closes the output log file if one is open.
 * The duplicate sprintf lines differ only in the ticks format specifier
 * (%u vs %lu) — presumably selected by a 32/64-bit #ifdef that is missing
 * from this listing; confirm against the full file. */
5718 Void mtExitClnup(void)
5724 SGetSysTime(&ticks);
5726 sprintf(buf, "\n\nmtss(posix) ends\nticks: %u\n", ticks);
5728 sprintf(buf, "\n\nmtss(posix) ends\nticks: %lu\n", ticks);
5730 #ifdef SS_HISTOGRAM_SUPPORT
5734 osCp.dep.sigEvnt=FALSE;
5736 if (osCp.dep.fileOutFp)
5738 fclose(osCp.dep.fileOutFp);
5746 Void SIncrementTtiCount(Void)
5751 Ticks SGetTtiCount(Void)
5760 * Desc: This function displays a string to a given output
5765 * Notes: Buffer should be null terminated.
5767 * channel 0 is reserved for backwards compatibility
5775 S16 chan, /* channel */
5776 Txt *buf /* buffer */
5780 /* mt020.201 - Fixed typo */
5781 #if (ERRCLASS & ERRCLS_INT_PAR)
5784 MTLOGERROR(ERRCLS_INT_PAR, EMT022, ERRZERO, "Null pointer");
5789 #ifndef XEON_SPECIFIC_CHANGES
5790 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
5791 ssMemlog(buf, strlen(buf));
5796 /* mt012.301 :FIX for LOG RELATED ISSUE */
5804 if (osCp.dep.conOutFp) fwrite(buf, strlen(buf), 1, osCp.dep.conOutFp);
5810 if (osCp.dep.fileOutFp)
5811 fwrite(buf, strlen(buf), 1, osCp.dep.fileOutFp);
5812 /*mt031.201 added under compile time flag FLUSHBUFF a call to fflush() */
5815 fflush(osCp.dep.fileOutFp);
5828 * Desc: function, shuts down.
5840 /* mt030.201 added under compilet time flag SS_LINUX and SLES9_PLUS
5841 a loop to overcome the child processes being killed upon exiting the
5843 #ifdef SS_LINUX /* this should have already been defined */
5844 /* mt010.301 removed flag SLES9_PLUS */
5845 /* wait forever for children */
5849 if(osCp.dep.sigEvnt==TRUE)
5856 pthread_exit(NULLP);
5862 * Fun: Set date and time
5864 * Desc: This function is used to set the calendar
5869 * Notes: Unimplemented
5876 REG1 DateTime *dt /* date and time */
5889 * Fun: Get date and time
5891 * Desc: This function is used to determine the calendar
5892 * date and time. This information may be used for
5893 * some management functions.
5905 REG1 DateTime *dt /* date and time */
5908 /*-- mt035.201 : SSI enhancements for micro second in datetime struct --*/
5911 struct timespec ptime;
5913 struct timeval ptime;
5920 #if (ERRCLASS & ERRCLS_INT_PAR)
5923 MTLOGERROR(ERRCLS_INT_PAR, EMT023, ERRZERO, "Null pointer");
5932 localtime_r(&tt, &tme);
5935 clock_gettime(CLOCK_REALTIME, &ptime);
5937 gettimeofday(&ptime, NULL);
5939 localtime_r(&ptime.tv_sec, &tme);
5941 dt->month = (uint8_t) tme.tm_mon + 1;
5942 dt->day = (uint8_t) tme.tm_mday;
5943 dt->year = (uint8_t) tme.tm_year;
5944 dt->hour = (uint8_t) tme.tm_hour;
5945 dt->min = (uint8_t) tme.tm_min;
5946 dt->sec = (uint8_t) tme.tm_sec;
5949 #ifdef SS_DATETIME_USEC
5951 dt->usec = ptime.tv_nsec / 1000;
5953 dt->usec = ptime.tv_usec;
5955 #endif /*-- SS_DATETIME_USEC --*/
5961 * Get time from epoch in milliseconds
5963 * Fun: Get time from epoch in milliseconds
5965 * Desc: This function is used to get the time from epoch in milli seconds.
5966 * This information may be used for calculating a layer's activation function
5967 * execution time used for thread profiling.
5976 /* mt003.301 Modifications */
5979 EpcTime *et /* date and time */
5982 /* mt003.301 Modifications */
5983 static uint64_t now;
5984 uint64_t to_sec = 1000000;
5985 uint64_t to_nsec = 1000;
5987 struct timespec ptime;
5989 struct timeval ptime;
5994 #if (ERRCLASS & ERRCLS_INT_PAR)
6003 clock_gettime(CLOCK_REALTIME, &ptime);
6005 gettimeofday(&ptime, NULL);
6006 #endif /* SS_LINUX */
6008 now = (ptime.tv_sec * to_sec);
6011 now += (ptime.tv_nsec / to_nsec);
6012 #else /* SS_LINUX */
6013 now += (ptime.tv_usec);
6015 #endif /* SS_LINUX */
6016 now = (now / to_nsec);
6027 * Fun: Get system time
6029 * Desc: This function is used to determine the system time.
6033 * Notes: osCp.dep.sysTicks is updated by the timer thread.
6040 Ticks *sysTime /* system time */
6045 #if (ERRCLASS & ERRCLS_INT_PAR)
6046 if (sysTime == NULLP)
6048 MTLOGERROR(ERRCLS_INT_PAR, EMT024, ERRZERO, "Null pointer");
6054 *sysTime = osCp.dep.sysTicks;
6060 /* mt021.201 - Addition of SGetRefTime function */
6063 * Fun: Get referenced time
6065 * Desc: This function is used to determine the time in seconds
6066 * and microseconds from a reference time. The reference
6067 * time is expressed in seconds from UTC EPOC, January 1,
6073 * Notes: Macros are defined for reference times:
6074 * SS_REFTIME_01_01_1970
6075 * SS_REFTIME_01_01_2002
6082 uint32_t refTime, /* reference time */
6089 struct timespec ptime;
6091 struct timeval ptime;
6096 clock_gettime(CLOCK_REALTIME, &ptime);
6098 gettimeofday(&ptime, NULL);
6101 #if (ERRCLASS & ERRCLS_INT_PAR)
6102 if (sec == NULLP || usec == NULLP)
6104 MTLOGERROR(ERRCLS_INT_PAR, EMT025, ERRZERO, "Null pointer");
6107 /* mt022.201 - Modification to fix compile warning */
6108 if (refTime > (uint32_t)(ptime.tv_sec))
6110 MTLOGERROR(ERRCLS_INT_PAR, EMT026, ERRZERO, "Reference time exceeds present time");
6115 *sec = ptime.tv_sec - refTime;
6117 *usec = ptime.tv_nsec / 1000;
6119 *usec = ptime.tv_usec;
6129 * Fun: Get Random Number
6131 * Desc: Invoked by layer when a pseudorandom number is required.
6135 * Notes: Suggested approach uses shuffled Linear Congruential
6136 * Operators as described in Byte magazine October
6137 * 1984; "Generating and Testing Pseudorandom Numbers"
6144 Random *value /* random number */
6149 #if (ERRCLASS & ERRCLS_INT_PAR)
6152 /* mt011.21: addition */
6153 MTLOGERROR(ERRCLS_INT_PAR, EMT028, (ErrVal)0 , "Null pointer");
6159 *value = (Random) rand_r(&osCp.dep.randSeed);
6170 * Desc: This function exits from a task.
6174 * Notes: Currently does nothing.
6189 * Fun: Exit Interrupt
6191 * Desc: This function exits from an interrupt.
6195 * Notes: Currently does nothing.
6210 * Fun: Hold Interrupt
6212 * Desc: This function prohibits interrupts from being enabled until
6213 * release interrupt. This function should be called when
6214 * interrupts are disabled and prior to any call to system
6215 * services either by entry to an interrupt service routine or
6216 * by explicit call to disable interrupt.
6220 * Notes: Currently does nothing
6235 * Fun: Release Interrupt
6237 * Desc: This function allows interrupts to be enabled.
6241 * Notes: Currently does nothing.
6258 * Desc: Enable interrupts
6260 * Ret: ROK on success
6263 * Notes: Currently does nothing.
6268 inline S16 SEnbInt(void)
6280 * Desc: Disable interrupts
6282 * Ret: ROK on success
6285 * Notes: Currently does nothing.
6290 inline S16 SDisInt(void)
6302 * Desc: This function gets the function address stored at the
6303 * specified interrupt vector.
6307 * Notes: Currently does nothing.
6314 VectNmb vectNmb, /* vector number */
6315 PIF *vectFnct /* vector function */
6332 * Desc: This function installs the specified function at the
6333 * specified interrupt vector.
6337 * Notes: Currently does nothing.
6344 VectNmb vectNmb, /* vector number */
6345 PIF vectFnct /* vector function */
6357 /* mt028.201: modification: multiple procs support related changes */
6358 #ifndef SS_MULTIPLE_PROCS
6364 * Desc: This function gets the current entity and instance.
6367 * RFAILED - failed, general (optional)
6369 * Notes: This function may be called by the OS or Layer 1
6377 Ent *ent, /* entity */
6378 Inst *inst /* instance */
6389 #if (ERRCLASS & ERRCLS_INT_PAR)
6390 /* check pointers */
6391 if (ent == NULLP || inst == NULLP)
6393 MTLOGERROR(ERRCLS_INT_PAR, EMT029, ERRZERO, "Null pointer");
6399 /* get the thread id */
6400 tId = pthread_self();
6403 /* find the system task in whose context we're running */
6405 ret = SLock(&osCp.sTskTblLock);
6410 for (i = 0; i < SS_MAX_STSKS; i++)
6412 if (pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
6414 sTsk = &osCp.sTskTbl[i];
6420 *ent = sTsk->dep.ent;
6421 *inst = sTsk->dep.inst;
6423 SUnlock(&osCp.sTskTblLock);
6426 return (ret == ROK ? ROK : RFAILED);
6434 * Desc: This function sets the current entity and instance.
6445 Ent ent, /* entity */
6446 Inst inst /* instance */
6457 #if (ERRCLASS & ERRCLS_INT_PAR)
6458 /* check entity and instance IDs */
6459 if (ent >= ENTNC || inst >= INSTNC)
6461 MTLOGERROR(ERRCLS_INT_PAR, EMT030, ERRZERO, "Invalid entity/instance");
6467 /* get the thread id */
6468 tId = pthread_self();
6471 /* find the system task in whose context we're running */
6473 ret = SLock(&osCp.sTskTblLock);
6478 for (i = 0; i < SS_MAX_STSKS; i++)
6480 if (pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
6482 sTsk = &osCp.sTskTbl[i];
6488 sTsk->dep.ent = ent;
6489 sTsk->dep.inst = inst;
6491 SUnlock(&osCp.sTskTblLock);
6494 return (ret == ROK ? ROK : RFAILED);
6497 #endif /* SS_MULTIPLE_PROCS */
6499 #ifdef SS_DRVR_SUPPORT
6505 * Desc: Set interrupt pending flag
6507 * Ret: ROK on success
/* Set the interrupt-pending state of a driver task: validates the driver task
 * id (debug builds), builds an isFlag record with MT_IS_SET or MT_IS_UNSET,
 * and writes it to the IS pipe consumed by mtIsTskHdlr. A short pipe write is
 * treated as failure. */
6515 inline S16 SSetIntPend
6517 uint16_t id, /* driver task identifier */
6518 Bool flag /* flag */
6526 #if (ERRCLASS & ERRCLS_INT_PAR)
6527 if (id >= SS_MAX_DRVRTSKS || osCp.drvrTskTbl[id].used == FALSE)
6529 MTLOGERROR(ERRCLS_INT_PAR, EMT031, id, "Invalid instance");
6536 isFlag.action = (flag ? MT_IS_SET : MT_IS_UNSET);
6538 if (write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag)) != sizeof(isFlag))
6546 #endif /* SS_DRVR_SUPPORT */
6549 #ifdef SS_LOCKLESS_MEMORY
6552 * Fun: SGlobMemInfoShow
6554 * Desc: This function displays the memory usage information
6555 * for the destined region. It will show the usage of
6556 * each configured bucket and the heap for the specified region.
6559 * RFAILED Region not registered
/* Display global-region bucket usage (SS_LOCKLESS_MEMORY builds): prints a
 * table of bucket-set sizes and valid/free set counts for every bucket in
 * osCp.globRegCb via SDisplay on channel 0. The three sprintf variants differ
 * only in format widths/lengths for different build flavors (XEON vs others);
 * the selecting #else/#endif lines are missing from this listing. */
6564 S16 SGlobMemInfoShow(Void)
6568 CmMmGlobRegCb *globReg;
6571 globReg = osCp.globRegCb;
6573 sprintf(prntBuf, "--------------------------------------------------------------\n");
6574 SDisplay(0, prntBuf);
6575 sprintf(prntBuf, "Global Region Bucket Information\n");
6576 SDisplay(0, prntBuf);
6577 sprintf(prntBuf, "====================================================\n");
6578 SDisplay(0, prntBuf);
6579 sprintf(prntBuf, "Bucket Id Set Size Free Sets Allocated\n");
6580 SDisplay(0, prntBuf);
6581 sprintf(prntBuf, "====================================================\n");
6582 SDisplay(0, prntBuf);
6585 for (idx = 0; idx < globReg->numBkts; idx++)
6587 #ifdef XEON_SPECIFIC_CHANGES
6588 sprintf(prntBuf, "%2u %12lu %12lu %8lu %9lu\n",
6589 idx, globReg->bktTbl[idx].size, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6592 sprintf(prntBuf, "%2u %12lu %8lu %9lu\n",
6593 idx, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6595 sprintf(prntBuf, "%2u %12u %8u %9u\n",
6596 idx, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6599 SDisplay(0, prntBuf);
6601 sprintf(prntBuf, "--------------------------------------------------------------\n");
6602 SDisplay(0, prntBuf);
6607 #endif /* SS_LOCKLESS_MEMORY */
/* Returns whether a region/pool bucket is more than 70% allocated
 * (numAlloc*100/numBlks > 70), logging via MSPD_DBG when the threshold is
 * crossed. NOTE(review): no visible bounds check on reg/pool and integer
 * division assumes numBlks != 0 — presumably guaranteed by configuration;
 * confirm against the full file. */
6610 Bool IsMemoryThresholdHit(Region reg, Pool pool)
6612 if((mtCMMRegCb[reg]->bktTbl[pool].numAlloc * 100 )/mtCMMRegCb[reg]->bktTbl[pool].numBlks > 70)
6614 MSPD_DBG("Threshold reached reg(%d) pool(%d) numAllc(%d) numBlks(%d)\n",
6617 mtCMMRegCb[reg]->bktTbl[pool].numAlloc,
6618 mtCMMRegCb[reg]->bktTbl[pool].numBlks);
6625 /* mt022.201 - Addition of SRegInfoShow function */
6630 * Desc: This function displays the memory usage information
6631 * for the destined region. It will show the usage of
6632 * each configured bucket and the heap for the specified region.
6635 * RFAILED Region not registered
6637 * Notes: A Sample Output from the function
6638 * Bucket Memory: region 1
6639 * ====================================================
6640 * Bucket Number of Blks configured Size Allocated
6641 * ====================================================
6649 * Heap Memory: region 1
6652 * Heap Segmented blocks: 0
6668 #if (ERRCLASS & ERRCLS_INT_PAR)
6669 if (region > (SS_MAX_REGS-1) )
6671 MTLOGERROR(ERRCLS_INT_PAR, EMT032, ERRZERO, "Invalid Region");
6678 #ifndef TENB_T2K3K_SPECIFIC_CHANGES
6679 sprintf(prntBuf, "\n\nBucket Memory: region %d\n", region);
6680 SDisplay(0, prntBuf);
6681 sprintf(prntBuf, "====================================================\n");
6682 SDisplay(0, prntBuf);
6683 sprintf(prntBuf, "Bucket Number of Blks configured Size Allocated\n");
6684 SDisplay(0, prntBuf);
6685 sprintf(prntBuf, "====================================================\n");
6686 SDisplay(0, prntBuf);
6690 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
6692 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
6694 sprintf((char *)prntBuf, "%2u %8u %5u %8u %8u\n",
6695 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6696 mtCMMRegCb[region]->bktTbl[idx].size,
6697 mtCMMRegCb[region]->bktTbl[idx].numAlloc,
6698 mtCMMRegCb[region]->bktTbl[idx].maxAlloc);
6700 sprintf((char *)prntBuf, "%2u %8lu %5lu %8lu %8lu\n",
6701 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6702 mtCMMRegCb[region]->bktTbl[idx].size,
6703 mtCMMRegCb[region]->bktTbl[idx].numAlloc,
6704 mtCMMRegCb[region]->bktTbl[idx].maxAlloc);
6707 /*mt009.301 Fixed 64BIT compilation warnings*/
6709 sprintf(prntBuf, "%2u %8u %5u %8u\n",
6710 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6711 mtCMMRegCb[region]->bktTbl[idx].size,
6712 mtCMMRegCb[region]->bktTbl[idx].numAlloc);
6714 sprintf(prntBuf, "%2u %8lu %5lu %8lu\n",
6715 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6716 mtCMMRegCb[region]->bktTbl[idx].size,
6717 mtCMMRegCb[region]->bktTbl[idx].numAlloc);
6719 #endif /* not TENB_RTLIN_CHANGES */
6720 SDisplay(0, prntBuf);
6721 *availmem = *availmem + (mtCMMRegCb[region]->bktTbl[idx].size * \
6722 (mtCMMRegCb[region]->bktTbl[idx].numBlks - \
6723 mtCMMRegCb[region]->bktTbl[idx].numAlloc));
6725 sprintf(prntBuf, "\n---------------\n");
6726 SDisplay(0, prntBuf);
6727 sprintf(prntBuf, "Heap Memory: region %d\n", region);
6728 SDisplay(0, prntBuf);
6729 /*mt009.301 Fixed 64BIT compilation warnings*/
6731 sprintf(prntBuf, "Heap Size: %u\n", mtCMMRegCb[region]->heapSize);
6733 sprintf(prntBuf, "Heap Size: %lu\n", mtCMMRegCb[region]->heapSize);
6735 SDisplay(0, prntBuf);
6736 /*mt009.301 Fixed 64BIT compilation warnings*/
6738 sprintf(prntBuf, "Heap Allocated: %u\n",
6739 (mtCMMRegCb[region]->heapSize - mtCMMRegCb[region]->heapCb.avlSize));
6741 sprintf(prntBuf, "Heap Allocated: %lu\n",
6742 (mtCMMRegCb[region]->heapSize - mtCMMRegCb[region]->heapCb.avlSize));
6744 SDisplay(0, prntBuf);
6745 *availmem = *availmem + mtCMMRegCb[region]->heapCb.avlSize;
6746 #if (ERRCLASS & ERRCLS_DEBUG)
6747 sprintf(prntBuf, "Heap Segmented blocks: %d\n",
6748 mtCMMRegCb[region]->heapCb.numFragBlk);
6749 SDisplay(0, prntBuf);
/* XEON-only memory-threshold tables: per-region, per-bucket watermarks used by
 * SRegReachedMemThreshold() below to classify bucket occupancy. */
6754 #ifdef XEON_SPECIFIC_CHANGES
6755 #define SSI_MAX_BKT_THRESHOLD 6
6756 #define SSI_MAX_REG_THRESHOLD 2
6757 uint32_t SMemMaxThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6758 uint32_t SMemMidThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6759 uint32_t SMemLowThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
/* One-time initializer: derives max/mid/low watermarks as 95%/85%/80% of each
 * bucket's total block count.
 * NOTE(review): listing is incomplete here — the parameter list (region, maxBkt,
 * judging by the loop bounds) and braces were lost in extraction. */
6761 static Void SInitMemThreshold
6768 for (idx = 0; (idx < maxBkt && idx < mtCMMRegCb[region]->numBkts); idx++)
6770 SMemMaxThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*95)/100;
6771 SMemMidThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*85)/100;
6772 SMemLowThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*80)/100;
/* NOTE(review): %d used for uint32_t thresholds — works on common ABIs but
 * PRIu32 would be strictly correct; confirm against project conventions. */
6773 printf("REGION:%d, BKT:%d max:%d mid:%d low:%d\n", region, idx, SMemMaxThreshold[region][idx], SMemMidThreshold[region][idx], SMemLowThreshold[region][idx]);
/* Classifies current bucket occupancy of a region against the watermark tables:
 * lazily initializes the tables on first call (initFlag), then scans the first
 * maxBkt buckets. memStatus starts at 3 (presumably "below low watermark") and
 * is tightened toward the most-congested level found — max beats mid beats low.
 * NOTE(review): several lines (braces, assignments to memStatus, the return)
 * were lost in extraction; exact return semantics cannot be confirmed here. */
6777 S16 SRegReachedMemThreshold
6784 uint8_t memStatus = 3;
6785 static uint8_t initFlag = 1;
6789 SInitMemThreshold(region, maxBkt);
6792 for (idx = 0; (idx < maxBkt && idx < mtCMMRegCb[region]->numBkts); idx++)
6794 if(mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemMaxThreshold[region][idx])
6799 else if((mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemMidThreshold[region][idx]) && (memStatus >1))
6803 else if((mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemLowThreshold[region][idx]) && (memStatus >2))
6811 /* mt033.201 - addition of API to return the memory statistical data */
/* Fills the caller-supplied SsMemDbgInfo with per-bucket block counts/sizes/
 * allocation counts, heap size/allocated/available, and a region-wide
 * "availmem" total (free bucket bytes + free heap bytes). */
6816 * Desc: This function returns the memory usage information
6817 * for the destined region. It will return the usage of
6818 * each configured bucket and the heap for the specified region.
6821 * RFAILED Region not registered
6831 SsMemDbgInfo *dbgInfo
/* Guard: reject out-of-range region ids when parameter checking is compiled in. */
6837 #if (ERRCLASS & ERRCLS_INT_PAR)
6838 if (region >= mtMemoCfg.numRegions )
6840 MTLOGERROR(ERRCLS_INT_PAR, EMT033, ERRZERO, "Invalid Region");
6845 dbgInfo->availmem = 0;
/* Clamp reported bucket count to the debug table's capacity. */
6847 if (mtCMMRegCb[region]->numBkts > SS_MAX_BKT_PER_DBGTBL)
6848 dbgInfo->numBkts = SS_MAX_BKT_PER_DBGTBL;
6850 dbgInfo->numBkts = mtCMMRegCb[region]->numBkts;
6852 for (idx = 0; (idx < mtCMMRegCb[region]->numBkts) && (idx < SS_MAX_BKT_PER_DBGTBL); idx++)
6854 dbgInfo->bktDbgTbl[idx].numBlks = mtCMMRegCb[region]->bktTbl[idx].numBlks;
6855 dbgInfo->bktDbgTbl[idx].size = mtCMMRegCb[region]->bktTbl[idx].size;
6856 dbgInfo->bktDbgTbl[idx].numAlloc = mtCMMRegCb[region]->bktTbl[idx].numAlloc;
/* Free bytes contributed by this bucket = blockSize * (total - allocated). */
6858 dbgInfo->availmem += (mtCMMRegCb[region]->bktTbl[idx].size * \
6859 (mtCMMRegCb[region]->bktTbl[idx].numBlks - \
6860 mtCMMRegCb[region]->bktTbl[idx].numAlloc));
6863 dbgInfo->region = region;
6865 dbgInfo->heapSize = mtCMMRegCb[region]->heapSize;
6867 dbgInfo->heapAlloc = (mtCMMRegCb[region]->heapSize - \
6868 mtCMMRegCb[region]->heapCb.avlSize);
6870 dbgInfo->availmem += mtCMMRegCb[region]->heapCb.avlSize;
/* Fragmented-block count is only tracked in debug error-class builds. */
6872 #if (ERRCLASS & ERRCLS_DEBUG)
6873 dbgInfo->numFragBlk = mtCMMRegCb[region]->heapCb.numFragBlk;
/* Reports configured region and pool counts to the caller via out-parameters.
 * NOTE(review): the function signature was lost in extraction — presumably
 * SGetRegPoolInfo(uint8_t *numRegion, uint8_t *numPool); confirm in full file. */
6885 /* Send number of Region available */
6886 *numRegion = mtMemoCfg.numRegions;
6887 /* Send number of Pools available */
/* NOTE(review): pool count is taken from region 0 only — assumes all regions
 * are configured with the same number of pools; verify with configuration. */
6888 *numPool = cfgRegInfo[0].numPools;
6893 /* mt033.201 - addition of APIs to print the memory statistical data
6894 * as defined by SSI enhancements
/* Entire SSI debug-print API set is compiled only under SSI_DEBUG_LEVEL1. */
6896 #ifdef SSI_DEBUG_LEVEL1
6899 * Fun: SPrintRegMemStatusInfo
6901 * Desc: This function displays the memory usage information
6902 * for the destined region. It will show the total memory
6903 * used for static and dynamic memory if typeFlag is
6904 * SS_MEM_BKT_ALLOC_PROFILE. It will show the number of
6905 * memory block allocated for a particular size if typeFlag
6906 * is SS_MEM_BLK_SIZE_PROFILE from the hash list by
6907 * calling SRegPrintMemStats.
6916 S16 SPrintRegMemStatusInfo
/* Running totals over all buckets in the region (zeroed on a lost line below —
 * see "initialize the counters" comment). */
6924 uint32_t statMemSize;
6925 uint32_t dynMemSize;
6928 #if (ERRCLASS & ERRCLS_INT_PAR)
6929 if (region >= mtMemoCfg.numRegions )
6931 MTLOGERROR(ERRCLS_INT_PAR, EMT034, ERRZERO, "Invalid Region");
6936 /* initialize the counters*/
6940 if (typeFlag == SS_MEM_BKT_ALLOC_PROFILE)
6942 /* total static and dynamic memory allocated from all the buckets in region requested */
6943 sprintf(prntBuf, "\nAllocated Memory profile of Buckets from region: %d \n", region);
6944 SDisplay(0, prntBuf);
6945 sprintf(prntBuf, "===========================================\n");
6946 SDisplay(0, prntBuf);
6947 sprintf(prntBuf, "Bucket Static Memory Dynamic Memory\n");
6948 SDisplay(0, prntBuf);
6949 sprintf(prntBuf, "===========================================\n");
6950 SDisplay(0, prntBuf);
6951 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
/* Two format variants: %u for ALIGN_64BIT builds, %lu otherwise (the
 * selecting #ifdef lines were lost in extraction). */
6953 /*mt009.301 Fixed 64BIT compilation warnings*/
6955 sprintf(prntBuf, "%2u %8u %8u\n", idx,
6956 mtCMMRegCb[region]->bktTbl[idx].staticMemUsed,
6957 mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed);
6959 sprintf(prntBuf, "%2lu %8lu %8lu\n", idx,
6960 mtCMMRegCb[region]->bktTbl[idx].staticMemUsed,
6961 mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed);
6963 SDisplay(0, prntBuf);
6964 /* update the total count */
6965 statMemSize += mtCMMRegCb[region]->bktTbl[idx].staticMemUsed;
6966 dynMemSize += mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed;
6969 /*mt009.301 Fixed 64BIT compilation warnings*/
6971 sprintf(prntBuf, "Total Static Memory allocated from buckets: %u\n", statMemSize);
6972 SDisplay(0, prntBuf);
6973 sprintf(prntBuf, "Total Dynamic Memory allocated from buckets: %u\n", dynMemSize);
6975 sprintf(prntBuf, "Total Static Memory allocated from buckets: %lu\n", statMemSize);
6976 SDisplay(0, prntBuf);
6977 /*mt010.301 fix for compilation error*/
6978 sprintf(prntBuf, "Total Dynamic Memory allocated from buckets: %lu\n", dynMemSize);
6980 SDisplay(0, prntBuf);
/* Heap portion of the profile: static vs dynamic heap usage counters. */
6982 sprintf(prntBuf, "\n\nAllocated Memory profile from Heap of region: %d \n", region);
6983 SDisplay(0, prntBuf);
6984 /*mt009.301 Fixed 64BIT compilation warnings*/
6986 sprintf(prntBuf, "STATIC MEMORY: %u DYNAMIC MEMORY:%u \n",
6987 mtCMMRegCb[region]->heapCb.staticHeapMemUsed, mtCMMRegCb[region]->heapCb.dynamicHeapMemUsed);
6989 sprintf(prntBuf, "STATIC MEMORY: %lu DYNAMIC MEMORY:%lu \n",
6990 mtCMMRegCb[region]->heapCb.staticHeapMemUsed, mtCMMRegCb[region]->heapCb.dynamicHeapMemUsed);
6992 SDisplay(0, prntBuf);
6994 else if (typeFlag == SS_MEM_BLK_SIZE_PROFILE)
6996 /* Bucket Memory allocation Statistics */
6997 return (SPrintRegMemStats(region));
/* Fall-through: unknown typeFlag value. */
7002 sprintf(prntBuf, "\n Invalid choice \n");
7003 SDisplay(0, prntBuf);
7011 * Fun: SPrintRegMemStats
7013 * Desc: This function displays the memory usage information for
7014 * the destined region. It will show the number of memory
7015 * block allocated for a particular size from the hash list.
7024 static S16 SPrintRegMemStats(Region region)
7026 CmMmHashListCp *hashListCp;
/* Size-vs-attempts histogram lives in the region's hash list control point. */
7032 hashListCp = &mtCMMRegCb[region]->hashListCp;
7034 sprintf(prntBuf, "\n\nSize Vs. NumAttempts and Alloc/Dealloc profile of region %d\n", region);
7035 SDisplay(0, prntBuf);
7036 sprintf(prntBuf, "Maximum Entries: %u Current Entries: %u\n",
7037 hashListCp->numOfbins, hashListCp->numOfEntries);
7038 SDisplay(0, prntBuf);
7039 sprintf(prntBuf, "===================================\n");
7040 SDisplay(0, prntBuf);
7041 sprintf(prntBuf, "Block Size Total number of requests\n");
7042 SDisplay(0, prntBuf);
7043 sprintf(prntBuf, "===================================\n");
7044 SDisplay(0, prntBuf);
/* Walk the hash table; stop early once all live entries have been printed
 * (cntEnt increment happens on a line lost in extraction). */
7046 for (idx = 0, cntEnt=0; (cntEnt < hashListCp->numOfEntries) &&
7047 (idx < CMM_STAT_HASH_TBL_LEN); idx++)
7049 if (hashListCp->hashList[idx].numAttempts)
7052 /*mt009.301 Fixed 64BIT compilation warnings*/
7054 sprintf(prntBuf, "%8u %8u\n", hashListCp->hashList[idx].size,
7055 hashListCp->hashList[idx].numAttempts);
7057 sprintf(prntBuf, "%8lu %8lu\n", hashListCp->hashList[idx].size,
7058 hashListCp->hashList[idx].numAttempts);
7060 SDisplay(0, prntBuf);
7064 sprintf(prntBuf, "\nAllocation/De-allocation profile in Buckets\n");
7065 SDisplay(0, prntBuf);
7066 sprintf(prntBuf, "=================================================\n");
7067 SDisplay(0, prntBuf);
7068 sprintf(prntBuf, "Bucket Num of Alloc Attempts Num of De-alloc Attempts\n");
7069 SDisplay(0, prntBuf);
7070 sprintf(prntBuf, "=================================================\n");
7071 SDisplay(0, prntBuf);
7073 /* Print the statistics of total number of alloc/de-alloc attempts in each bucket of this region */
7074 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
7076 /*mt009.301 Fixed 64BIT compilation warnings*/
7078 sprintf(prntBuf, "%4u %8u %8u\n", idx,
7079 mtCMMRegCb[region]->bktTbl[idx].numAllocAttempts,
7080 mtCMMRegCb[region]->bktTbl[idx].numDeallocAttempts);
7082 sprintf(prntBuf, "%4lu %8lu %8lu\n", idx,
7083 mtCMMRegCb[region]->bktTbl[idx].numAllocAttempts,
7084 mtCMMRegCb[region]->bktTbl[idx].numDeallocAttempts);
7086 SDisplay(0, prntBuf);
/* Same alloc/de-alloc counters for the heap allocator. */
7088 sprintf(prntBuf, "\nAllocation/De-allocation profile in Heap\n");
7089 SDisplay(0, prntBuf);
7090 /*mt009.301 Fixed 64BIT compilation warnings*/
7092 sprintf(prntBuf, "Num of Alloc Attempts: %u Num of De-alloc Attempts: %u\n",
7093 mtCMMRegCb[region]->heapCb.numAllocAttempts,
7094 mtCMMRegCb[region]->heapCb.numDeallocAttempts);
7096 sprintf(prntBuf, "Num of Alloc Attempts: %lu Num of De-alloc Attempts: %lu\n",
7097 mtCMMRegCb[region]->heapCb.numAllocAttempts,
7098 mtCMMRegCb[region]->heapCb.numDeallocAttempts);
7100 SDisplay(0, prntBuf);
7101 sprintf(prntBuf, "\n");
7102 SDisplay(0, prntBuf);
7109 * Fun: SRegMemErrHdlr
7111 * Desc: This function handles the errors returned from the memory
7112 * related functions. Customers are suggested to modify this
7113 * API according to their specific requirement.
/* Default policy is report-only: each recognized error code just prints a
 * diagnostic via SDisplay; no recovery action is taken here. */
7132 if (errCode == RDBLFREE)
7134 sprintf(prntBuf, "\nDouble free attempted at location:%8p in region:%d\n", ptr, region);
7135 SDisplay(0, prntBuf);
7137 else if (errCode == RTRAMPLINGNOK)
7139 sprintf(prntBuf, "\nMemory Trampling crossed Threshold in region:%d\n", region);
7140 SDisplay(0, prntBuf);
7148 * Fun: SPrintRegMemProfile
7150 * Desc: This function displays the memory profile information
7151 * for the destined region. This function prints for:
7152 * 1) each memory bucket-Block address, size, size for which it is allocated, free/allocated, static/dynamic
7153 * 2) heap - memory block address, size, requested size, free/allocated, static/dynamic
7162 S16 SPrintRegMemProfile
7169 CmMmBlkHdr *curBktBlk;
/* Fixed stride between consecutive bucket blocks: payload + header. */
7171 Size offsetToNxtBlk;
7179 #if (ERRCLASS & ERRCLS_INT_PAR)
7180 if (region >= mtMemoCfg.numRegions )
7182 MTLOGERROR(ERRCLS_INT_PAR, EMT035, ERRZERO, "Invalid Region");
7187 regCb = mtCMMRegCb[region];
7189 /* memory profile */
7190 sprintf(prntBuf, "\n\nFull Memory Profile of region %d\n", region);
7191 SDisplay(0, prntBuf);
7193 /* bucket profile */
7194 sprintf(prntBuf, "\nBucket Profile\n");
7195 SDisplay(0, prntBuf);
7197 for (idx = 0; idx < regCb->numBkts; idx++)
7200 /*mt009.301 Fixed 64BIT compilation warnings*/
7202 sprintf(prntBuf, "\nBucket number:%4u of Size:%u Num of Blocks: %u\n",
7203 idx, regCb->bktTbl[idx].size, regCb->bktTbl[idx].numBlks);
7205 sprintf(prntBuf, "\nBucket number:%4lu of Size:%lu Num of Blocks: %lu\n",
7206 idx, regCb->bktTbl[idx].size, regCb->bktTbl[idx].numBlks);
7208 SDisplay(0, prntBuf);
7210 sprintf(prntBuf, "==========================================================================\n");
7211 SDisplay(0, prntBuf);
7212 sprintf(prntBuf, " Block Location Free/Allocated Static/dynamic Size requested\n");
7213 SDisplay(0, prntBuf);
7214 sprintf(prntBuf, "==========================================================================\n");
7215 SDisplay(0, prntBuf);
7217 offsetToNxtBlk = regCb->bktTbl[idx].size + sizeof(CmMmBlkHdr);
/* Walk every block of the bucket by pointer arithmetic from bktStartPtr. */
7219 for (blkCnt=0, curBktBlk = (CmMmBlkHdr *)(regCb->bktTbl[idx].bktStartPtr);
7220 ((curBktBlk) && (blkCnt < regCb->bktTbl[idx].numBlks));
7221 curBktBlk = (CmMmBlkHdr *)((Data *)curBktBlk + offsetToNxtBlk), blkCnt++)
7223 /*mt009.301 Fixed 64BIT compilation warnings*/
7225 sprintf(prntBuf, "%6u %8p", blkCnt, (void *)curBktBlk);
7227 sprintf(prntBuf, "%6lu %8p", blkCnt, (void *)curBktBlk);
7229 SDisplay(0, prntBuf);
7230 /* check if it is a sane block, elxe jump to next block */
7231 if (cmMmRegIsBlkSane(curBktBlk) != ROK)
7233 sprintf(prntBuf, " Trampled \n");
7234 SDisplay(0, prntBuf);
/* Sane block: classify via the memFlags bits — static, dynamic, or free. */
7239 if (CMM_IS_STATIC(curBktBlk->memFlags))
7241 /*mt009.301 Fixed 64BIT compilation warnings*/
7243 sprintf(prntBuf, " Allocated Static %8u\n", curBktBlk->requestedSize);
7245 sprintf(prntBuf, " Allocated Static %8lu\n", curBktBlk->requestedSize);
7247 SDisplay(0, prntBuf);
7249 else if (CMM_IS_DYNAMIC(curBktBlk->memFlags))
7251 /*mt009.301 Fixed 64BIT compilation warnings*/
7253 sprintf(prntBuf, " Allocated Dynamic %8u\n", curBktBlk->requestedSize);
7255 sprintf(prntBuf, " Allocated Dynamic %8lu\n", curBktBlk->requestedSize);
7257 SDisplay(0, prntBuf);
7259 else if (CMM_IS_FREE(curBktBlk->memFlags))
7261 /*mt009.301 Fixed 64BIT compilation warnings*/
7263 sprintf(prntBuf, " Free %8u\n", curBktBlk->requestedSize);
7265 sprintf(prntBuf, " Free %8lu\n", curBktBlk->requestedSize);
7267 SDisplay(0, prntBuf);
/* memFlags matched no known state — report as trampled. */
7271 sprintf(prntBuf, " Trampled \n");
7272 SDisplay(0, prntBuf);
7278 sprintf(prntBuf, "\nHeap Profile\n");
7279 SDisplay(0, prntBuf);
7281 /* point to heapCb */
7282 heapCb = &(regCb->heapCb);
7284 sprintf(prntBuf, "\nHeap Start: %8p Heap End: %8p\n", heapCb->vStart, heapCb->vEnd);
7285 SDisplay(0, prntBuf);
7286 sprintf(prntBuf, "==========================================================================\n");
7287 SDisplay(0, prntBuf);
7288 sprintf(prntBuf, " Block Location Size Free/Allocated Static/dynamic Size requested\n");
7289 SDisplay(0, prntBuf);
7290 sprintf(prntBuf, "==========================================================================\n");
7291 SDisplay(0, prntBuf);
7293 /* traverse the entire heap to output the heap profile */
7294 hdrSize = sizeof(CmHEntry);
/* Heap blocks are variable-sized; advance by hdrSize + curHBlk->size at the
 * bottom of the loop, so a trampled header forces the walk to abort. */
7295 for (blkCnt=0, curHBlk = (CmHEntry *)heapCb->vStart;
7296 ((curHBlk) && (curHBlk < (CmHEntry *)heapCb->vEnd)); blkCnt++)
7298 /*mt009.301 Fixed 64BIT compilation warnings*/
7300 sprintf(prntBuf, "%6u %8p", blkCnt, (void *)curHBlk);
7302 sprintf(prntBuf, "%6lu %8p", blkCnt, (void *)curHBlk);
7304 SDisplay(0, prntBuf);
7306 /* check if it is a sane block, elxe jump to next block */
7307 if (cmMmRegIsBlkSane((CmMmBlkHdr *)curHBlk) != ROK)
7309 sprintf(prntBuf, " Trampled \n");
7310 SDisplay(0, prntBuf);
7312 sprintf(prntBuf, "Trampled block encountered: Stopping heap profile\n");
7313 SDisplay(0, prntBuf);
7316 * To go to next block in the heap we do not have any offset value
7317 * other than curHBlk->size. As the block is already trampled
7318 * we cannot rely on this size. So it is better to stop here unless there
7319 * exists any other mechanism(?) to know the offset to next block.
7324 /*mt009.301 Fixed 64BIT compilation warnings*/
7326 sprintf(prntBuf, " %8u", curHBlk->size);
7328 sprintf(prntBuf, " %8lu", curHBlk->size);
7330 SDisplay(0, prntBuf);
7332 if (CMM_IS_STATIC(curHBlk->memFlags))
7334 /*mt009.301 Fixed 64BIT compilation warnings*/
7336 sprintf(prntBuf, " Allocated Static %8u\n", curHBlk->requestedSize);
7338 sprintf(prntBuf, " Allocated Static %8lu\n", curHBlk->requestedSize);
7340 SDisplay(0, prntBuf);
7342 else if (CMM_IS_DYNAMIC(curHBlk->memFlags))
7344 /*mt009.301 Fixed 64BIT compilation warnings*/
7346 sprintf(prntBuf, " Allocated Dynamic %8u\n", curHBlk->requestedSize);
7348 sprintf(prntBuf, " Allocated Dynamic %8lu\n", curHBlk->requestedSize);
7350 SDisplay(0, prntBuf);
7352 else if (CMM_IS_FREE(curHBlk->memFlags))
7354 /*mt009.301 Fixed 64BIT compilation warnings*/
7356 sprintf(prntBuf, " Free %8u\n", curHBlk->requestedSize);
7358 sprintf(prntBuf, " Free %8lu\n", curHBlk->requestedSize);
7360 SDisplay(0, prntBuf);
7364 sprintf(prntBuf, " Trampled \n");
7365 SDisplay(0, prntBuf);
7367 /* goto next block in the heap */
7368 curHBlk = (CmHEntry *)((Data *)curHBlk + hdrSize + curHBlk->size);
7376 /*-- mt035.201 : Added new API for timestamp --*/
7379 * Fun: Get TimeStamp
7381 * Desc: This function is used to Get TimeStamp in micro seconds
/* Two time sources, selected by a lost #ifdef (presumably SS_LINUX):
 * clock_gettime(CLOCK_REALTIME) with nanosecond field, or gettimeofday. */
7398 struct timespec ptime;
7400 struct timeval ptime;
7409 clock_gettime(CLOCK_REALTIME, &ptime);
7411 gettimeofday(&ptime, NULL);
7414 /* Obtain the time of day, and convert it to a tm struct. --*/
/* NOTE(review): localtime() returns a pointer to static storage and is not
 * thread-safe; localtime_r would be preferable in this multithreaded layer. */
7415 ptm = localtime (&ptime.tv_sec);
7416 /* Klock work fix ccpu00148484 */
7419 /* Format the date and time, down to a single second. --*/
7420 strftime (time_string, sizeof (time_string), "%a %b %d %Y %H:%M:%S", ptm);
7423 /* Compute microseconds. --*/
7425 microseconds = ptime.tv_nsec / 1000;
7427 microseconds = ptime.tv_usec;
7430 /* Print the formatted time, in seconds, followed by a decimal point
7431 and the microseconds. --*/
7432 /*mt009.301 Fixed 64BIT compilation warnings*/
7434 sprintf(ts, "%s.%03d", time_string, microseconds);
7436 sprintf(ts, "%s.%03ld", time_string, microseconds);
7442 /*-- mt037.201 : Added new API for SGetSystemTsk --*/
7445 * Fun: Get SGetSystemTsk
7447 * Desc: This function is used to Get sytem task id
/* Thin wrapper: returns the calling thread's pthread id as a uint32_t.
 * NOTE(review): pthread_t is opaque per POSIX; narrowing it to uint32_t is
 * platform-specific — confirm acceptable on all targets. */
7456 uint32_t SGetSystemTsk(Void)
7459 return (pthread_self());
7461 } /* end of SGetSystemTsk */
7463 #ifdef SS_MULTICORE_SUPPORT
7466 * Fun: Add Timer thread into system task table
7468 * Desc: This function is used to add the system task
7469 * associated with Timer thread.
/* Allocates and initializes the sTskTbl entry for the timer thread.
 * Every failure path unlocks sTskTblLock and undoes prior setup before
 * returning (the returns themselves are on lines lost in extraction). */
7478 static SsSTskEntry* ssdAddTmrSTsk(Void)
7484 /* lock the system task table */
7485 ret = SLock(&osCp.sTskTblLock);
7489 #if (ERRCLASS & ERRCLS_DEBUG)
7490 MTLOGERROR(ERRCLS_DEBUG, EMT039, (ErrVal) ret,
7491 "Could not lock system task table");
7497 /* check count of system tasks */
7498 if (osCp.numSTsks == SS_MAX_STSKS)
7501 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7503 #if (ERRCLASS & ERRCLS_DEBUG)
7504 MTLOGERROR(ERRCLS_DEBUG, EMT040, ERRZERO,
7505 "Could not give the Semaphore");
7510 #if (ERRCLASS & ERRCLS_ADD_RES)
7511 MTLOGERROR(ERRCLS_ADD_RES, EMT041, ERRZERO, "Too many system tasks");
7518 /* initialize the system task entry with the information we have */
7519 sTsk = &osCp.sTskTbl[osCp.nxtSTskEntry];
7521 /* store the system task priority */
7522 sTsk->tskPrior = SS_NORM_TSK_PRI;
7524 /* initialize the demand queue */
7525 if (ssInitDmndQ(&sTsk->dQ) != ROK)
7528 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7530 #if (ERRCLASS & ERRCLS_DEBUG)
7531 MTLOGERROR(ERRCLS_DEBUG, EMT042, ERRZERO,
7532 "Could not give the Semaphore");
7537 #if (ERRCLASS & ERRCLS_DEBUG)
7538 MTLOGERROR(ERRCLS_DEBUG, EMT043, (ErrVal) ret,
7539 "Could not initialize demand queue");
7545 /* initialize the system task entry lock */
7546 if (SInitLock(&sTsk->lock, SS_STSKENTRY_LOCK) != ROK)
/* Roll back the demand queue created above before bailing out. */
7548 ssDestroyDmndQ(&sTsk->dQ);
7550 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7552 #if (ERRCLASS & ERRCLS_DEBUG)
7553 MTLOGERROR(ERRCLS_DEBUG, EMT044, ERRZERO,
7554 "Could not give the Semaphore");
7559 #if (ERRCLASS & ERRCLS_DEBUG)
7560 MTLOGERROR(ERRCLS_DEBUG, EMT045, (ErrVal) ret,
7561 "Could not initialize system task entry lock");
7568 /* success, update the table */
7569 sTsk->tskId = osCp.nxtSTskEntry;
7571 sTsk->termPend = FALSE;
/* Free-list style: nxtSTskEntry advances to this entry's nxt link. */
7572 osCp.nxtSTskEntry = sTsk->nxt;
7575 /* unlock the system task table */
7577 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7579 #if (ERRCLASS & ERRCLS_DEBUG)
7580 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
7581 "Could not give the Semaphore");
7588 #endif /* SS_MULTICORE_SUPPORT */
7589 /* mt003.301 Readwrite lock and recursive mutex additions */
7590 #ifdef SS_LOCK_SUPPORT
7593 * Fun: ssdInitLockNew
7595 * Desc: This function is used to initialise lock/mutex
/* Initializes either a pthread read-write lock or a recursive mutex inside
 * lockId->l, depending on lockType (the switch/case lines were lost in
 * extraction; the compiled-in variants depend on SS_RDWR_LOCK_SUPPORT /
 * SS_REC_LOCK_SUPPORT). */
7604 S16 ssdInitLockNew(SLockInfo *lockId,uint8_t lockType)
7607 #ifdef SS_REC_LOCK_SUPPORT
7608 pthread_mutexattr_t attr;
7609 #endif /* SS_REC_LOCK_SUPPORT */
7610 Txt prntBuf[PRNTSZE];
7616 #ifdef SS_RDWR_LOCK_SUPPORT
7619 if((retVal = pthread_rwlock_init((&(lockId->l.rdWrLockId)), NULLP)) != ROK)
7621 sprintf(prntBuf, "\n\n ssdInitLockNew(): Initialization of read write lock failed,Error# retVal %d\n", retVal);
7622 SDisplay(0, prntBuf);
7627 #endif /* SS_RDWR_LOCK_SUPPORT */
7628 #ifdef SS_REC_LOCK_SUPPORT
7631 retVal = pthread_mutexattr_init(&attr);
7635 sprintf(prntBuf,"\n ssdInitLockNew(): mutexattr init failed,Error# %d \n",retVal);
/* Recursive type: _NP variant on older glibc, standard name otherwise
 * (selecting #ifdef lost in extraction). */
7640 retVal = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
7642 retVal = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
7646 sprintf(prntBuf,"\n ssdInitLockNew(): mutexattr settype failed,Error# %d \n",retVal);
7647 pthread_mutexattr_destroy(&attr);
7651 retVal = pthread_mutex_init((pthread_mutex_t *)&(lockId->l.recurLock), &attr);
7654 sprintf(prntBuf,"\n ssdInitLockNew(): mutex init failed,Error# %d \n",retVal);
7655 pthread_mutexattr_destroy(&attr);
7661 #endif /* SS_REC_LOCK_SUPPORT */
/* default: unsupported lockType value. */
7664 sprintf(prntBuf, "\n\n ssdInitLockNew(): Invalid lock type %d\n", lockType);
7665 SDisplay(0, prntBuf);
7675 * Desc: This function is used to aquire the read write lock
/* Acquires the lock in the mode selected by lockType: blocking read/write
 * or non-blocking try-read/try-write on the rwlock, or a recursive mutex
 * lock (case labels were lost in extraction). */
7684 S16 ssdLockNew(SLockInfo *lockId,uint8_t lockType)
7687 Txt prntBuf[PRNTSZE];
7693 #ifdef SS_RDWR_LOCK_SUPPORT
7696 if((retVal = pthread_rwlock_rdlock(&(lockId->l.rdWrLockId))) != ROK)
7698 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the read lock,Error# %d\n", retVal);
7699 SDisplay(0, prntBuf);
7706 if((retVal = pthread_rwlock_wrlock(&(lockId->l.rdWrLockId))) != ROK)
7708 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the write lock,Error# %d\n", retVal);
7709 SDisplay(0, prntBuf);
/* NOTE(review): try-lock variants treat EBUSY as a logged failure too. */
7716 if((retVal = pthread_rwlock_tryrdlock(&(lockId->l.rdWrLockId))) != ROK)
7718 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the read lock,Error# %d\n", retVal);
7719 SDisplay(0, prntBuf);
7726 if((retVal = pthread_rwlock_trywrlock(&(lockId->l.rdWrLockId))) != ROK)
7728 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the read lock,Error# %d\n", retVal);
7729 SDisplay(0, prntBuf);
7734 #endif /* SS_RDWR_LOCK_SUPPORT */
7735 #ifdef SS_REC_LOCK_SUPPORT
/* NOTE(review): parentheses make this `retVal = (lock(...) != ROK)` — retVal
 * holds the comparison result, not the pthread error code, so the %d printed
 * below is 1, not errno. Same pattern recurs in unlock/destroy; flag upstream. */
7738 if((retVal = pthread_mutex_lock(&(lockId->l.recurLock)) != ROK))
7740 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the recursive mutex,Error# %d\n", retVal);
7741 SDisplay(0, prntBuf);
7746 #endif /* SS_REC_LOCK_SUPPORT */
7749 sprintf(prntBuf, "\n\n ssdLockNew(): Invalid lock type %d\n", lockType);
7750 SDisplay(0, prntBuf);
7763 * Desc: This function is used to Unlock the read write lock
/* Releases whichever lock flavor lockType selects (rwlock or recursive mutex). */
7772 S16 ssdUnlockNew(SLockInfo *lockId,uint8_t lockType)
7775 Txt prntBuf[PRNTSZE];
7781 #ifdef SS_RDWR_LOCK_SUPPORT
7784 if((retVal = pthread_rwlock_unlock(&(lockId->l.rdWrLockId))) != ROK)
7786 sprintf(prntBuf, "\n\n ssdUnLockNew(): Failed to unlock the lock,Error# %d\n", retVal);
7787 SDisplay(0, prntBuf);
7792 #endif /* SS_RDWR_LOCK_SUPPORT */
7793 #ifdef SS_REC_LOCK_SUPPORT
/* NOTE(review): same misplaced-parenthesis pattern as ssdLockNew — retVal
 * receives the boolean of the comparison, not the pthread error code. */
7796 if((retVal = pthread_mutex_unlock(&(lockId->l.recurLock)) != ROK))
7798 sprintf(prntBuf, "\n\n ssdUnLockNew(): Failed to aquire the recursive mutex,Error# %d\n", retVal);
7799 SDisplay(0, prntBuf);
7804 #endif /* SS_REC_LOCK_SUPPORT */
7807 sprintf(prntBuf, "\n\n ssdUnlockNew(): Invalid lock type %d\n", lockType);
7808 SDisplay(0, prntBuf);
7817 * Fun: ssdDestroyLockNew
7819 * Desc: This function is used to destroy the read write lock
/* Destroys the lock object selected by lockType; pairs with ssdInitLockNew. */
7828 S16 ssdDestroyLockNew(SLockInfo *lockId,uint8_t lockType)
7830 Txt prntBuf[PRNTSZE];
7836 #ifdef SS_RDWR_LOCK_SUPPORT
7839 if((retVal = pthread_rwlock_destroy(&(lockId->l.rdWrLockId))) != ROK)
7841 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Failed to destroy the lock,Error# %d\n", retVal);
7842 SDisplay(0, prntBuf);
7847 #endif /* SS_RDWR_LOCK_SUPPORT */
7848 #ifdef SS_REC_LOCK_SUPPORT
/* NOTE(review): same misplaced-parenthesis pattern as ssdLockNew/ssdUnlockNew. */
7851 if((retVal = pthread_mutex_destroy(&(lockId->l.recurLock)) != ROK))
7853 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Failed to destroy the mutex,Error# %d\n", retVal);
7854 SDisplay(0, prntBuf);
7859 #endif /* SS_REC_LOCK_SUPPORT */
7862 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Invalid lock type %d\n", lockType);
7863 SDisplay(0, prntBuf);
7869 #endif /* SS_LOCK_SUPPORT */
7871 /* mt005.301 : Cavium Changes */
7872 #ifdef SS_SEUM_CAVIUM
7876 * Fun: ssInitRcvWork
7878 * Desc: This is the initializtion function of receive
7882 * RFAILED - failed, general (optional)
7884 * Notes: Function to initialize the work queue packet
7885 * receiving thread. This creates the new thread to
7886 * receive the work and sets the affinity.
7891 S16 ssInitRcvWork(void)
7893 pthread_attr_t attr;
7897 /* set the required attributes */
7898 pthread_attr_init(&attr);
7899 pthread_attr_setstacksize(&attr, (size_t)MT_ISTASK_STACK);
7900 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
/* Detached: the receiver thread is never joined. */
7901 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
7903 /* Create a new thread to receive the work queue messages */
7904 if ((pthread_create(&thread, &attr, workRcvTsk, NULLP)) != 0)
/* Attributes are freed on both the failure and success paths. */
7906 pthread_attr_destroy(&attr);
7911 pthread_attr_destroy(&attr);
7915 }/* ssInitRcvWork */
7922 * Desc: This is the handler function of receive
7926 * RFAILED - failed, general (optional)
7928 * Notes:The handler function of the work queue receiver task.
7929 * This will be waiting for the work and after receiving
7930 * it, work will converted and posted to that entityt
/* Detached thread body: polls the Cavium POW for work-queue entries, converts
 * received FPA buffers into SSI messages, and posts them via SPstTsk.
 * Runs forever (enclosing while-loop lines lost in extraction). */
7936 static void *workRcvTsk(Ptr ptr)
7939 cvmx_wqe_t *workPtr;
7940 Buffer *mBuf, *rcvdBuf;
7941 SsMsgInfo *minfoPtr;
7950 /* get the work if its avilable */
7951 workPtr = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
7953 if ( workPtr == NULLP )
7955 /* If there is no work then sleep for 10 usec */
/* NOTE(review): comment says 10 usec but tv_nsec = 500000 is 500 usec;
 * confirm intended poll interval against the full file. */
7957 ts.tv_nsec = 500000;
7959 nanosleep(&ts, NULLP);
7963 switch(workPtr->tag)
7965 /* Switch over according to the tag value */
7966 case SS_CVMX_MBUF_TAG:
7968 rcvdBuf = (Buffer*)workPtr->packet_ptr.ptr;
7970 /* Convert the physical address to Pointers */
7971 ret = SConvPhyPtr(&rcvdBuf);
7974 /* mt011.301: Cavium 32 bit changes */
/* Every error path returns the work-queue entry to the FPA pool. */
7975 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
7979 /* Copy the buffer to this region */
7980 ret = SCpyFpaMsg(rcvdBuf, SS_DFLT_REGION, SS_DFLT_POOL, &mBuf);
7983 /* mt011.301: Cavium 32 bit changes */
7984 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
7988 /* mt011.301: Cavium 32 bit changes */
7989 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
/* Post-stanza: the Pst structure travels at the front of the message. */
7991 minfoPtr = (SsMsgInfo*)mBuf->b_rptr;
7993 /* Get the post strucutre and Post the message */
7994 if ( minfoPtr != NULLP)
7996 SMemCpy( &pst, &minfoPtr->pst, sizeof(Pst));
7998 (Void)SPstTsk(&pst, mBuf);
8000 /* Free the buffer allocated if it cannot be sent */
8009 /* Invalid tag value, drop the work */
8010 /* mt011.301: Cavium 32 bit changes */
8011 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
8020 #endif /* SS_SEUM_CAVIUM */
8022 #ifdef TENB_RTLIN_CHANGES
/* RT-Linux variant of SInitLock: plain mutex with priority inheritance
 * (skipped under RGL_SPECIFIC_CHANGES) to bound priority-inversion latency.
 * Parameter t is accepted for signature compatibility; not used here. */
8023 S16 SInitLock(SLockId *l, uint8_t t)
8026 pthread_mutexattr_t prior;
8027 pthread_mutexattr_init(&prior);
8028 #ifndef RGL_SPECIFIC_CHANGES
8029 pthread_mutexattr_setprotocol(&prior, PTHREAD_PRIO_INHERIT);
8031 r = pthread_mutex_init(l, &prior);
8032 pthread_mutexattr_destroy(&prior);
8036 #ifdef SS_THR_REG_MAP
8039 * Fun: ssRegMainThread
8041 * Desc: This function is used to add the memory region
8042 * mapping for the main thread.
8044 * Ret: VOID (Always successful)
/* Sanity-check that the main thread has no region mapped yet, then bind it
 * to the fallback region so early allocations have somewhere to go. */
8052 Void ssRegMainThread(Void)
8055 if(SS_INVALID_THREAD_REG_MAP != SS_GET_THREAD_MEM_REGION())
8057 printf("not able to get different Id for main thread\n");
8060 /* Here the default region is added as we dont have any region associated with
8061 * Main thread. The thread should not perform any allocation except
8062 * the initial configuratin
8064 #ifdef XEON_SPECIFIC_CHANGES
/* XEON builds park main on a pseudo-region index one past the last real one. */
8065 SS_GET_THREAD_MEM_REGION() = mtMemoCfg.numRegions;
8067 SS_GET_THREAD_MEM_REGION() =
8074 * Fun: ssCheckAndAddMemoryRegionMap
8076 * Desc: This function is used to add the memory region
8077 * mapping for the provided sTsk associated thread.
8078 * If the threadId can be placed in the thread memory
8079 * region mapping table and returns success if it is able
8080 * to place. If not, it keeps the thread ID in the static
8081 * local array and increments the count. Once thread Id
8082 * is successfully placed in the thread memory region mapping
8083 * table, pthread_cancel is sent for all the previous threads
8084 * which are failed to place in table.
8086 * Ret: TRUE - Thread ID successfully placed in thread memory region
8088 * FALSE - If thread Id is not placed in thread memory region
8091 * Notes:mapping tablemapping tablng tablee
8096 S32 ssCheckAndAddMemoryRegionMap
8098 pthread_t threadId, /* Thread Id of system task */
8099 Region region /* Region associated with thread */
/* Retry bookkeeping persists across calls: colliding thread ids are parked
 * here and cancelled once a non-colliding id is finally mapped. */
8102 static uint32_t createdThreads;
8103 static pthread_t createdThreadIds[SS_MAX_THREAD_CREATE_RETRY];
8107 /* Here 0xFF is considered as invalid region and if the mapping table
8108 * contains 0xFF, that mapping entry is free
/* Hash: shifted thread id modulo table size; a non-invalid slot means a
 * collision with an already-mapped thread. */
8110 if(SS_INVALID_THREAD_REG_MAP !=
8111 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)])
8113 /* Klock work fix ccpu00148484 */
8114 if(!(createdThreads < SS_MAX_THREAD_CREATE_RETRY))
8116 printf("failed in index = %ld\n", ((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP));
8117 printf("Not able to get the different thread ID, exiting\n");
8120 createdThreadIds[createdThreads++] = threadId;
8123 /* If we found free mapping table entry, place the region and send pthread_cancel
8124 * for all the thread Ids which are created before this
8126 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)] = region;
8127 #ifdef XEON_SPECIFIC_CHANGES
8128 printf("ThreadId %ld, Thread Idx %d, Region %d\n", threadId,
8129 ((threadId >> SS_MEM_THREAD_ID_SHIFT) %
8130 SS_MAX_THREAD_REGION_MAP), region);
/* Cancel all previously parked (colliding) threads; callers are expected to
 * recreate them so the new ids can hash to free slots. */
8132 for(indx = 0; indx < createdThreads; indx++)
8134 #ifdef XEON_SPECIFIC_CHANGES
8135 printf("Sending pthred Cancel to thread Id %d \n",createdThreadIds[indx]);
8137 pthread_cancel(createdThreadIds[indx]);
8143 } /* ssCheckAndAddMemoryRegionMap */
8147 * Fun: ssCheckAndDelMemoryRegionMap
/* NOTE(review): this header comment was copy-pasted from the Add variant and
 * describes adding, not deleting — the code below only clears the mapping. */
8149 * Desc: This function is used to add the memory region
8150 * mapping for the provided sTsk associated thread.
8151 * If the threadId can be placed in the thread memory
8152 * region mapping table and returns success if it is able
8153 * to place. If not, it keeps the thread ID in the static
8154 * local array and increments the count. Once thread Id
8155 * is successfully placed in the thread memory region mapping
8156 * table, pthread_cancel is sent for all the previous threads
8157 * which are failed to place in table.
8159 * Ret: TRUE - Thread ID successfully placed in thread memory region
8161 * FALSE - If thread Id is not placed in thread memory region
8164 * Notes:mapping tablemapping tablng tablee
8169 S32 ssCheckAndDelMemoryRegionMap
8171 pthread_t threadId /* Thread Id of system task */
8176 /* Raghu To-Do Check with team, is it necessary to acquire lock
8177 * as del and add may go parallel */
8178 /* Here 0xFF is considered as invalid region and if the mapping table
8179 * contains 0xFF, that mapping entry is free
/* Nothing mapped at this thread's slot — report and bail. */
8181 if(SS_INVALID_THREAD_REG_MAP ==
8182 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)])
8185 printf("Invalid Thread ID (%ld)\n", (uint32_t)threadId);
8187 printf("Invalid Thread ID (%d)\n", (uint32_t)threadId);
8191 /* If we found free mapping table entry, place the region and send pthread_cancel
8192 * for all the thread Ids which are created before this
/* Clear the slot back to the invalid marker, freeing it for reuse. */
8194 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)] = SS_INVALID_THREAD_REG_MAP;
8198 } /* ssCheckAndAddMemoryRegionMap */
8202 #ifdef SS_TSKLOG_ENABLE
/* SStartTask: captures a start timestamp (MLog tick) for task profiling. */
8207 * Desc: This function will return current time through input parameter.
8210 * RFAILED - failed, general (optional)
8218 volatile uint32_t *startTime,
8222 #ifdef MSPD_MLOG_NEW
8223 *startTime = GetTIMETICK();
/* SStopTask: closes the interval opened by SStartTask and logs it via
 * MLogTask, keyed by the profiling taskId. */
8232 * Desc: This function will return current time through input parameter.
8233 * and take the difference of start time provided as input parameter
8237 * RFAILED - failed, general (optional)
8245 volatile uint32_t startTime,
8249 /*uint32_t stopTime;*/
/* These PHY/MAC/SCH task ids share one logging call (fallthrough cases). */
8252 case PID_MAC_HARQ_IND:
8253 case PID_SCH_TTI_IND:
8255 case PID_MAC_DAT_IND:
8256 case PID_MAC_SF_ALLOC_REQ:
8257 case PID_MAC_STA_RSP:
8258 case PID_MAC_DL_SCHD:
8259 case PID_MAC_DL_CQI_IND:
8260 case PID_MAC_UL_CQI_IND:
8261 case PID_MAC_UL_SCHD:
8262 case PID_MAC_TTI_IND:
8263 case PID_CL_RCV_PHY_MSG:
8264 case PID_CL_HARQ_STA_IND:
8265 case PID_MAC_AM_HARQ_RLS:
8266 case PID_CL_DL_BATCH_PROC:
8267 case PID_CL_DLM_PRC_TTI_IND:
8268 case PID_CRC_IND_REAL:
8269 case PID_CRC_IND_DUMMY:
8270 case PID_TTI_LATENCY:
8271 case PID_RECPREQ_PROC:
/* Two MLogTask signatures exist (4-arg vs 5-arg with a leading 0); selected
 * by a build flag whose #ifdef lines were lost in extraction. */
8274 MLogTask(0, taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8276 MLogTask(taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8279 MLogTask(taskId, RESOURCE_LARM, startTime, GetTIMETICK());
/* Stub variants (no-op SStartTask/SStopTask) for builds without MLog. */
8288 volatile uint32_t * startTime,
8298 volatile uint32_t startTime,
8305 #endif /*#ifdef SS_TSKLOG_ENABLE */
8306 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
8308 * This primitive is used to calculate the CPU Utilization per Core
8313 * @return Void - function is always success
8315 Void UpdateSocCpuInfo
8317 CmCpuStatsInfo *cpuInfo,
8322 S8 mipsStr[MIPS_STRING_LEN];
8329 /* Open the file which holds the MIPS available value */
8330 mipsFd = fopen(MIPS_FILE, "r");
8337 /* Get the free mips available value from the file */
8338 if(NULLP == fgets(mipsStr, 24, mipsFd))
8340 printf("fgets to get the free mips available failed\n");
8345 strtok(mipsStr, " ");
8347 strPart = strtok(NULLP, " ");
8349 if(idx == CM_L2_CPU_UTIL)
8351 if(strPart != NULLP)
8353 l2FreeCpu = atoi(strPart);
8354 l2CpuUsed = 100 - l2FreeCpu;
8355 cpuInfo->cpuUtil[0].totCpuUtil += l2CpuUsed;
8356 cpuInfo->cpuUtil[0].maxCpuUtil = GET_CPU_MAX((cpuInfo->cpuUtil[0].maxCpuUtil), l2CpuUsed);
8357 cpuInfo->cpuUtil[0].numSamples++;
8360 if(idx == CM_L3_CPU_UTIL)
8362 strPart = strtok(NULLP, " ");
8363 if(strPart != NULLP)
8365 l3FreeCpu = atoi(strPart);
8366 l3CpuUsed = 100 - l3FreeCpu;
8367 cpuInfo->cpuUtil[0].totCpuUtil += l3CpuUsed;
8368 cpuInfo->cpuUtil[0].maxCpuUtil = GET_CPU_MAX((cpuInfo->cpuUtil[0].maxCpuUtil), l3CpuUsed);
8369 cpuInfo->cpuUtil[0].numSamples++;
8372 if(idx == CM_L2_CPU_UTIL)
8374 cpuInfo->numCores = CM_NUM_L2_CORES ;
8376 else if(idx == CM_L3_CPU_UTIL)
8378 cpuInfo->numCores = CM_NUM_L3_CORES ;
8384 #endif /* TENB_T2K3K_SPECIFIC_CHANGES */
8385 #ifdef SS_MULTICORE_SUPPORT
8388 * Fun: Add Timer thread into system task table
8390 * Desc: This function is used to add the system task
8391 * associated with Timer thread.
/*
 * Re-initializes the system-task-table entry used by the timer thread:
 * destroys the entry's previous lock and demand queue, rebuilds both, and
 * publishes the refreshed entry. Returns the SsSTskEntry*, or NULLP on
 * failure (NOTE(review): the parameter list, locals, braces and return
 * statements are elided by the extraction — edit only with the full file).
 */
static SsSTskEntry* ssdReAddTmrSTsk(
   /* serialize table access: lock the system task table */
   ret = SLock(&osCp.sTskTblLock);
#if (ERRCLASS & ERRCLS_DEBUG)
   MTLOGERROR(ERRCLS_DEBUG, EMT039, (ErrVal) ret,
         "Could not lock system task table");
   /* initialize the system task entry with the information we have;
    * tear down the previous incarnation's lock and demand queue first */
   sTsk = &osCp.sTskTbl[idx];
   SDestroyLock(&sTsk->lock);
   ssDestroyDmndQ(&sTsk->dQ);

   /* store the system task priority */
   sTsk->tskPrior = SS_NORM_TSK_PRI;

   /* initialize the demand queue */
   if (ssInitDmndQ(&sTsk->dQ) != ROK)
   /* on failure, release the table lock before bailing out */
   if ( SUnlock(&osCp.sTskTblLock) != ROK)
#if (ERRCLASS & ERRCLS_DEBUG)
   MTLOGERROR(ERRCLS_DEBUG, EMT042, ERRZERO,
         "Could not give the Semaphore");
#if (ERRCLASS & ERRCLS_DEBUG)
   MTLOGERROR(ERRCLS_DEBUG, EMT043, (ErrVal) ret,
         "Could not initialize demand queue");

   /* initialize the system task entry lock */
   if (SInitLock(&sTsk->lock, SS_STSKENTRY_LOCK) != ROK)
   /* undo the demand-queue init, then release the table lock */
   ssDestroyDmndQ(&sTsk->dQ);
   if ( SUnlock(&osCp.sTskTblLock) != ROK)
#if (ERRCLASS & ERRCLS_DEBUG)
   MTLOGERROR(ERRCLS_DEBUG, EMT044, ERRZERO,
         "Could not give the Semaphore");
#if (ERRCLASS & ERRCLS_DEBUG)
   MTLOGERROR(ERRCLS_DEBUG, EMT045, (ErrVal) ret,
         "Could not initialize system task entry lock");

   /* success, update the table; task IDs are 1-based (idx + 1) */
   sTsk->tskId = idx + 1;
   sTsk->termPend = FALSE;   /* fresh entry: no termination pending */

   /* unlock the system task table */
   if ( SUnlock(&osCp.sTskTblLock) != ROK)
#if (ERRCLASS & ERRCLS_DEBUG)
   MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
         "Could not give the Semaphore");
8496 #endif /* SS_MULTICORE_SUPPORT */
8501 * Fun: Initialize timer table
8503 * Desc: This function initializes MTSS-specific information
8504 * in the timer table.
8513 S16 ssdReInitTmr(void)
8515 pthread_attr_t attr;
8516 struct sched_param param_sched;
8517 #ifndef XEON_SPECIFIC_CHANGES
8520 #ifdef SS_MULTICORE_SUPPORT
8522 #endif /* SS_MULTICORE_SUPPORT */
8523 #ifdef SS_THR_REG_MAP
8524 uint32_t threadCreated = FALSE;
8525 #endif /* SS_THR_REG_MAP */
8528 #ifndef XEON_SPECIFIC_CHANGES
8529 ret = ssCheckAndDelMemoryRegionMap(osCp.dep.tmrHdlrTID);
8532 #if (ERRCLASS & ERRCLS_DEBUG)
8533 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
8534 "Could not give the Semaphore");
8540 osCp.dep.tmrTqCp.tmrLen = SS_MAX_TMRS;
8541 /* mt010.21: addition */
8543 #ifdef SS_MULTICORE_SUPPORT
8544 sTsk = ssdReAddTmrSTsk(0);
8549 #endif /* SS_MULTICORE_SUPPORT */
8550 /* create the timer handler thread */
8552 pthread_attr_init(&attr);
8553 /* mt021.201 - Addition to set stack size */
8554 pthread_attr_setstacksize(&attr, (size_t)MT_TMRTASK_STACK);
8555 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
8556 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
8557 pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
8558 param_sched.sched_priority = sched_get_priority_max(SCHED_FIFO);
8559 pthread_attr_setschedparam(&attr, ¶m_sched);
8562 #ifdef SS_THR_REG_MAP
8563 /* When the thread is created, we check for the memory mapping table if
8564 * threadId can be placed in thread memory map table. If it is not able to place
8565 * threadId is stored in tmporary array. Once thread is created successful,
8566 * thread_cancel is sent for each thread which are created before. All the
8567 * threads are made to wait on sema which is cancel point for thread.
8569 while(threadCreated == FALSE)
8572 if ((pthread_create(&osCp.dep.tmrHdlrTID, &attr, mtTmrHdlr, NULLP)) != 0)
8574 /* mt020.201 - Addition for destroying thread attribute object attr */
8575 pthread_attr_destroy(&attr);
8580 #ifdef SS_THR_REG_MAP
8581 threadCreated = ssCheckAndAddMemoryRegionMap(osCp.dep.tmrHdlrTID,
8584 #endif /* SS_THR_REG_MAP */
8585 #ifdef SS_MEM_WL_DEBUG
8586 tmpRegTidMap[sTsk->region] = osCp.dep.tmrHdlrTID;
8589 /* mt020.201 - Addition for destroying thread attribute object attr */
8590 pthread_attr_destroy(&attr);
8591 sem_post(&osCp.dep.ssStarted);
8595 /**********************************************************************
8597 **********************************************************************/