1 /*******************************************************************************
2 ################################################################################
3 # Copyright (c) [2017-2019] [Radisys] #
5 # Licensed under the Apache License, Version 2.0 (the "License"); #
6 # you may not use this file except in compliance with the License. #
7 # You may obtain a copy of the License at #
9 # http://www.apache.org/licenses/LICENSE-2.0 #
11 # Unless required by applicable law or agreed to in writing, software #
12 # distributed under the License is distributed on an "AS IS" BASIS, #
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
14 # See the License for the specific language governing permissions and #
15 # limitations under the License. #
16 ################################################################################
17 *******************************************************************************/
19 /********************************************************************20**
21 Name: Multi-threaded System Services - Solaris
25 Desc: C source code for the MTSS-Solaris implementation of
30 *********************************************************************21*/
35 #ifndef _POSIX_C_SOURCE
36 #define _POSIX_C_SOURCE 199309L
38 /* mt003.301 moved env files to use the __USE_UNIX98 flag in sys includes */
45 #include <sys/types.h>
50 /* mt003.301: included sys/time.h
51 * for both solaris and linux
54 /* mt008.21: addition */
59 /* header include files (.h) */
62 #include "common_def.h"
63 #include "mt_ss.h" /* MTSS specific */
64 #include "mt_err.h" /* MTSS error defines */
66 #include "ss_queue.h" /* queues */
67 #include "ss_task.h" /* tasking */
68 #include "ss_msg.h" /* messaging */
69 #include "ss_mem.h" /* memory management interface */
70 #include "ss_gen.h" /* general */
71 /* mt003.301 Additions - Task deregistration */
72 #include "ss_err.h" /* error */
73 #include "cm_mem.h" /* common memory manager */
74 /* mt001.301 : Additions */
75 #ifdef SS_THREAD_PROFILE
78 #ifdef SS_LOCKLESS_MEMORY
83 /* multi-core support enhancement */
84 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
85 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
91 #include <sys/types.h>
92 #include <sys/processor.h>
93 #include <sys/procset.h>
96 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
97 /* mt001.301 : Additions */
99 #include <sys/types.h>
100 #include <sys/socket.h>
101 #include <netinet/in.h>
102 #include <arpa/inet.h>
103 #endif /* SS_WATCHDOG */
105 #ifdef SS_USE_WLS_MEM
106 #include <rte_common.h>
107 #include <rte_debug.h>
111 /* header/extern include files (.x) */
113 #include "gen.x" /* general layer */
114 #include "ssi.x" /* system services */
116 #include "cm5.x" /* common timers */
118 #include "mt_ss.x" /* MTSS specific */
119 #ifdef SS_LOCKLESS_MEMORY
120 #include "mt_ss_wl.x" /* MTSS specific */
121 #endif /* SS_LOCKLESS_MEMORY */
123 #include "ss_queue.x" /* queues */
124 #include "ss_task.x" /* tasking */
125 #include "ss_timer.x" /* timers */
126 #include "ss_strm.x" /* STREAMS */
127 #include "ss_msg.x" /* messaging */
128 #include "ss_mem.x" /* memory management interface */
129 #include "ss_drvr.x" /* driver tasks */
130 #include "ss_gen.x" /* general */
131 #ifdef SS_LOCKLESS_MEMORY
132 #include "cm_llist.x"
134 #include "cm_mem_wl.x" /* common memory manager */
136 #include "cm_mem.x" /* common memory manager */
137 #endif /* SS_LOCKLESS_MEMORY */
138 #include "cm_lte.x" /* common memory manager */
139 /* mt001.301 : Additions */
140 #ifdef SS_LOGGER_SUPPORT
142 #endif /* SS_LOGGER_SUPPORT */
144 /*mt005.301: Cavium Changes */
145 #ifdef SS_SEUM_CAVIUM
146 /* cvmx includes files */
147 #include "cvmx-config.h"
149 #include "cvmx-pow.h"
150 #include "cvmx-tim.h"
151 #include "cvmx-fpa.h"
152 #include "cvmx-helper-fpa.h"
153 #include "cvmx-malloc.h"
154 #endif /* SS_SEUM_CAVIUM */
157 #include "mt_plat_t33.h"
158 #include "mt_plat_t33.x"
159 #include "sys/syscall.h"
162 #if defined(RGL_SPECIFIC_CHANGES) || defined(INTEL_WLS) || defined(SS_USE_WLS_MEM)
164 #include <hugetlbfs.h>
167 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
168 S16 rgBatchProc (Void);
170 #ifdef RLC_MAC_DAT_REQ_RBUF
171 S16 rgDlDatReqBatchProc ARGS((
174 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
175 S16 rgBatchProc ARGS((
179 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
180 /* general purpose debug zone */
181 char my_buffer2[4096 * 4] = { 0 };
182 char my_buffer[4096] = { 0 };
183 int my_buffer_idx = 0;
187 #define sigsegv_print(x, ...) my_buffer_idx += sprintf(&my_buffer[my_buffer_idx], x "\n", ##__VA_ARGS__)
189 struct sigcontext my_uc_mcontext = { 0 };
194 #include <ucontext.h>
198 #define SIGSEGV_STACK_GENERIC
199 #define REGFORMAT "%x\n"
201 #ifdef XEON_SPECIFIC_CHANGES
202 Void cmPrcTmr ARGS((CmTqCp* tqCp, CmTqType* tq, PFV func));
205 void dump_external(void);
207 static Void mtDelSigals(Void)
211 memset(&sa, 0, sizeof(struct sigaction));
212 sigemptyset(&sa.sa_mask);
213 sa.sa_handler = SIG_DFL;
214 sigaction(SIGSEGV, &sa, NULL);
216 memset(&sa, 0, sizeof(struct sigaction));
217 sigemptyset(&sa.sa_mask);
218 sa.sa_handler = SIG_DFL;
219 sigaction(SIGILL, &sa, NULL);
223 static void signal_segv(int signum, siginfo_t * info, void *ptr)
225 static const char *si_codes[3] = { "", "SEGV_MAPERR", "SEGV_ACCERR" };
228 ucontext_t *ucontext = (ucontext_t *) ptr;
229 #ifdef XEON_SPECIFIC_CHANGES
231 int *p32 = (int *) 0x2fff0000;
236 printf("\nsegv ooops @ %p\n", info->si_addr);
239 printf("\nSegmentation Fault!\n");
240 printf("\ninfo.si_signo = %d\n", signum);
241 printf("\ninfo.si_errno = %d\n", info->si_errno);
242 printf("\ninfo.si_code = %d (%s)\n", info->si_code, si_codes[info->si_code]);
243 printf("\ninfo.si_addr = %p\n", info->si_addr);
245 memcpy(&my_uc_mcontext, &ucontext->uc_mcontext, sizeof(struct sigcontext));
248 #ifndef RGL_SPECIFIC_CHANGES
249 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r0);
250 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r1);
251 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r2);
252 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r3);
253 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r4);
254 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r5);
255 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r6);
256 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r7);
257 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r8);
258 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r9);
259 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_r10);
260 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_fp);
261 printf("\nreg[%02d] = 0x" REGFORMAT, i++, (unsigned int)ucontext->uc_mcontext.arm_ip);
262 printf("\nreg[sp] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_sp);
263 printf("\nreg[lr] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_lr);
264 printf("\nreg[pc] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_pc);
265 printf("\nreg[cpsr] = 0x" REGFORMAT, (unsigned int)ucontext->uc_mcontext.arm_cpsr);
268 printf("\nStack trace (non-dedicated):\n");
270 sz = backtrace(buffer, 50);
271 strings = backtrace_symbols(buffer, sz);
272 for (i = 0; i < sz; ++i)
273 printf("%s\n", strings[i]);
275 printf("\nEnd of stack trace\n");
277 #ifdef XEON_SPECIFIC_CHANGES
282 /* Lets first print our debug information */
283 printf("\nBefore dumping our Debug info\n");
285 printf("\nAfter dumping our Debug info\n");
287 /* Disable the signal and let the eNodeB dump. This makes the
288 * eNB generate the core while dumping the ccpu log
295 /* End printing debug information */
300 /*** TBD: IMPORTANT ***
301 *** The following definition is temporary. This must be removed
302 *** when all products have been updated with latest ssi.h file OR
303 *** all ssi.h files have been updated to contain this definitions
305 /* New error class for FTHA added */
307 #define ERRCLS_FTHA 0x8
308 #endif /* ERRCLS_FTHA */
310 typedef struct _SPThreadCreateArg
312 void *argument; /* argument that is to be passed to the actual pthread */
313 void *(*start_routine) (void *); /* function from which pthread starts */
316 void *pthreadCreateHdlr(void* arg);
318 #ifdef SS_LOCKLESS_MEMORY
319 Buffer *mtTskBuffer1;
320 Buffer *mtTskBuffer2;
322 pthread_t tmpRegTidMap[20];
324 S16 SGlobMemInfoShow(void);
325 #endif /* SS_LOCKLESS_MEMORY */
328 APP_CONTEXT AppContext;
332 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
333 unsigned int tlPost(void *handle);
336 /* forward references */
337 /* mt003.301 Modifications - Moved to ss_gen.x */
338 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
339 Void *mtTskHdlrT2kL2 ARGS((Void*));
340 void mtSigSegvHndlr ARGS((void));
341 void mtSigUsr2Hndlr ARGS((void));
344 static S16 ssdSetPthreadAttr ARGS ((S32 tskPrior, pthread_attr_t *attr));
345 static Void *mtTskHdlr ARGS((void *));
346 static S16 mtTskHdlMsg ARGS((SsSTskEntry *sTsk));
348 static Void *mtTmrHdlr ARGS((void *));
349 static Void mtTimeout ARGS((PTR tCb, S16 evnt));
351 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
352 static Void mtIntSigHndlr ARGS((int));
353 static Void mtExitClnup ARGS((void));
356 static Void *mtConHdlr ARGS((void *));
360 #ifdef SS_DRVR_SUPPORT
361 static Void *mtIsTskHdlr ARGS((void *));
365 /* mt020.201 - Addition for no command line available */
367 static Void mtGetOpts ARGS((void));
368 /* mt003.301 Additions - File Based task registration made
369 * common for both MULTICORE and NON-MULTICORE
371 static Bool fileBasedMemCfg = FALSE;
374 /* mt033.201 - addition of local function to print the statistics such as
375 * (size vs. numAttempts) and (allocations vs. deallocations)
377 #ifdef SSI_DEBUG_LEVEL1
378 static S16 SPrintRegMemStats ARGS((Region region));
379 #endif /* SSI_DEBUG_LEVEL1 */
381 #ifdef SS_MULTICORE_SUPPORT
382 static SsSTskEntry* ssdAddTmrSTsk(Void);
383 static SsSTskEntry* ssdReAddTmrSTsk ARGS((uint8_t idx));
384 #ifndef SS_LOCKLESS_MEMORY
385 #ifndef RGL_SPECIFIC_CHANGES
386 static S16 ssdInitMemInfo ARGS((void));
391 /* mt005.301: Cavium changes */
392 #ifdef SS_SEUM_CAVIUM
393 static Void *workRcvTsk ARGS((void *));
394 #endif /* SS_SEUM_CAVIUM */
396 #ifdef SS_THR_REG_MAP
397 S32 ssCheckAndAddMemoryRegionMap ARGS((pthread_t threadId,
399 S32 ssCheckAndDelMemoryRegionMap ARGS((pthread_t threadId));
400 #endif /* SS_THR_REG_MAP */
402 /* type declarations */
404 #ifdef SS_DRVR_SUPPORT
405 typedef struct mtIsFlag
415 /* public variable declarations */
417 Cntr cfgNumRegs = SS_MAX_REGS;
418 /* Set memory configuration as false.
419 * Set to true if memory configuration through file is successful.
421 Bool memConfigured = FALSE;
422 /* mt022.201 - Modification for shared memory relay region and memcal tool */
423 SsRegCfg cfgRegInfo[SS_MAX_REGS] =
426 SS_DFLT_REGION, SS_MAX_POOLS_PER_REG - 1,
428 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
429 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
430 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
431 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
432 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
433 { SS_POOL_STATIC, 0 }
439 SS_DFLT_REGION + 1, SS_MAX_POOLS_PER_REG - 1,
441 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
442 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
443 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
444 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
445 { SS_POOL_STATIC, 0 }
448 #endif /* INTEL_WLS */
450 #ifdef SS_LOCKLESS_MEMORY
453 SS_DFLT_REGION + 1, SS_MAX_POOLS_PER_REG - 1,
455 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
456 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
457 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
458 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
459 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
460 { SS_POOL_STATIC, 0 }
464 SS_DFLT_REGION + 2, SS_MAX_POOLS_PER_REG - 1,
466 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
467 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
468 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
469 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
470 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
471 { SS_POOL_STATIC, 0 }
475 SS_DFLT_REGION + 3, SS_MAX_POOLS_PER_REG - 1,
477 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
478 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
479 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
480 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
481 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
482 { SS_POOL_STATIC, 0 }
486 SS_DFLT_REGION + 4, SS_MAX_POOLS_PER_REG - 1,
488 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
489 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
490 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
491 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
492 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
493 { SS_POOL_STATIC, 0 }
497 SS_DFLT_REGION + 5, SS_MAX_POOLS_PER_REG - 1,
499 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
500 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
501 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
502 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
503 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
504 { SS_POOL_STATIC, 0 }
508 SS_DFLT_REGION + 6, SS_MAX_POOLS_PER_REG - 1,
510 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
511 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
512 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
513 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
514 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
515 { SS_POOL_STATIC, 0 }
518 #ifndef INTEL_WLS_MEM
521 SS_DFLT_REGION + 7, SS_MAX_POOLS_PER_REG - 1,
523 { SS_POOL_DYNAMIC, MT_POOL_0_DSIZE },
524 { SS_POOL_DYNAMIC, MT_POOL_1_DSIZE },
525 { SS_POOL_DYNAMIC, MT_POOL_2_DSIZE },
526 { SS_POOL_DYNAMIC, MT_POOL_3_DSIZE },
527 { SS_POOL_DYNAMIC, MT_POOL_4_DSIZE },
528 { SS_POOL_STATIC, 0 }
533 #endif /* SS_LOCKLESS_MEMORY */
535 /* mt003.301 Modifications - File Based task registration made
536 * common for both MULTICORE and NON-MULTICORE
539 #ifdef SS_LOCKLESS_MEMORY
540 MtDynMemCfg mtDynMemoCfg =
542 SS_MAX_REGS, /* number of regions */
545 SS_DFLT_REGION, /* region id */
546 MT_MAX_BKTS, /* number of buckets */
548 /* block size, no. of blocks, Upper threshold, lower threshold */
549 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
550 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
551 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
552 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
553 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
557 SS_DFLT_REGION + 1, /* region id */
558 MT_MAX_BKTS, /* number of buckets */
560 /* block size, no. of blocks, Upper threshold, lower threshold */
561 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
562 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
563 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
564 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
565 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
569 SS_DFLT_REGION + 2, /* region id */
570 MT_MAX_BKTS, /* number of buckets */
572 /* block size, no. of blocks, Upper threshold, lower threshold */
573 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
574 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
575 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
576 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
577 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
581 SS_DFLT_REGION + 3, /* region id */
582 MT_MAX_BKTS, /* number of buckets */
584 /* block size, no. of blocks, Upper threshold, lower threshold */
585 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
586 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
587 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
588 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
589 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
593 SS_DFLT_REGION + 4, /* region id */
594 MT_MAX_BKTS, /* number of buckets */
596 /* block size, no. of blocks, Upper threshold, lower threshold */
597 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
598 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
599 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
600 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
601 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
605 SS_DFLT_REGION + 5, /* region id */
606 MT_MAX_BKTS, /* number of buckets */
608 /* block size, no. of blocks, Upper threshold, lower threshold */
609 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
610 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
611 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
612 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
613 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
617 SS_DFLT_REGION + 6, /* region id */
618 MT_MAX_BKTS, /* number of buckets */
620 /* block size, no. of blocks, Upper threshold, lower threshold */
621 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
622 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
623 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
624 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
625 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
628 #ifndef INTEL_WLS_MEM
631 SS_DFLT_REGION + 7, /* region id */
632 MT_MAX_BKTS, /* number of buckets */
634 /* block size, no. of blocks, Upper threshold, lower threshold */
635 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
636 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
637 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
638 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
639 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
643 #if ((defined (SPLIT_RLC_DL_TASK)) && (!defined (L2_L3_SPLIT)))
646 SS_DFLT_REGION + 7, /* region id */
647 MT_MAX_BKTS, /* number of buckets */
649 /* block size, no. of blocks, Upper threshold, lower threshold */
650 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
651 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
652 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD},
653 {SS_BLK_RELEASE_THRESHOLD, SS_BLK_ACQUIRE_THRESHOLD}
661 MtGlobMemCfg mtGlobMemoCfg =
663 MT_MAX_BKTS, /* number of buckets */
666 /* block size, no. of blocks, Upper threshold, lower threshold */
667 {MT_BKT_0_DSIZE, (MT_BKT_0_NUMBLKS + MT_BKT_0_NUMBLKS), SS_DFLT_MEM_BLK_SET_SIZE},
668 {MT_BKT_1_DSIZE, MT_BKT_1_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
669 {MT_BKT_2_DSIZE, MT_BKT_2_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
670 {MT_BKT_3_DSIZE, MT_BKT_3_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE},
671 {MT_BKT_4_DSIZE, MT_BKT_4_NUMBLKS, SS_DFLT_MEM_BLK_SET_SIZE}
673 {1024, 12800 /* MT_BKT_0_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE},
674 {1664, 12800 /* MT_BKT_1_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE},
675 {4096, 3840 /* MT_BKT_2_NUMBLKS*/, SS_DFLT_MEM_BLK_SET_SIZE},
676 {MT_BKT_3_DSIZE, 12800 /* MT_BKT_3_NUMBLKS */, SS_DFLT_MEM_BLK_SET_SIZE}
680 #endif /* SS_LOCKLESS_MEMORY */
682 /* mt022.201 - Modification for memory calculator tool */
683 /* mt018.201 - added memory configuration matrix */
687 SS_MAX_REGS - 1, /* number of regions */
689 #ifndef XEON_SPECIFIC_CHANGES
690 SS_MAX_REGS, /* number of regions */
697 SS_DFLT_REGION, /* region id */
698 MT_MAX_BKTS, /* number of buckets */
699 MT_HEAP_SIZE, /* heap size */
701 #ifndef XEON_SPECIFIC_CHANGES
702 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
703 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
704 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
705 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
706 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS}
708 {256, 491520}, /* 60 pages of 2M*/
709 {512, 12288}, /* 3 pages of 2M */
710 {2048, 99328}, /* 97 Pages of 2M */
711 {8192, 75008}, /* 293 Pages of 2M */
712 {16384, 4096} /* 32 pages of 2M */
717 #ifndef SS_LOCKLESS_MEMORY
719 SS_DFLT_REGION + 1, /* region id */
720 MT_MAX_BKTS, /* number of buckets */
721 /*MT_HEAP_SIZE 7194304 */ 10485760, /* heap size */
723 //{MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
724 //{MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
725 //{MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
726 //{MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS} /* block size, no. of blocks */
734 #endif /* SS_LOCKLESS_MEMORY */
735 #endif /* INTEL_WLS */
736 #ifdef SS_LOCKLESS_MEMORY
738 SS_DFLT_REGION + 1, /* region id */
739 MT_MAX_BKTS, /* number of buckets */
740 MT_HEAP_SIZE, /* heap size */
742 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
743 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
744 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
745 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
746 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
750 SS_DFLT_REGION + 2, /* region id */
751 MT_MAX_BKTS, /* number of buckets */
752 MT_HEAP_SIZE, /* heap size */
754 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
755 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
756 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
757 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
758 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
762 SS_DFLT_REGION + 3, /* region id */
763 MT_MAX_BKTS, /* number of buckets */
764 MT_HEAP_SIZE, /* heap size */
766 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
767 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
768 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
769 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
770 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
774 SS_DFLT_REGION + 4, /* region id */
775 MT_MAX_BKTS, /* number of buckets */
776 MT_HEAP_SIZE, /* heap size */
778 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
779 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
780 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
781 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
782 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
786 SS_DFLT_REGION + 5, /* region id */
787 MT_MAX_BKTS, /* number of buckets */
788 MT_HEAP_SIZE, /* heap size */
790 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
791 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
792 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
793 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
794 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
798 SS_DFLT_REGION + 6, /* region id */
799 MT_MAX_BKTS, /* number of buckets */
800 MT_HEAP_SIZE, /* heap size */
802 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
803 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
804 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
805 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
806 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
809 #ifndef INTEL_WLS_MEM
812 SS_DFLT_REGION + 7, /* region id */
813 MT_MAX_BKTS, /* number of buckets */
814 MT_HEAP_SIZE, /* heap size */
816 {MT_BKT_0_DSIZE, MT_BKT_0_STATIC_NUMBLKS}, /* block size, no. of blocks */
817 {MT_BKT_1_DSIZE, MT_BKT_1_STATIC_NUMBLKS}, /* block size, no. of blocks */
818 {MT_BKT_2_DSIZE, MT_BKT_2_STATIC_NUMBLKS}, /* block size, no. of blocks */
819 {MT_BKT_3_DSIZE, MT_BKT_3_STATIC_NUMBLKS}, /* block size, no. of blocks */
820 {MT_BKT_4_DSIZE, MT_BKT_4_STATIC_NUMBLKS} /* block size, no. of blocks */
824 #endif /* SS_LOCKLESS_MEMORY */
828 /* mt003.301 Modifications - File Based task registration made
829 * common for both MULTICORE and NON-MULTICORE
830 * bucket info, as different regions may request for different no.
833 MtBktCfg mtBktInfo[MT_MAX_BKTS];
834 S16 msArgc; /* argc */
835 Txt **msArgv; /* argv */
836 S16 msOptInd; /* SGetOpt vars */
837 S8 *msOptArg; /* SGetOpt vars */
840 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
841 typedef struct _MtRegMemSz
847 #ifdef SS_USE_WLS_MEM
848 static MtRegMemSz mtDynMemSz[MT_MAX_BKTS];
849 static S16 SPartitionWlsDynMem();
850 static S16 SAllocateWlsDynMem();
853 static MtRegMemSz mtRegMemSz[MT_MAX_BKTS+1];
858 /* private variable declarations */
859 /* mt018.201 - change mtCMMRegCfg as array of pointers */
860 static CmMmRegCfg *mtCMMRegCfg[SS_MAX_REGS];
861 static CmMmRegCb *mtCMMRegCb[SS_MAX_REGS];
862 /* mt003.301 - Fixed compilation warnings */
863 /*mt004.301 - added new variable for FAP*/
864 /*mt010.301 - removed variable defined for FA*/
867 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
870 void mtSetNtlHdl(unsigned int hdl)
875 unsigned int mtGetNtlHdl()
877 return(osCp.ntl.hdl);
881 void mtGetWlsHdl(void **hdlr)
883 *hdlr = osCp.wls.intf;
886 #ifdef XEON_MULTIPLE_CELL_CHANGES
887 S8 gWrWlsDeviceName[MAX_WLS_DEVICE_NAME_LEN];
888 S16 smWrReadWlsConfigParams (Void);
891 static int SOpenWlsIntf()
895 #define WLS_DEVICE_NAME "wls0"
897 char *my_argv[] = {"gnodeb", "-c3", "--proc-type=auto", "--file-prefix", WLS_DEVICE_NAME, "--iova-mode=pa"};
898 printf("\nCalling rte_eal_init: ");
899 for (i = 0; i < RTE_DIM(my_argv); i++)
901 printf("%s ", my_argv[i]);
905 if (rte_eal_init(RTE_DIM(my_argv), my_argv) < 0)
906 rte_panic("\nCannot init EAL\n");
909 #ifdef XEON_SPECIFIC_CHANGES
910 #ifdef XEON_MULTIPLE_CELL_CHANGES
911 hdl = WLS_Open(gWrWlsDeviceName, 1);
913 hdl = WLS_Open(WLS_DEVICE_NAME, 1);
916 hdl = WLS_Open(WLS_DEVICE_NAME, WLS_MASTER_CLIENT, WLS_MEM_SIZE);
923 printf("\nCould not open WLS Interface \n");
938 * Desc: This function is the entry point for the final binary. It
939 * calls SInit() in the common code. It can be replaced by a
940 * user function if required (SInit() must still be called).
942 * Ret: none on success
952 int argc, /* argument count */
953 char **argv /* argument vector */
957 #ifdef XEON_MULTIPLE_CELL_CHANGES
958 /* Read the WLS parameters from the file and copy into global control block */
959 if(smWrReadWlsConfigParams() != ROK)
961 fprintf(stderr, "Failed to read WLS params from file wr_cfg.txt");
963 } /* end of if statement */
966 #if defined (INTEL_WLS) || defined (SS_USE_WLS_MEM)
969 #endif /* INTEL_WLS */
973 /* mt003.301 Modifications */
976 printf("\n SInit failed, SSI could not start \n");
977 /* pthread_exit(NULLP);*/ /* Commented to Come out of Main thread*/
981 /*mt010.301 cleanup part exposed to user*/
992 * Desc: This function is the entry point for the final binary. It
993 * calls SInit() in the common code. It can be replaced by a
994 * user function if required (SInit() must still be called).
996 * Ret: none on success
1006 int argc, /* argument count */
1007 char **argv /* argument vector */
1023 * initialization functions
1028 * Fun: Initialize OS control point
1030 * Desc: This function initializes MTSS-specific information
1031 * in the OS control point.
1040 S16 ssdInitGen(void)
1042 struct sigaction act;
1044 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1045 struct sigaction sa;
1049 /*mt014.301 : 4GMX release related changes*/
1050 #ifdef SS_4GMX_UCORE
1053 /* mt005.301 : Cavium changes */
1054 #ifdef SS_SEUM_CAVIUM
1055 /* set group mask for the core */
1056 cvmx_pow_set_group_mask(cvmx_get_core_num(), SS_CVMX_GRP_MASK);
1057 #endif /* SS_SEUM_CAVIUM */
1059 osCp.dep.sysTicks = 0;
1061 /* mt020.201 - Addition for no command line available */
1063 /* parse command line */
1065 /* mt003.301 Additions */
1066 if(fileBasedMemCfg == TRUE && memConfigured == FALSE)
1068 printf("\n File Based Memory configuration failed \n");
1073 #ifndef RGL_SPECIFIC_CHANGES /* ANOOP :: This ssdInitMemInfo() was present in 2.1 */
1074 #ifndef SS_LOCKLESS_MEMORY
1075 #ifdef SS_MULTICORE_SUPPORT
1076 if(memConfigured == FALSE)
1082 /* initialize the started semaphore */
1083 if (sem_init(&osCp.dep.ssStarted, 0, 0) != 0)
1088 /* mt028.201 added compile time flag to allow not to mask signals */
1090 /* mask all signals in the main thread */
1092 sigdelset(&set, SIGINT);
1093 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1094 sigdelset(&set, SIGSEGV);
1095 sigdelset(&set, SIGUSR2);
1096 sigdelset(&set, SIGILL);
1097 #ifdef XEON_SPECIFIC_CHANGES
1098 sigdelset(&set, SIGABRT);
1099 sigdelset(&set, SIGTERM);
1100 sigdelset(&set, SIGHUP);
1103 pthread_sigmask(SIG_SETMASK, &set, NULLP);
1104 #endif /* UNMASK_SIG */
1106 /* install a SIGINT handler to shutdown */
1107 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
1109 /*Initialize SIGSEGV Signal */
1110 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
1112 memset(&sa, 0, sizeof(struct sigaction));
1113 sigemptyset(&sa.sa_mask);
1114 sa.sa_sigaction = signal_segv;
1115 sa.sa_flags = SA_SIGINFO;
1116 #ifndef XEON_SPECIFIC_CHANGES
1117 sigaction(SIGSEGV, &sa, NULL);
1119 memset(&sa, 0, sizeof(struct sigaction));
1120 sigemptyset(&sa.sa_mask);
1121 sa.sa_sigaction = signal_segv;
1122 sa.sa_flags = SA_SIGINFO;
1124 sigaction(SIGILL, &sa, NULL);
1126 if(sigaction(SIGILL, &sa, NULL) != 0)
1128 printf("\nFailed to process sigaction for the SIGILL\n");
1131 if(sigaction(SIGSEGV, &sa, NULL) != 0)
1133 printf("\nFailed to process sigaction for the SIGSEGV\n");
1136 if(sigaction(SIGABRT, &sa, NULL) != 0)
1138 printf("\nFailed to process sigaction for the SIGABRT\n");
1141 if(sigaction(SIGTERM, &sa, NULL) != 0)
1143 printf("\nFailed to process sigaction for the SIGTERM\n");
1146 if(sigaction(SIGHUP, &sa, NULL) != 0)
1148 printf("\nFailed to process sigaction for the SIGHUP\n");
1153 signal (SIGSEGV, mtSigSegvHndlr);
1154 signal (SIGKILL, mtSigSegvHndlr);
1155 signal (SIGUSR2, mtSigUsr2Hndlr);
1160 signal (SIGINT, mtStopHndlr);
1163 act.sa_handler = mtIntSigHndlr;
1164 sigfillset(&act.sa_mask);
1166 if (sigaction(SIGINT, &act, NULLP) != 0)
1172 /* mt040.201 initialise random seed */
1173 osCp.dep.randSeed = time(NULLP);
1181 * Fun: De-initialize OS control point
1183 * Desc: This function reverses the initialization in ssdInitGen().
1192 Void ssdDeinitGen(void)
1196 sem_destroy(&osCp.dep.ssStarted);
1201 #ifdef SS_LOCKLESS_MEMORY
1205 * Fun: ssPutDynMemBlkSet
1207 * Desc: Returns the set of dynamic Blocks into the global region
1210 * Ret: ROK - successful,
1211 * RFAILED - unsuccessful.
1218 S16 ssPutDynMemBlkSet
1220 uint8_t bktIdx, /* Index to bucket list */
1221 CmMmBlkSetElement *dynMemSetElem /* Memory set element which is needs to be
1222 added to global region */
1225 CmMmGlobRegCb *globReg;
1226 CmMmGlobalBktCb *bktCb;
1230 globReg = osCp.globRegCb;
1232 #if (ERRCLASS & ERRCLS_INT_PAR)
1233 if(bktIdx >= globReg->numBkts)
1237 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1239 bktCb = &(globReg->bktTbl[bktIdx]);
1241 for(blkCnt = 0; blkCnt < bktCb->bucketSetSize; blkCnt++)
1243 blkPtr = dynMemSetElem->nextBktPtr;
1244 dynMemSetElem->nextBktPtr = *((CmMmEntry **)blkPtr);
1245 free((Void *)blkPtr);
1248 dynMemSetElem->nextBktPtr = NULLP;
1249 dynMemSetElem->numFreeBlks = 0;
1256 * Fun: ssGetDynMemBlkSet
1258 * Desc: Gets the set of dynamic memory blocks from the global region
1261 * Ret: ROK - successful,
1262 * RFAILED - unsuccessful.
/* USE_MALLOC variant: builds a fresh block set by malloc()ing bucketSetSize
 * blocks of the bucket's block size and chaining them through their first
 * word, rather than pulling a pre-built set off the global region. */
1269 S16 ssGetDynMemBlkSet
1271 uint8_t bktIdx, /* Index to bucket list */
1272 CmMmBlkSetElement *dynMemSetElem /* Memory set element which is updated
1273 with new set values */
1277 CmMmGlobRegCb *globReg;
1278 CmMmGlobalBktCb *bktCb;
1283 globReg = osCp.globRegCb;
1285 #if (ERRCLASS & ERRCLS_INT_PAR)
1286 if(bktIdx >= globReg->numBkts)
1290 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1292 bktCb = &(globReg->bktTbl[bktIdx]);
/* basePtr tracks the link slot to write the next freshly allocated block into,
 * starting at the caller's nextBktPtr head. */
1293 basePtr = &(dynMemSetElem->nextBktPtr);
1295 for(blkCnt = 0; blkCnt < bktCb->bucketSetSize; blkCnt++)
/* NOTE(review): malloc() result is not visibly NULL-checked in the elided
 * source — confirm against the full file. */
1297 blkPtr = (Data *)malloc(bktCb->size);
1299 basePtr = (CmMmEntry **)blkPtr;
1302 dynMemSetElem->numFreeBlks = bktCb->bucketSetSize;
1306 } /* ssGetDynMemBlkSet */
1311 * Fun: ssPutDynMemBlkSet
1313 * Desc: Returns the set of dynamic Blocks into the global region
1316 * Ret: ROK - successful,
1317 * RFAILED - unsuccessful.
/* Global-region variant: hands the caller's whole block set back to the
 * per-bucket valid list under the bucket lock. */
1324 S16 ssPutDynMemBlkSet
1326 uint8_t bktIdx, /* Index to bucket list */
1327 CmMmBlkSetElement *dynMemSetElem, /* Memory set element which is needs to be
1328 added to global region */
1329 uint32_t doNotBlockForLock /* Boolean whether to block for lock or not */
1332 CmMmGlobRegCb *globReg;
1333 CmMmGlobalBktCb *bktCb;
1335 CmMmBlkSetElement *globMemNode;
1339 globReg = osCp.globRegCb;
1341 #if (ERRCLASS & ERRCLS_INT_PAR)
1342 if(bktIdx >= globReg->numBkts)
1346 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1348 bktCb = &(globReg->bktTbl[bktIdx]);
1350 /* Lock the global region first. If the doNotBlockForLock is non-zero, the
1351 try lock is used as it is not required to block as it will be taken
1352 in the next go else it will be blocked for lock as we have to get the
/* NOTE(review): only the blocking SLock() path is visible here; the trylock
 * branch described above appears elided — confirm against the full file. */
1355 SLock(&(bktCb->bucketLock));
1361 /* Get a free node from the free node linked list */
1362 lstNode = cmLListFirst(&(bktCb->listFreeBktSet));
/* No free bookkeeping node available: unlock and bail out. */
1363 if(lstNode == NULLP)
1365 SUnlock(&(bktCb->bucketLock));
1369 cmLListDelFrm(&(bktCb->listFreeBktSet), lstNode);
1371 /* Copy the content of the received element information on to free node
1372 * and add it to valid linked list */
1373 globMemNode = (CmMmBlkSetElement *)lstNode->node;
1374 globMemNode->numFreeBlks = dynMemSetElem->numFreeBlks;
1375 globMemNode->nextBktPtr = dynMemSetElem->nextBktPtr;
/* Ownership of the block chain has transferred; empty the caller's element. */
1376 dynMemSetElem->numFreeBlks = 0;
1377 dynMemSetElem->nextBktPtr = NULLP;
1379 cmLListAdd2Tail(&(bktCb->listValidBktSet), &(globMemNode->memSetNode));
1381 SUnlock(&(bktCb->bucketLock));
1389 * Fun: ssGetDynMemBlkSet
1391 * Desc: Gets the set of dynamic memory blocks from the global region
1394 * Ret: ROK - successful,
1395 * RFAILED - unsuccessful.
1397 * Notes: The parameter doNotBlockForLock specifies whether to block for lock
/* Global-region variant: pops one pre-built block set off the bucket's valid
 * list into the caller's element, under the bucket lock. */
1403 S16 ssGetDynMemBlkSet
1405 uint8_t bktIdx, /* Index to bucket list */
1406 CmMmBlkSetElement *dynMemSetElem, /* Memory set element which is updated
1407 with new set values */
1408 uint32_t doNotBlockForLock /* Boolean whether to block for lock or not */
1411 CmMmGlobRegCb *globReg;
1412 CmMmGlobalBktCb *bktCb;
1414 CmMmBlkSetElement *globMemNode;
1418 globReg = osCp.globRegCb;
1420 #if (ERRCLASS & ERRCLS_INT_PAR)
1421 if(bktIdx >= globReg->numBkts)
1425 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1427 bktCb = &(globReg->bktTbl[bktIdx]);
1429 /* Lock the global region first. If the doNotBlockForLock is non-zero, the
1430 try lock is used as it is not required to block as it will be taken
1431 in the next go else it will be blocked for lock as we have to get the
/* NOTE(review): as in ssPutDynMemBlkSet, only the blocking SLock() path is
 * visible in this elided source. */
1434 SLock(&(bktCb->bucketLock));
1439 lstNode = cmLListFirst(&(bktCb->listValidBktSet));
/* No block set currently cached in the global region for this bucket. */
1441 if(lstNode == NULLP)
1443 SUnlock(&(bktCb->bucketLock));
1447 /* Delete the node from the valid linked list and copy the values of the
1448 * elements of structures into pointer */
1449 cmLListDelFrm(&(bktCb->listValidBktSet), lstNode);
1450 globMemNode = (CmMmBlkSetElement *)lstNode->node;
1451 dynMemSetElem->numFreeBlks = globMemNode->numFreeBlks;
1452 dynMemSetElem->nextBktPtr = globMemNode->nextBktPtr;
1454 /* Add this node to the free node linked list */
1455 cmLListAdd2Tail(&(bktCb->listFreeBktSet), lstNode);
1457 SUnlock(&(bktCb->bucketLock));
1461 } /* ssGetDynMemBlkSet */
/* Re-check cadence for the low-memory alarm (see isMemThreshReached below). */
1464 #define NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN 100
/* Per-region low-memory alarm flags; indexed by region id. */
1465 uint32_t gDynMemAlrm[4];
/* Call counter used to throttle how often the valid-set count is sampled.
 * NOTE(review): file-scope and not atomic — concurrent callers may race on
 * it; confirm intended single-threaded use. */
1466 static uint32_t memoryCheckCounter;
/* Returns the low-memory alarm state for `reg`, raising the alarm when the
 * bucket's cached valid-set count drops below 15 and clearing it once it
 * recovers above 25 (hysteresis). */
1468 uint32_t isMemThreshReached(Region reg)
1470 CmMmGlobRegCb *globReg;
1471 CmMmGlobalBktCb *bktCb;
/* Region id doubles as the bucket index here. */
1472 uint8_t bktIdx= reg;
1474 globReg = osCp.globRegCb;
1476 #if (ERRCLASS & ERRCLS_INT_PAR)
1477 if(bktIdx >= globReg->numBkts)
1481 #endif /* ERRCLASS & ERRCLS_INT_PAR */
1483 bktCb = &(globReg->bktTbl[bktIdx]);
/* Alarm currently raised: see whether the region has recovered. */
1485 if(gDynMemAlrm[bktIdx])
1487 // printf ("\nunder memory bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1488 SLock(&(bktCb->bucketLock));
1489 if(bktCb->listValidBktSet.count > 25)
1491 gDynMemAlrm[bktIdx] = FALSE;
1492 // printf ("\nrecovered bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1494 SUnlock(&(bktCb->bucketLock));
/* Alarm not raised: only sample the count every Nth call to limit locking. */
1500 if(memoryCheckCounter++ >= NUM_CALLS_TO_CHECK_MEM_DYN_AGAIN)
1502 // printf ("\nCHECK bktCb->listValidBktSet.count %d bktIdx %d\n",bktCb->listValidBktSet.count ,bktIdx);
1503 SLock(&(bktCb->bucketLock));
1504 if(bktCb->listValidBktSet.count < 15 )
1505 gDynMemAlrm[bktIdx] = TRUE;
1506 memoryCheckCounter = 0;
1507 SUnlock(&(bktCb->bucketLock));
1513 #endif /* USE_MALLOC */
1514 #endif /* SS_LOCKLESS_MEMORY */
1516 #ifdef SS_USE_ICC_MEMORY
1519 * Fun: Initialize region/pool tables
1521 * Desc: This function initializes MTSS-specific information
1522 * in the region/pool tables and configures the common
1523 * memory manager for use.
/* Returns the ICC memory handle stored in the dynamic region control block
 * for `region`, after bounds-checking the region index (Klocwork fix). */
1532 Void * ssGetIccHdl(Region region)
1534 CmMmDynRegCb *dynRegCb;
1536 /* Klocwork fix ccpu00148484 */
1537 if(!(region < SS_MAX_REGS))
1542 dynRegCb = (CmMmDynRegCb *)osCp.dynRegionTbl[region].regCb;
1544 return (dynRegCb->iccHdl);
1546 #endif /* SS_USE_ICC_MEMORY */
1548 #ifdef T2K_MEM_LEAK_DBG
1549 RegionMemLeakInfo regMemLeakInfo;
1550 #endif /* T2K_MEM_LEAK_DBG */
1552 #ifdef SS_USE_WLS_MEM
/* Carves the WLS allocation (skipping a leading 4 MB) into per-bucket start
 * addresses, then prints the resulting layout. */
1553 static S16 SPartitionWlsDynMem()
/* First bucket begins 4 MB past the base of the WLS block. */
1556 uint8_t *bktMemStrtAddr = (uint8_t *)(((uint8_t*)osCp.wls.allocAddr) + (4 * 1024 * 1024));
1558 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1560 mtDynMemSz[i].startAddr = bktMemStrtAddr;
/* Advance by the size this bucket requires (filled in by SAllocateWlsDynMem). */
1561 bktMemStrtAddr += mtDynMemSz[i].reqdSz;
1564 printf("\nGlobal Memory Info: \n");
1565 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1567 printf("mtDynMemSz[%d]: [0x%016lx]\n", i, (unsigned long int)mtDynMemSz[i].startAddr);
/* Sums the per-bucket size requirements from mtGlobMemoCfg, allocates one
 * WLS block big enough for all buckets plus a 4 MB pad, then partitions it
 * via SPartitionWlsDynMem(). */
1572 static S16 SAllocateWlsDynMem()
1577 memset(&mtDynMemSz[0], 0, sizeof(mtDynMemSz));
1579 for (i = 0 ; i < mtGlobMemoCfg.numBkts ; i++)
1581 reqdMemSz += (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1582 mtDynMemSz[i].reqdSz += (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
/* NOTE(review): WLS_Alloc() result is not visibly NULL-checked in the elided
 * source — confirm against the full file. */
1584 osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf,
1585 #ifdef INTEL_L1_V19_10
1588 (reqdMemSz + (4 * 1024 * 1024)));
1590 printf("\n *************** \n WLS memory: %lx, %d\n", (PTR)osCp.wls.allocAddr, reqdMemSz);
1591 SPartitionWlsDynMem();
/* Partitions the WLS allocation into region 0's memory, rounding the region
 * size up to a whole number of huge pages; records the start address in the
 * T2K leak-debug table when that tooling is compiled in. */
1599 S16 SPartitionWlsMemory()
1604 uint64_t pageSize[1], hugePageSize;
1607 long int pageSize[1], hugePageSize;
/* Ceiling division: number of Y-sized units needed to hold X. */
1610 #define DIV_ROUND_OFFSET(X,Y) ( X/Y + ((X%Y)?1:0) )
1612 uint8_t *regMemStrtAddr = (uint8_t *)osCp.wls.allocAddr;
/* Query the system huge-page size (libhugetlbfs). */
1614 gethugepagesizes(pageSize,1);
1615 hugePageSize = pageSize[0];
/* Only region 0 lives in WLS memory (loop bound is 1 by design). */
1616 for (i = 0; i < 1; i++)
1618 mtRegMemSz[i].startAddr = regMemStrtAddr;
1619 //CM_LOG_DEBUG(CM_LOG_ID_MT, "Global Region-->Bkt[%d] Addr:%p\n", i, mtRegMemSz[i].startAddr);
1621 numHugePg = DIV_ROUND_OFFSET(mtRegMemSz[i].reqdSz, hugePageSize);
1622 reqdSz = numHugePg * hugePageSize;
1623 regMemStrtAddr += reqdSz;
1624 #ifdef T2K_MEM_LEAK_DBG
1625 /* Since wls is region 0 */
1626 regMemLeakInfo.regStartAddr[i] = (uint64_t)mtRegMemSz[i].startAddr;
1627 regMemLeakInfo.numActvRegions++;
1628 #endif /* T2K_MEM_LEAK_DBG */
1630 //Store last region addr for validation
/* NOTE(review): this overwrites mtRegMemSz[i].startAddr set inside the loop;
 * with the loop bound of 1 the end address replaces region 0's start —
 * confirm the intended index against the full file. */
1631 mtRegMemSz[i].startAddr = regMemStrtAddr;
1635 #ifdef SS_MEM_WL_DEBUG
/* Debug-only pointer validator: type 0 checks that `ptr` falls inside the
 * global (WLS) region span; otherwise it checks `ptr` is NOT inside that
 * span (i.e. belongs to a static region). Prints a diagnostic on violation. */
1636 Void SChkAddrValid(int type, int region, PTR ptr)
1638 char *tryPtr = NULL;
1639 if(type == 0) //Global
/* Global-region bounds: [first bucket start, last bucket start + heap). */
1641 if(ptr < mtRegMemSz[0].startAddr || ptr >=
1642 (mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr + mtGlobMemoCfg.heapSize))
1644 printf("\n****INVALID PTR in Global Region: ptr:%p start:%p end:%p***\n", ptr, mtRegMemSz[0].startAddr, mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr);
/* Static-region case: a pointer inside the global span is the error. */
1650 if(ptr > mtRegMemSz[0].startAddr && ptr <= mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr )
1652 printf("\n****INVALID PTR in Static Region: ptr:%p start:%p end:%p***\n", ptr, mtRegMemSz[0].startAddr, mtRegMemSz[mtGlobMemoCfg.numBkts].startAddr);
1658 #endif /* SS_MEM_WL_DEBUG */
/* Lays out regions 1..numRegions-1 contiguously starting at `startAddr`
 * (region 0 lives in WLS memory and is handled by SPartitionWlsMemory). */
1660 S16 SPartitionStaticMemory(uint8_t *startAddr)
1665 uint8_t *regMemStrtAddr = (uint8_t *)startAddr;
1668 //for (i = 0; i < mtMemoCfg.numRegions; i++)
/* Deliberately starts at 1: region 0 is the WLS region. */
1669 for (i = 1; i < mtMemoCfg.numRegions; i++)
1671 mtRegMemSz[i].startAddr = regMemStrtAddr;
1672 reqdSz = /* regMemStrtAddr + */mtRegMemSz[i].reqdSz;
1673 regMemStrtAddr += reqdSz;
1674 #ifdef T2K_MEM_LEAK_DBG
1675 { /* Since region 1 onwards are used for non wls */
1676 regMemLeakInfo.regStartAddr[i] = (uint64_t)mtRegMemSz[i].startAddr;
1677 regMemLeakInfo.numActvRegions++;
1679 #endif /* T2K_MEM_LEAK_DBG */
/* Computes region 0's total requirement (heap + all bucket pools), allocates
 * WLS memory for it, and partitions it via SPartitionWlsMemory(). */
1683 S16 SAllocateWlsMem()
1691 //memset(&mtRegMemSz[0], sizeof(mtRegMemSz), 0);
1692 memset(&mtRegMemSz[0], 0, sizeof(mtRegMemSz));
/* Only region 0 is WLS-backed (loop bound is 1 by design). */
1694 for (i = 0; i < 1; i++)
1696 /* allocate space for the region */
1697 region = &mtMemoCfg.region[i];
1698 reqdMemSz += region->heapsize;
1699 mtRegMemSz[i].reqdSz += region->heapsize;
1701 for (j = 0; j < region->numBkts; j++)
1703 reqdMemSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1704 mtRegMemSz[i].reqdSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
/* NOTE(review): allocation is a hard-coded 512 MB rather than reqdMemSz (the
 * computed-size call is commented out below) — confirm this is intentional. */
1707 osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf, (512 *1024 * 1024));
1708 //osCp.wls.allocAddr = WLS_Alloc(osCp.wls.intf, (reqdMemSz + (1024 * 1024 * 2 * 2)));
1710 printf("\n ************* \n WLS memory: %llx, %ld\n ****** \n", osCp.wls.allocAddr, reqdMemSz);
1712 printf("\n ************* \n WLS memory: %lx, %d\n ****** \n", (PTR)osCp.wls.allocAddr, reqdMemSz);
1714 SPartitionWlsMemory();
/* Computes the combined requirement of regions 1..numRegions-1 (heap + bucket
 * pools), malloc()s one contiguous arena with a 10 KB pad, and partitions it
 * via SPartitionStaticMemory(). */
1717 S16 SAllocateStaticMem()
1726 //memset(&mtRegMemSz[0], sizeof(mtRegMemSz), 0);
1728 //for (i = 0; i < mtMemoCfg.numRegions; i++)
/* Region 0 is WLS-backed, so start at 1. */
1729 for (i = 1; i < mtMemoCfg.numRegions; i++)
1731 /* allocate space for the region */
1732 region = &mtMemoCfg.region[i];
1733 reqdMemSz += region->heapsize;
1734 mtRegMemSz[i].reqdSz += region->heapsize;
1736 for (j = 0; j < region->numBkts; j++)
1738 reqdMemSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
1739 mtRegMemSz[i].reqdSz += region->bkt[j].blkSize * region->bkt[j].numBlks;
/* NOTE(review): malloc() result is not visibly NULL-checked in the elided
 * source — confirm against the full file. */
1743 startAddr = malloc(reqdMemSz + (1024 * 10));
1745 printf("\n ************* \n Static memory: %llx, %ld\n ****** \n", startAddr, reqdMemSz);
1747 printf("\n ************* \n Static memory: %lx, %d\n ****** \n", (PTR)startAddr, reqdMemSz);
1749 SPartitionStaticMemory(startAddr);
1752 #endif /* INTEL_WLS */
1758 * Fun: Initialize region/pool tables
1760 * Desc: This function initializes MTSS-specific information
1761 * in the region/pool tables and configures the common
1762 * memory manager for use.
/* Top-level memory bring-up: (1) lockless/ICC or global+dynamic region setup,
 * (2) optional T2K leak-debug tables, (3) static-arena allocation, then
 * (4) per-region CMM control blocks, configs, and STREAMS init, with full
 * rollback (deinit + free of regions 0..k) on every failure path. */
1771 S16 ssdInitMem(void)
1773 /* mt018.201 - added local variable */
1777 MtRegCfg *region = NULLP;
1778 Txt errMsg[256] = {'\0'};
1779 #ifdef SS_LOCKLESS_MEMORY
1780 CmMmDynRegCb *dynRegCb =0;
1781 #ifdef SS_USE_ICC_MEMORY
1783 CmMmGlobRegCb *globReg = NULLP;
1786 #endif /* SS_LOCKLESS_MEMORY */
1789 /* Use the default SSI memory manager if the ICC memory manager is not
1790 * available. If ICC memory manager is available, it will be used for
1791 * all sharable memory allocation and de-allocation */
1792 #ifdef SS_LOCKLESS_MEMORY
1793 #ifdef SS_USE_ICC_MEMORY
1794 #ifndef YS_PHY_3_8_2
/* ICC path: one dynamic region control block per configured region, bucket
 * sizes copied from the global memory configuration. */
1796 for (i = 0; i < mtDynMemoCfg.numRegions; i++)
1798 dynRegCb = (CmMmDynRegCb *)calloc(1, sizeof(CmMmDynRegCb));
1799 if(dynRegCb == NULLP)
1803 for(k = 0; k < mtDynMemoCfg.region[i].numBkts; k++)
1805 dynRegCb->bktSize[k] = mtGlobMemoCfg.bkt[k].blkSize;
1807 dynRegCb->region = i;
1808 cmMmDynRegInit(dynRegCb);
1809 printf("\niccHdl = %lx\n", (PTR)dynRegCb->iccHdl);
1812 /* ysIccHdl = dynRegCb->iccHdl; */
1815 /* Initialize the global region first */
1816 osCp.globRegCb = calloc(1, sizeof(CmMmGlobRegCb));
1818 if(osCp.globRegCb == NULLP)
1823 globReg = (CmMmGlobRegCb *)osCp.globRegCb;
1825 #ifdef SS_USE_WLS_MEM
1826 SAllocateWlsDynMem();
/* Populate each global bucket: start address comes from WLS carve-out,
 * plain calloc, or the pre-partitioned region map depending on build flags. */
1829 for(i = 0; i < mtGlobMemoCfg.numBkts; i++)
1831 memSize = (mtGlobMemoCfg.bkt[i].blkSize * mtGlobMemoCfg.bkt[i].numBlks);
1832 #if !defined (INTEL_WLS) && defined (SS_USE_WLS_MEM)
1833 globReg->bktTbl[i].startAddr = (Data *)mtDynMemSz[i].startAddr;
1834 printf("\nStarting Address of Bkt Entry [%d]: [0x%016lx], memSize[%d]\n", i, (unsigned long int)globReg->bktTbl[i].startAddr, memSize);
1837 globReg->bktTbl[i].startAddr = (Data *)calloc(memSize, sizeof(Data));
1839 globReg->bktTbl[i].startAddr = (Data *)mtRegMemSz[i].startAddr;
1842 if(globReg->bktTbl[i].startAddr == NULLP)
1846 globReg->bktTbl[i].poolId = i;
1847 globReg->bktTbl[i].size = mtGlobMemoCfg.bkt[i].blkSize;
1848 globReg->bktTbl[i].numBlks = mtGlobMemoCfg.bkt[i].numBlks;
1849 globReg->bktTbl[i].bucketSetSize = mtGlobMemoCfg.bkt[i].bucketSetSize;
1852 globReg->numBkts = mtGlobMemoCfg.numBkts;
1853 cmMmGlobRegInit(globReg);
1855 /* Initialize the dynamic task regions and sanity check for the threshold
1857 for (i = 0; i < mtDynMemoCfg.numRegions; i++)
1859 dynRegCb = (CmMmDynRegCb *)calloc(1, sizeof(CmMmDynRegCb));
1860 if(dynRegCb == NULLP)
/* Threshold sanity: release threshold must exceed acquire threshold and
 * neither may be zero, otherwise configuration is rejected. */
1864 for(k = 0; k < mtDynMemoCfg.region[i].numBkts; k++)
1866 if((mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold <
1867 mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold) ||
1868 (mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold == 0) ||
1869 (mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold == 0))
1871 #ifdef XEON_SPECIFIC_CHANGES
1876 dynRegCb->bktTbl[k].poolId = k;
1877 dynRegCb->bktTbl[k].size = mtGlobMemoCfg.bkt[k].blkSize;
1878 dynRegCb->bktTbl[k].blkSetRelThreshold = mtDynMemoCfg.region[i].bkt[k].blkSetRelThreshold;
1879 dynRegCb->bktTbl[k].blkSetAcquireThreshold = mtDynMemoCfg.region[i].bkt[k].blkSetAcquireThreshold;
1880 dynRegCb->bktTbl[k].bucketSetSize = mtGlobMemoCfg.bkt[k].bucketSetSize;
/* Track the largest bucket block size for this region. */
1881 if(dynRegCb->bktMaxBlkSize < dynRegCb->bktTbl[k].size)
1883 dynRegCb->bktMaxBlkSize = dynRegCb->bktTbl[k].size;
1886 dynRegCb->region = i;
1887 dynRegCb->numBkts = mtDynMemoCfg.region[i].numBkts;
1888 cmMmDynRegInit(dynRegCb);
1890 #endif /* SS_USE_ICC_MEMORY */
1891 #endif /* SS_LOCKLESS_MEMORY */
1893 #ifdef T2K_MEM_LEAK_DBG
1895 /* Initialize mem leak tool memories for debugging */
1896 regMemLeakInfo.numActvRegions=0;
1897 for(reg=0; reg <SS_MAX_REGS; reg++)
1899 regMemLeakInfo.gMemLeakInfo[reg] = malloc(sizeof(T2kMeamLeakInfo)*T2K_MEM_LEAK_INFO_TABLE_SIZE);
1900 memset(regMemLeakInfo.gMemLeakInfo[reg],0x0,
1901 sizeof(T2kMeamLeakInfo)*T2K_MEM_LEAK_INFO_TABLE_SIZE);
1902 regMemLeakInfo.regStartAddr[reg] = 0;
1905 regMemLeakInfo.regStartAddr[reg] = 0;
1906 if (pthread_mutex_init(&(regMemLeakInfo.memLock[reg]), NULL) != 0)
1908 printf("\n mutex init failed\n");
1914 /* Now allocate WLS memory */
1916 SAllocateStaticMem();
1918 /* mt018.201 - CMM Initialization */
1919 for (i = 0; i < mtMemoCfg.numRegions; i++)
1921 /* allocate space for the region control block */
1922 mtCMMRegCb[i] = (CmMmRegCb *)calloc(1, sizeof(CmMmRegCb));
1923 #ifdef TENB_RTLIN_CHANGES
/* Pin the control block to RAM for real-time builds. */
1924 mlock(mtCMMRegCb[i], sizeof(CmMmRegCb));
1926 if (mtCMMRegCb[i] == NULLP)
1928 sprintf(errMsg,"\n ssdInitMem(): Could not allocated memory \
1929 for the Region:%d control block\n",i);
/* Roll back every region already initialized before failing. */
1931 for (k = 0; k < i; k++)
1933 cmMmRegDeInit(mtCMMRegCb[k]);
1934 free(mtCMMRegCfg[k]->vAddr);
1935 free(mtCMMRegCb[k]);
1936 free(mtCMMRegCfg[k]);
1941 mtCMMRegCfg[i] = (CmMmRegCfg *)calloc(1, sizeof(CmMmRegCfg));
1942 #ifdef TENB_RTLIN_CHANGES
1943 mlock(mtCMMRegCfg[i], sizeof(CmMmRegCfg));
1945 if (mtCMMRegCfg[i] == NULLP)
1947 for (k = 0; k < i; k++)
1949 cmMmRegDeInit(mtCMMRegCb[k]);
1950 free(mtCMMRegCfg[k]->vAddr);
1951 free(mtCMMRegCb[k]);
1952 free(mtCMMRegCfg[k]);
1954 free(mtCMMRegCb[i]);
1959 /* allocate space for the region */
1960 region = &mtMemoCfg.region[i];
1961 mtCMMRegCfg[i]->size = region->heapsize;
1962 for (j = 0; j < region->numBkts; j++)
1964 /* mt033.201 - addition for including the header size while computing the total size */
1965 #ifdef SSI_DEBUG_LEVEL1
1966 mtCMMRegCfg[i]->size += (region->bkt[j].blkSize + sizeof(CmMmBlkHdr)) *\
1967 (region->bkt[j].numBlks);
1969 mtCMMRegCfg[i]->size += region->bkt[j].blkSize * region->bkt[j].numBlks;
1970 #endif /* SSI_DEBUG_LEVEL1 */
/* Region data area: pre-partitioned arena under INTEL_WLS, else calloc. */
1973 mtCMMRegCfg[i]->vAddr = (Data *)mtRegMemSz[i].startAddr;
1975 mtCMMRegCfg[i]->vAddr = (Data *)calloc(mtCMMRegCfg[i]->size,
1978 #ifdef XEON_SPECIFIC_CHANGES
1979 CM_LOG_DEBUG(CM_LOG_ID_MT, "Static Region-->Bkt[%d] Addr:[%p] RegionId=[%d] Size=[%d] \n",
1980 i, mtCMMRegCfg[i]->vAddr, region->regionId, mtCMMRegCfg[i]->size);
1982 #ifdef TENB_RTLIN_CHANGES
1983 mlock(mtCMMRegCfg[i]->vAddr, mtCMMRegCfg[i]->size*sizeof(Data));
1986 if (mtCMMRegCfg[i]->vAddr == NULLP)
1988 sprintf(errMsg,"\n ssdInitMem(): Could not allocate memory \
1989 for the Region:%d \n",i);
1991 for (k = 0; k < i; k++)
1993 cmMmRegDeInit(mtCMMRegCb[k]);
1994 free(mtCMMRegCfg[k]->vAddr);
1995 free(mtCMMRegCb[k]);
1996 free(mtCMMRegCfg[k]);
1998 free(mtCMMRegCb[i]);
1999 free(mtCMMRegCfg[i]);
2004 /* set up the CMM configuration structure */
2005 mtCMMRegCfg[i]->lType = SS_LOCK_MUTEX;
2006 mtCMMRegCfg[i]->chFlag = 0;
2007 mtCMMRegCfg[i]->bktQnSize = MT_BKTQNSIZE;
2008 mtCMMRegCfg[i]->numBkts = region->numBkts;
2010 for (j = 0; j < region->numBkts; j++)
2012 mtCMMRegCfg[i]->bktCfg[j].size = region->bkt[j].blkSize;
2013 mtCMMRegCfg[i]->bktCfg[j].numBlks = region->bkt[j].numBlks;
2016 /* initialize the CMM */
2017 #ifdef SS_LOCKLESS_MEMORY
2018 if (cmMmStatRegInit(region->regionId, mtCMMRegCb[i], mtCMMRegCfg[i]) != ROK)
2020 if (cmMmRegInit(region->regionId, mtCMMRegCb[i], mtCMMRegCfg[i]) != ROK)
2021 #endif /* SS_LOCKLESS_MEMORY */
2023 for (k = 0; k < i; k++)
2025 cmMmRegDeInit(mtCMMRegCb[k]);
2026 free(mtCMMRegCfg[k]->vAddr);
2027 free(mtCMMRegCb[k]);
2028 free(mtCMMRegCfg[k]);
2030 free(mtCMMRegCfg[i]->vAddr);
2031 free(mtCMMRegCb[i]);
2032 free(mtCMMRegCfg[i]);
2037 /* initialize the STREAMS module */
2038 /* mt019.201: STREAMS module will only apply to DFLT_REGION */
2039 if (region->regionId == 0)
2041 if (ssStrmCfg(region->regionId, region->regionId) != ROK)
2043 for (k = 0; k < i; k++)
2045 cmMmRegDeInit(mtCMMRegCb[k]);
2046 free(mtCMMRegCfg[k]->vAddr);
2047 free(mtCMMRegCb[k]);
2048 free(mtCMMRegCfg[k]);
2050 cmMmRegDeInit(mtCMMRegCb[i]);
2051 free(mtCMMRegCfg[i]->vAddr);
2052 free(mtCMMRegCb[i]);
2053 free(mtCMMRegCfg[i]);
2058 /* mt001.301 : Additions */
2059 #ifdef SS_MEM_LEAK_STS
2061 #endif /* SS_MEM_LEAK_STS */
2069 * Fun: De-initialize region/pool tables
2071 * Desc: This function reverses the initialization in ssdInitMem().
/* Deinitializes every CMM region and frees its data area, control block, and
 * config — the exact inverse of the per-region setup in ssdInitMem(). */
2080 Void ssdDeinitMem(void)
2082 /* mt018.201 - added local variables */
2085 /* mt008.301 Additions */
2086 #ifdef SS_MEM_LEAK_STS
2087 cmDeinitMemLeakMdl();
2088 #endif /* SS_MEM_LEAK_STS */
2090 for (i = 0; i < mtMemoCfg.numRegions; i++)
2092 cmMmRegDeInit(mtCMMRegCb[i]);
2093 free(mtCMMRegCfg[i]->vAddr);
2094 free(mtCMMRegCb[i]);
2095 free(mtCMMRegCfg[i]);
2104 * Fun: Initialize task table
2106 * Desc: This function initializes MTSS-specific information
2107 * in the task table.
/* Clears the per-system-task LWP ids; only compiled when multicore or
 * affinity support is enabled. */
2116 S16 ssdInitTsk(void)
2118 /* mt001.301 : Additions */
2119 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
2120 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
2121 uint32_t tskInd = 0;
2122 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
2126 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
2127 #if defined(SS_MULTICORE_SUPPORT) || defined(SS_AFFINITY_SUPPORT)
2128 /* initialize system task information */
2129 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
2131 osCp.sTskTbl[tskInd].dep.lwpId = 0;
2133 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
2140 * Fun: Deinitialize task table
2142 * Desc: This function reverses the initialization performed in
/* No MTSS-specific task-table state needs explicit teardown; body is empty
 * in the visible source. */
2152 Void ssdDeinitTsk(void)
2159 #ifdef SS_DRVR_SUPPORT
2162 * Fun: Initialize driver task table
2164 * Desc: This function initializes MTSS-specific information
2165 * in the driver task table.
/* Clears the driver-task flags, creates the SSetIntPend()<->isTskHdlr pipe,
 * and spawns the detached, system-scoped isTskHdlr thread. */
2174 S16 ssdInitDrvr(void)
2178 pthread_attr_t attr;
2183 /* initialize the dependent portion of the driver task entries */
2184 for (i = 0; i < SS_MAX_DRVRTSKS; i++)
2186 osCp.drvrTskTbl[i].dep.flag = FALSE;
2190 /* create pipe for communication between SSetIntPend() and
2191 * the isTskHdlr thread.
2193 if (pipe(osCp.dep.isFildes) != 0)
2199 /* create the isTskHdlr thread */
2200 pthread_attr_init(&attr);
2201 /* mt021.201 - Addition to set stack size */
2202 pthread_attr_setstacksize(&attr, (size_t)MT_ISTASK_STACK);
2203 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2204 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2205 if ((pthread_create(&osCp.dep.isTskHdlrTID, &attr, mtIsTskHdlr, NULLP)) != 0)
2207 /* mt020.201 - Addition for destroying thread attribute object attr */
/* Destroy attr on the failure path before returning. */
2208 pthread_attr_destroy(&attr);
2214 /*mt014.301 : 4GMX release related changes*/
2215 #ifdef SS_4GMX_UCORE
2223 /* mt020.201 - Addition for destroying thread attribute object attr */
2224 pthread_attr_destroy(&attr);
2233 * Fun: Deinitialize driver information
2235 * Desc: This function reverses the initialization performed in
/* Cancels the isTskHdlr thread (retrying until pthread_cancel succeeds) and
 * closes the transport-layer handle(s). */
2245 Void ssdDeinitDrvr(void)
2247 /* mt008.301: Terminate the Driver Task on exit */
2248 while(pthread_cancel(osCp.dep.isTskHdlrTID));
2251 TL_Close(AppContext.hUAII);
/* Radio-cluster deployments own a second TL handle that must also close. */
2252 if (clusterMode == RADIO_CLUSTER_MODE)
2254 TL_Close(AppContext.hUAII_second);
2260 #endif /* SS_DRVR_SUPPORT */
2265 * Fun: Initialize timer table
2267 * Desc: This function initializes MTSS-specific information
2268 * in the timer table.
/* Sets up the timer queue, optionally registers a dedicated timer system
 * task (multicore builds), then spawns the detached SCHED_FIFO max-priority
 * timer-handler thread, retrying creation until the thread's region can be
 * placed in the thread/memory map (SS_THR_REG_MAP builds). */
2277 S16 ssdInitTmr(void)
2279 pthread_attr_t attr;
2280 struct sched_param param_sched;
2281 /* mt010.21: addition */
2283 #ifdef SS_MULTICORE_SUPPORT
2285 #endif /* SS_MULTICORE_SUPPORT */
2286 #ifdef SS_THR_REG_MAP
2287 uint32_t threadCreated = FALSE;
2288 #endif /* SS_THR_REG_MAP */
2292 osCp.dep.tmrTqCp.tmrLen = SS_MAX_TMRS;
2293 /* mt010.21: addition */
2294 osCp.dep.tmrTqCp.nxtEnt = 0;
/* Empty every timer-queue slot before the handler thread starts. */
2295 for (i=0; i< SS_MAX_TMRS; i++)
2297 osCp.dep.tmrTq[i].first = (CmTimer *)NULLP;
2300 #ifdef SS_MULTICORE_SUPPORT
2301 sTsk = ssdAddTmrSTsk();
2306 #endif /* SS_MULTICORE_SUPPORT */
2307 /* create the timer handler thread */
2308 pthread_attr_init(&attr);
2309 /* mt021.201 - Addition to set stack size */
2310 pthread_attr_setstacksize(&attr, (size_t)MT_TMRTASK_STACK);
2311 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2312 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
/* Real-time scheduling: FIFO class at the maximum priority the OS allows. */
2313 pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
2314 param_sched.sched_priority = sched_get_priority_max(SCHED_FIFO);
2315 pthread_attr_setschedparam(&attr, &param_sched);
2318 #ifdef SS_THR_REG_MAP
2319 /* When the thread is created, we check for the memory mapping table if
2320 * threadId can be placed in thread memory map table. If it is not able to place
2321 * threadId is stored in temporary array. Once thread is created successful,
2322 * thread_cancel is sent for each thread which are created before. All the
2323 * threads are made to wait on sema which is cancel point for thread.
2325 while(threadCreated == FALSE)
2328 if ((pthread_create(&osCp.dep.tmrHdlrTID, &attr, mtTmrHdlr, NULLP)) != 0)
2330 /* mt020.201 - Addition for destroying thread attribute object attr */
2331 pthread_attr_destroy(&attr);
2336 #ifdef SS_THR_REG_MAP
2337 threadCreated = ssCheckAndAddMemoryRegionMap(osCp.dep.tmrHdlrTID,
2340 #endif /* SS_THR_REG_MAP */
2341 #ifdef SS_MEM_WL_DEBUG
2342 tmpRegTidMap[sTsk->region] = osCp.dep.tmrHdlrTID;
2345 /* mt020.201 - Addition for destroying thread attribute object attr */
2346 pthread_attr_destroy(&attr);
2355 * Fun: Deinitialize timer table
2357 * Desc: This function reverses the initialization performed in
/* On multicore builds, releases system-task slot 0 (always the timer task)
 * back to the free list under the task-table lock, then cancels the timer
 * handler thread. */
2367 Void ssdDeinitTmr(void)
2369 #ifdef SS_MULTICORE_SUPPORT
2372 #endif /* SS_MULTICORE_SUPPORT */
2375 #ifdef SS_MULTICORE_SUPPORT
2376 ret = SLock(&osCp.sTskTblLock);
2380 #if (ERRCLASS & ERRCLS_DEBUG)
2381 MTLOGERROR(ERRCLS_DEBUG, EMT008, (ErrVal) ret,
2382 "Could not lock system task table");
2386 sTsk = &osCp.sTskTbl[0]; /* first entry is timer entry always */
2387 /* clean up the system task entry */
2391 SDestroyLock(&sTsk->lock);
2392 ssDestroyDmndQ(&sTsk->dQ);
2395 /* make this entry available in the system task table */
2396 sTsk->nxt = osCp.nxtSTskEntry;
2397 osCp.nxtSTskEntry = 0;
2401 /* unlock the system task table */
2402 SUnlock(&osCp.sTskTblLock);
2404 #endif /* SS_MULTICORE_SUPPORT */
2405 /* mt008.301: Terminate the timer thread on exit */
2406 while(pthread_cancel(osCp.dep.tmrHdlrTID));
2416 * Desc: Pre-tst() initialization.
/* Console/logging bring-up: binds stdin/stdout as console streams, puts the
 * terminal in non-canonical single-character mode (CONSTDIO builds), makes
 * the input fd blocking, and spawns the detached console-handler thread. */
2425 S16 ssdInitLog(void)
2427 /* mt027.201 - Modification to fix warnings with no STDIN and STDOUT */
2431 pthread_attr_t attr;
2434 #endif /* CONSTDIO */
2439 /* mt008.301: ssdInitFinal changed to ssdInitLog */
2444 osCp.dep.conInFp = (FILE *) stdin;
2445 osCp.dep.conOutFp = (FILE *) stdout;
2446 /* added compile time flag CONRD: mt017.21 */
2450 /* disable canonical input processing */
2451 fd = fileno(osCp.dep.conInFp);
2452 if ((tcgetattr(fd, &tio)) != 0)
2454 printf("\nError: disable canonical input processing\n");
/* Deliver input per keystroke: no line buffering, 1-char minimum, no timeout. */
2458 tio.c_lflag &= ~ICANON;
2459 tio.c_cc[VMIN] = 1; /* wait for a minimum of 1 character input */
2460 tio.c_cc[VTIME] = 0;
2461 if ((tcsetattr(fd, TCSANOW, &tio)) != 0)
2463 printf("\nError: while tcsetattr() processing\n");
2467 #endif /* CONSTDIO */
2470 /* set up the input fd to block when no data is available */
2471 fd = fileno(osCp.dep.conInFp);
2472 flags = fcntl(fd, F_GETFL, &flags);
2473 flags &= ~O_NONBLOCK;
2474 if (fcntl(fd, F_SETFL, flags) == -1)
2476 printf("\nError: while fcntl processing\n");
2481 /* create the console handler thread */
2482 pthread_attr_init(&attr);
2483 /* mt021.201 - Addition to set stack size */
2484 pthread_attr_setstacksize(&attr, (size_t)MT_CONSOLE_STACK);
2485 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
2486 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
2489 if((SCreatePThread(&osCp.dep.conHdlrTID, &attr, mtConHdlr, NULLP)) != 0)
2491 /* mt020.201 - Addition for destroying thread attribute object attr */
2492 pthread_attr_destroy(&attr);
2494 printf("\nError: Logging Thread creation failed \n");
2498 /* mt020.201 - Addition for destroying thread attribute object attr */
2499 pthread_attr_destroy(&attr);
2513 * Desc: This function reverses the initialization performed in
/* Cancels the console-handler thread, retrying until pthread_cancel
 * succeeds. */
2523 /* mt008.301: ssdDeinitFinal changed to ssdDeinitLog */
2524 Void ssdDeinitLog(void)
2526 /* mt008.301: ssdDeinitFinal changed to ssdDeinitLog */
2529 /* mt008.301: Terminate the console reader on exit */
2530 while(pthread_cancel(osCp.dep.conHdlrTID));
2536 /* mt001.301 : Additions */
/* Watchdog bring-up: registers the watchdog sender (ENTDW) and receiver
 * (ENTHB) TAPA tasks on their own system tasks, initializes the watchdog
 * timer queue and pst, binds a UDP socket on `port` for heartbeat receive,
 * and posts the initial heartbeat-request event. */
2540 S16 ssdInitWatchDog(uint16_t port)
2543 Txt prntBuf[PRNTSZE];
2546 #ifdef SS_WATCHDOG_IPV6
2547 struct sockaddr_in6 tmpaddr;
2549 struct sockaddr_in tmpaddr;
2550 #endif /* SS_WATCHDOG_IPV6 */
2551 #ifdef SS_MULTIPLE_PROCS
2552 ProcId procId = SS_WD_WDPROC;
2553 if (SAddProcIdLst(1, &procId) != ROK)
2557 #endif /* SS_MULTIPLE_PROCS */
2560 SInitLock(&osCp.wdCp.wdLock, SS_LOCK_MUTEX);
2562 /* Create a watch dog system task */
2563 SCreateSTsk(0, &(osCp.wdCp.watchDgTskId));
2565 /* Create a watch dog receiver system task */
2566 SCreateSTsk(0, &(osCp.wdCp.watchDgRcvrTskId));
2568 /* Register and attach watch dog TAPA task */
2569 #ifdef SS_MULTIPLE_PROCS
2570 SRegTTsk (procId, ENTDW, INST0, TTNORM, PRIOR0, NULLP, watchDgActvTsk);
2571 SAttachTTsk (procId, ENTDW, INST0, osCp.wdCp.watchDgTskId);
2573 SRegTTsk ( ENTDW, INST0, TTNORM, PRIOR0, NULLP, watchDgActvTsk);
2574 SAttachTTsk ( ENTDW, INST0, osCp.wdCp.watchDgTskId);
2575 #endif /* SS_MULTIPLE_PROCS */
2576 /* Register and attach watch dog receiver TAPA task */
2577 #ifdef SS_MULTIPLE_PROCS
2578 SRegTTsk (procId, ENTHB, INST0, TTNORM, PRIOR0, NULLP, watchDgRcvrActvTsk);
2579 SAttachTTsk (procId, ENTHB, INST0, osCp.wdCp.watchDgRcvrTskId);
2581 SRegTTsk ( ENTHB, INST0, TTNORM, PRIOR0, NULLP, watchDgRcvrActvTsk);
2582 SAttachTTsk ( ENTHB, INST0, osCp.wdCp.watchDgRcvrTskId);
2583 #endif /* SS_MULTIPLE_PROCS */
2585 #ifndef SS_MULTIPLE_PROCS
2586 osCp.wdCp.watchDgPst.srcProcId = SFndProcId();
2587 osCp.wdCp.watchDgPst.dstProcId = SFndProcId();
2589 osCp.wdCp.watchDgPst.srcProcId = procId;
2590 osCp.wdCp.watchDgPst.dstProcId = procId;
2591 #endif /* SS_MULTIPLE_PROCS */
2593 /* Initialise the pst structure */
2594 ssdInitWatchDgPst(&(osCp.wdCp.watchDgPst));
2595 /* Initialize the watch dog timer resolution default is 1 sec */
2597 cmInitTimers(osCp.wdCp.watchDgTmr, (uint8_t)1);
2598 osCp.wdCp.watchDgTqCp.nxtEnt = 0;
2599 osCp.wdCp.watchDgTqCp.tmrLen = 1;
2600 for(idx = 0; idx < 1; idx++)
2602 osCp.wdCp.watchDgTs[idx].first = NULLP;
2603 osCp.wdCp.watchDgTs[idx].tail = NULLP;
2605 #ifdef SS_MULTIPLE_PROCS
2606 SRegCfgTmr(procId,ENTDW, INST0, 10, SS_100MS, ssdWatchDgActvTmr);
2608 SRegCfgTmr(ENTDW, INST0, 10, SS_100MS, ssdWatchDgActvTmr);
2609 #endif /* SS_MULTIPLE_PROCS */
2611 /* Create the watch dog receiver socket */
2612 osCp.wdCp.globWd.sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
2613 if(osCp.wdCp.globWd.sock == -1)
2615 sprintf(prntBuf,"ssdInitWatchDog: socket failed errno [%d]\n", errno);
2619 #ifdef SS_WATCHDOG_IPV6
/* NOTE(review): 'tmpadDr' below looks like a typo for 'tmpaddr'; the
 * SS_WATCHDOG_IPV6 build path would not compile as written — confirm. */
2620 tmpaddr.sin6_len = sizeof(tmpadDr);
2621 tmpaddr.sin6_family = AF_INET6;
2622 tmpaddr.sin6_addr = in6addr_any;
2623 tmpaddr.sin6_port = htons(port);
2625 tmpaddr.sin_family = AF_INET;
2626 tmpaddr.sin_addr.s_addr = htonl(INADDR_ANY);
2627 tmpaddr.sin_port = htons(port);
2628 #endif /* SS_WATCHDOG_IPV6 */
2630 if(bind(osCp.wdCp.globWd.sock, (struct sockaddr *)&tmpaddr, sizeof(struct sockaddr)) != 0
2633 sprintf(prntBuf,"ssdInitWatchDog: bind failed errno [%d]\n", errno);
2637 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
2641 #ifndef SS_MULTIPLE_PROCS
2642 pst.srcProcId = SFndProcId();
2643 pst.dstProcId = SFndProcId();
2645 pst.srcProcId = procId;
2646 pst.dstProcId = procId;
2647 #endif /* SS_MULTIPLE_PROCS */
/* Kick off the heartbeat state machine with the first request event. */
2648 pst.event = EVTSSHRTBTREQ;
2649 ssdInitWatchDgPst(&pst);
2650 SPstTsk(&pst, mBuf);
/* Fills in the boilerplate fields of a watchdog pst: loose coupling, default
 * region/pool, priority 0, specific route, ENTDW -> ENTHB. */
2655 S16 ssdInitWatchDgPst(Pst *pst)
2658 pst->selector = SS_LOOSE_COUPLING;
2660 pst->region = DFLT_REGION; /* region */
2661 pst->pool = DFLT_POOL; /* pool */
2663 pst->prior = PRIOR0; /* priority */
2664 pst->route = RTESPEC; /* route */
2666 pst->dstEnt = ENTHB; /* destination entity */
2668 pst->srcEnt = ENTDW; /* source entity */
2674 #ifdef SS_MULTIPLE_PROCS
2675 S16 ssdWatchDgActvTmr
2682 S16 ssdWatchDgActvTmr(Void)
2683 #endif /* SS_MULTIPLE_PROCS */
/* Timer-tick activation: drives the watchdog timer queue, invoking
 * ssdWatchDgTmrEvt for each expired timer. */
2686 cmPrcTmr(&osCp.wdCp.watchDgTqCp, osCp.wdCp.watchDgTs, (PFV)ssdWatchDgTmrEvt);
/* Heartbeat-timeout handler: any node whose status is still 0 when the timer
 * fires missed its heartbeat reply; the registered user callback is invoked,
 * and unless the watchdog is stopped the timer and request are re-armed. */
2691 Void ssdWatchDgTmrEvt
2693 PTR cb, /* control block */
2694 S16 event /* timer number */
2697 /* mt003.301 Fixed warnings */
2701 Txt prntBuf[PRNTSZE];
2710 SPrint("Timer Heartbeat Request Expired");
2712 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2717 SLock(&osCp.wdCp.wdLock);
2718 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
/* status == 0 means no heartbeat response was seen this interval. */
2720 if(osCp.wdCp.globWd.wdsta[i].status == 0)
2722 sprintf(prntBuf, "Node [ %s ] Down. Calling user callback\n", inet_ntoa(osCp.wdCp.globWd.wdsta[i].addr));
2724 if(osCp.wdCp.globWd.callback != 0)
2726 osCp.wdCp.globWd.callback(osCp.wdCp.globWd.data);
2730 SUnlock(&osCp.wdCp.wdLock);
/* Re-arm the heartbeat cycle unless a stop was requested. */
2732 if(!osCp.wdCp.globWd.watchdogStop)
2734 ssdStartWatchDgTmr(NULLP, SS_TMR_HRTBT, osCp.wdCp.globWd.timeout);
2735 ssdSndHrtBtMsg(restartTmr, SS_WD_HB_REQ);
/* Arms the watchdog heartbeat timer: clears every node's response status,
 * then starts the timer with the supplied wait (also cached as the global
 * watchdog timeout). */
2745 Void ssdStartWatchDgTmr
2756 Txt prntBuf[PRNTSZE];
2760 /* mt003.301 Modifications */
2763 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2764 if(event == SS_TMR_HRTBT)
2766 SPrint("\nSTART SS_TMR_HRTBT");
/* Reset per-node status so the next expiry can detect missing replies. */
2773 SLock(&osCp.wdCp.wdLock);
2774 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2776 osCp.wdCp.globWd.wdsta[i].status = 0;
2778 SUnlock(&osCp.wdCp.wdLock);
2780 arg.tq = osCp.wdCp.watchDgTs;
2781 arg.tqCp = &osCp.wdCp.watchDgTqCp;
2782 arg.timers = osCp.wdCp.watchDgTmr;
2783 arg.cb = (PTR)NULLP;
/* Cache the interval globally and pass it to the timer start request. */
2785 arg.wait = osCp.wdCp.globWd.timeout = wait;
/* Disarms the watchdog heartbeat timer, clearing per-node status before
 * issuing the timer stop request. */
2793 Void ssdStopWatchDgTmr
2802 Txt prntBuf[PRNTSZE];
2806 /* mt003.301 Modifications */
2809 sprintf(prntBuf," Time: %02d:%02d:%02d\n",dt.hour,dt.min, dt.sec);
2810 if(event == SS_TMR_HRTBT)
2812 SPrint("STOP SS_TMR_HRTBT");
2816 SLock(&osCp.wdCp.wdLock);
2817 for(i=0; i < osCp.wdCp.globWd.numNodes; i++)
2819 osCp.wdCp.globWd.wdsta[i].status = 0;
2821 SUnlock(&osCp.wdCp.wdLock);
2824 arg.tq = osCp.wdCp.watchDgTs;
2825 arg.tqCp = &osCp.wdCp.watchDgTqCp;
2826 arg.timers = osCp.wdCp.watchDgTmr;
2827 arg.cb = (PTR)NULLP;
/* Body of ssdSndHrtBtMsg (function header is on lines not shown here):
 * sends the "<HB>REQ</HB>" heartbeat datagram to every configured
 * watchdog node under wdLock, skipping entries with a zero address.
 * NOTE(review): under SS_WATCHDOG_IPV6 this writes sin6_* fields into a
 * `struct sockaddr_in` (tmpaddr) — an IPv6 build almost certainly needs
 * `struct sockaddr_in6` here; confirm before enabling that flag. */
2846 Txt prntBuf[PRNTSZE];
2848 struct sockaddr_in tmpaddr;
2849 char hbMsg[SS_WD_HB_MSG_SIZE];
2856 sprintf(prntBuf,"TX HEARTBEAT REQ Time: %02d:%02d:%02d\n", dt.hour, dt.min, dt.sec);
2860 /* Pack the message */
2861 strcpy(hbMsg, "<HB>REQ</HB>");
2863 /* Send the heartbeat messages to all the configured nodes */
2864 SLock(&osCp.wdCp.wdLock);
2865 for (n=0; n < osCp.wdCp.globWd.numNodes; n++)
/* a zero address marks an unused/unconfigured node slot */
2867 if(osCp.wdCp.globWd.wdsta[n].addr.s_addr == 0)
2872 /* Identify the destination node */
2873 #ifdef SS_WATCHDOG_IPV6
2874 tmpaddr.sin6_len = sizeof(tmpaddr);
2875 tmpaddr.sin6_family = AF_INET6;
2876 tmpaddr.sin6_addr = osCp.wdCp.globWd.wdsta[n].addr;
2877 tmpaddr.sin_port = osCp.wdCp.globWd.wdsta[n].port;
2879 tmpaddr.sin_family = AF_INET;
2880 tmpaddr.sin_addr.s_addr = osCp.wdCp.globWd.wdsta[n].addr.s_addr;
2881 tmpaddr.sin_port = osCp.wdCp.globWd.wdsta[n].port;
2882 #endif /* SS_WATCHDOG_IPV6 */
2884 err = sendto(osCp.wdCp.globWd.sock, hbMsg, strlen(hbMsg), 0, (struct sockaddr *)&tmpaddr, sizeof(struct sockaddr));
2888 sprintf(prntBuf,"ssdSndHrtBtMsg: HB to node [%s:%d] failed status[%d]\n",
2889 inet_ntoa(tmpaddr.sin_addr), tmpaddr.sin_port, errno);
2896 sprintf(prntBuf,"ssdSndHrtBtMsg: HB to node [%s:%d] sent[%d]\n", inet_ntoa(tmpaddr.sin_addr), tmpaddr.sin_port, err);
2901 SUnlock(&osCp.wdCp.wdLock);
2906 #endif /* SS_WATCHDOG */
2910 /* mt022.201 - Modification to fix problem when NOCMDLINE is defined */
2916 * Desc: This function gets command line options.
/*
 * Fun:   mtGetOpts
 * Desc:  Parses command-line options ("o:f:s:m:c:") and, for -m, reads a
 *        file-based memory configuration: regions, buckets/pools, bucket
 *        sizes and block counts, heap sizes and (under SS_LOCKLESS_MEMORY)
 *        the global/dynamic memory configuration. Falls back to the
 *        defaults from mt_ss.h when the file cannot be opened.
 * NOTE(review): fragmentary view — the option switch, the section counter
 * `i` driving the case labels, and many braces are on lines not shown.
 */
2925 static Void mtGetOpts(void)
2932 FILE *memOpt; /* memory options file pointer */
2935 /* mt007.301 : Fix related to file based mem config on 64 bit machine */
2941 /*KWORK_FIX: Initializing the variable for avoiding corruption */
2943 /*mt010.301 Fix for reading the variables on 64 bit/32bit platforms correctly */
2949 #ifdef SS_LOCKLESS_MEMORY
2964 osCp.dep.fileOutFp = (FILE *)NULLP;
2966 /* initialize memOpt */
2967 memOpt = (FILE *) NULLP;
2974 while ((ret = SGetOpt(argc, argv, "o:f:s:m:c:")) != EOF)
2979 /* mt001.301 : Additions */
2980 #ifdef SS_MEM_LEAK_STS
2982 cmMemOpenMemLkFile(msOptArg);
2986 osCp.dep.fileOutFp = fopen(msOptArg, "w");
2989 fileBasedMemCfg = TRUE;
2990 memOpt = fopen(msOptArg, "r");
2992 /* if file does not exist or could not be opened then use the
2993 * default memory configuration as defined in mt_ss.h
2995 if (memOpt == (FILE *) NULLP)
2997 sprintf(pBuf, "\nMTSS: Memory configuration file: %s could not\
2998 be opened, using default mem configuration\n", msOptArg);
/* one config line per iteration; '#' or non-digit first char = skip */
3003 while (fgets((Txt *)line, 256, memOpt) != NULLP)
3005 if(line[0] == '#' || line[0] < '0' || line[0] > '9') /* Comment line or non numeric character, so skip it and read next line */
3011 case 0: /*** INPUT: Number of regions ***/
3012 sscanf(line, "%ld", (long *) &numReg);
3013 mtMemoCfg.numRegions = numReg;
3014 if(mtMemoCfg.numRegions > SS_MAX_REGS)
3016 printf("\n No. of regions are > SS_MAX_REGS:%d \n",SS_MAX_REGS);
3022 case 1: /*** INPUT: Number of buckets and number of Pools ***/
3023 sscanf(line, "%ld %ld", (long *) &numBkts, (long *) &numPools);
3024 if(numBkts > MT_MAX_BKTS)
3026 printf("\n No. of buckets are > MT_MAX_BKTS :%d \n",MT_MAX_BKTS);
3030 if(numPools > SS_MAX_POOLS_PER_REG)
3032 printf("\n No. of pools are > SS_MAX_POOLS_PER_REG:%d \n",SS_MAX_POOLS_PER_REG);
3037 * Delay updation from local variable to global
3038 * structure of number of regions and heap data to
3039 * counter error conditions present above.
3041 for(idx = 0; idx < cfgNumRegs; idx++)
3043 mtMemoCfg.region[idx].numBkts = numBkts;
3044 cfgRegInfo[idx].region = idx;
3045 cfgRegInfo[idx].numPools = numPools;
3047 * Initialize the pool info as static type with size zero
3049 for(poolIdx = 0; poolIdx < numPools; poolIdx++)
3051 cfgRegInfo[idx].pools[poolIdx].type = SS_POOL_STATIC;
3052 cfgRegInfo[idx].pools[poolIdx].size = 0;
3057 case 2: /*** INPUT: Bucket Id and size of the bucket ***/
3058 if(bktUpdtCnt < numBkts) /* more set of bucket can be added */
3060 sscanf(line, "%ld %ld",(long *)&bktIdx, (long *) &bktSz);
3062 if(bktIdx >= numBkts)
3064 printf("\n Invalid Bucket Id, may be >= the No. of buckets:%ld\n",numBkts);
3069 mtBktInfo[bktIdx].blkSize = bktSz;
3071 if(bktUpdtCnt == numBkts)
3073 i++; /*done reading bkt info, start reading individual region info*/
3077 case 3: /*** INPUT: Region Id (ranges from 0 to numRegions-1) **/
/* NOTE(review): "®Id" below looks like mojibake of "&regId"
 * (HTML-entity corruption) — confirm against the pristine source */
3078 sscanf(line,"%ld",(long *) ®Id);
3079 if(regId >= mtMemoCfg.numRegions)
3081 printf("\n Invalid Region Id, may be >= the No. of regions:%d\n",mtMemoCfg.numRegions);
3082 #ifndef XEON_SPECIFIC_CHANGES
3087 mtMemoCfg.region[regId].regionId = regId;
3090 case 4: /*** INPUT: BktId (ranges from 0 to numBkts-1), No. of blks ***/
3091 if(bktUpdtCnt < numBkts)
3093 sscanf(line, "%ld %ld",(long *)&bktIdx, (long *)&bktNum);
3094 if(bktIdx >= numBkts)
3096 printf("\n Invalid Bucket Id, may be >= the No. of buckets:%ld\n",numBkts);
3101 if(bktIdx < MT_MAX_BKTS)
3103 mtMemoCfg.region[regId].bkt[bktIdx].blkSize = mtBktInfo[bktIdx].blkSize;
3104 mtMemoCfg.region[regId].bkt[bktIdx].numBlks = bktNum;
3105 cfgRegInfo[regId].pools[bktIdx].type = SS_POOL_DYNAMIC;
/* usable pool size = bucket block size minus SS message/data headers */
3106 cfgRegInfo[regId].pools[bktIdx].size = mtBktInfo[bktIdx].blkSize - (sizeof(SsMblk)+sizeof(SsDblk));
3109 if(bktUpdtCnt == numBkts)
3116 case 5: /* INPUT: Heapsize ***/
3117 sscanf(line, "%ld", (long *) &heapSz);
3118 mtMemoCfg.region[regId].heapsize = heapSz;
3120 if(regUpdtCnt != mtMemoCfg.numRegions)
3129 #ifdef SS_LOCKLESS_MEMORY
3131 sscanf(line, "%ld", (long *) &numBkts);
3132 mtGlobMemoCfg.numBkts = numBkts;
3133 #ifndef XEON_SPECIFIC_CHANGES
3134 mtDynMemoCfg.numRegions = mtMemoCfg.numRegions;
3137 #ifdef XEON_SPECIFIC_CHANGES
3138 CM_LOG_DEBUG(CM_LOG_ID_MT, "numRegions = %d numBkts = %d\n",
3139 mtDynMemoCfg.numRegions, mtGlobMemoCfg.numBkts);
3140 for(idx = 0; idx < mtDynMemoCfg.numRegions; idx++)
3142 for(idx = 0; idx < mtMemoCfg.numRegions; idx++)
3145 mtDynMemoCfg.region[idx].regionId = idx;
3146 mtDynMemoCfg.region[idx].numBkts = numBkts;
3154 if(bktUpdtCnt < numBkts)
3156 sscanf(line, "%ld %ld %ld %ld %ld %ld", (long *) &bktIdx,
3157 (long *) &bktSz, (long *) &bktNum,
3158 (long *) &bktSetSize, (long *) &bktRelThr,
3159 (long *) &bktAqurThr);
3160 /* Klock work fix ccpu00148484 */
3161 if(bktIdx < SS_MAX_POOLS_PER_REG)
3163 mtGlobMemoCfg.bkt[bktIdx].blkSize = bktSz;
3164 mtGlobMemoCfg.bkt[bktIdx].numBlks = bktNum;
3165 mtGlobMemoCfg.bkt[bktIdx].bucketSetSize = bktSetSize;
3166 #ifdef XEON_SPECIFIC_CHANGES
3167 CM_LOG_DEBUG(CM_LOG_ID_MT, "Pool [%d] blkSize %d numBlks %d bucketSetSize %d\n",
3168 bktUpdtCnt, mtGlobMemoCfg.bkt[bktIdx].blkSize,
3169 mtGlobMemoCfg.bkt[bktIdx].numBlks, mtGlobMemoCfg.bkt[bktIdx].bucketSetSize);
3171 if(bktIdx >= SS_MAX_POOLS_PER_REG)
3173 printf("\nNo. of Buckets/pools are > SS_MAX_POOLS_PER_REG:%d\n",SS_MAX_POOLS_PER_REG);
3179 for(idx = 0; idx < mtMemoCfg.numRegions; idx++)
3181 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetRelThreshold = bktRelThr;
3182 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetAcquireThreshold = bktAqurThr;
3183 #ifdef XEON_SPECIFIC_CHANGES
3184 CM_LOG_DEBUG(CM_LOG_ID_MT, "Pool [%d] blkSetRelThreshold %d blkSetAcquireThreshold %d\n",
3185 bktUpdtCnt, mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetRelThreshold,
3186 mtDynMemoCfg.region[idx].bkt[bktIdx].blkSetAcquireThreshold);
3192 #ifdef XEON_SPECIFIC_CHANGES
3193 if(bktUpdtCnt == numBkts)
3199 case 8: /* INPUT: Global Heapsize ***/
3200 sscanf(line, "%ld", (long *) &heapSz);
3201 mtGlobMemoCfg.heapSize = heapSz;
3202 CM_LOG_DEBUG(CM_LOG_ID_MT, "Global Heap size = %d\n", mtGlobMemoCfg.heapSize);
3210 memConfigured = FALSE;
3214 memConfigured = TRUE;
3222 /* mt028.201: modification: multiple procs support related changes */
3223 #ifndef SS_MULTIPLE_PROCS
3226 osCp.procId = PID_STK((ProcId) strtol(msOptArg, NULLP, 0));
3228 osCp.procId = (ProcId) strtol(msOptArg, NULLP, 0);
3231 #else /* SS_MULTIPLE_PROCS */
3235 procId = PID_STK((ProcId) strtol(msOptArg, NULLP, 0));
3237 procId = (ProcId) strtol(msOptArg, NULLP, 0);
3239 SAddProcIdLst(1, &procId);
3242 #endif /* SS_MULTIPLE_PROCS */
3246 osCp.configFilePath = msOptArg;
3270 * Desc: Get options from command line
3272 * Ret: option - success
3274 * EOF - end of options
3276 * Notes: Handles command lines like the following
3279 * then command line should look like this...
3280 * -a foo -b foo1 -c -d foo
3284 * while ((ret = SGetOpt(msArgc, msArgv, "ls")) != EOF )
3289 * nloops = atoi(msArgv[msOptInd]);
3292 * state1 = atoi(msArgv[msOptInd]);
/* SGetOpt: a local getopt(3)-style parser driven by the static state
 * msOptInd/msOptArg (and a position index `sp` within clustered flags).
 * A ':' after a letter in `opts` means the option takes an argument.
 * NOTE(review): fragmentary view — the function name line and several
 * returns are on lines not shown here. */
3304 int argc, /* argument count */
3305 char **argv, /* argument value */
3306 char *opts /* options */
3309 /* mt020.201 - Removed for no command line */
3317 /* mt020.201 - Addition for no command line */
3329 /*mt013.301 : Changes as per coding standards*/
/* end of argv, or empty argument: no more options */
3330 if (msOptInd >= (S16) argc || argv[msOptInd][0] == '\0')
3336 if (!strcmp(argv[msOptInd], "--"))
3341 else if (argv[msOptInd][0] != '-')
3349 c = argv[msOptInd][sp];
/* unknown option letter (or literal ':') */
3350 if (c == ':' || (cp = (S8 *) strchr(opts, c)) == (S8 *) NULLP)
3352 if (argv[msOptInd][++sp] == '\0')
/* option argument either attached ("-ofoo") or the next argv entry */
3363 if (argv[msOptInd][sp+1] != '\0') msOptArg = &argv[msOptInd++][sp+1];
3366 if (++msOptInd >= (S16) argc)
3371 else msOptArg = argv[msOptInd++];
3378 if (argv[msOptInd][++sp] == '\0')
3390 #endif /* NOCMDLINE */
3398 * Desc: This function starts system services execution; the
3399 * permanent tasks are started and the system enters a
/* Fragment of the SS start routine: releases the ssStarted semaphore
 * once per waiting thread so all created system-task threads proceed.
 * NOTE(review): the enclosing function name and body are mostly on
 * lines not shown here; the "+ 5" headroom presumably covers internal
 * service threads — confirm against the pristine source. */
3416 /* mt025.201 - Modification for adding lock to timer handler */
3417 for (i = 0; i <= SS_MAX_STSKS + 5; i++)
3419 sem_post(&osCp.dep.ssStarted);
3428 * indirect interface functions to system services service user
3434 * Fun: ssdAttachTTsk
3436 * Desc: This function sends the initial tick message to a TAPA
3437 * task if the task is a permanent task.
/* Builds a SS_EVNT_PERMTICK message and queues it on the owning system
 * task's demand queue so a permanent TAPA task starts receiving ticks.
 * NOTE(review): fragmentary view — declarations of ret/mBuf/mInfo and
 * several braces are on lines not shown here. */
3448 SsTTskEntry *tTsk /* pointer to TAPA task entry */
3455 if (tTsk->tskType == SS_TSK_PERMANENT)
3457 /* Send a permanent tick message to this task, to start
3460 ret = SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf);
3463 #if (ERRCLASS & ERRCLS_DEBUG)
3464 MTLOGERROR(ERRCLS_DEBUG, EMT001, ret, "SGetMsg() failed");
/* the SS message info lives at the front of the message buffer */
3469 mInfo = (SsMsgInfo *)mBuf->b_rptr;
3470 mInfo->eventInfo.event = SS_EVNT_PERMTICK;
3472 /* set up post structure */
3473 /* mt028.201: modification: multiple procs support related changes */
3474 #ifndef SS_MULTIPLE_PROCS
3475 mInfo->pst.dstProcId = SFndProcId();
3476 mInfo->pst.srcProcId = SFndProcId();
3477 #else /* SS_MULTIPLE_PROCS */
3478 mInfo->pst.dstProcId = tTsk->proc;
3479 mInfo->pst.srcProcId = tTsk->proc;
3480 #endif /* SS_MULTIPLE_PROCS */
3481 mInfo->pst.selector = SEL_LC_NEW;
3482 mInfo->pst.region = DFLT_REGION;
3483 mInfo->pst.pool = DFLT_POOL;
3484 mInfo->pst.prior = PRIOR3;
3485 mInfo->pst.route = RTESPEC;
3486 mInfo->pst.event = 0;
/* tick is self-addressed: source and destination are the task itself */
3487 mInfo->pst.dstEnt = tTsk->ent;
3488 mInfo->pst.dstInst = tTsk->inst;
3489 mInfo->pst.srcEnt = tTsk->ent;
3490 mInfo->pst.srcInst = tTsk->inst;
3492 ret = ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
3493 (tTsk->tskPrior * SS_MAX_MSG_PRI) + PRIOR3);
3499 #if (ERRCLASS & ERRCLS_DEBUG)
3500 MTLOGERROR(ERRCLS_DEBUG, EMT002, ret,
3501 "Could not write to demand queue");
3514 * Fun: ssdDetachTTsk
3516 * Desc: Does nothing.
/* Intentional no-op under MTSS: nothing to undo per-thread on detach. */
3527 SsTTskEntry *tTsk /* pointer to TAPA task entry */
3537 * Fun: ssdCreateSTsk
3539 * Desc: This function creates a system task. A thread is started
3540 * on the system task handler function defined later.
/* Initializes the task entry (ent/inst set to "not configured"), builds
 * the pthread attributes via ssdSetPthreadAttr, then creates the thread
 * running mtTskHdlr. Under SS_THR_REG_MAP the creation loops until the
 * new thread id fits into the thread/memory-region map.
 * NOTE(review): fragmentary view — the SS_SINGLE_THREADED numSTsks
 * checks and several braces are on lines not shown here. */
3551 SsSTskEntry *sTsk /* pointer to system task entry */
3555 pthread_attr_t attr;
3556 /* struct sched_param param_sched;*/
3558 #ifdef SS_THR_REG_MAP
3559 uint32_t threadCreated = FALSE;
3564 #ifdef SS_SINGLE_THREADED
3565 /* mt001.301 : Additions */
3567 #ifdef SS_MULTICORE_SUPPORT
/* single-threaded builds cap how many system tasks may exist; the cap
 * differs with multicore and watchdog support */
3568 if (osCp.numSTsks > 1)
3570 if (osCp.numSTsks > 0)
3571 #endif /* SS_MULTICORE_SUPPORT */
3573 #ifdef SS_MULTICORE_SUPPORT
3574 if (osCp.numSTsks > 3)
3576 if (osCp.numSTsks > 2)
3577 #endif /* SS_MULTICORE_SUPPORT */
3578 #endif /* SS_WATCHDOG */
3585 /* set the current executing entity and instance IDs to
3586 * 'not configured'. create the lock to access them.
3588 sTsk->dep.ent = ENTNC;
3589 sTsk->dep.inst = INSTNC;
3592 /* create the thread */
3593 pthread_attr_init(&attr);
3594 ssdSetPthreadAttr(sTsk->tskPrior, &attr);
3596 printf("\nCreating thread here %s %d\n", __FILE__, __LINE__);
3597 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
3598 if (sTsk->tskPrior == 0)
3600 printf("\nCreating RT thread #######################\n");
3601 #ifdef SS_THR_REG_MAP
3602 /* When the thread is created, we check for the memory mapping table if
3603 * threadId can be placed in thread memory map table. If it is not able to place
3604 * threadId is stored in temporary array. Once thread is created successfully,
3605 * thread_cancel is sent for each thread which was created before. All the
3606 * threads are made to wait on a sema which is the cancel point for the thread.
3608 while(threadCreated == FALSE)
3611 ret = pthread_create(&sTsk->dep.tId, &attr, mtTskHdlr, (Ptr)sTsk);
3614 DU_LOG("\nDU APP : Failed to create thread. Cause[%d]",ret);
3615 pthread_attr_destroy(&attr);
3617 #if (ERRCLASS & ERRCLS_DEBUG)
3618 MTLOGERROR(ERRCLS_DEBUG, EMT004, ERRZERO, "Could not create thread");
3623 #ifdef SS_THR_REG_MAP
3624 threadCreated = ssCheckAndAddMemoryRegionMap(sTsk->dep.tId,
3632 #ifdef SS_THR_REG_MAP
3633 /* When the thread is created, we check for the memory mapping table if
3634 * threadId can be placed in thread memory map table. If it is not able to place
3635 * threadId is stored in temporary array. Once thread is created successfully,
3636 * thread_cancel is sent for each thread which was created before. All the
3637 * threads are made to wait on a sema which is the cancel point for the thread.
3639 while(threadCreated == FALSE)
3642 ret = pthread_create(&sTsk->dep.tId, &attr, mtTskHdlr, (Ptr)sTsk);
3646 /* mt020.201 - Addition for destroying thread attribute object attr */
3647 pthread_attr_destroy(&attr);
3649 #if (ERRCLASS & ERRCLS_DEBUG)
3650 MTLOGERROR(ERRCLS_DEBUG, EMT004, ERRZERO, "Could not create thread");
3655 #ifdef SS_THR_REG_MAP
3656 threadCreated = ssCheckAndAddMemoryRegionMap(sTsk->dep.tId,
3663 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
3664 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
/* hand out monotonically increasing lwp ids for affinity bookkeeping;
 * `static` makes the counter persist across calls */
3666 static uint32_t stLwpId = 3;
3667 sTsk->dep.lwpId = ++stLwpId;
3669 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
3671 /* mt020.201 - Addition for destroying thread attribute object attr */
3672 pthread_attr_destroy(&attr);
/* pthread_create wrapper (function name line is not visible here —
 * presumably SPThreadCreate; confirm against the pristine source).
 * Allocates a SPThreadCreateArg carrying the user's start_routine and
 * argument, then creates the thread on pthreadCreateHdlr, which blocks
 * on ssStarted before invoking the user routine. Under SS_THR_REG_MAP
 * creation loops until the thread id fits the memory-region map.
 * Ownership: threadArg is heap-allocated here and must be released by
 * the thread side — TODO confirm where it is freed. */
3681 pthread_attr_t* attr,
3682 void *(*start_routine) (void *),
3687 #ifdef SS_THR_REG_MAP
3688 uint32_t threadCreated = FALSE;
3691 SPThreadCreateArg* threadArg = (SPThreadCreateArg*)malloc(sizeof(SPThreadCreateArg));
3692 /* Klock work fix ccpu00148484 */
3693 if(threadArg == NULLP)
3697 threadArg->argument = arg;
3698 threadArg->start_routine = start_routine;
3701 printf("\nCreating thread here %s %d\n", __FILE__, __LINE__);
3703 #ifdef SS_THR_REG_MAP
3704 /* When the thread is created, we check for the memory mapping table if
3705 * threadId can be placed in thread memory map table. If it is not able to place
3706 * threadId is stored in temporary array. Once thread is created successfully,
3707 * thread_cancel is sent for each thread which was created before. All the
3708 * threads are made to wait on a sema which is the cancel point for the thread.
3710 while(threadCreated == FALSE)
3713 /*pthreadCreateHdlr */
3714 if (((retVal = pthread_create(tid, attr, pthreadCreateHdlr, threadArg))) != 0)
3719 #ifdef SS_THR_REG_MAP
3720 threadCreated = ssCheckAndAddMemoryRegionMap(*tid, SS_MAX_REGS - 1);
3731 * Fun: Set Pthread Attributes
3733 * Desc: This function is used to set various explicit
3734 * pthread attributes like, priority scheduling,etc
/* Maps a SS task priority to a sched_param priority (inverted: lower
 * tskPrior => higher scheduler priority), then configures explicit
 * scheduling, stack size, system scope, detached state, and (under
 * TENB_RTLIN_CHANGES) SCHED_FIFO on the attribute object. */
3744 static S16 ssdSetPthreadAttr
3747 pthread_attr_t *attr
3750 struct sched_param param;
/* NOTE(review): "¶m" below looks like mojibake of "&param"
 * (HTML-entity corruption) — confirm against the pristine source */
3753 SMemSet(¶m, 0, sizeof(param));
3755 #ifndef TENB_T2K3K_SPECIFIC_CHANGES
3756 param.sched_priority = 100 - 1 - tskPrior;
3758 param.sched_priority = 100 - 10 - tskPrior;
3761 #if 1/* Nawas:: Overriding DL RLC prority to one higher than iccserv */
3762 /* TODO:: This can be avoided by reducing the priority
3763 * of iccserv thread in l1_master.sh*/
3765 if (clusterMode == RADIO_CLUSTER_MODE)
3767 if(tskPrior == PRIOR1)
3769 param.sched_priority = 91;
3776 printf("\nSet priority %u\n", param.sched_priority);
3778 /* Set Scheduler to explicit, without this none of the below
3779 pthread attrs works */
3780 #ifdef TENB_RTLIN_CHANGES
3781 pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
3784 pthread_attr_setstacksize(attr, (size_t)MT_TASK_STACK);
3785 pthread_attr_setscope(attr, PTHREAD_SCOPE_SYSTEM);
3786 pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED);
3787 #ifdef TENB_RTLIN_CHANGES
3788 pthread_attr_setschedpolicy(attr, SCHED_FIFO);
3790 pthread_attr_setschedparam(attr, ¶m);
3794 } /* ssdSetPthreadAttr */
3796 /************* multi-core support **************/
3797 /*mt013.301 :Added SS_AFFINITY_SUPPORT */
3798 #if defined(SS_MULTICORE_SUPPORT) ||defined(SS_AFFINITY_SUPPORT)
3802 * Fun: Get the current core/cpu affinity for a thread/lwp
3804 * Desc: This function is used to get the current processor/core
3805 * affinity for a a system task (thread/lwp). It sets the
3806 * affinity based on the mode supplied by the caller.
3809 * RFAILED - failed, general (optional)
/* Looks up the pthread id (Linux) or lwp id (Solaris) for *tskId in the
 * system task table, then queries the kernel for its CPU affinity and
 * returns the bound core in *coreId. */
3818 SSTskId *tskId, /* filled in with system task ID */
3819 uint32_t *coreId /* the core/processor id to which the affinity is set */
3829 uint32_t cpuInd = 0;
3830 /*mt013.301 :Fix for TRACE5 feature crash due to missing TRC MACRO*/
3833 uint32_t lwpId = *tskId;
/* linear search: task ids are not usable as table indices because
 * entries may be freed and renumbered on task termination */
3837 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3839 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3841 tId = osCp.sTskTbl[tskInd].dep.tId;
3846 /* if tskId is not found in the tskTbl */
3847 if (tskInd == SS_MAX_STSKS)
3849 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3854 /* initialize the cpu mask */
3857 /* set thread affinity for linux */
3858 if (pthread_getaffinity_np(tId, sizeof(cpuSet), &cpuSet) < 0)
3860 #if (ERRCLASS & ERRCLS_DEBUG)
3861 MTLOGERROR(ERRCLS_DEBUG, EMT037, ERRZERO, "Could not get thread affinity\n");
3864 } /* end if pthread_setaffinity fails */
/* report the first CPU present in the returned mask */
3866 for (cpuInd = 0; cpuInd <CPU_SETSIZE; cpuInd++)
3868 if (CPU_ISSET (cpuInd, & cpuSet))
3877 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3879 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3881 lwpId = osCp.sTskTbl[tskInd].dep.lwpId;
3886 /* if tskId is not found in the tskTbl */
3887 if (tskInd == SS_MAX_STSKS)
3889 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3893 /* set thread affinity for Solaris */
3894 if (processor_bind(P_LWPID, lwpId, PBIND_QUERY, (processorid_t*)coreId) < 0)
3896 #if (ERRCLASS & ERRCLS_DEBUG)
3897 MTLOGERROR(ERRCLS_DEBUG, EMT037, ERRZERO, "Could not get thread affinity\n");
3900 } /* end if processor_bind fails */
3903 #endif /* SS_LINUX */
3907 } /* ssdGetAffinity */
3912 * Fun: Set the core/cpu affinity for a thread/lwp
3914 * Desc: This function is used to set processor/core affinity for a
3915 * a system task (thread/lwp). It sets the affinity based on the
3916 * mode supplied by the caller.
3919 * RFAILED - failed, general (optional)
/* Mirror of ssdGetAffinity: resolves *tskId to a pthread id (Linux) or
 * lwp id (Solaris) via the system task table, then binds it to coreId
 * with pthread_setaffinity_np / processor_bind. */
3928 SSTskId *tskId, /* filled in with system task ID */
3929 uint32_t coreId /* the core/processor id to which the affinity has to be set */
3933 uint32_t tskInd = 0;
3938 /*mt013.301 :Fix for TRACE5 feature crash due to missing TRC MACRO*/
3941 uint32_t lwpId = *tskId;
3947 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3949 /* Here tskId can not be used as index as the task may be terminated if
3950 there is a TERM even for that tsk, thus breaking the task Id numbering
3952 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3954 tId = osCp.sTskTbl[tskInd].dep.tId;
3959 /* if tskId is not found in the tskTbl */
3960 if (tskInd == SS_MAX_STSKS)
3962 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
3966 /* initialize the cpu mask */
3969 /* set the cpu mask */
3970 CPU_SET(coreId, &cpuSet);
3972 /* set thread affinity for linux */
3973 if (pthread_setaffinity_np(tId, sizeof(cpuSet), &cpuSet) < 0)
3975 #if (ERRCLASS & ERRCLS_DEBUG)
3976 MTLOGERROR(ERRCLS_DEBUG, EMT038, ERRZERO, "Could not set thread affinity\n");
3979 } /* end if pthread_setaffinity fails */
3983 for (tskInd = 0; tskInd < SS_MAX_STSKS; tskInd++)
3985 /* comment: modify to use tskId as lwpId to avoid the loop and the new lwpId variable in dep */
3986 if (osCp.sTskTbl[tskInd].tskId == *tskId)
3988 lwpId = osCp.sTskTbl[tskInd].dep.lwpId;
3993 /* if tskId is not found in the tskTbl */
3994 if (tskInd == SS_MAX_STSKS)
3996 MTLOGERROR(ERRCLS_DEBUG, EMT036, ERRZERO, "Invalid system task Id\n");
4000 /* set thread affinity for Solaris */
4001 if (processor_bind(P_LWPID, lwpId, coreId, NULL) < 0)
4003 #if (ERRCLASS & ERRCLS_DEBUG)
4004 MTLOGERROR(ERRCLS_DEBUG, EMT038, ERRZERO, "Could not set thread affinity\n");
4007 } /* end if processor_bind fails */
4010 #endif /* SS_LINUX */
4012 } /* ssdSetAffinity */
4014 #endif /* SS_MULTICORE_SUPPORT || SS_AFFINITY_SUPPORT */
4015 /************ end multi-core support *************/
4020 * Fun: ssdDestroySTsk
4022 * Desc: This function destroys a system task. A terminate
4023 * event message is sent to the thread function.
/* Posts an SS_EVNT_TERM message onto the task's demand queue; the task
 * thread terminates itself when it dequeues the event (see mtTskHdlMsg). */
4034 SsSTskEntry *sTsk /* pointer to system task entry */
4043 /* we send a message to this system task to tell it to die */
4044 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
4047 #if (ERRCLASS & ERRCLASS_DEBUG)
4048 MTLOGERROR(ERRCLS_DEBUG, EMT005, ERRZERO, "Could not get a message");
4054 mInfo = (SsMsgInfo *)mBuf->b_rptr;
4055 mInfo->eventInfo.event = SS_EVNT_TERM;
/* priority 0 = highest, so the terminate event is handled promptly */
4057 if (ssDmndQPutLast(&sTsk->dQ, mBuf, 0) != ROK)
4061 #if (ERRCLASS & ERRCLASS_DEBUG)
4062 MTLOGERROR(ERRCLS_DEBUG, EMT006, ERRZERO,
4063 "Could not write to demand queue");
4073 /* mt023.201 - Added SThreadYield function to yield CPU
4077 * Desc: This function defers thread execution to any other ready
/* Yields the CPU: on Linux via a zero-timeout select() (tw is presumably
 * a zeroed struct timeval declared on lines not shown — confirm), and on
 * other UNIXes via sleep(0). Returns ROK on success, RFAILED otherwise. */
4088 S16 SThreadYield(void)
4092 /* mt024.201 - separated Linux and other UNIX implementations
4098 /* Set sleep value to 0 to yield CPU */
4102 return (select(0,0,0,0,&tw) == 0 ? ROK : RFAILED);
4104 #else /* other UNICes */
4106 return (sleep(0) == 0 ? ROK : RFAILED);
4108 #endif /* SS_LINUX */
4115 * Fun: Register timer
4117 * Desc: This function is used to register a timer
4118 * function for the service user. System services
4119 * will invoke the timer activation function
4120 * passed to it at the specified intervals.
4124 * Notes: Timing is handled by the common timers. The
4125 * ticks are handled by a thread that uses
4126 * nanosleep() and thus timing precision will not
/* Initializes the entry's common-timer array and fills the start
 * argument from the global timer queue (osCp.dep) before starting the
 * timer (start call is on lines not shown here). */
4134 SsTmrEntry *tmr /* pointer to timer entry */
4142 /* initialize common timers */
4143 cmInitTimers(tmr->dep.timers, TMR_DEF_MAX);
4146 /* start the timer */
4147 arg.tq = osCp.dep.tmrTq;
4148 arg.tqCp = &osCp.dep.tmrTqCp;
4149 arg.timers = tmr->dep.timers;
4154 arg.max = TMR_DEF_MAX;
4155 arg.wait = tmr->interval;
4165 * Fun: Deregister timer
4167 * Desc: This function is used to deregister a timer function.
/* Symmetric to the register path: rebuilds the same common-timer
 * argument so the timer can be stopped (stop call is on lines not
 * shown here). */
4178 SsTmrEntry *tmr /* pointer to timer entry */
4186 /* stop the timer */
4187 arg.tq = osCp.dep.tmrTq;
4188 arg.tqCp = &osCp.dep.tmrTqCp;
4189 arg.timers = tmr->dep.timers;
4194 arg.max = TMR_DEF_MAX;
4195 arg.wait = tmr->interval;
4205 * Fun: Critical error
4207 * Desc: This function is called when a critical error occurs.
/* Fatal-error path: formats a diagnostic, SIGKILLs every other system
 * task thread, then exits the calling thread. Does not return. */
4218 Seq seq, /* sequence number */
4219 Reason reason /* reset reason */
4229 /* get calling task ID */
4230 tId = pthread_self();
4233 /* set up the message to display */
/* NOTE(review): the format says "errno = %d" but receives `seq`, and
 * tId is truncated to uint8_t for %x — message labels look stale;
 * confirm intended output before relying on this log */
4234 sprintf(errBuf, "\n\nFATAL ERROR - taskid = %x, errno = %d,"
4235 "reason = %d\n\n", (uint8_t)tId, seq, reason);
4239 /* delete all system tasks */
4240 for (i = 0; i < SS_MAX_STSKS; i++)
4242 if (osCp.sTskTbl[i].used
4243 && !pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
4245 pthread_kill(osCp.sTskTbl[i].dep.tId, SIGKILL);
4251 pthread_exit(NULLP);
4254 /* won't reach here */
4263 * Desc: This function is called to log an error.
/* Formats and displays a software-error report (entity, instance, proc,
 * file/line, error class/code/value, description). For ERRCLS_DEBUG the
 * error is fatal: all other system task threads are killed and the
 * calling thread exits. */
4274 Ent ent, /* Calling layer's entity id */
4275 Inst inst, /* Calling layer's instance id */
4276 ProcId procId, /* Calling layer's processor id */
4277 Txt *file, /* file name where error occured */
4278 S32 line, /* line in file where error occured */
4279 ErrCls errCls, /* error class */
4280 ErrCode errCode, /* layer unique error code */
4281 ErrVal errVal, /* error value */
4282 Txt *errDesc /* description of error */
4295 /* get calling task ID */
4297 tId = pthread_self();
/* translate the error class to a printable label */
4303 case ERRCLS_ADD_RES:
4304 errClsMsg = "ERRCLS_ADD_RES";
4307 case ERRCLS_INT_PAR:
4308 errClsMsg = "ERRCLS_INT_PAR";
4312 errClsMsg = "ERRCLS_DEBUG";
4315 /* mt028.201 : Addition - ERRCLS_FTHA changes */
4317 errClsMsg = "ERRCLS_FTHA";
4321 errClsMsg = "INVALID ERROR CLASS!";
4326 /*mt009.301 Fixed 64BIT compilation warnings*/
4329 "\nmtss(posix): sw error: ent: %03d inst: %03d proc id: %03d \n"
4330 "file: %s line: %03d errcode: %05d errcls: %s\n"
4331 "errval: %05d errdesc: %s\n",
4332 ent, inst, procId, file, line, errCode, errClsMsg, errVal, errDesc);
4335 "\nmtss(posix): sw error: ent: %03d inst: %03d proc id: %03d \n"
4336 "file: %s line: %03ld errcode: %05ld errcls: %s\n"
4337 "errval: %05ld errdesc: %s\n",
4338 ent, inst, procId, file, line, errCode, errClsMsg, errVal, errDesc);
4340 SDisplay(0, errBuf);
4341 /* mt001.301 : Additions */
4342 #ifdef SS_LOGGER_SUPPORT
4344 #endif /* SS_LOGGER_SUPPORT */
4348 /* debug errors halt the system */
4349 if (errCls == ERRCLS_DEBUG)
4351 /* mt001.301 : Additions */
4352 #ifdef SS_LOGGER_SUPPORT
4354 #endif /* SS_LOGGER_SUPPORT */
4355 /* delete all system tasks */
4356 for (i = 0; i < SS_MAX_STSKS; i++)
4358 if (osCp.sTskTbl[i].used
4359 && !pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
4361 pthread_kill(osCp.sTskTbl[i].dep.tId, SIGKILL);
4367 pthread_exit(NULLP);
4379 * Fun: Register driver task
4381 * Desc: This function is called to register the handlers for a
/* No-op under MTSS (body not visible beyond the parameter list). */
4393 SsDrvrTskEntry *drvrTsk /* driver task entry */
4400 /* mt001.30 : Additions */
4403 * Fun: Deregister driver task
4405 * Desc: This function is called to deregister the handlers for a
/* No-op under MTSS (body not visible beyond the parameter list). */
4417 SsDrvrTskEntry *drvrTsk /* driver task entry */
4430 * mt003.301 Additions - SDeRegTTsk fix
/* TAPA task termination/deregistration helper (presumably
 * ssdProcTTskTerm — the name line is not visible; confirm). Under the
 * system task table lock and the TAPA task table semaphore it detaches
 * the task from its system task, clears the ent/inst -> task-id mapping,
 * resets the entry fields and returns the entry to the free list. */
4432 #ifdef SS_MULTIPLE_PROCS
4439 #else /*SS_MULTIPLE_PROCS*/
4445 #endif /*SS_MULTIPLE_PROCS*/
4447 #ifdef SS_MULTIPLE_PROCS
4460 /* We check the sTsk element; if it is not NULLP, the
4461 * task is attached. So we have to detach it before
4462 * deregistering the task.
4464 ret = SLock(&osCp.sTskTblLock);
4467 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not lock system task table");
4470 SS_ACQUIRE_ALL_SEMA(&osCp.tTskTblSem, ret);
4473 #if (ERRCLASS & ERRCLS_DEBUG)
4474 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not lock TAPA task table");
/* semaphore acquisition failed: release the table lock before erroring */
4476 if ( SUnlock(&osCp.sTskTblLock) != ROK)
4478 #if (ERRCLASS & ERRCLS_DEBUG)
4479 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not Unlock system task table");
4487 #ifdef SS_MULTIPLE_PROCS
/* re-run the task's init function (termination variant) if provided */
4489 if (tTsk->initTsk != NULLP)
4492 (Void)(*(tTsk->initTsk))(proc, ent, inst,
4495 &(osCp.tTskTbl[idx].xxCb));
4497 (Void)(*(tTsk->initTsk))(proc, ent, inst,
4500 &(osCp.tTskTbl[idx].xxCb));
4501 #endif /* USE_MEMCAL */
4503 #endif /* SS_MULTIPLE_PROCS */
4505 if (tTsk->sTsk != NULLP)
4509 sTsk->dep.ent = ent;
4510 sTsk->dep.inst = inst;
/* remove this TAPA task from the owning system task's task list */
4512 for (n = 0; n < SS_MAX_TTSKS; n++)
4514 if (sTsk->tTsks[n] == idx)
4516 sTsk->tTsks[n] = SS_INVALID_IDX;
4522 /* call the implementation to detach the task */
4523 ssdDetachTTsk(tTsk);
4525 sTsk->dep.ent = ENTNC;
4526 sTsk->dep.inst = INSTNC;
4529 /* Now we empty the entry for this task and update the table
4532 #ifdef SS_MULTIPLE_PROCS
4533 osCp.tTskIds[procIdx][ent][inst] = SS_TSKNC;
4534 #else /* SS_MULTIPLE_PROCS */
4535 osCp.tTskIds[ent][inst] = SS_TSKNC;
4536 #endif /* SS_MULTIPLE_PROCS */
4539 #ifdef SS_MULTIPLE_PROCS
4540 tTsk->proc = PROCNC;
4541 #endif /* SS_MULTIPLE_PROCS */
4543 tTsk->inst = INSTNC;
4544 tTsk->tskType = TTUND;
4545 tTsk->initTsk = NULLP;
4546 tTsk->actvTsk = NULLP;
/* push the entry back onto the free list */
4549 tTsk->nxt = osCp.nxtTTskEntry;
4550 osCp.nxtTTskEntry = idx;
4553 #ifdef SS_MULTIPLE_PROCS
4554 /* mark the control block for this task as invalid */
4555 osCp.tTskTbl[idx].xxCb = NULLP;
4558 SS_RELEASE_ALL_SEMA(&osCp.tTskTblSem);
4559 if ( SUnlock(&osCp.sTskTblLock) != ROK)
4561 #if (ERRCLASS & ERRCLS_DEBUG)
4562 MTLOGERROR(ERRCLS_DEBUG, EMTXXX, ERRZERO, "Could not Unlock system task table");
4569 //#ifndef SPLIT_RLC_DL_TASK
4570 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
4571 #if defined (L2_L3_SPLIT) && defined(SPLIT_RLC_DL_TASK)
4572 Void ysMtTskHdlr(Void);
4573 Void ysMtPollPhyMsg(uint8_t region);
4574 Void ysMtRcvPhyMsg(Void);
/* T2K/3K L2 system-task thread entry (split-RLC variant): after the
 * ssStarted gate it loops polling the PHY for L1 messages. */
4575 Void *mtTskHdlrT2kL2
4577 Ptr tskPtr /* pointer to task entry */
4583 /* wait for SS to come up */
4584 /* It is required to block on this semaphore before starting actual processing of
4585 the thread because the creator of this thread might want to cancel it without
4586 doing any processing. When this semaphore is released, it means the creator gives
4587 the go ahead for actual processing and we should never come back to this point */
4588 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4597 ysMtPollPhyMsg(0); /* blocks, waiting for messages for L2
4598 * (processes L1 msgs) */
4604 Void ysMtTskHdlr(Void);
4605 Void YsPhyRecvMsg();
/* Non-split variant: same gate, then alternates PHY message handling
 * with draining the system task's demand queue via mtTskHdlMsg. */
4606 Void *mtTskHdlrT2kL2
4608 Ptr tskPtr /* pointer to task entry */
4614 /* get out the system task entry from the parameter */
4615 sTsk = (SsSTskEntry *) tskPtr;
4617 /* wait for SS to come up */
4618 /* It is required to block on this semaphore before starting actual processing of
4619 the thread because the creator of this thread might want to cancel it without
4620 doing any processing. When this semaphore is released, it means the creator gives
4621 the go ahead for actual processing and we should never come back to this point */
4622 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4625 #ifndef RGL_SPECIFIC_CHANGES
4633 #ifdef V5GTF_SPECIFIC_CHANGES
4636 ysMtTskHdlr(); /* blocks, waiting for messages for L2
4637 * (processes L1 msgs) */
4639 /* get a message from the demand queue */
4641 #ifdef RLC_MAC_DAT_REQ_RBUF
4642 rgDlDatReqBatchProc();
4645 ret = mtTskHdlMsg(sTsk);
4648 /* exit the for loop here */
4651 #if defined(SPLIT_RLC_DL_TASK) && defined(RLC_MAC_STA_RSP_RBUF)
4658 #endif /* TENB_T2K3K_SPECIFIC_CHANGES */
/* Trampoline used by the pthread-create wrapper: blocks on the ssStarted
 * semaphore (retrying on EINTR) so the creator can cancel the thread
 * before any work happens, then invokes the user's start routine with
 * its argument. NOTE(review): the heap-allocated SPThreadCreateArg does
 * not appear to be freed on the visible lines — confirm ownership. */
4661 void *pthreadCreateHdlr(void * arg)
4664 SPThreadCreateArg* pthreadCreateArg = (SPThreadCreateArg*)arg;
4665 /* mt038.201 changed how sem_wait is called */
4666 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4669 pthreadCreateArg->start_routine(pthreadCreateArg->argument);
4677 * Desc: This is the system task handler function. It blocks on
4678 * the system task's demand queue. On receiving a message,
4679 * it identifies the target TAPA task, verifies that the
4680 * TAPA task belongs to this system task and if so, calls
4681 * the activation function of that TAPA task with the
4682 * received message. The task activation function or the
4683 * timer activation function may be called.
4685 * Ret: (thread function)
/* Thread main loop for a system task: gate on ssStarted, then forever
 * wait on the demand queue and dispatch via mtTskHdlMsg. */
4694 Ptr tskPtr /* pointer to task entry */
4700 /* get out the system task entry from the parameter */
4701 sTsk = (SsSTskEntry *) tskPtr;
4704 /* wait for SS to come up */
4706 /* mt038.201 changed how sem_wait is called */
4707 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
4709 #ifdef XEON_SPECIFIC_CHANGES
4710 printf("\n**********MT Task Handler********\n");
4714 /* Wait for a message from the demand queue */
4715 #ifdef SS_CDMNDQ_SUPPORT
4716 ret = ssCDmndQWait(&sTsk->dQ);
4718 ret = ssDmndQWait(&sTsk->dQ);
4723 ret = mtTskHdlMsg(sTsk);
4738 * Desc: This is the system task handler function. It blocks on
4739 * the system task's demand queue. On receiving a message,
4740 * it identifies the target TAPA task, verifies that the
4741 * TAPA task belongs to this system task and if so, calls
4742 * the activation function of that TAPA task with the
4743 * received message. The task activation function or the
4744 * timer activation function may be called.
4746 * Ret: (thread function)
4761 SsTTskEntry *tTsk=NULLP;
4764 Buffer *mBuf2=NULLP;
4766 SsMsgInfo *mInfo=NULLP;
4768 /* mt028.201: modification: multiple procs support related changes */
4769 #ifndef SS_MULTIPLE_PROCS
4771 PAIFTMRS16 tmrActvFnMt = NULLP;
4773 /* mt015.301 Initialized the timer activation functions with NULLP */
4774 PFS16 tmrActvFn = NULLP;
4776 PAIFTMRS16 tmrActvFn =NULLP;
4777 uint16_t procIdIdx =0;
4778 #endif /* SS_MULTIPLE_PROCS */
4779 /* mt003.301 Modifications */
4780 #ifdef SS_THREAD_PROFILE
4782 #endif /* SS_THREAD_PROFILE */
4785 ret = ssDmndQGet(&sTsk->dQ, &mBuf, SS_DQ_FIRST);
4788 /* nothing to receive */
4792 /* if we can't lock this system task entry, return the message */
4793 ret = SLock(&sTsk->lock);
4797 #if (ERRCLASS & ERRCLS_DEBUG)
4798 MTLOGERROR(ERRCLS_DEBUG, EMT007, (ErrVal) ret,
4799 "Could not lock system task entry");
4809 mBuf2 = mBuf->b_next;
4811 /* find out what kind of message this is */
4812 mInfo = (SsMsgInfo *)mBuf->b_rptr;
4813 #ifdef SS_MEM_WL_DEBUG
4814 mtTskBuffer1 = mBuf2;
4816 mtTskBuffer2 = mBuf2->b_next;
4818 if(mInfo == 0x5050505)
4822 cmAnalyseBtInfo((PTR) mBuf,4);
4824 printf("\n In trouble .... \n");
4826 else if (mInfo == 0x2020202)
4829 cmAnalyseBtInfo((PTR) mBuf,1);
4830 printf("\n In trouble .... \n");
4832 #endif /* SS_MEM_WL_DEBUG */
4833 switch (mInfo->eventInfo.event)
4835 /* this is a termination event, we die */
4837 /* release the message */
4840 /* Unlock the system task entry and lock the system
4841 * task table to clean our entry up.
4843 SUnlock(&sTsk->lock);
4845 ret = SLock(&osCp.sTskTblLock);
4849 #if (ERRCLASS & ERRCLS_DEBUG)
4850 MTLOGERROR(ERRCLS_DEBUG, EMT008, (ErrVal) ret,
4851 "Could not lock system task table");
4853 /* what to do here? */
4857 /* clean up the system task entry */
4860 /* mt003.301 Modifications - SDeRegTTsk */
4861 /* sTsk->numTTsks = 0; */
4862 SDestroyLock(&sTsk->lock);
4863 ssDestroyDmndQ(&sTsk->dQ);
4865 /* lock for current executing TAPA task ID */
4867 /* make this entry available in the system task table */
4868 sTsk->nxt = osCp.nxtSTskEntry;
4869 for (i = 0; i < SS_MAX_STSKS; i++)
4871 if (sTsk == &osCp.sTskTbl[i])
4873 osCp.nxtSTskEntry = i;
4880 /* unlock the system task table */
4881 SUnlock(&osCp.sTskTblLock);
4886 /* this is a data message or a permanent task keep-alive message */
4888 case SS_EVNT_PERMTICK:
4889 /* message to a task. find the destination task */
4890 /* mt028.201: modification: multiple procs support related changes */
4891 #ifdef SS_MULTIPLE_PROCS
4892 procIdIdx = SGetProcIdIdx(mInfo->pst.dstProcId);
4894 if (procIdIdx == SS_INV_PROCID_IDX)
4900 idx = osCp.tTskIds[procIdIdx][mInfo->pst.dstEnt][mInfo->pst.dstInst];
4901 #else /* SS_MULTIPLE_PROCS */
4902 idx = osCp.tTskIds[mInfo->pst.dstEnt][mInfo->pst.dstInst];
4903 #endif /* SS_MULTIPLE_PROCS */
4905 /* verify that it hasn't been deregistered */
4906 if (idx == SS_TSKNC)
4912 /* verify that this system task is still running it */
4913 tTsk = &osCp.tTskTbl[idx];
4914 if (tTsk->sTsk != sTsk)
4920 /* set the current executing TAPA task ID */
4921 sTsk->dep.ent = mInfo->pst.dstEnt;
4922 sTsk->dep.inst = mInfo->pst.dstInst;
4924 /* copy the Pst structure into a local duplicate */
4925 for (i = 0; i < (S16) sizeof(Pst); i++)
4926 *(((uint8_t *)(&nPst)) + i) = *(((uint8_t *)&mInfo->pst) + i);
4928 /* Give the message to the task activation function. If
4929 * its a normal data message, we pass it, if this is a
4930 * keep-alive message for a permanent task then we pass
4931 * NULLP in place of the message to the task activation
4934 if (mInfo->eventInfo.event == SS_EVNT_DATA)
4936 #ifndef RGL_SPECIFIC_CHANGES
4937 #ifdef SS_TSKLOG_ENABLE
4938 uint32_t t = MacGetTick();
4941 /* mt003.301 Modifications */
4942 #if SS_THREAD_PROFILE
4943 tTsk->curEvent = nPst.event;
4945 #endif /* SS_THREAD_PROFILE */
4946 tTsk->actvTsk(&nPst, mBuf);
4947 #ifndef RGL_SPECIFIC_CHANGES
4948 #ifdef SS_TSKLOG_ENABLE
4949 SStopTask(t,PID_SSI_TSK);
4952 #if SS_THREAD_PROFILE
4954 tTsk->curEvtTime = (uint32_t)(et2 - et1);
4955 tTsk->totTime += (uint64_t)tTsk->curEvtTime;
4956 #endif /* SS_THREAD_PROFILE */
4960 #if (ERRCLASS & ERRCLS_DEBUG)
4961 /* this message should only come to a permanent task */
4962 if (tTsk->tskType != SS_TSK_PERMANENT)
4964 MTLOGERROR(ERRCLS_DEBUG, EMT009, ERRZERO, "Logic failure");
4968 tTsk->actvTsk(&nPst, NULLP);
4970 /* We need to re-send this message back to ourselves so
4971 * the permanent task continues to run.
4973 /* Check if this task got deregistered or detached
4974 * by the activation function; if so, there's nothing
4975 * more to do here, otherwise go ahead.
4978 if (tTsk->used == TRUE && tTsk->sTsk != NULLP)
4980 ret = ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
4981 ((tTsk->tskPrior) * SS_MAX_MSG_PRI) +
4985 /* failure here is a real problem */
4988 #if (ERRCLASS & ERRCLS_DEBUG)
4989 MTLOGERROR(ERRCLS_DEBUG, EMT010, ERRZERO,
4990 "Could not write to demand queue");
4996 /* unset the current executing TAPA task ID */
4997 sTsk->dep.ent = ENTNC;
4998 sTsk->dep.inst = INSTNC;
5003 /* timer event. find the timer entry */
5004 idx = mInfo->eventInfo.u.tmr.tmrIdx;
5006 /* lock the timer table, coz we're going to peek in it */
5007 ret = SLock(&osCp.tmrTblLock);
5011 #if (ERRCLASS & ERRCLS_DEBUG)
5012 MTLOGERROR(ERRCLS_DEBUG, EMT011, (ErrVal) ret,
5013 "Could not lock timer table");
5019 /* Verify that this timer entry is still around and that it
5020 * belongs to our task.
5022 if (osCp.tmrTbl[idx].used == FALSE
5023 /* mt028.201: modification: multiple procs support related changes */
5024 #ifdef SS_MULTIPLE_PROCS
5025 || osCp.tmrTbl[idx].ownerProc != mInfo->pst.dstProcId
5026 #endif /* SS_MULTIPLE_PROCS */
5027 || osCp.tmrTbl[idx].ownerEnt != mInfo->pst.dstEnt
5028 || osCp.tmrTbl[idx].ownerInst != mInfo->pst.dstInst)
5030 SUnlock(&osCp.tmrTblLock);
5035 /* mt005.21: addition */
5036 /* set the current executing TAPA task ID */
5037 sTsk->dep.ent = mInfo->pst.dstEnt;
5038 sTsk->dep.inst = mInfo->pst.dstInst;
5040 #ifndef SS_MULTIPLE_PROCS
5042 /*mt006.301 Adding Initializing the tmrActvFnMt*/
5043 tmrActvFnMt = NULLP;
5044 if (osCp.tmrTbl[idx].ssTmrActvFn.mtFlag == TRUE)
5046 tmrActvFnMt = osCp.tmrTbl[idx].ssTmrActvFn.actvFnc.tmrActvFnMt;
5052 tmrActvFn = osCp.tmrTbl[idx].ssTmrActvFn.actvFnc.tmrActvFn;
5055 /* unlock the timer table */
5056 SUnlock(&osCp.tmrTblLock);
5058 /* activate the timer function */
5059 /* mt028.201: modification: multiple procs support related changes */
5060 #ifndef SS_MULTIPLE_PROCS
5064 tmrActvFnMt(osCp.tmrTbl[idx].ownerEnt,
5065 osCp.tmrTbl[idx].ownerInst);
5073 tmrActvFn(osCp.tmrTbl[idx].ownerProc, osCp.tmrTbl[idx].ownerEnt,
5074 osCp.tmrTbl[idx].ownerInst);
5075 #endif /* SS_MULTIPLE_PROCS */
5077 /*mt005.21: addition */
5078 /* unset the current executing TAPA task ID */
5079 sTsk->dep.ent = ENTNC;
5080 sTsk->dep.inst = INSTNC;
5083 /* return the message buffer */
5087 * mt003.301 - SDeRegTTsk fix
5089 case SS_EVNT_TTSK_TERM:
5090 #ifdef SS_MULTIPLE_PROCS
5091 procIdIdx = SGetProcIdIdx(mInfo->pst.dstProcId);
5093 if (procIdIdx == SS_INV_PROCID_IDX)
5099 idx = osCp.tTskIds[procIdIdx][mInfo->pst.dstEnt][mInfo->pst.dstInst];
5100 #else /* SS_MULTIPLE_PROCS */
5101 idx = osCp.tTskIds[mInfo->pst.dstEnt][mInfo->pst.dstInst];
5102 #endif /* SS_MULTIPLE_PROCS */
5104 /* verify that it hasn't been deregistered */
5105 if (idx == SS_TSKNC)
5111 /* verify that this system task is still running it */
5112 tTsk = &osCp.tTskTbl[idx];
5113 if (tTsk->sTsk != sTsk)
5118 #ifdef SS_MULTIPLE_PROCS
5119 ssdProcTTskTerm(procIdIdx, tTsk, idx);
5121 ssdProcTTskTerm(tTsk, idx);
5127 #if (ERRCLASS & ERRCLS_DEBUG)
5128 MTLOGERROR(ERRCLS_DEBUG, EMT012, (ErrVal) ret,
5135 } while (mBuf != NULLP);
5138 /* unlock the system task entry */
5139 SUnlock(&sTsk->lock);
5142 /* yield for other threads */
5143 /* mt024.201 - changed to use SSI SThreadYield instead of sleep */
/* Public entry to run one pass of the common timer processor
 * (cmPrcTmr) under the timer-table lock. Unlike mtTmrHdlr, this is
 * callable on demand rather than from the dedicated timer thread. */
5152  * Fun: mtTmrHdlrPublic
5154 Void mtTmrHdlrPublic()
5156 if (SLock(&osCp.tmrTblLock) != ROK)
5158 #if (ERRCLASS & ERRCLS_DEBUG)
5159 MTLOGERROR(ERRCLS_DEBUG, EMT016, ERRZERO, "Could not lock timer table");
/* Walk the timer queue and fire expired entries via mtTimeout. */
5163 cmPrcTmr(&osCp.dep.tmrTqCp, osCp.dep.tmrTq, mtTimeout);
5164 /* unlock the timer table */
5165 SUnlock(&osCp.tmrTblLock);
5173 * Desc: The timer handler thread function. Counts time
5174 * and invokes the common timer function on each
5177 * Ret: (thread function)
5184 /*mt041.201 Modified SSI tick handling in mtTmrHdlr() */
5185 static Void *mtTmrHdlr
5187 void *parm /* unused */
5190 /*mt004.301-addede new region*/
5191 /* mt010.301 Removed SS_FAP portion and
5192 * enabled oroginal code in function mtTmrHdlr */
5196 uint32_t i, cnt, oldTicks, newTicks;
5197 struct timeval tv1,tv2;
5198 /* mt038.201 added return */
5200 /* mt039.201 changes for nanosleep */
5201 struct timespec tsN;
5202 static uint32_t err_in_usec;
5204 /*mt013.301 : doesn't need TRC macro ,as this will never return*/
5209 /* mt027.201 - Modification for SRegCfgTmr support */
5210 /* check SS_TICKS_SEC */
5211 if (SS_1MS < SS_TICKS_SEC)
5213 MTLOGERROR(ERRCLS_DEBUG, EMT013, ERRZERO, "Minimum SSI ticks is 1ms");
5216 /* mt025.201 - Addition to stop timer handler till task registration is done */
5217 /* wait for SS to come up */
5218 /* mt038.201 changed how sem_wait is called */
5219 while ((ret = sem_wait(&osCp.dep.ssStarted) != ROK) && (errno == EINTR))
5222 /* mt027.201 - Modification for SRegCfgTmr support */
5223 /* set up parameter to nanosleep() for SS_TICKS_SEC */
5225 ts.tv_nsec = (MT_TICK_CNT * 1000);
5226 /* mt039.201 changes for nanosleep */
5232 if (gettimeofday(&tv1, NULL) == -1)
5234 #if (ERRCLASS & ERRCLS_DEBUG)
5235 MTLOGERROR(ERRCLS_DEBUG, EMT014, (ErrVal) errno,
5236 "Error in clock_gettime");
5246 #ifndef STUB_TTI_HANDLING_5GTF
5247 printf("\nReturning from mtTmrHdlr()\n");
5252 /* mt039.201 changes for nanosleep */
5253 /* sleep for MT_TICK_CNT milli seconds */
5254 ts.tv_nsec = (MT_TICK_CNT - err_in_usec) * 1000;
5255 while ((ret = nanosleep (&ts, &tsN) != ROK) && (errno == EINTR))
5257 ts.tv_nsec = tsN.tv_nsec;
5262 if (gettimeofday(&tv2,NULL) == -1)
5264 #if (ERRCLASS & ERRCLS_DEBUG)
5265 MTLOGERROR(ERRCLS_DEBUG, EMT015, (ErrVal) errno,
5266 "Error in clock_gettime");
5270 /*mt013.301 : changed check while calculating timer to fix
5271 * diffrence between MTSS time and real unix time
5273 if ((tv2.tv_sec == tv1.tv_sec)&&(tv2.tv_usec > tv1.tv_usec))
5275 time_int = (tv2.tv_usec - tv1.tv_usec);
5277 else if (tv2.tv_sec > tv1.tv_sec)
5279 time_int = ((tv2.tv_sec - tv1.tv_sec)*1000000) + (tv2.tv_usec - tv1.tv_usec);
5281 else /* ts2 < ts1, this will not happen in normal scenario */
5283 /* to make sure cnt = 1 */
5285 time_int = MT_TICK_CNT;
5288 oldTicks = osCp.dep.sysTicks;
5289 osCp.dep.sysTicks += (time_int/(MT_TICK_CNT - err_in_usec));
5290 err_in_usec = (time_int % (MT_TICK_CNT - err_in_usec));
5291 newTicks = osCp.dep.sysTicks;
5292 tv1.tv_usec = tv2.tv_usec;
5293 tv1.tv_sec = tv2.tv_sec;
5295 cnt = newTicks - oldTicks;
5297 while(err_in_usec >= MT_TICK_CNT)
5300 err_in_usec -= MT_TICK_CNT;
5302 if( cnt >= MT_MAX_TICK_CNT_VAL)
5303 cnt = MT_MIN_TICK_CNT_VAL;
5304 /* call the common timer tick handler */
5305 for (i = 0; i < cnt; i++)
5307 /* mt008.301: cmPrcTmr is guarded with a lock */
5308 /* lock the timer table */
5309 if (SLock(&osCp.tmrTblLock) != ROK)
5311 #if (ERRCLASS & ERRCLS_DEBUG)
5312 MTLOGERROR(ERRCLS_DEBUG, EMT016, ERRZERO, "Could not lock timer table");
5316 cmPrcTmr(&osCp.dep.tmrTqCp, osCp.dep.tmrTq, mtTimeout);
5317 /* unlock the timer table */
5318 SUnlock(&osCp.tmrTblLock);
5322 /* mt009.21: addition */
5323 return ( (Void *) NULLP);
5324 /* will not reach here */
5332 * Desc: Process timer event. Called from the common timer
5333 * code when a timeout occurs.
5344 PTR tCb, /* control block */
5345 S16 evnt /* event */
5354 #ifndef TENB_RTLIN_CHANGES
5357 /* mt028.201: modification: multiple procs support related changes */
5358 #ifdef SS_MULTIPLE_PROCS
5360 #endif /* SS_MULTIPLE_PROCS */
5361 #ifdef RGL_SPECIFIC_CHANGES
5362 #ifdef MSPD_MLOG_NEW
5363 uint32_t t = GetTIMETICK();
5369 /* get the timer entry */
5370 tEnt = (SsTmrEntry *) tCb;
5373 /* if the timer was deleted, this will be NULL, so drop it */
5379 /* mt008.301 Deletion: tmrTbl Lock is moved to mtTmrHdlr */
5382 /* Hmmmm, the timer might have been deleted while we've been
5383 * working at getting here, so we just skip this.
5385 if (tEnt->used == FALSE)
5391 /* Set up and send a timer message to the destination tasks'
5394 #ifndef SS_MULTICORE_SUPPORT
5395 if (SGetMsg(SS_DFLT_REGION, SS_DFLT_POOL, &mBuf) != ROK)
5397 #ifdef RGL_SPECIFIC_CHANGES
5398 if (SGetMsg((SS_DFLT_REGION), SS_DFLT_POOL, &mBuf) != ROK)
5400 if (SGetMsg((osCp.sTskTbl[0].region), SS_DFLT_POOL, &mBuf) != ROK)
5405 #if (ERRCLASS & ERRCLS_DEBUG)
5406 MTLOGERROR(ERRCLS_DEBUG, EMT017, ERRZERO, "Could not get message");
5412 mInfo = (SsMsgInfo *)mBuf->b_rptr;
5413 mInfo->eventInfo.event = SS_EVNT_TIMER;
5414 mInfo->eventInfo.u.tmr.tmrIdx = tEnt->tmrId;
5416 mInfo->pst.dstEnt = tEnt->ownerEnt;
5417 mInfo->pst.dstInst = tEnt->ownerInst;
5418 mInfo->pst.srcEnt = tEnt->ownerEnt;
5419 mInfo->pst.srcInst = tEnt->ownerInst;
5420 /* mt028.201: modification: multiple procs support related changes */
5421 #ifndef SS_MULTIPLE_PROCS
5422 mInfo->pst.dstProcId = SFndProcId();
5423 mInfo->pst.srcProcId = SFndProcId();
5424 #else /* SS_MULTIPLE_PROCS */
5425 mInfo->pst.dstProcId = tEnt->ownerProc;
5426 mInfo->pst.srcProcId = tEnt->ownerProc;
5427 #endif /* SS_MULTIPLE_PROCS */
5428 mInfo->pst.selector = SEL_LC_NEW;
5429 #ifndef SS_MULTICORE_SUPPORT
5430 mInfo->pst.region = DFLT_REGION;
5433 mInfo->pst.pool = DFLT_POOL;
5434 mInfo->pst.prior = PRIOR0;
5435 mInfo->pst.route = RTESPEC;
5436 mInfo->pst.event = 0;
5439 #ifndef TENB_RTLIN_CHANGES
5440 /* get a semaphore for the TAPA task table */
5441 SS_ACQUIRE_SEMA(&osCp.tTskTblSem, ret);
5446 #if (ERRCLASS & ERRCLS_DEBUG)
5447 MTLOGERROR(ERRCLS_DEBUG, EMT018, ret, "Could not lock TAPA task table");
5455 /* find the owner TAPA task */
5456 /* mt028.201: modification: multiple procs support related changes */
5457 #ifdef SS_MULTIPLE_PROCS
5458 procIdIdx = SGetProcIdIdx(tEnt->ownerProc);
5459 idx = osCp.tTskIds[procIdIdx][tEnt->ownerEnt][tEnt->ownerInst];
5460 #else /* SS_MULTIPLE_PROCS */
5461 idx = osCp.tTskIds[tEnt->ownerEnt][tEnt->ownerInst];
5462 #endif /* SS_MULTIPLE_PROCS */
5463 if (idx == SS_TSKNC)
5465 #ifndef TENB_RTLIN_CHANGES
5466 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5473 /* ensure that the TAPA task is hale and hearty */
5474 tTsk = &osCp.tTskTbl[idx];
5477 #ifndef TENB_RTLIN_CHANGES
5478 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5483 /* Klock work fix ccpu00148484 */
5484 /* write the timer message to the queue of the destination task */
5485 /* mt008.301 : check sTsk before putting into it's DQ */
5486 if (tTsk->sTsk == NULLP)
5488 #ifndef TENB_RTLIN_CHANGES
5489 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5493 #if (ERRCLASS & ERRCLS_DEBUG)
5494 MTLOGERROR(ERRCLS_DEBUG, EMT019, ERRZERO,
5495 "Could not write to demand queue");
5500 #ifdef SS_LOCKLESS_MEMORY
5501 mInfo->pst.region = tTsk->sTsk->region;
5502 mInfo->region = tTsk->sTsk->region;
5503 #endif /* SS_LOCKLESS_MEMORY */
5504 if (ssDmndQPutLast(&tTsk->sTsk->dQ, mBuf,
5505 (tTsk->tskPrior * SS_MAX_MSG_PRI) + PRIOR0) != ROK)
5507 #ifndef TENB_RTLIN_CHANGES
5508 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5512 #if (ERRCLASS & ERRCLS_DEBUG)
5513 MTLOGERROR(ERRCLS_DEBUG, EMT019, ERRZERO,
5514 "Could not write to demand queue");
5519 /* Fix for ccpu00130657 */
5520 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
5521 if (tTsk->sTsk->tskPrior == PRIOR0)
5524 WLS_WakeUp(mtGetWlsHdl());
5531 /* release the semaphore for the TAPA task table */
5532 #ifndef TENB_RTLIN_CHANGES
5533 SS_RELEASE_SEMA(&osCp.tTskTblSem);
5537 /* restart the timer */
5538 arg.tq = osCp.dep.tmrTq;
5539 arg.tqCp = &osCp.dep.tmrTqCp;
5540 arg.timers = tEnt->dep.timers;
5541 arg.cb = (PTR) tEnt;
5545 arg.max = TMR_DEF_MAX;
5546 arg.wait = tEnt->interval;
5548 #ifdef RGL_SPECIFIC_CHANGES
5549 #ifdef MSPD_MLOG_NEW
5550 MLogTask(131313, RESOURCE_LARM, t, GetTIMETICK());
5562  * Desc: This thread reads the console and hands over any
5563  *       data read to a user function.
5565  * Ret: (thread function)
5572 static Void *mtConHdlr
5574 Ptr parm /* unused */
5581 /*mt013.301 : doesn't need TRC macro ,as this will never return*/
5587 /* check if we have a console input file handle */
5588 if (osCp.dep.conInFp == NULLP)
/* Obtain the raw fd so we can use read() one byte at a time. */
5594 fd = fileno(osCp.dep.conInFp);
5599 if ((read(fd, &data, 1)) != 1)
5605 /* call rdConQ, defined by the system service user */
5615 #ifdef SS_DRVR_SUPPORT
5618 * Fun: Interrupt service task handler
5620 * Desc: This is the interrupt service task handler. It blocks
5621 * on a pipe from which it reads an isFlag structure. The
5622 * structure indicates which interrupt service task is to
5623 * be executed. The thread identifies the task, calls the
5624 * isTsk function and sends itself a message to repeat
5625 * this operation until it receives a message to cease.
5634 /* mt009.21: addition */
5635 static Void *mtIsTskHdlr
5637 Ptr tskPtr /* pointer to task entry */
5640 #if (ERRCLASS & ERRCLS_DEBUG)
5647 if (read(osCp.dep.isFildes[0], &isFlag, sizeof(isFlag)) != sizeof(isFlag))
5652 switch (isFlag.action)
5655 osCp.drvrTskTbl[isFlag.id].dep.flag = TRUE;
5657 /* call the interrupt service task activation function */
5658 osCp.drvrTskTbl[isFlag.id].isTsk(isFlag.id);
5660 /* send self a message to keep doing this */
5661 isFlag.action = MT_IS_RESET;
5663 #if (ERRCLASS & ERRCLS_DEBUG)
5664 ret = write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5665 if (ret != sizeof(isFlag))
5667 MTLOGERROR(ERRCLS_DEBUG, EMT020, ERRZERO,
5668 "write() to pipe failed");
5671 write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5678 osCp.drvrTskTbl[isFlag.id].dep.flag = FALSE;
5683 if (osCp.drvrTskTbl[isFlag.id].dep.flag)
5685 /* call the interrupt service task activation function */
5686 osCp.drvrTskTbl[isFlag.id].isTsk(isFlag.id);
5688 #if (ERRCLASS & ERRCLS_DEBUG)
5689 /* send self a message to do this again */
5690 ret = write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5692 if (ret != sizeof(isFlag))
5694 MTLOGERROR(ERRCLS_DEBUG, EMT021, ERRZERO,
5695 "write() to pipe failed");
5698 write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag));
5706 /* where did THIS come from?? */
5710 /* mt009.21: addition */
5711 return ( (Void *) NULLP);
5715 #endif /* SS_DRVR_SUPPORT */
5716 #endif /* L2_L3_SPLIT */
5718 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
5722  * Fun: mtIntSigHndlr
5724  * Desc: Exit function, shuts down.
/* SIGINT handler: only records the event in a flag for later cleanup
 * (mtExitClnup) — the async-signal-safe pattern of deferring real work. */
5733 Void mtIntSigHndlr(int arg)
5736 osCp.dep.sigEvnt=TRUE;
5739 #ifdef TENB_RTLIN_CHANGES
5747 /*mt010.301 Fix for core when run with -o option and when killed with SIGINT*/
5752  * Desc: function, shuts down.
/* Shutdown cleanup: reports the final tick count, clears the pending
 * SIGINT flag, and closes the output file handle if one was opened. */
5761 Void mtExitClnup(void)
5767 SGetSysTime(&ticks);
/* Two format variants (%u vs %lu) — presumably selected by a Ticks-width
 * conditional not visible in this view; confirm against the full file. */
5769 sprintf(buf, "\n\nmtss(posix) ends\nticks: %u\n", ticks);
5771 sprintf(buf, "\n\nmtss(posix) ends\nticks: %lu\n", ticks);
5773 #ifdef SS_HISTOGRAM_SUPPORT
5777 osCp.dep.sigEvnt=FALSE;
5779 if (osCp.dep.fileOutFp)
5781 fclose(osCp.dep.fileOutFp);
5789 Void SIncrementTtiCount(Void)
5794 Ticks SGetTtiCount(Void)
5803  * Desc: This function displays a string to a given output
5808  * Notes: Buffer should be null terminated.
5810  *        channel 0 is reserved for backwards compatibility
5818 S16 chan, /* channel */
5819 Txt *buf /* buffer */
5823 /* mt020.201 - Fixed typo */
5824 #if (ERRCLASS & ERRCLS_INT_PAR)
5827 MTLOGERROR(ERRCLS_INT_PAR, EMT022, ERRZERO, "Null pointer");
5832 #ifndef XEON_SPECIFIC_CHANGES
/* On TENB targets, route the string to the memory logger instead of stdio. */
5833 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
5834 ssMemlog(buf, strlen(buf));
5839 /* mt012.301 :FIX for LOG RELATED ISSUE */
/* Default path: write to the console stream and, if configured, the file. */
5847 if (osCp.dep.conOutFp) fwrite(buf, strlen(buf), 1, osCp.dep.conOutFp);
5853 if (osCp.dep.fileOutFp)
5854 fwrite(buf, strlen(buf), 1, osCp.dep.fileOutFp);
5855 /*mt031.201 added under compile time flag FLUSHBUFF a call to fflush() */
5858 fflush(osCp.dep.fileOutFp);
5871 * Desc: function, shuts down.
5883 /* mt030.201 added under compilet time flag SS_LINUX and SLES9_PLUS
5884 a loop to overcome the child processes being killed upon exiting the
5886 #ifdef SS_LINUX /* this should have already been defined */
5887 /* mt010.301 removed flag SLES9_PLUS */
5888 /* wait forever for children */
5892 if(osCp.dep.sigEvnt==TRUE)
5899 pthread_exit(NULLP);
5905 * Fun: Set date and time
5907 * Desc: This function is used to set the calendar
5912 * Notes: Unimplemented
5919 REG1 DateTime *dt /* date and time */
5932  * Fun: Get date and time
5934  * Desc: This function is used to determine the calendar
5935  *       date and time. This information may be used for
5936  *       some management functions.
5948 REG1 DateTime *dt /* date and time */
5951 /*-- mt035.201 : SSI enhancements for micro second in datetime struct --*/
/* Linux path uses clock_gettime(CLOCK_REALTIME); otherwise gettimeofday. */
5954 struct timespec ptime;
5956 struct timeval ptime;
5963 #if (ERRCLASS & ERRCLS_INT_PAR)
5966 MTLOGERROR(ERRCLS_INT_PAR, EMT023, ERRZERO, "Null pointer");
5975 localtime_r(&tt, &tme);
5978 clock_gettime(CLOCK_REALTIME, &ptime);
5980 gettimeofday(&ptime, NULL);
/* Thread-safe conversion to broken-down local time. */
5982 localtime_r(&ptime.tv_sec, &tme);
5984 dt->month = (uint8_t) tme.tm_mon + 1;
5985 dt->day = (uint8_t) tme.tm_mday;
/* NOTE(review): tm_year is years-since-1900; truncating to uint8_t wraps
 * for years >= 2156 and yields e.g. 124 for 2024 — confirm callers expect
 * the "years since 1900" convention. */
5986 dt->year = (uint8_t) tme.tm_year;
5987 dt->hour = (uint8_t) tme.tm_hour;
5988 dt->min = (uint8_t) tme.tm_min;
5989 dt->sec = (uint8_t) tme.tm_sec;
5992 #ifdef SS_DATETIME_USEC
/* Microseconds: derived from tv_nsec on Linux, tv_usec elsewhere. */
5994 dt->usec = ptime.tv_nsec / 1000;
5996 dt->usec = ptime.tv_usec;
5998 #endif /*-- SS_DATETIME_USEC --*/
6004  * Get time from epoch in milliseconds
6006  * Fun: Get time from epoch in milliseconds
6008  * Desc: This function is used to get the time from epoch in milli seconds.
6009  *       This information may be used for calculating a layer's activation function
6010  *       execution time used for thread profiling.
6019 /* mt003.301 Modifications */
6022 EpcTime *et /* date and time */
6025 /* mt003.301 Modifications */
/* NOTE(review): `now` is static — shared across calls/threads; looks
 * unnecessary for a pure computation and not thread-safe. Confirm intent. */
6026 static uint64_t now;
6027 uint64_t to_sec = 1000000;
6028 uint64_t to_nsec = 1000;
6030 struct timespec ptime;
6032 struct timeval ptime;
6037 #if (ERRCLASS & ERRCLS_INT_PAR)
6046 clock_gettime(CLOCK_REALTIME, &ptime);
6048 gettimeofday(&ptime, NULL);
6049 #endif /* SS_LINUX */
/* Build microseconds-since-epoch: seconds*1e6 + sub-second part ... */
6051 now = (ptime.tv_sec * to_sec);
6054 now += (ptime.tv_nsec / to_nsec);
6055 #else /* SS_LINUX */
6056 now += (ptime.tv_usec);
6058 #endif /* SS_LINUX */
/* ... then divide by 1000 to yield milliseconds. */
6059 now = (now / to_nsec);
6070  * Fun: Get system time
6072  * Desc: This function is used to determine the system time.
6076  * Notes: osCp.dep.sysTicks is updated by the timer thread.
6083 Ticks *sysTime /* system time */
6088 #if (ERRCLASS & ERRCLS_INT_PAR)
6089 if (sysTime == NULLP)
6091 MTLOGERROR(ERRCLS_INT_PAR, EMT024, ERRZERO, "Null pointer");
/* Snapshot of the tick counter maintained by mtTmrHdlr; read without a
 * lock — presumably acceptable as a monotonic word-sized read; confirm. */
6097 *sysTime = osCp.dep.sysTicks;
6103 /* mt021.201 - Addition of SGetRefTime function */
6106  * Fun: Get referenced time
6108  * Desc: This function is used to determine the time in seconds
6109  *       and microseconds from a reference time. The reference
6110  *       time is expressed in seconds from UTC EPOC, January 1,
6116  * Notes: Macros are defined for reference times:
6117  *           SS_REFTIME_01_01_1970
6118  *           SS_REFTIME_01_01_2002
6125 uint32_t refTime, /* reference time */
6132 struct timespec ptime;
6134 struct timeval ptime;
/* Clock is read before the parameter checks below — harmless, but note
 * the ordering when reviewing the full function. */
6139 clock_gettime(CLOCK_REALTIME, &ptime);
6141 gettimeofday(&ptime, NULL);
6144 #if (ERRCLASS & ERRCLS_INT_PAR)
6145 if (sec == NULLP || usec == NULLP)
6147 MTLOGERROR(ERRCLS_INT_PAR, EMT025, ERRZERO, "Null pointer");
6150 /* mt022.201 - Modification to fix compile warning */
/* A reference time in the future is rejected as invalid input. */
6151 if (refTime > (uint32_t)(ptime.tv_sec))
6153 MTLOGERROR(ERRCLS_INT_PAR, EMT026, ERRZERO, "Reference time exceeds present time");
6158 *sec = ptime.tv_sec - refTime;
6160 *usec = ptime.tv_nsec / 1000;
6162 *usec = ptime.tv_usec;
6172  * Fun: Get Random Number
6174  * Desc: Invoked by layer when a pseudorandom number is required.
6178  * Notes: Suggested approach uses shuffled Linear Congruential
6179  *        Operators as described in Byte magazine October
6180  *        1984; "Generating and Testing Pseudorandom Numbers"
6187 Random *value /* random number */
6192 #if (ERRCLASS & ERRCLS_INT_PAR)
6195 /* mt011.21: addition */
6196 MTLOGERROR(ERRCLS_INT_PAR, EMT028, (ErrVal)0 , "Null pointer");
/* rand_r with per-OS seed state: reentrant, no global libc PRNG state. */
6202 *value = (Random) rand_r(&osCp.dep.randSeed);
6213 * Desc: This function exits from a task.
6217 * Notes: Currently does nothing.
6232 * Fun: Exit Interrupt
6234 * Desc: This function exits from an interrupt.
6238 * Notes: Currently does nothing.
6253 * Fun: Hold Interrupt
6255 * Desc: This function prohibits interrupts from being enabled until
6256 * release interrupt. This function should be called when
6257 * interrupts are disabled and prior to any call to system
6258 * services either by entry to an interrupt service routine or
6259 * by explicit call to disable interrupt.
6263 * Notes: Currently does nothing
6278 * Fun: Release Interrupt
6280 * Desc: This function allows interrupts to be enabled.
6284 * Notes: Currently does nothing.
6301 * Desc: Enable interrupts
6303 * Ret: ROK on success
6306 * Notes: Currently does nothing.
6311 inline S16 SEnbInt(void)
6323 * Desc: Disable interrupts
6325 * Ret: ROK on success
6328 * Notes: Currently does nothing.
6333 inline S16 SDisInt(void)
6345 * Desc: This function gets the function address stored at the
6346 * specified interrupt vector.
6350 * Notes: Currently does nothing.
6357 VectNmb vectNmb, /* vector number */
6358 PIF *vectFnct /* vector function */
6375 * Desc: This function installs the specified function at the
6376 * specified interrupt vector.
6380 * Notes: Currently does nothing.
6387 VectNmb vectNmb, /* vector number */
6388 PIF vectFnct /* vector function */
6400 /* mt028.201: modification: multiple procs support related changes */
6401 #ifndef SS_MULTIPLE_PROCS
6407  * Desc: This function gets the current entity and instance.
6410  *       RFAILED - failed, general (optional)
6412  * Notes: This function may be called by the OS or Layer 1
6420 Ent *ent, /* entity */
6421 Inst *inst /* instance */
6432 #if (ERRCLASS & ERRCLS_INT_PAR)
6433 /* check pointers */
6434 if (ent == NULLP || inst == NULLP)
6436 MTLOGERROR(ERRCLS_INT_PAR, EMT029, ERRZERO, "Null pointer");
6442 /* get the thread id */
6443 tId = pthread_self();
6446 /* find the system task in whose context we're running */
6448 ret = SLock(&osCp.sTskTblLock);
/* Linear scan of the system-task table, matching on the pthread id. */
6453 for (i = 0; i < SS_MAX_STSKS; i++)
6455 if (pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
6457 sTsk = &osCp.sTskTbl[i];
/* Report the TAPA entity/instance currently executing on this sTsk. */
6463 *ent = sTsk->dep.ent;
6464 *inst = sTsk->dep.inst;
6466 SUnlock(&osCp.sTskTblLock);
6469 return (ret == ROK ? ROK : RFAILED);
6477  * Desc: This function sets the current entity and instance.
6488 Ent ent, /* entity */
6489 Inst inst /* instance */
6500 #if (ERRCLASS & ERRCLS_INT_PAR)
6501 /* check entity and instance IDs */
6502 if (ent >= ENTNC || inst >= INSTNC)
6504 MTLOGERROR(ERRCLS_INT_PAR, EMT030, ERRZERO, "Invalid entity/instance");
6510 /* get the thread id */
6511 tId = pthread_self();
6514 /* find the system task in whose context we're running */
6516 ret = SLock(&osCp.sTskTblLock);
/* Same thread-id lookup as SGetEntInst, but writing instead of reading. */
6521 for (i = 0; i < SS_MAX_STSKS; i++)
6523 if (pthread_equal(osCp.sTskTbl[i].dep.tId, tId))
6525 sTsk = &osCp.sTskTbl[i];
6531 sTsk->dep.ent = ent;
6532 sTsk->dep.inst = inst;
6534 SUnlock(&osCp.sTskTblLock);
6537 return (ret == ROK ? ROK : RFAILED);
6542 #ifdef SS_DRVR_SUPPORT
6548  * Desc: Set interrupt pending flag
6550  * Ret: ROK on success
6558 inline S16 SSetIntPend
6560 uint16_t id, /* driver task identifier */
6561 Bool flag /* flag */
6569 #if (ERRCLASS & ERRCLS_INT_PAR)
6570 if (id >= SS_MAX_DRVRTSKS || osCp.drvrTskTbl[id].used == FALSE)
6572 MTLOGERROR(ERRCLS_INT_PAR, EMT031, id, "Invalid instance");
/* Encode the request and hand it to mtIsTskHdlr over the isFildes pipe;
 * the write must transfer the whole struct or the call fails. */
6579 isFlag.action = (flag ? MT_IS_SET : MT_IS_UNSET);
6581 if (write(osCp.dep.isFildes[1], &isFlag, sizeof(isFlag)) != sizeof(isFlag))
6589 #endif /* SS_DRVR_SUPPORT */
6592 #ifdef SS_LOCKLESS_MEMORY
6595  * Fun: SGlobMemInfoShow
6597  * Desc: This function displays the memory usage information
6598  *       for the destined region. It will show the usage of
6599  *       each configured bucket and the heap for the specified region.
6602  *       RFAILED Region not registered
6607 S16 SGlobMemInfoShow(Void)
6611 CmMmGlobRegCb *globReg;
6614 globReg = osCp.globRegCb;
/* Table header, emitted through SDisplay channel 0. */
6616 sprintf(prntBuf, "--------------------------------------------------------------\n");
6617 SDisplay(0, prntBuf);
6618 sprintf(prntBuf, "Global Region Bucket Information\n");
6619 SDisplay(0, prntBuf);
6620 sprintf(prntBuf, "====================================================\n");
6621 SDisplay(0, prntBuf);
6622 sprintf(prntBuf, "Bucket Id  Set Size  Free Sets  Allocated\n");
6623 SDisplay(0, prntBuf);
6624 sprintf(prntBuf, "====================================================\n");
6625 SDisplay(0, prntBuf);
/* One row per configured bucket; format string varies by target because
 * the counter field widths differ (lu vs u) across builds. */
6628 for (idx = 0; idx < globReg->numBkts; idx++)
6630 #ifdef XEON_SPECIFIC_CHANGES
6631 sprintf(prntBuf, "%2u  %12lu %12lu  %8lu %9lu\n",
6632 idx, globReg->bktTbl[idx].size, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6635 sprintf(prntBuf, "%2u  %12lu  %8lu %9lu\n",
6636 idx, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6638 sprintf(prntBuf, "%2u  %12u  %8u %9u\n",
6639 idx, globReg->bktTbl[idx].bucketSetSize, globReg->bktTbl[idx].listValidBktSet.count, globReg->bktTbl[idx].listFreeBktSet.count);
6642 SDisplay(0, prntBuf);
6644 sprintf(prntBuf, "--------------------------------------------------------------\n");
6645 SDisplay(0, prntBuf);
6650 #endif /* SS_LOCKLESS_MEMORY */
/* Returns TRUE when more than 70% of the blocks in the given
 * region/pool bucket are currently allocated, logging the counts.
 * NOTE(review): divides by numBlks — an unconfigured bucket with
 * numBlks == 0 would divide by zero; confirm callers guard this. */
6653 Bool IsMemoryThresholdHit(Region reg, Pool pool)
6655 if((mtCMMRegCb[reg]->bktTbl[pool].numAlloc * 100 )/mtCMMRegCb[reg]->bktTbl[pool].numBlks > 70)
6657 MSPD_DBG("Threshold reached reg(%d) pool(%d) numAllc(%d) numBlks(%d)\n",
6660 mtCMMRegCb[reg]->bktTbl[pool].numAlloc,
6661 mtCMMRegCb[reg]->bktTbl[pool].numBlks);
6668 /* mt022.201 - Addition of SRegInfoShow function */
/*
 * SRegInfoShow: prints a human-readable bucket/heap usage report for one
 * memory region via SDisplay(), and accumulates the region's free byte
 * count into the caller-supplied *availmem.
 * NOTE(review): this listing is elided — the signature and several lines
 * (braces, #else arms, return) are not visible here; presumably
 * S16 SRegInfoShow(Region region, uint32_t *availmem) — confirm in full source.
 */
6673 * Desc: This function displays the memory usage information
6674 * for the destined region. It will show the usage of
6675 * each configured bucket and the heap for the specified region.
6678 * RFAILED Region not registered
6680 * Notes: A Sample Output from the function
6681 * Bucket Memory: region 1
6682 * ====================================================
6683 * Bucket Number of Blks configured Size Allocated
6684 * ====================================================
6692 * Heap Memory: region 1
6695 * Heap Segmented blocks: 0
/* Parameter sanity check: reject region indices beyond the configured
 * maximum before dereferencing mtCMMRegCb[]. */
6711 #if (ERRCLASS & ERRCLS_INT_PAR)
6712 if (region > (SS_MAX_REGS-1) )
6714 MTLOGERROR(ERRCLS_INT_PAR, EMT032, ERRZERO, "Invalid Region");
/* Bucket usage table header (suppressed on T2K/3K builds). */
6721 #ifndef TENB_T2K3K_SPECIFIC_CHANGES
6722 sprintf(prntBuf, "\n\nBucket Memory: region %d\n", region);
6723 SDisplay(0, prntBuf);
6724 sprintf(prntBuf, "====================================================\n");
6725 SDisplay(0, prntBuf);
6726 sprintf(prntBuf, "Bucket Number of Blks configured Size Allocated\n");
6727 SDisplay(0, prntBuf);
6728 sprintf(prntBuf, "====================================================\n");
6729 SDisplay(0, prntBuf);
/* One report line per configured bucket.  The %u vs %lu variants exist
 * only to match counter widths on 32- vs 64-bit builds (mt009.301). */
6733 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
6735 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
6737 sprintf((char *)prntBuf, "%2u %8u %5u %8u %8u\n",
6738 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6739 mtCMMRegCb[region]->bktTbl[idx].size,
6740 mtCMMRegCb[region]->bktTbl[idx].numAlloc,
6741 mtCMMRegCb[region]->bktTbl[idx].maxAlloc);
6743 sprintf((char *)prntBuf, "%2u %8lu %5lu %8lu %8lu\n",
6744 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6745 mtCMMRegCb[region]->bktTbl[idx].size,
6746 mtCMMRegCb[region]->bktTbl[idx].numAlloc,
6747 mtCMMRegCb[region]->bktTbl[idx].maxAlloc);
6750 /*mt009.301 Fixed 64BIT compilation warnings*/
6752 sprintf(prntBuf, "%2u %8u %5u %8u\n",
6753 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6754 mtCMMRegCb[region]->bktTbl[idx].size,
6755 mtCMMRegCb[region]->bktTbl[idx].numAlloc);
6757 sprintf(prntBuf, "%2u %8lu %5lu %8lu\n",
6758 idx, mtCMMRegCb[region]->bktTbl[idx].numBlks,
6759 mtCMMRegCb[region]->bktTbl[idx].size,
6760 mtCMMRegCb[region]->bktTbl[idx].numAlloc);
6762 #endif /* not TENB_RTLIN_CHANGES */
6763 SDisplay(0, prntBuf);
/* Free bytes in this bucket = blockSize * (totalBlocks - allocatedBlocks). */
6764 *availmem = *availmem + (mtCMMRegCb[region]->bktTbl[idx].size * \
6765 (mtCMMRegCb[region]->bktTbl[idx].numBlks - \
6766 mtCMMRegCb[region]->bktTbl[idx].numAlloc));
/* Heap section: total size, allocated bytes, then fold the remaining
 * free heap bytes into *availmem as well. */
6768 sprintf(prntBuf, "\n---------------\n");
6769 SDisplay(0, prntBuf);
6770 sprintf(prntBuf, "Heap Memory: region %d\n", region);
6771 SDisplay(0, prntBuf);
6772 /*mt009.301 Fixed 64BIT compilation warnings*/
6774 sprintf(prntBuf, "Heap Size: %u\n", mtCMMRegCb[region]->heapSize);
6776 sprintf(prntBuf, "Heap Size: %lu\n", mtCMMRegCb[region]->heapSize);
6778 SDisplay(0, prntBuf);
6779 /*mt009.301 Fixed 64BIT compilation warnings*/
6781 sprintf(prntBuf, "Heap Allocated: %u\n",
6782 (mtCMMRegCb[region]->heapSize - mtCMMRegCb[region]->heapCb.avlSize));
6784 sprintf(prntBuf, "Heap Allocated: %lu\n",
6785 (mtCMMRegCb[region]->heapSize - mtCMMRegCb[region]->heapCb.avlSize));
6787 SDisplay(0, prntBuf);
6788 *availmem = *availmem + mtCMMRegCb[region]->heapCb.avlSize;
/* Heap fragmentation count is only tracked on debug builds. */
6789 #if (ERRCLASS & ERRCLS_DEBUG)
6790 sprintf(prntBuf, "Heap Segmented blocks: %d\n",
6791 mtCMMRegCb[region]->heapCb.numFragBlk);
6792 SDisplay(0, prntBuf);
/*
 * XEON-only memory-threshold support: per-region, per-bucket watermark
 * tables, initialised once from the configured bucket capacities.
 */
6797 #ifdef XEON_SPECIFIC_CHANGES
6798 #define SSI_MAX_BKT_THRESHOLD 6
6799 #define SSI_MAX_REG_THRESHOLD 2
/* Watermarks per [region][bucket]; zero until SInitMemThreshold runs. */
6800 uint32_t SMemMaxThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6801 uint32_t SMemMidThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
6802 uint32_t SMemLowThreshold[SSI_MAX_REG_THRESHOLD][SSI_MAX_BKT_THRESHOLD] = {{0}};
/*
 * SInitMemThreshold: one-time fill of the watermark tables at 95% (max),
 * 85% (mid) and 80% (low) of each bucket's configured block count.
 * NOTE(review): parameter list elided in this view — presumably
 * (Region region, uint8_t maxBkt); confirm in full source.
 */
6804 static Void SInitMemThreshold
6811 for (idx = 0; (idx < maxBkt && idx < mtCMMRegCb[region]->numBkts); idx++)
6813 SMemMaxThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*95)/100;
6814 SMemMidThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*85)/100;
6815 SMemLowThreshold[region][idx] = (mtCMMRegCb[region]->bktTbl[idx].numBlks*80)/100;
6816 printf("\nREGION:%d, BKT:%d max:%d mid:%d low:%d\n", region, idx, SMemMaxThreshold[region][idx], SMemMidThreshold[region][idx], SMemLowThreshold[region][idx]);
/*
 * SRegReachedMemThreshold: scans the buckets of a region against the
 * watermark tables and reports the most severe level reached.
 * memStatus starts at 3 (no threshold reached) and is lowered as higher
 * watermarks are hit; the (memStatus > N) guards ensure a less severe
 * bucket never overwrites a more severe one already found.
 * NOTE(review): the assignments inside each branch and the return are
 * elided in this view — presumably memStatus is set to 0/1/2 for
 * max/mid/low respectively and returned; confirm in full source.
 */
6820 S16 SRegReachedMemThreshold
6827 uint8_t memStatus = 3;
/* Lazily initialise the watermark tables on first call only. */
6828 static uint8_t initFlag = 1;
6832 SInitMemThreshold(region, maxBkt);
6835 for (idx = 0; (idx < maxBkt && idx < mtCMMRegCb[region]->numBkts); idx++)
6837 if(mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemMaxThreshold[region][idx])
6842 else if((mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemMidThreshold[region][idx]) && (memStatus >1))
6846 else if((mtCMMRegCb[region]->bktTbl[idx].numAlloc >= SMemLowThreshold[region][idx]) && (memStatus >2))
6854 /* mt033.201 - addition of API to return the memory statistical data */
/*
 * Fills a caller-supplied SsMemDbgInfo with per-bucket and heap usage
 * statistics for one region (programmatic counterpart of SRegInfoShow).
 * NOTE(review): the function signature is elided in this view —
 * presumably S16 SGetRegInfo(Region region, SsMemDbgInfo *dbgInfo);
 * confirm in full source.
 */
6859 * Desc: This function returns the memory usage information
6860 * for the destined region. It will return the usage of
6861 * each configured bucket and the heap for the specified region.
6864 * RFAILED Region not registered
6874 SsMemDbgInfo *dbgInfo
/* Reject regions that were never configured. */
6880 #if (ERRCLASS & ERRCLS_INT_PAR)
6881 if (region >= mtMemoCfg.numRegions )
6883 MTLOGERROR(ERRCLS_INT_PAR, EMT033, ERRZERO, "Invalid Region");
6888 dbgInfo->availmem = 0;
/* Clamp the reported bucket count to the debug table's capacity. */
6890 if (mtCMMRegCb[region]->numBkts > SS_MAX_BKT_PER_DBGTBL)
6891 dbgInfo->numBkts = SS_MAX_BKT_PER_DBGTBL;
6893 dbgInfo->numBkts = mtCMMRegCb[region]->numBkts;
/* Copy each bucket's counters and accumulate its free byte count. */
6895 for (idx = 0; (idx < mtCMMRegCb[region]->numBkts) && (idx < SS_MAX_BKT_PER_DBGTBL); idx++)
6897 dbgInfo->bktDbgTbl[idx].numBlks = mtCMMRegCb[region]->bktTbl[idx].numBlks;
6898 dbgInfo->bktDbgTbl[idx].size = mtCMMRegCb[region]->bktTbl[idx].size;
6899 dbgInfo->bktDbgTbl[idx].numAlloc = mtCMMRegCb[region]->bktTbl[idx].numAlloc;
6901 dbgInfo->availmem += (mtCMMRegCb[region]->bktTbl[idx].size * \
6902 (mtCMMRegCb[region]->bktTbl[idx].numBlks - \
6903 mtCMMRegCb[region]->bktTbl[idx].numAlloc));
6906 dbgInfo->region = region;
6908 dbgInfo->heapSize = mtCMMRegCb[region]->heapSize;
/* Heap allocated = total heap minus currently available bytes. */
6910 dbgInfo->heapAlloc = (mtCMMRegCb[region]->heapSize - \
6911 mtCMMRegCb[region]->heapCb.avlSize);
6913 dbgInfo->availmem += mtCMMRegCb[region]->heapCb.avlSize;
6915 #if (ERRCLASS & ERRCLS_DEBUG)
6916 dbgInfo->numFragBlk = mtCMMRegCb[region]->heapCb.numFragBlk;
/*
 * Fragment of a region/pool count getter (signature elided in this view;
 * presumably S16 SGetRegPoolInfo(uint8_t *numRegion, uint8_t *numPool)).
 * Reports the configured region count and the pool count of region 0 —
 * NOTE(review): assumes all regions share region 0's pool count; confirm.
 */
6928 /* Send number of Region available */
6929 *numRegion = mtMemoCfg.numRegions;
6930 /* Send number of Pools available */
6931 *numPool = cfgRegInfo[0].numPools;
6936 /* mt033.201 - addition of APIs to print the memory statistical data
6937 * as defined by SSI enhancements
6939 #ifdef SSI_DEBUG_LEVEL1
6942 * Fun: SPrintRegMemStatusInfo
6944 * Desc: This function displays the memory usage information
6945 * for the destined region. It will show the total memory
6946 * used for static and dynamic memory if typeFlag is
6947 * SS_MEM_BKT_ALLOC_PROFILE. It will show the number of
6948 * memory block allocated for a particular size if typeFlag
6949 * is SS_MEM_BLK_SIZE_PROFILE from the hash list by
6950 * calling SRegPrintMemStats.
/*
 * NOTE(review): parameter list and some lines are elided in this view —
 * presumably (Region region, uint32_t typeFlag); confirm in full source.
 */
6959 S16 SPrintRegMemStatusInfo
/* Running totals across all buckets of the region. */
6967 uint32_t statMemSize;
6968 uint32_t dynMemSize;
6971 #if (ERRCLASS & ERRCLS_INT_PAR)
6972 if (region >= mtMemoCfg.numRegions )
6974 MTLOGERROR(ERRCLS_INT_PAR, EMT034, ERRZERO, "Invalid Region");
/* NOTE(review): the actual zeroing of statMemSize/dynMemSize is elided
 * here — confirm the counters are initialised before the loop below. */
6979 /* initialize the counters*/
6983 if (typeFlag == SS_MEM_BKT_ALLOC_PROFILE)
6985 /* total static and dynamic memory allocated from all the buckets in region requested */
6986 sprintf(prntBuf, "\nAllocated Memory profile of Buckets from region: %d \n", region);
6987 SDisplay(0, prntBuf);
6988 sprintf(prntBuf, "===========================================\n");
6989 SDisplay(0, prntBuf);
6990 sprintf(prntBuf, "Bucket Static Memory Dynamic Memory\n");
6991 SDisplay(0, prntBuf);
6992 sprintf(prntBuf, "===========================================\n");
6993 SDisplay(0, prntBuf);
/* Per-bucket static/dynamic usage; %u vs %lu matches counter widths
 * on 32- vs 64-bit builds (mt009.301). */
6994 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
6996 /*mt009.301 Fixed 64BIT compilation warnings*/
6998 sprintf(prntBuf, "%2u %8u %8u\n", idx,
6999 mtCMMRegCb[region]->bktTbl[idx].staticMemUsed,
7000 mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed);
7002 sprintf(prntBuf, "%2lu %8lu %8lu\n", idx,
7003 mtCMMRegCb[region]->bktTbl[idx].staticMemUsed,
7004 mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed);
7006 SDisplay(0, prntBuf);
7007 /* update the total count */
7008 statMemSize += mtCMMRegCb[region]->bktTbl[idx].staticMemUsed;
7009 dynMemSize += mtCMMRegCb[region]->bktTbl[idx].dynamicMemUsed;
7012 /*mt009.301 Fixed 64BIT compilation warnings*/
7014 sprintf(prntBuf, "Total Static Memory allocated from buckets: %u\n", statMemSize);
7015 SDisplay(0, prntBuf);
7016 sprintf(prntBuf, "Total Dynamic Memory allocated from buckets: %u\n", dynMemSize);
7018 sprintf(prntBuf, "Total Static Memory allocated from buckets: %lu\n", statMemSize);
7019 SDisplay(0, prntBuf);
7020 /*mt010.301 fix for compilation error*/
7021 sprintf(prntBuf, "Total Dynamic Memory allocated from buckets: %lu\n", dynMemSize);
7023 SDisplay(0, prntBuf);
/* Heap static/dynamic usage for the same region. */
7025 sprintf(prntBuf, "\n\nAllocated Memory profile from Heap of region: %d \n", region);
7026 SDisplay(0, prntBuf);
7027 /*mt009.301 Fixed 64BIT compilation warnings*/
7029 sprintf(prntBuf, "STATIC MEMORY: %u DYNAMIC MEMORY:%u \n",
7030 mtCMMRegCb[region]->heapCb.staticHeapMemUsed, mtCMMRegCb[region]->heapCb.dynamicHeapMemUsed);
7032 sprintf(prntBuf, "STATIC MEMORY: %lu DYNAMIC MEMORY:%lu \n",
7033 mtCMMRegCb[region]->heapCb.staticHeapMemUsed, mtCMMRegCb[region]->heapCb.dynamicHeapMemUsed);
7035 SDisplay(0, prntBuf);
/* Block-size profile is delegated to SPrintRegMemStats. */
7037 else if (typeFlag == SS_MEM_BLK_SIZE_PROFILE)
7039 /* Bucket Memory allocation Statistics */
7040 return (SPrintRegMemStats(region));
7045 sprintf(prntBuf, "\n Invalid choice \n");
7046 SDisplay(0, prntBuf);
/*
 * SPrintRegMemStats: dumps three profiles for a region — (1) the size-vs-
 * request-count hash list, (2) per-bucket alloc/dealloc attempt counters,
 * (3) heap alloc/dealloc attempt counters.  Output goes through SDisplay().
 */
7054 * Fun: SPrintRegMemStats
7056 * Desc: This function displays the memory usage information for
7057 * the destined region. It will show the number of memory
7058 * block allocated for a particular size from the hash list.
7067 static S16 SPrintRegMemStats(Region region)
7069 CmMmHashListCp *hashListCp;
7075 hashListCp = &mtCMMRegCb[region]->hashListCp;
7077 sprintf(prntBuf, "\n\nSize Vs. NumAttempts and Alloc/Dealloc profile of region %d\n", region);
7078 SDisplay(0, prntBuf);
7079 sprintf(prntBuf, "Maximum Entries: %u Current Entries: %u\n",
7080 hashListCp->numOfbins, hashListCp->numOfEntries);
7081 SDisplay(0, prntBuf);
7082 sprintf(prntBuf, "===================================\n");
7083 SDisplay(0, prntBuf);
7084 sprintf(prntBuf, "Block Size Total number of requests\n");
7085 SDisplay(0, prntBuf);
7086 sprintf(prntBuf, "===================================\n");
7087 SDisplay(0, prntBuf);
/* Walk the hash table; cntEnt lets the loop stop early once all live
 * entries have been printed (NOTE(review): the cntEnt increment is
 * elided in this view — confirm it happens inside the if-block). */
7089 for (idx = 0, cntEnt=0; (cntEnt < hashListCp->numOfEntries) &&
7090 (idx < CMM_STAT_HASH_TBL_LEN); idx++)
7092 if (hashListCp->hashList[idx].numAttempts)
7095 /*mt009.301 Fixed 64BIT compilation warnings*/
7097 sprintf(prntBuf, "%8u %8u\n", hashListCp->hashList[idx].size,
7098 hashListCp->hashList[idx].numAttempts);
7100 sprintf(prntBuf, "%8lu %8lu\n", hashListCp->hashList[idx].size,
7101 hashListCp->hashList[idx].numAttempts);
7103 SDisplay(0, prntBuf);
/* Per-bucket alloc/de-alloc attempt counters. */
7107 sprintf(prntBuf, "\nAllocation/De-allocation profile in Buckets\n");
7108 SDisplay(0, prntBuf);
7109 sprintf(prntBuf, "=================================================\n");
7110 SDisplay(0, prntBuf);
7111 sprintf(prntBuf, "Bucket Num of Alloc Attempts Num of De-alloc Attempts\n");
7112 SDisplay(0, prntBuf);
7113 sprintf(prntBuf, "=================================================\n");
7114 SDisplay(0, prntBuf);
7116 /* Print the statistics of total number of alloc/de-alloc attempts in each bucket of this region */
7117 for (idx = 0; idx < mtCMMRegCb[region]->numBkts; idx++)
7119 /*mt009.301 Fixed 64BIT compilation warnings*/
7121 sprintf(prntBuf, "%4u %8u %8u\n", idx,
7122 mtCMMRegCb[region]->bktTbl[idx].numAllocAttempts,
7123 mtCMMRegCb[region]->bktTbl[idx].numDeallocAttempts);
7125 sprintf(prntBuf, "%4lu %8lu %8lu\n", idx,
7126 mtCMMRegCb[region]->bktTbl[idx].numAllocAttempts,
7127 mtCMMRegCb[region]->bktTbl[idx].numDeallocAttempts);
7129 SDisplay(0, prntBuf);
/* Heap alloc/de-alloc attempt counters. */
7131 sprintf(prntBuf, "\nAllocation/De-allocation profile in Heap\n");
7132 SDisplay(0, prntBuf);
7133 /*mt009.301 Fixed 64BIT compilation warnings*/
7135 sprintf(prntBuf, "Num of Alloc Attempts: %u Num of De-alloc Attempts: %u\n",
7136 mtCMMRegCb[region]->heapCb.numAllocAttempts,
7137 mtCMMRegCb[region]->heapCb.numDeallocAttempts);
7139 sprintf(prntBuf, "Num of Alloc Attempts: %lu Num of De-alloc Attempts: %lu\n",
7140 mtCMMRegCb[region]->heapCb.numAllocAttempts,
7141 mtCMMRegCb[region]->heapCb.numDeallocAttempts);
7143 SDisplay(0, prntBuf);
7144 sprintf(prntBuf, "\n");
7145 SDisplay(0, prntBuf);
/*
 * SRegMemErrHdlr: hook invoked by the memory manager on detected memory
 * errors; currently just reports the condition via SDisplay().
 * NOTE(review): signature elided in this view — presumably
 * (Region region, Data *ptr, S16 errCode); confirm in full source.
 */
7152 * Fun: SRegMemErrHdlr
7154 * Desc: This function handles the errors returned from the memory
7155 * related functions. Customers are suggested to modify this
7156 * API according to their specific requirement.
/* RDBLFREE: a block was freed twice; report where and in which region. */
7175 if (errCode == RDBLFREE)
7177 sprintf(prntBuf, "\nDouble free attempted at location:%8p in region:%d\n", ptr, region);
7178 SDisplay(0, prntBuf);
/* RTRAMPLINGNOK: trampled-memory count exceeded the allowed threshold. */
7180 else if (errCode == RTRAMPLINGNOK)
7182 sprintf(prntBuf, "\nMemory Trampling crossed Threshold in region:%d\n", region);
7183 SDisplay(0, prntBuf);
/*
 * SPrintRegMemProfile: walks every bucket block and every heap block of a
 * region and prints, per block: address, (heap) size, free/allocated
 * state, static/dynamic origin and requested size.  Trampled blocks are
 * flagged; a trampled heap block aborts the heap walk because the stored
 * size can no longer be trusted as an offset to the next block.
 */
7191 * Fun: SPrintRegMemProfile
7193 * Desc: This function displays the memory profile information
7194 * for the destined region. This function prints for:
7195 * 1) each memory bucket-Block address, size, size for which it is allocated, free/allocated, static/dynamic
7196 * 2) heap - memory block address, size, requested size, free/allocated, static/dynamic
7205 S16 SPrintRegMemProfile
7212 CmMmBlkHdr *curBktBlk;
/* Byte distance between successive block headers within one bucket. */
7214 Size offsetToNxtBlk;
7222 #if (ERRCLASS & ERRCLS_INT_PAR)
7223 if (region >= mtMemoCfg.numRegions )
7225 MTLOGERROR(ERRCLS_INT_PAR, EMT035, ERRZERO, "Invalid Region");
7230 regCb = mtCMMRegCb[region];
7232 /* memory profile */
7233 sprintf(prntBuf, "\n\nFull Memory Profile of region %d\n", region);
7234 SDisplay(0, prntBuf);
7236 /* bucket profile */
7237 sprintf(prntBuf, "\nBucket Profile\n");
7238 SDisplay(0, prntBuf);
7240 for (idx = 0; idx < regCb->numBkts; idx++)
7243 /*mt009.301 Fixed 64BIT compilation warnings*/
7245 sprintf(prntBuf, "\nBucket number:%4u of Size:%u Num of Blocks: %u\n",
7246 idx, regCb->bktTbl[idx].size, regCb->bktTbl[idx].numBlks);
7248 sprintf(prntBuf, "\nBucket number:%4lu of Size:%lu Num of Blocks: %lu\n",
7249 idx, regCb->bktTbl[idx].size, regCb->bktTbl[idx].numBlks);
7251 SDisplay(0, prntBuf);
7253 sprintf(prntBuf, "==========================================================================\n");
7254 SDisplay(0, prntBuf);
7255 sprintf(prntBuf, " Block Location Free/Allocated Static/dynamic Size requested\n");
7256 SDisplay(0, prntBuf);
7257 sprintf(prntBuf, "==========================================================================\n");
7258 SDisplay(0, prntBuf);
/* Blocks are laid out back-to-back: payload size plus header size. */
7260 offsetToNxtBlk = regCb->bktTbl[idx].size + sizeof(CmMmBlkHdr);
7262 for (blkCnt=0, curBktBlk = (CmMmBlkHdr *)(regCb->bktTbl[idx].bktStartPtr);
7263 ((curBktBlk) && (blkCnt < regCb->bktTbl[idx].numBlks));
7264 curBktBlk = (CmMmBlkHdr *)((Data *)curBktBlk + offsetToNxtBlk), blkCnt++)
7266 /*mt009.301 Fixed 64BIT compilation warnings*/
7268 sprintf(prntBuf, "%6u %8p", blkCnt, (void *)curBktBlk);
7270 sprintf(prntBuf, "%6lu %8p", blkCnt, (void *)curBktBlk);
7272 SDisplay(0, prntBuf);
7273 /* check if it is a sane block, else jump to next block */
7274 if (cmMmRegIsBlkSane(curBktBlk) != ROK)
7276 sprintf(prntBuf, " Trampled \n");
7277 SDisplay(0, prntBuf);
/* Classify the block by its header flags: static alloc, dynamic alloc,
 * free, or (no flag matches) trampled. */
7282 if (CMM_IS_STATIC(curBktBlk->memFlags))
7284 /*mt009.301 Fixed 64BIT compilation warnings*/
7286 sprintf(prntBuf, " Allocated Static %8u\n", curBktBlk->requestedSize);
7288 sprintf(prntBuf, " Allocated Static %8lu\n", curBktBlk->requestedSize);
7290 SDisplay(0, prntBuf);
7292 else if (CMM_IS_DYNAMIC(curBktBlk->memFlags))
7294 /*mt009.301 Fixed 64BIT compilation warnings*/
7296 sprintf(prntBuf, " Allocated Dynamic %8u\n", curBktBlk->requestedSize);
7298 sprintf(prntBuf, " Allocated Dynamic %8lu\n", curBktBlk->requestedSize);
7300 SDisplay(0, prntBuf);
7302 else if (CMM_IS_FREE(curBktBlk->memFlags))
7304 /*mt009.301 Fixed 64BIT compilation warnings*/
7306 sprintf(prntBuf, " Free %8u\n", curBktBlk->requestedSize);
7308 sprintf(prntBuf, " Free %8lu\n", curBktBlk->requestedSize);
7310 SDisplay(0, prntBuf);
7314 sprintf(prntBuf, " Trampled \n");
7315 SDisplay(0, prntBuf);
/* --- Heap section --- */
7321 sprintf(prntBuf, "\nHeap Profile\n");
7322 SDisplay(0, prntBuf);
7324 /* point to heapCb */
7325 heapCb = &(regCb->heapCb);
7327 sprintf(prntBuf, "\nHeap Start: %8p Heap End: %8p\n", heapCb->vStart, heapCb->vEnd);
7328 SDisplay(0, prntBuf);
7329 sprintf(prntBuf, "==========================================================================\n");
7330 SDisplay(0, prntBuf);
7331 sprintf(prntBuf, " Block Location Size Free/Allocated Static/dynamic Size requested\n");
7332 SDisplay(0, prntBuf);
7333 sprintf(prntBuf, "==========================================================================\n");
7334 SDisplay(0, prntBuf);
7336 /* traverse the entire heap to output the heap profile */
7337 hdrSize = sizeof(CmHEntry);
/* Unlike buckets, heap blocks are variable-size; the walk advances by
 * header size + the size recorded in each block (see loop tail below). */
7338 for (blkCnt=0, curHBlk = (CmHEntry *)heapCb->vStart;
7339 ((curHBlk) && (curHBlk < (CmHEntry *)heapCb->vEnd)); blkCnt++)
7341 /*mt009.301 Fixed 64BIT compilation warnings*/
7343 sprintf(prntBuf, "%6u %8p", blkCnt, (void *)curHBlk);
7345 sprintf(prntBuf, "%6lu %8p", blkCnt, (void *)curHBlk);
7347 SDisplay(0, prntBuf);
7349 /* check if it is a sane block, else jump to next block */
7350 if (cmMmRegIsBlkSane((CmMmBlkHdr *)curHBlk) != ROK)
7352 sprintf(prntBuf, " Trampled \n");
7353 SDisplay(0, prntBuf);
7355 sprintf(prntBuf, "Trampled block encountered: Stopping heap profile\n");
7356 SDisplay(0, prntBuf);
7359 * To go to next block in the heap we do not have any offset value
7360 * other than curHBlk->size. As the block is already trampled
7361 * we cannot rely on this size. So it is better to stop here unless there
7362 * exists any other mechanism(?) to know the offset to next block.
7367 /*mt009.301 Fixed 64BIT compilation warnings*/
7369 sprintf(prntBuf, " %8u", curHBlk->size);
7371 sprintf(prntBuf, " %8lu", curHBlk->size);
7373 SDisplay(0, prntBuf);
7375 if (CMM_IS_STATIC(curHBlk->memFlags))
7377 /*mt009.301 Fixed 64BIT compilation warnings*/
7379 sprintf(prntBuf, " Allocated Static %8u\n", curHBlk->requestedSize);
7381 sprintf(prntBuf, " Allocated Static %8lu\n", curHBlk->requestedSize);
7383 SDisplay(0, prntBuf);
7385 else if (CMM_IS_DYNAMIC(curHBlk->memFlags))
7387 /*mt009.301 Fixed 64BIT compilation warnings*/
7389 sprintf(prntBuf, " Allocated Dynamic %8u\n", curHBlk->requestedSize);
7391 sprintf(prntBuf, " Allocated Dynamic %8lu\n", curHBlk->requestedSize);
7393 SDisplay(0, prntBuf);
7395 else if (CMM_IS_FREE(curHBlk->memFlags))
7397 /*mt009.301 Fixed 64BIT compilation warnings*/
7399 sprintf(prntBuf, " Free %8u\n", curHBlk->requestedSize);
7401 sprintf(prntBuf, " Free %8lu\n", curHBlk->requestedSize);
7403 SDisplay(0, prntBuf);
7407 sprintf(prntBuf, " Trampled \n");
7408 SDisplay(0, prntBuf);
7410 /* goto next block in the heap */
7411 curHBlk = (CmHEntry *)((Data *)curHBlk + hdrSize + curHBlk->size);
7417 #endif /* SSI_DEBUG_LEVEL1 */
7419 /*-- mt035.201 : Added new API for timestamp --*/
/*
 * Formats the current wall-clock time into the caller's buffer as
 * "Day Mon DD YYYY HH:MM:SS.mmm" (local time plus a sub-second field).
 * Uses clock_gettime(CLOCK_REALTIME) where available, else gettimeofday.
 * NOTE(review): signature elided in this view — presumably
 * S16 SGetTimeStamp(S8 *ts); confirm buffer size expectations in caller.
 */
7422 * Fun: Get TimeStamp
7424 * Desc: This function is used to Get TimeStamp in micro seconds
/* Two time sources, selected at build time (#if/#else elided here). */
7441 struct timespec ptime;
7443 struct timeval ptime;
7452 clock_gettime(CLOCK_REALTIME, &ptime);
7454 gettimeofday(&ptime, NULL);
7457 /* Obtain the time of day, and convert it to a tm struct. --*/
7458 ptm = localtime (&ptime.tv_sec);
7459 /* Klock work fix ccpu00148484 */
7462 /* Format the date and time, down to a single second. --*/
7463 strftime (time_string, sizeof (time_string), "%a %b %d %Y %H:%M:%S", ptm);
7466 /* Compute microseconds. --*/
/* timespec carries nanoseconds; timeval already carries microseconds. */
7468 microseconds = ptime.tv_nsec / 1000;
7470 microseconds = ptime.tv_usec;
7473 /* Print the formatted time, in seconds, followed by a decimal point
7474 and the microseconds. --*/
7475 /*mt009.301 Fixed 64BIT compilation warnings*/
7477 sprintf(ts, "%s.%03d", time_string, microseconds);
7479 sprintf(ts, "%s.%03ld", time_string, microseconds);
7485 /*-- mt037.201 : Added new API for SGetSystemTsk --*/
7488 * Fun: Get SGetSystemTsk
7490 * Desc: This function is used to Get system task id
/* Returns the calling thread's pthread id truncated/cast to uint32_t.
 * NOTE(review): pthread_t is opaque per POSIX; this cast is only safe on
 * platforms where pthread_t is integral — confirm for target builds. */
7499 uint32_t SGetSystemTsk(Void)
7502 return (pthread_self());
7504 } /* end of SGetSystemTsk */
7506 #ifdef SS_MULTICORE_SUPPORT
/*
 * ssdAddTmrSTsk: allocates and initialises a system-task table entry for
 * the timer thread.  Takes osCp.sTskTblLock for the whole operation and
 * unwinds (unlock, destroy demand queue) on every failure path.
 * Returns the new SsSTskEntry*, or (per the elided failure paths,
 * presumably) NULLP on error — confirm in full source.
 */
7509 * Fun: Add Timer thread into system task table
7511 * Desc: This function is used to add the system task
7512 * associated with Timer thread.
7521 static SsSTskEntry* ssdAddTmrSTsk(Void)
7527 /* lock the system task table */
7528 ret = SLock(&osCp.sTskTblLock);
7532 #if (ERRCLASS & ERRCLS_DEBUG)
7533 MTLOGERROR(ERRCLS_DEBUG, EMT039, (ErrVal) ret,
7534 "Could not lock system task table");
/* Fail if the table is already full; release the lock first. */
7540 /* check count of system tasks */
7541 if (osCp.numSTsks == SS_MAX_STSKS)
7544 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7546 #if (ERRCLASS & ERRCLS_DEBUG)
7547 MTLOGERROR(ERRCLS_DEBUG, EMT040, ERRZERO,
7548 "Could not give the Semaphore");
7553 #if (ERRCLASS & ERRCLS_ADD_RES)
7554 MTLOGERROR(ERRCLS_ADD_RES, EMT041, ERRZERO, "Too many system tasks");
/* Claim the next free slot and fill in the defaults. */
7561 /* initialize the system task entry with the information we have */
7562 sTsk = &osCp.sTskTbl[osCp.nxtSTskEntry];
7564 /* store the system task priority */
7565 sTsk->tskPrior = SS_NORM_TSK_PRI;
/* Demand queue failure: unlock and bail out. */
7567 /* initialize the demand queue */
7568 if (ssInitDmndQ(&sTsk->dQ) != ROK)
7571 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7573 #if (ERRCLASS & ERRCLS_DEBUG)
7574 MTLOGERROR(ERRCLS_DEBUG, EMT042, ERRZERO,
7575 "Could not give the Semaphore");
7580 #if (ERRCLASS & ERRCLS_DEBUG)
7581 MTLOGERROR(ERRCLS_DEBUG, EMT043, (ErrVal) ret,
7582 "Could not initialize demand queue");
/* Lock-init failure: tear down the demand queue created above, then
 * unlock and bail out. */
7588 /* initialize the system task entry lock */
7589 if (SInitLock(&sTsk->lock, SS_STSKENTRY_LOCK) != ROK)
7591 ssDestroyDmndQ(&sTsk->dQ);
7593 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7595 #if (ERRCLASS & ERRCLS_DEBUG)
7596 MTLOGERROR(ERRCLS_DEBUG, EMT044, ERRZERO,
7597 "Could not give the Semaphore");
7602 #if (ERRCLASS & ERRCLS_DEBUG)
7603 MTLOGERROR(ERRCLS_DEBUG, EMT045, (ErrVal) ret,
7604 "Could not initialize system task entry lock");
/* Success: commit the entry and advance the free-list head. */
7611 /* success, update the table */
7612 sTsk->tskId = osCp.nxtSTskEntry;
7614 sTsk->termPend = FALSE;
7615 osCp.nxtSTskEntry = sTsk->nxt;
7618 /* unlock the system task table */
7620 if ( SUnlock(&osCp.sTskTblLock) != ROK)
7622 #if (ERRCLASS & ERRCLS_DEBUG)
7623 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
7624 "Could not give the Semaphore");
7631 #endif /* SS_MULTICORE_SUPPORT */
7632 /* mt003.301 Readwrite lock and recursive mutex additions */
7633 #ifdef SS_LOCK_SUPPORT
7636 * Fun: ssdInitLockNew
7638 * Desc: This function is used to initialise lock/mutex
/*
 * Initialises lockId->l as either a pthread read-write lock or a
 * recursive pthread mutex, selected by lockType (the switch/case lines
 * are elided in this view).  Failures are reported via SDisplay().
 */
7647 S16 ssdInitLockNew(SLockInfo *lockId,uint8_t lockType)
7650 #ifdef SS_REC_LOCK_SUPPORT
7651 pthread_mutexattr_t attr;
7652 #endif /* SS_REC_LOCK_SUPPORT */
7653 Txt prntBuf[PRNTSZE];
7659 #ifdef SS_RDWR_LOCK_SUPPORT
/* Read-write lock variant: default attributes (NULLP). */
7662 if((retVal = pthread_rwlock_init((&(lockId->l.rdWrLockId)), NULLP)) != ROK)
7664 sprintf(prntBuf, "\n\n ssdInitLockNew(): Initialization of read write lock failed,Error# retVal %d\n", retVal);
7665 SDisplay(0, prntBuf);
7670 #endif /* SS_RDWR_LOCK_SUPPORT */
7671 #ifdef SS_REC_LOCK_SUPPORT
/* Recursive mutex variant: build a mutexattr, mark it recursive
 * (glibc _NP name vs. the portable POSIX name, chosen at build time),
 * init the mutex, and always destroy the attr afterwards. */
7674 retVal = pthread_mutexattr_init(&attr);
7678 sprintf(prntBuf,"\n ssdInitLockNew(): mutexattr init failed,Error# %d \n",retVal);
7683 retVal = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
7685 retVal = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
7689 sprintf(prntBuf,"\n ssdInitLockNew(): mutexattr settype failed,Error# %d \n",retVal);
7690 pthread_mutexattr_destroy(&attr);
7694 retVal = pthread_mutex_init((pthread_mutex_t *)&(lockId->l.recurLock), &attr);
7697 sprintf(prntBuf,"\n ssdInitLockNew(): mutex init failed,Error# %d \n",retVal);
7698 pthread_mutexattr_destroy(&attr);
7704 #endif /* SS_REC_LOCK_SUPPORT */
/* Unknown lockType falls through to here. */
7707 sprintf(prntBuf, "\n\n ssdInitLockNew(): Invalid lock type %d\n", lockType);
7708 SDisplay(0, prntBuf);
/*
 * ssdLockNew: acquires the lock held in lockId according to lockType —
 * blocking read, blocking write, try-read, try-write (rwlock build) or
 * recursive mutex lock (recursive build).  The case labels selecting
 * each branch are elided in this view.
 */
7718 * Desc: This function is used to acquire the read write lock
7727 S16 ssdLockNew(SLockInfo *lockId,uint8_t lockType)
7730 Txt prntBuf[PRNTSZE];
7736 #ifdef SS_RDWR_LOCK_SUPPORT
/* Blocking shared (read) acquisition. */
7739 if((retVal = pthread_rwlock_rdlock(&(lockId->l.rdWrLockId))) != ROK)
7741 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the read lock,Error# %d\n", retVal);
7742 SDisplay(0, prntBuf);
/* Blocking exclusive (write) acquisition. */
7749 if((retVal = pthread_rwlock_wrlock(&(lockId->l.rdWrLockId))) != ROK)
7751 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the write lock,Error# %d\n", retVal);
7752 SDisplay(0, prntBuf);
/* Non-blocking try variants. */
7759 if((retVal = pthread_rwlock_tryrdlock(&(lockId->l.rdWrLockId))) != ROK)
7761 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the read lock,Error# %d\n", retVal);
7762 SDisplay(0, prntBuf);
7769 if((retVal = pthread_rwlock_trywrlock(&(lockId->l.rdWrLockId))) != ROK)
7771 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the read lock,Error# %d\n", retVal);
7772 SDisplay(0, prntBuf);
7777 #endif /* SS_RDWR_LOCK_SUPPORT */
7778 #ifdef SS_REC_LOCK_SUPPORT
/* Recursive mutex acquisition.  NOTE(review): the != ROK comparison is
 * inside the assignment parentheses here (retVal gets a boolean), unlike
 * the rwlock branches above — flag for verification in full source. */
7781 if((retVal = pthread_mutex_lock(&(lockId->l.recurLock)) != ROK))
7783 sprintf(prntBuf, "\n\n ssdLockNew(): Failed to aquire the recursive mutex,Error# %d\n", retVal);
7784 SDisplay(0, prntBuf);
7789 #endif /* SS_REC_LOCK_SUPPORT */
7792 sprintf(prntBuf, "\n\n ssdLockNew(): Invalid lock type %d\n", lockType);
7793 SDisplay(0, prntBuf);
/*
 * ssdUnlockNew: releases the lock in lockId — rwlock unlock or recursive
 * mutex unlock depending on lockType (case labels elided in this view).
 */
7806 * Desc: This function is used to Unlock the read write lock
7815 S16 ssdUnlockNew(SLockInfo *lockId,uint8_t lockType)
7818 Txt prntBuf[PRNTSZE];
7824 #ifdef SS_RDWR_LOCK_SUPPORT
7827 if((retVal = pthread_rwlock_unlock(&(lockId->l.rdWrLockId))) != ROK)
7829 sprintf(prntBuf, "\n\n ssdUnLockNew(): Failed to unlock the lock,Error# %d\n", retVal);
7830 SDisplay(0, prntBuf);
7835 #endif /* SS_RDWR_LOCK_SUPPORT */
7836 #ifdef SS_REC_LOCK_SUPPORT
/* NOTE(review): != ROK is inside the assignment parentheses (retVal gets
 * a boolean) — same pattern as ssdLockNew's mutex branch; verify. */
7839 if((retVal = pthread_mutex_unlock(&(lockId->l.recurLock)) != ROK))
7841 sprintf(prntBuf, "\n\n ssdUnLockNew(): Failed to aquire the recursive mutex,Error# %d\n", retVal);
7842 SDisplay(0, prntBuf);
7847 #endif /* SS_REC_LOCK_SUPPORT */
7850 sprintf(prntBuf, "\n\n ssdUnlockNew(): Invalid lock type %d\n", lockType);
7851 SDisplay(0, prntBuf);
/*
 * ssdDestroyLockNew: destroys the rwlock or recursive mutex stored in
 * lockId, selected by lockType (case labels elided in this view).
 */
7860 * Fun: ssdDestroyLockNew
7862 * Desc: This function is used to destroy the read write lock
7871 S16 ssdDestroyLockNew(SLockInfo *lockId,uint8_t lockType)
7873 Txt prntBuf[PRNTSZE];
7879 #ifdef SS_RDWR_LOCK_SUPPORT
7882 if((retVal = pthread_rwlock_destroy(&(lockId->l.rdWrLockId))) != ROK)
7884 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Failed to destroy the lock,Error# %d\n", retVal);
7885 SDisplay(0, prntBuf);
7890 #endif /* SS_RDWR_LOCK_SUPPORT */
7891 #ifdef SS_REC_LOCK_SUPPORT
/* NOTE(review): != ROK inside the assignment parentheses again — same
 * pattern as the other mutex branches; verify in full source. */
7894 if((retVal = pthread_mutex_destroy(&(lockId->l.recurLock)) != ROK))
7896 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Failed to destroy the mutex,Error# %d\n", retVal);
7897 SDisplay(0, prntBuf);
7902 #endif /* SS_REC_LOCK_SUPPORT */
7905 sprintf(prntBuf, "\n\n ssdDestroyLockNew(): Invalid lock type %d\n", lockType);
7906 SDisplay(0, prntBuf);
7912 #endif /* SS_LOCK_SUPPORT */
7914 /* mt005.301 : Cavium Changes */
7915 #ifdef SS_SEUM_CAVIUM
/*
 * ssInitRcvWork: spawns the detached work-queue receiver thread
 * (workRcvTsk) with a system-scope, fixed-size stack.  The attribute
 * object is destroyed on both the success and failure paths.
 */
7919 * Fun: ssInitRcvWork
7921 * Desc: This is the initializtion function of receive
7925 * RFAILED - failed, general (optional)
7927 * Notes: Function to initialize the work queue packet
7928 * receiving thread. This creates the new thread to
7929 * receive the work and sets the affinity.
7934 S16 ssInitRcvWork(void)
7936 pthread_attr_t attr;
7940 /* set the required attributes */
7941 pthread_attr_init(&attr);
7942 pthread_attr_setstacksize(&attr, (size_t)MT_ISTASK_STACK);
7943 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
/* Detached: the thread's resources are reclaimed automatically; it is
 * never joined. */
7944 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
7946 /* Create a new thread to receive the work queue messages */
7947 if ((pthread_create(&thread, &attr, workRcvTsk, NULLP)) != 0)
7949 pthread_attr_destroy(&attr);
7954 pthread_attr_destroy(&attr);
7958 }/* ssInitRcvWork */
/*
 * workRcvTsk: receiver-thread entry point for Cavium (Octeon) work-queue
 * entries.  Loops forever: polls the POW for work, sleeps briefly when
 * idle, and for SS_CVMX_MBUF_TAG entries converts the received buffer to
 * a local-region message and posts it via SPstTsk.  The WQE is returned
 * to the FPA pool on every path (success and each failure).
 */
7965 * Desc: This is the handler function of receive
7969 * RFAILED - failed, general (optional)
7971 * Notes: The handler function of the work queue receiver task.
7972 * This will be waiting for the work and after receiving
7973 * it, the work will be converted and posted to that entity
7979 static void *workRcvTsk(Ptr ptr)
7982 cvmx_wqe_t *workPtr;
7983 Buffer *mBuf, *rcvdBuf;
7984 SsMsgInfo *minfoPtr;
7993 /* get the work if it's available */
/* Non-blocking poll of the Octeon Packet Order/Work unit. */
7994 workPtr = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
7996 if ( workPtr == NULLP )
/* Idle: back off with a short nanosleep instead of busy-spinning.
 * NOTE(review): comment says 10 usec but tv_nsec is 500000 (500 usec)
 * — confirm intended backoff in full source. */
7998 /* If there is no work then sleep for 10 usec */
8000 ts.tv_nsec = 500000;
8002 nanosleep(&ts, NULLP);
8006 switch(workPtr->tag)
8008 /* Switch over according to the tag value */
8009 case SS_CVMX_MBUF_TAG:
8011 rcvdBuf = (Buffer*)workPtr->packet_ptr.ptr;
8013 /* Convert the physical address to Pointers */
8014 ret = SConvPhyPtr(&rcvdBuf);
/* Conversion failed: recycle the WQE and drop the work. */
8017 /* mt011.301: Cavium 32 bit changes */
8018 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
8022 /* Copy the buffer to this region */
8023 ret = SCpyFpaMsg(rcvdBuf, SS_DFLT_REGION, SS_DFLT_POOL, &mBuf);
/* Copy failed: recycle the WQE and drop the work. */
8026 /* mt011.301: Cavium 32 bit changes */
8027 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
/* Success path: WQE no longer needed once the message is copied. */
8031 /* mt011.301: Cavium 32 bit changes */
8032 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
8034 minfoPtr = (SsMsgInfo*)mBuf->b_rptr;
8036 /* Get the post structure and Post the message */
8037 if ( minfoPtr != NULLP)
8039 SMemCpy( &pst, &minfoPtr->pst, sizeof(Pst));
8041 (Void)SPstTsk(&pst, mBuf);
8043 /* Free the buffer allocated if it cannot be sent */
8052 /* Invalid tag value, drop the work */
8053 /* mt011.301: Cavium 32 bit changes */
8054 cvmx_fpa_free(workPtr, SS_CVMX_WQE_POOL, 0);
8063 #endif /* SS_SEUM_CAVIUM */
8065 #ifdef TENB_RTLIN_CHANGES
/*
 * SInitLock (TENB_RTLIN variant): initialises a pthread mutex with
 * priority-inheritance protocol (except on RGL builds) to limit
 * priority-inversion on the real-time Linux target.
 * NOTE(review): the #endif for RGL_SPECIFIC_CHANGES and the return of r
 * are elided in this view — confirm in full source.
 */
8066 S16 SInitLock(SLockId *l, uint8_t t)
8069 pthread_mutexattr_t prior;
8070 pthread_mutexattr_init(&prior);
8071 #ifndef RGL_SPECIFIC_CHANGES
8072 pthread_mutexattr_setprotocol(&prior, PTHREAD_PRIO_INHERIT);
8074 r = pthread_mutex_init(l, &prior);
8075 pthread_mutexattr_destroy(&prior);
8079 #ifdef SS_THR_REG_MAP
/*
 * ssRegMainThread: registers the main thread in the thread→memory-region
 * map.  The main thread gets the default region (or, on XEON builds, the
 * index one past the configured regions) since it only allocates during
 * initial configuration.
 */
8082 * Fun: ssRegMainThread
8084 * Desc: This function is used to add the memory region
8085 * mapping for the main thread.
8087 * Ret: VOID (Always successful)
8095 Void ssRegMainThread(Void)
/* The main thread must not already have a region mapping. */
8098 if(SS_INVALID_THREAD_REG_MAP != SS_GET_THREAD_MEM_REGION())
8100 printf("\nnot able to get different Id for main thread\n");
8103 /* Here the default region is added as we dont have any region associated with
8104 * Main thread. The thread should not perform any allocation except
8105 * the initial configuration
8107 #ifdef XEON_SPECIFIC_CHANGES
8108 SS_GET_THREAD_MEM_REGION() = mtMemoCfg.numRegions;
8110 SS_GET_THREAD_MEM_REGION() =
8117 * Fun: ssCheckAndAddMemoryRegionMap
8119 * Desc: This function is used to add the memory region
8120 * mapping for the provided sTsk associated thread.
8121 * If the threadId can be placed in the thread memory
8122 * region mapping table and returns success if it is able
8123 * to place. If not, it keeps the thread ID in the static
8124 * local array and increments the count. Once thread Id
8125 * is successfully placed in the thread memory region mapping
8126 * table, pthread_cancel is sent for all the previous threads
8127 * which are failed to place in table.
8129 * Ret: TRUE - Thread ID successfully placed in thread memory region
8131 * FALSE - If thread Id is not placed in thread memory region
8134 * Notes: thread-to-memory-region mapping table
/*
 * The map is indexed by a hash of the pthread id
 * ((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP);
 * colliding threads are remembered and cancelled once a later thread
 * lands in a free slot, forcing the caller to retry thread creation.
 */
8139 S32 ssCheckAndAddMemoryRegionMap
8141 pthread_t threadId, /* Thread Id of system task */
8142 Region region /* Region associated with thread */
/* Colliding thread ids collected across calls (single-threaded use
 * assumed during task bring-up — statics are unprotected). */
8145 static uint32_t createdThreads;
8146 static pthread_t createdThreadIds[SS_MAX_THREAD_CREATE_RETRY];
8150 /* Here 0xFF is considered as invalid region and if the mapping table
8151 * contains 0xFF, that mapping entry is free
/* Slot occupied: either give up (retry budget exhausted) or remember
 * this thread id for later cancellation. */
8153 if(SS_INVALID_THREAD_REG_MAP !=
8154 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)])
8156 /* Klock work fix ccpu00148484 */
8157 if(!(createdThreads < SS_MAX_THREAD_CREATE_RETRY))
8159 printf("\nfailed in index = %ld\n", ((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP));
8160 printf("\nNot able to get the different thread ID, exiting\n");
8163 createdThreadIds[createdThreads++] = threadId;
8166 /* If we found free mapping table entry, place the region and send pthread_cancel
8167 * for all the thread Ids which are created before this
8169 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)] = region;
8170 #ifdef XEON_SPECIFIC_CHANGES
8171 printf("\nThreadId %ld, Thread Idx %d, Region %d\n", threadId,
8172 ((threadId >> SS_MEM_THREAD_ID_SHIFT) %
8173 SS_MAX_THREAD_REGION_MAP), region);
/* Cancel every earlier thread that failed to claim a slot. */
8175 for(indx = 0; indx < createdThreads; indx++)
8177 #ifdef XEON_SPECIFIC_CHANGES
8178 printf("\nSending pthred Cancel to thread Id %d \n",createdThreadIds[indx]);
8180 pthread_cancel(createdThreadIds[indx]);
8186 } /* ssCheckAndAddMemoryRegionMap */
8190 * Fun: ssCheckAndDelMemoryRegionMap
/*
 * Removes a thread's entry from the thread→memory-region map by writing
 * SS_INVALID_THREAD_REG_MAP into its hashed slot.  (The Desc below was
 * copied from the Add variant — this function only deletes.)
 */
8192 * Desc: This function is used to add the memory region
8193 * mapping for the provided sTsk associated thread.
8194 * If the threadId can be placed in the thread memory
8195 * region mapping table and returns success if it is able
8196 * to place. If not, it keeps the thread ID in the static
8197 * local array and increments the count. Once thread Id
8198 * is successfully placed in the thread memory region mapping
8199 * table, pthread_cancel is sent for all the previous threads
8200 * which are failed to place in table.
8202 * Ret: TRUE - Thread ID successfully placed in thread memory region
8204 * FALSE - If thread Id is not placed in thread memory region
8207 * Notes: thread-to-memory-region mapping table
8212 S32 ssCheckAndDelMemoryRegionMap
8214 pthread_t threadId /* Thread Id of system task */
8219 /* Raghu To-Do Check with team, is it necessary to acquire lock
8220 * as del and add may go parallel */
8221 /* Here 0xFF is considered as invalid region and if the mapping table
8222 * contains 0xFF, that mapping entry is free
/* Deleting an entry that is already free is reported as an error. */
8224 if(SS_INVALID_THREAD_REG_MAP ==
8225 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)])
8228 printf("\nInvalid Thread ID (%ld)\n", (uint32_t)threadId);
8230 printf("\nInvalid Thread ID (%d)\n", (uint32_t)threadId);
8234 /* If we found free mapping table entry, place the region and send pthread_cancel
8235 * for all the thread Ids which are created before this
/* Mark the slot free again. */
8237 osCp.threadMemoryRegionMap[((threadId >> SS_MEM_THREAD_ID_SHIFT) % SS_MAX_THREAD_REGION_MAP)] = SS_INVALID_THREAD_REG_MAP;
8241 } /* ssCheckAndAddMemoryRegionMap */
8245 #ifdef SS_TSKLOG_ENABLE
/* NOTE(review): this region holds the task-log start/stop primitives; the
 * extract elides their signatures, braces and returns, so the annotations
 * below are hedged and should be confirmed against the full file. */
8250 *       Desc: This function will return current time through input parameter.
8253 *              RFAILED - failed, general (optional)
/* Out-parameter that receives the current time tick (written below under
 * MSPD_MLOG_NEW). */
8261 volatile uint32_t *startTime,
8265 #ifdef MSPD_MLOG_NEW
/* Record the current tick into the caller-supplied start-time holder. */
8266 *startTime = GetTIMETICK();
8275 *       Desc: This function will return current time through input parameter.
8276 *              and take the difference of start time provided as input parameter
8280 *              RFAILED - failed, general (optional)
/* Start time previously captured by the companion start primitive. */
8288 volatile uint32_t startTime,
8292 /*uint32_t stopTime;*/
/* Task ids whose elapsed time is logged via MLogTask; presumably these are
 * labels of a switch on taskId — TODO confirm in the full source. */
8295 case PID_MAC_HARQ_IND:
8296 case PID_SCH_TTI_IND:
8298 case PID_MAC_DAT_IND:
8299 case PID_MAC_SF_ALLOC_REQ:
8300 case PID_MAC_STA_RSP:
8301 case PID_MAC_DL_SCHD:
8302 case PID_MAC_DL_CQI_IND:
8303 case PID_MAC_UL_CQI_IND:
8304 case PID_MAC_UL_SCHD:
8305 case PID_MAC_TTI_IND:
8306 case PID_CL_RCV_PHY_MSG:
8307 case PID_CL_HARQ_STA_IND:
8308 case PID_MAC_AM_HARQ_RLS:
8309 case PID_CL_DL_BATCH_PROC:
8310 case PID_CL_DLM_PRC_TTI_IND:
8311 case PID_CRC_IND_REAL:
8312 case PID_CRC_IND_DUMMY:
8313 case PID_TTI_LATENCY:
8314 case PID_RECPREQ_PROC:
/* Emit the measured interval (startTime .. current tick) for this task. */
8317 MLogTask(0, taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8319 MLogTask(taskId, RESOURCE_LARM, startTime, GetTIMETICK());
8322 MLogTask(taskId, RESOURCE_LARM, startTime, GetTIMETICK());
/* Parameter lists of the stub variants compiled when task logging is
 * disabled — TODO confirm against the full file. */
8331 volatile uint32_t * startTime,
8341 volatile uint32_t startTime,
8348 #endif /*#ifdef SS_TSKLOG_ENABLE */
8349 #ifdef TENB_T2K3K_SPECIFIC_CHANGES
8351 * This primitive is used to calculate the CPU Utilization per Core
8356 * @return Void - function is always success
8358 Void UpdateSocCpuInfo
/* Computes CPU utilization for the selected SoC cluster (L2 or L3, chosen
 * by idx) by reading the "free MIPS" figure from MIPS_FILE and folding
 * (100 - free) into cpuInfo's running total / max / sample count.
 * NOTE(review): braces and early-return/cleanup lines are elided in this
 * extract; confirm the fopen-failure path and the fclose of mipsFd against
 * the full file. */
8360 CmCpuStatsInfo *cpuInfo,
8365 S8 mipsStr[MIPS_STRING_LEN];
8372 /* Open the file which holds the MIPS available value */
8373 mipsFd = fopen(MIPS_FILE, "r");
8380 /* Get the free mips available value from the file */
8381 if(NULLP == fgets(mipsStr, 24, mipsFd))
8383 printf("\nfgets to get the free mips available failed\n");
/* Tokenize the line: skip the first field, then take the next token as the
 * free-CPU percentage. */
8388 strtok(mipsStr, " ")
8390 strPart = strtok(NULLP, " ");
8392 if(idx == CM_L2_CPU_UTIL)
8394 if(strPart != NULLP)
/* L2 cluster: used = 100 - free; update total, peak and sample count. */
8396 l2FreeCpu = atoi(strPart);
8397 l2CpuUsed = 100 - l2FreeCpu;
8398 cpuInfo->cpuUtil[0].totCpuUtil += l2CpuUsed;
8399 cpuInfo->cpuUtil[0].maxCpuUtil = GET_CPU_MAX((cpuInfo->cpuUtil[0].maxCpuUtil), l2CpuUsed);
8400 cpuInfo->cpuUtil[0].numSamples++;
8403 if(idx == CM_L3_CPU_UTIL)
/* L3 cluster: its free value is the following token on the same line. */
8405 strPart = strtok(NULLP, " ");
8406 if(strPart != NULLP)
8408 l3FreeCpu = atoi(strPart);
8409 l3CpuUsed = 100 - l3FreeCpu;
8410 cpuInfo->cpuUtil[0].totCpuUtil += l3CpuUsed;
8411 cpuInfo->cpuUtil[0].maxCpuUtil = GET_CPU_MAX((cpuInfo->cpuUtil[0].maxCpuUtil), l3CpuUsed);
8412 cpuInfo->cpuUtil[0].numSamples++;
/* Record the core count for the selected cluster. */
8415 if(idx == CM_L2_CPU_UTIL)
8417 cpuInfo->numCores = CM_NUM_L2_CORES ;
8419 else if(idx == CM_L3_CPU_UTIL)
8421 cpuInfo->numCores = CM_NUM_L3_CORES ;
8427 #endif /* TENB_T2K3K_SPECIFIC_CHANGES */
8428 #ifdef SS_MULTICORE_SUPPORT
8431 * Fun: Add Timer thread into system task table
8433 * Desc: This function is used to add the system task
8434 * associated with Timer thread.
8443 static SsSTskEntry* ssdReAddTmrSTsk(
/* Re-initializes the timer thread's entry in the system task table:
 * destroys the old lock and demand queue, re-creates both, and refreshes
 * the entry's id/priority — all under the system task table lock.
 * NOTE(review): the parameter list, braces, returns and some declarations
 * are elided in this extract; annotations are based only on the statements
 * visible here. */
8451 /* lock the system task table */
8452 ret = SLock(&osCp.sTskTblLock);
8456 #if (ERRCLASS & ERRCLS_DEBUG)
8457 MTLOGERROR(ERRCLS_DEBUG, EMT039, (ErrVal) ret,
8458 "Could not lock system task table");
8464 /* initialize the system task entry with the information we have */
8465 sTsk = &osCp.sTskTbl[idx];
/* Tear down the previous incarnation of this entry before re-init. */
8470 SDestroyLock(&sTsk->lock);
8471 ssDestroyDmndQ(&sTsk->dQ);
8474 /* store the system task priority */
8475 sTsk->tskPrior = SS_NORM_TSK_PRI;
8477 /* initialize the demand queue */
8478 if (ssInitDmndQ(&sTsk->dQ) != ROK)
/* On failure, release the table lock before bailing out. */
8481 if ( SUnlock(&osCp.sTskTblLock) != ROK)
8483 #if (ERRCLASS & ERRCLS_DEBUG)
8484 MTLOGERROR(ERRCLS_DEBUG, EMT042, ERRZERO,
8485 "Could not give the Semaphore");
8490 #if (ERRCLASS & ERRCLS_DEBUG)
8491 MTLOGERROR(ERRCLS_DEBUG, EMT043, (ErrVal) ret,
8492 "Could not initialize demand queue");
8498 /* initialize the system task entry lock */
8499 if (SInitLock(&sTsk->lock, SS_STSKENTRY_LOCK) != ROK)
/* Undo the demand-queue init and release the table lock on failure. */
8501 ssDestroyDmndQ(&sTsk->dQ);
8503 if ( SUnlock(&osCp.sTskTblLock) != ROK)
8505 #if (ERRCLASS & ERRCLS_DEBUG)
8506 MTLOGERROR(ERRCLS_DEBUG, EMT044, ERRZERO,
8507 "Could not give the Semaphore");
8512 #if (ERRCLASS & ERRCLS_DEBUG)
8513 MTLOGERROR(ERRCLS_DEBUG, EMT045, (ErrVal) ret,
8514 "Could not initialize system task entry lock");
8521 /* success, update the table */
8522 sTsk->tskId = idx + 1;
8524 sTsk->termPend = FALSE;
8526 /* unlock the system task table */
8528 if ( SUnlock(&osCp.sTskTblLock) != ROK)
8530 #if (ERRCLASS & ERRCLS_DEBUG)
8531 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
8532 "Could not give the Semaphore");
8539 #endif /* SS_MULTICORE_SUPPORT */
8544 * Fun: Initialize timer table
8546 * Desc: This function initializes MTSS-specific information
8547 * in the timer table.
8556 S16 ssdReInitTmr(void)
8558 pthread_attr_t attr;
8559 struct sched_param param_sched;
8560 #ifndef XEON_SPECIFIC_CHANGES
8563 #ifdef SS_MULTICORE_SUPPORT
8565 #endif /* SS_MULTICORE_SUPPORT */
8566 #ifdef SS_THR_REG_MAP
8567 uint32_t threadCreated = FALSE;
8568 #endif /* SS_THR_REG_MAP */
8571 #ifndef XEON_SPECIFIC_CHANGES
8572 ret = ssCheckAndDelMemoryRegionMap(osCp.dep.tmrHdlrTID);
8575 #if (ERRCLASS & ERRCLS_DEBUG)
8576 MTLOGERROR(ERRCLS_DEBUG, EMT046, ERRZERO,
8577 "Could not give the Semaphore");
8583 osCp.dep.tmrTqCp.tmrLen = SS_MAX_TMRS;
8584 /* mt010.21: addition */
8586 #ifdef SS_MULTICORE_SUPPORT
8587 sTsk = ssdReAddTmrSTsk(0);
8592 #endif /* SS_MULTICORE_SUPPORT */
8593 /* create the timer handler thread */
8595 pthread_attr_init(&attr);
8596 /* mt021.201 - Addition to set stack size */
8597 pthread_attr_setstacksize(&attr, (size_t)MT_TMRTASK_STACK);
8598 pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
8599 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
8600 pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
8601 param_sched.sched_priority = sched_get_priority_max(SCHED_FIFO);
8602 pthread_attr_setschedparam(&attr, ¶m_sched);
8605 #ifdef SS_THR_REG_MAP
8606 /* When the thread is created, we check for the memory mapping table if
8607 * threadId can be placed in thread memory map table. If it is not able to place
8608 * threadId is stored in tmporary array. Once thread is created successful,
8609 * thread_cancel is sent for each thread which are created before. All the
8610 * threads are made to wait on sema which is cancel point for thread.
8612 while(threadCreated == FALSE)
8615 if ((pthread_create(&osCp.dep.tmrHdlrTID, &attr, mtTmrHdlr, NULLP)) != 0)
8617 /* mt020.201 - Addition for destroying thread attribute object attr */
8618 pthread_attr_destroy(&attr);
8623 #ifdef SS_THR_REG_MAP
8624 threadCreated = ssCheckAndAddMemoryRegionMap(osCp.dep.tmrHdlrTID,
8627 #endif /* SS_THR_REG_MAP */
8628 #ifdef SS_MEM_WL_DEBUG
8629 tmpRegTidMap[sTsk->region] = osCp.dep.tmrHdlrTID;
8632 /* mt020.201 - Addition for destroying thread attribute object attr */
8633 pthread_attr_destroy(&attr);
8634 sem_post(&osCp.dep.ssStarted);
8638 /**********************************************************************
8640 **********************************************************************/